code stringlengths 31 1.05M | apis list | extract_api stringlengths 97 1.91M |
|---|---|---|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import numpy as np
import matplotlib.pyplot as plt
from pywt import WaveletPacket2D
import pywt.data
arr = pywt.data.aero()
wp2 = WaveletPacket2D(arr, 'db2', 'symmetric', maxlevel=2)
# Show original figure
plt.imshow(arr, interpolation="nearest", cmap=plt.cm.gray)
path = ['d', 'v', 'h', 'a']
# Show level 1 nodes
fig = plt.figure()
for i, p2 in enumerate(path):
ax = fig.add_subplot(2, 2, i + 1)
ax.imshow(np.sqrt(np.abs(wp2[p2].data)), origin='image',
interpolation="nearest", cmap=plt.cm.gray)
ax.set_title(p2)
# Show level 2 nodes
for p1 in path:
fig = plt.figure()
for i, p2 in enumerate(path):
ax = fig.add_subplot(2, 2, i + 1)
p1p2 = p1 + p2
ax.imshow(np.sqrt(np.abs(wp2[p1p2].data)), origin='image',
interpolation="nearest", cmap=plt.cm.gray)
ax.set_title(p1p2)
fig = plt.figure()
i = 1
for row in wp2.get_level(2, 'freq'):
for node in row:
ax = fig.add_subplot(len(row), len(row), i)
ax.set_title("%s=(%s row, %s col)" % (
(node.path,) + wp2.expand_2d_path(node.path)))
ax.imshow(np.sqrt(np.abs(node.data)), origin='image',
interpolation="nearest", cmap=plt.cm.gray)
i += 1
plt.show()
| [
"matplotlib.pyplot.imshow",
"numpy.abs",
"pywt.WaveletPacket2D",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.show"
] | [((181, 233), 'pywt.WaveletPacket2D', 'WaveletPacket2D', (['arr', '"""db2"""', '"""symmetric"""'], {'maxlevel': '(2)'}), "(arr, 'db2', 'symmetric', maxlevel=2)\n", (196, 233), False, 'from pywt import WaveletPacket2D\n'), ((258, 316), 'matplotlib.pyplot.imshow', 'plt.imshow', (['arr'], {'interpolation': '"""nearest"""', 'cmap': 'plt.cm.gray'}), "(arr, interpolation='nearest', cmap=plt.cm.gray)\n", (268, 316), True, 'import matplotlib.pyplot as plt\n'), ((374, 386), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (384, 386), True, 'import matplotlib.pyplot as plt\n'), ((916, 928), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (926, 928), True, 'import matplotlib.pyplot as plt\n'), ((1299, 1309), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1307, 1309), True, 'import matplotlib.pyplot as plt\n'), ((642, 654), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (652, 654), True, 'import matplotlib.pyplot as plt\n'), ((477, 497), 'numpy.abs', 'np.abs', (['wp2[p2].data'], {}), '(wp2[p2].data)\n', (483, 497), True, 'import numpy as np\n'), ((780, 802), 'numpy.abs', 'np.abs', (['wp2[p1p2].data'], {}), '(wp2[p1p2].data)\n', (786, 802), True, 'import numpy as np\n'), ((1186, 1203), 'numpy.abs', 'np.abs', (['node.data'], {}), '(node.data)\n', (1192, 1203), True, 'import numpy as np\n')] |
"""Utilities for the training module."""
import random
import numpy as np
import torch
__all__ = [
'manual_seed', 'compute_accuracy', 'AverageMeter', 'get_device_order',
'bounds_logits'
]
def manual_seed(value=None, benchmark_otherwise=False):
"""Seeds NumPy, PyTorch, and the builtin random number generators."""
if value is None:
if benchmark_otherwise:
torch.backends.cudnn.benchmark = False
else:
random.seed(value)
np.random.seed(value)
torch.manual_seed(value)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
@torch.no_grad()
def compute_accuracy(output, target, top_k=(1,)):
"""Compute the accuracy over the k top predictions."""
max_k = max(top_k)
batch_size = target.size(0)
_, pred = output.topk(max_k, 1, True, True)
pred = pred.t()
correct = pred.eq(target.view(1, -1).expand_as(pred))
res = []
for k in top_k:
correct_k = correct[:k].view(-1).float().sum(0, keepdim=True)
res.append(correct_k.mul_(100.0 / batch_size))
return res
def get_device_order():
"""Get the cuda devices sorted from highest to lowest total memory."""
return sorted(
range(torch.cuda.device_count()),
key=lambda i: -torch.cuda.get_device_properties(i).total_memory,
)
class AverageMeter:
"""Computes and stores the average and current value."""
def __init__(self, name, fmt=':f'):
"""Initialize an average meter."""
self.name = name
self.fmt = fmt
self.val = self.avg = self.sum = self.count = 0
def reset(self):
"""Reset all the counters."""
self.val = self.avg = self.sum = self.count = 0
def update(self, val, n=1):
"""Update the counters."""
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
def __repr__(self):
"""Nice representation."""
msg = '{name} {val' + self.fmt + '} ({avg' + self.fmt + '})'
return msg.format(**self.__dict__)
def __str__(self):
"""Short representation."""
return f'{{{self.fmt}}}'.format(self.avg)
def bounds_logits(output, offset, target, dim=-1):
"""Compute the output logits for bounds loss."""
target = target.view(-1, 1)
upper_bound = output + offset
lower_bound = output.gather(dim, target) - offset.gather(dim, target)
return upper_bound.scatter(dim, target, lower_bound)
| [
"torch.manual_seed",
"torch.cuda.device_count",
"random.seed",
"numpy.random.seed",
"torch.no_grad",
"torch.cuda.get_device_properties"
] | [((636, 651), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (649, 651), False, 'import torch\n'), ((454, 472), 'random.seed', 'random.seed', (['value'], {}), '(value)\n', (465, 472), False, 'import random\n'), ((481, 502), 'numpy.random.seed', 'np.random.seed', (['value'], {}), '(value)\n', (495, 502), True, 'import numpy as np\n'), ((511, 535), 'torch.manual_seed', 'torch.manual_seed', (['value'], {}), '(value)\n', (528, 535), False, 'import torch\n'), ((1251, 1276), 'torch.cuda.device_count', 'torch.cuda.device_count', ([], {}), '()\n', (1274, 1276), False, 'import torch\n'), ((1302, 1337), 'torch.cuda.get_device_properties', 'torch.cuda.get_device_properties', (['i'], {}), '(i)\n', (1334, 1337), False, 'import torch\n')] |
# -*- coding: utf-8 -*-
from datetime import datetime
import numpy as np
import pytest
from ..categorical import SimpleCategoricalInitiator
from ...models.measurement.categorical import CategoricalMeasurementModel
from ...models.transition.tests.test_categorical import create_categorical, \
create_categorical_matrix
from ...types.detection import CategoricalDetection
from ...types.state import CategoricalState
from ...types.update import CategoricalStateUpdate
@pytest.mark.parametrize(
'measurement_model',
[CategoricalMeasurementModel(ndim_state=3,
emission_matrix=create_categorical_matrix(3, 3),
emission_covariance=0.1 * np.eye(3),
mapping=[0, 1, 2]),
CategoricalMeasurementModel(ndim_state=3,
emission_matrix=create_categorical_matrix(2, 2),
emission_covariance=0.1 * np.eye(3),
mapping=[0, 1]),
CategoricalMeasurementModel(ndim_state=3,
emission_matrix=create_categorical_matrix(2, 2),
emission_covariance=0.1 * np.eye(3),
mapping=[0, 2]),
CategoricalMeasurementModel(ndim_state=3,
emission_matrix=create_categorical_matrix(2, 2),
emission_covariance=0.1 * np.eye(3),
mapping=[2, 0])
],
ids=['[0, 1, 2]', '[0, 1]', '[0, 2]', '[2, 0]'])
def test_categorical_initiator(measurement_model):
now = datetime.now()
# Prior state information
prior_state = CategoricalState([1 / 3, 1 / 3, 1 / 3], category_names=['red', 'green', 'blue'])
ndim_meas = measurement_model.ndim_meas
measurements = [CategoricalDetection(create_categorical(ndim_meas), timestamp=now,
measurement_model=measurement_model),
CategoricalDetection(create_categorical(ndim_meas), timestamp=now)]
initiator = SimpleCategoricalInitiator(prior_state, measurement_model=measurement_model)
tracks = initiator.initiate(measurements, now)
assert len(tracks) == 2
for track in tracks:
assert len(track) == 1
assert isinstance(track.state, CategoricalStateUpdate)
assert set(measurements) == set(track.state.hypothesis.measurement for track in tracks)
| [
"datetime.datetime.now",
"numpy.eye"
] | [((1644, 1658), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (1656, 1658), False, 'from datetime import datetime\n'), ((712, 721), 'numpy.eye', 'np.eye', (['(3)'], {}), '(3)\n', (718, 721), True, 'import numpy as np\n'), ((964, 973), 'numpy.eye', 'np.eye', (['(3)'], {}), '(3)\n', (970, 973), True, 'import numpy as np\n'), ((1213, 1222), 'numpy.eye', 'np.eye', (['(3)'], {}), '(3)\n', (1219, 1222), True, 'import numpy as np\n'), ((1462, 1471), 'numpy.eye', 'np.eye', (['(3)'], {}), '(3)\n', (1468, 1471), True, 'import numpy as np\n')] |
from __future__ import division
import copy
import bt
from bt.core import Node, StrategyBase, SecurityBase, AlgoStack, Strategy
import pandas as pd
import numpy as np
from nose.tools import assert_almost_equal as aae
import sys
if sys.version_info < (3, 3):
import mock
else:
from unittest import mock
def test_node_tree():
c1 = Node('c1')
c2 = Node('c2')
p = Node('p', children=[c1, c2])
c1 = p['c1']
c2 = p['c2']
assert len(p.children) == 2
assert 'c1' in p.children
assert 'c2' in p.children
assert p == c1.parent
assert p == c2.parent
m = Node('m', children=[p])
p = m['p']
c1 = p['c1']
c2 = p['c2']
assert len(m.children) == 1
assert 'p' in m.children
assert p.parent == m
assert len(p.children) == 2
assert 'c1' in p.children
assert 'c2' in p.children
assert p == c1.parent
assert p == c2.parent
def test_strategybase_tree():
s1 = SecurityBase('s1')
s2 = SecurityBase('s2')
s = StrategyBase('p', [s1, s2])
s1 = s['s1']
s2 = s['s2']
assert len(s.children) == 2
assert 's1' in s.children
assert 's2' in s.children
assert s == s1.parent
assert s == s2.parent
def test_node_members():
s1 = SecurityBase('s1')
s2 = SecurityBase('s2')
s = StrategyBase('p', [s1, s2])
s1 = s['s1']
s2 = s['s2']
actual = s.members
assert len(actual) == 3
assert s1 in actual
assert s2 in actual
assert s in actual
actual = s1.members
assert len(actual) == 1
assert s1 in actual
actual = s2.members
assert len(actual) == 1
assert s2 in actual
def test_node_full_name():
s1 = SecurityBase('s1')
s2 = SecurityBase('s2')
s = StrategyBase('p', [s1, s2])
# we cannot access s1 and s2 directly since they are copied
# we must therefore access through s
assert s.full_name == 'p'
assert s['s1'].full_name == 'p>s1'
assert s['s2'].full_name == 'p>s2'
def test_security_setup_prices():
c1 = SecurityBase('c1')
c2 = SecurityBase('c2')
s = StrategyBase('p', [c1, c2])
c1 = s['c1']
c2 = s['c2']
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
data['c1'][dts[0]] = 105
data['c2'][dts[0]] = 95
s.setup(data)
i = 0
s.update(dts[i], data.ix[dts[i]])
assert c1.price == 105
assert len(c1.prices) == 1
assert c1.prices[0] == 105
assert c2.price == 95
assert len(c2.prices) == 1
assert c2.prices[0] == 95
# now with setup
c1 = SecurityBase('c1')
c2 = SecurityBase('c2')
s = StrategyBase('p', [c1, c2])
c1 = s['c1']
c2 = s['c2']
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
data['c1'][dts[0]] = 105
data['c2'][dts[0]] = 95
s.setup(data)
i = 0
s.update(dts[i], data.ix[dts[i]])
assert c1.price == 105
assert len(c1.prices) == 1
assert c1.prices[0] == 105
assert c2.price == 95
assert len(c2.prices) == 1
assert c2.prices[0] == 95
def test_strategybase_tree_setup():
c1 = SecurityBase('c1')
c2 = SecurityBase('c2')
s = StrategyBase('p', [c1, c2])
c1 = s['c1']
c2 = s['c2']
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
data['c1'][dts[1]] = 105
data['c2'][dts[1]] = 95
s.setup(data)
assert len(s.data) == 3
assert len(c1.data) == 3
assert len(c2.data) == 3
assert len(s._prices) == 3
assert len(c1._prices) == 3
assert len(c2._prices) == 3
assert len(s._values) == 3
assert len(c1._values) == 3
assert len(c2._values) == 3
def test_strategybase_tree_adjust():
c1 = SecurityBase('c1')
c2 = SecurityBase('c2')
s = StrategyBase('p', [c1, c2])
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
data['c1'][dts[1]] = 105
data['c2'][dts[1]] = 95
s.setup(data)
s.adjust(1000)
assert s.capital == 1000
assert s.value == 1000
assert c1.value == 0
assert c2.value == 0
assert c1.weight == 0
assert c2.weight == 0
def test_strategybase_tree_update():
c1 = SecurityBase('c1')
c2 = SecurityBase('c2')
s = StrategyBase('p', [c1, c2])
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
data['c1'][dts[1]] = 105
data['c2'][dts[1]] = 95
s.setup(data)
i = 0
s.update(dts[i], data.ix[dts[i]])
c1.price == 100
c2.price == 100
i = 1
s.update(dts[i], data.ix[dts[i]])
c1.price == 105
c2.price == 95
i = 2
s.update(dts[i], data.ix[dts[i]])
c1.price == 100
c2.price == 100
def test_update_fails_if_price_is_nan_and_position_open():
c1 = SecurityBase('c1')
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1'], data=100)
data['c1'][dts[1]] = np.nan
c1.setup(data)
i = 0
# mock in position
c1._position = 100
c1.update(dts[i], data.ix[dts[i]])
# test normal case - position & non-nan price
assert c1._value == 100 * 100
i = 1
# this should fail, because we have non-zero position, and price is nan, so
# bt has no way of updating the _value
try:
c1.update(dts[i], data.ix[dts[i]])
assert False
except Exception as e:
assert str(e).startswith('Position is open')
# on the other hand, if position was 0, this should be fine, and update
# value to 0
c1._position = 0
c1.update(dts[i], data.ix[dts[i]])
assert c1._value == 0
def test_strategybase_tree_allocate():
c1 = SecurityBase('c1')
c2 = SecurityBase('c2')
s = StrategyBase('p', [c1, c2])
c1 = s['c1']
c2 = s['c2']
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
data['c1'][dts[1]] = 105
data['c2'][dts[1]] = 95
s.setup(data)
i = 0
s.update(dts[i], data.ix[dts[i]])
s.adjust(1000)
# since children have w == 0 this should stay in s
s.allocate(1000)
assert s.value == 1000
assert s.capital == 1000
assert c1.value == 0
assert c2.value == 0
# now allocate directly to child
c1.allocate(500)
assert c1.position == 5
assert c1.value == 500
assert s.capital == 1000 - 500
assert s.value == 1000
assert c1.weight == 500.0 / 1000
assert c2.weight == 0
def test_strategybase_tree_allocate_child_from_strategy():
c1 = SecurityBase('c1')
c2 = SecurityBase('c2')
s = StrategyBase('p', [c1, c2])
c1 = s['c1']
c2 = s['c2']
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
data['c1'][dts[1]] = 105
data['c2'][dts[1]] = 95
s.setup(data)
i = 0
s.update(dts[i], data.ix[dts[i]])
s.adjust(1000)
# since children have w == 0 this should stay in s
s.allocate(1000)
assert s.value == 1000
assert s.capital == 1000
assert c1.value == 0
assert c2.value == 0
# now allocate to c1
s.allocate(500, 'c1')
assert c1.position == 5
assert c1.value == 500
assert s.capital == 1000 - 500
assert s.value == 1000
assert c1.weight == 500.0 / 1000
assert c2.weight == 0
def test_strategybase_tree_allocate_level2():
c1 = SecurityBase('c1')
c12 = copy.deepcopy(c1)
c2 = SecurityBase('c2')
c22 = copy.deepcopy(c2)
s1 = StrategyBase('s1', [c1, c2])
s2 = StrategyBase('s2', [c12, c22])
m = StrategyBase('m', [s1, s2])
s1 = m['s1']
s2 = m['s2']
c1 = s1['c1']
c2 = s1['c2']
c12 = s2['c1']
c22 = s2['c2']
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
data['c1'][dts[1]] = 105
data['c2'][dts[1]] = 95
m.setup(data)
i = 0
m.update(dts[i], data.ix[dts[i]])
m.adjust(1000)
# since children have w == 0 this should stay in s
m.allocate(1000)
assert m.value == 1000
assert m.capital == 1000
assert s1.value == 0
assert s2.value == 0
assert c1.value == 0
assert c2.value == 0
# now allocate directly to child
s1.allocate(500)
assert s1.value == 500
assert m.capital == 1000 - 500
assert m.value == 1000
assert s1.weight == 500.0 / 1000
assert s2.weight == 0
# now allocate directly to child of child
c1.allocate(200)
assert s1.value == 500
assert s1.capital == 500 - 200
assert c1.value == 200
assert c1.weight == 200.0 / 500
assert c1.position == 2
assert m.capital == 1000 - 500
assert m.value == 1000
assert s1.weight == 500.0 / 1000
assert s2.weight == 0
assert c12.value == 0
def test_strategybase_tree_allocate_long_short():
c1 = SecurityBase('c1')
c2 = SecurityBase('c2')
s = StrategyBase('p', [c1, c2])
c1 = s['c1']
c2 = s['c2']
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
data['c1'][dts[1]] = 105
data['c2'][dts[1]] = 95
s.setup(data)
i = 0
s.update(dts[i], data.ix[dts[i]])
s.adjust(1000)
c1.allocate(500)
assert c1.position == 5
assert c1.value == 500
assert c1.weight == 500.0 / 1000
assert s.capital == 1000 - 500
assert s.value == 1000
c1.allocate(-200)
assert c1.position == 3
assert c1.value == 300
assert c1.weight == 300.0 / 1000
assert s.capital == 1000 - 500 + 200
assert s.value == 1000
c1.allocate(-400)
assert c1.position == -1
assert c1.value == -100
assert c1.weight == -100.0 / 1000
assert s.capital == 1000 - 500 + 200 + 400
assert s.value == 1000
# close up
c1.allocate(-c1.value)
assert c1.position == 0
assert c1.value == 0
assert c1.weight == 0
assert s.capital == 1000 - 500 + 200 + 400 - 100
assert s.value == 1000
def test_strategybase_tree_allocate_update():
c1 = SecurityBase('c1')
c2 = SecurityBase('c2')
s = StrategyBase('p', [c1, c2])
c1 = s['c1']
c2 = s['c2']
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
data['c1'][dts[1]] = 105
data['c2'][dts[1]] = 95
s.setup(data)
i = 0
s.update(dts[i], data.ix[dts[i]])
assert s.price == 100
s.adjust(1000)
assert s.price == 100
assert s.value == 1000
assert s._value == 1000
c1.allocate(500)
assert c1.position == 5
assert c1.value == 500
assert c1.weight == 500.0 / 1000
assert s.capital == 1000 - 500
assert s.value == 1000
assert s.price == 100
i = 1
s.update(dts[i], data.ix[dts[i]])
assert c1.position == 5
assert c1.value == 525
assert c1.weight == 525.0 / 1025
assert s.capital == 1000 - 500
assert s.value == 1025
assert np.allclose(s.price, 102.5)
def test_strategybase_universe():
s = StrategyBase('s')
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
data['c1'][dts[0]] = 105
data['c2'][dts[0]] = 95
s.setup(data)
i = 0
s.update(dts[i])
assert len(s.universe) == 1
assert 'c1' in s.universe
assert 'c2' in s.universe
assert s.universe['c1'][dts[i]] == 105
assert s.universe['c2'][dts[i]] == 95
# should not have children unless allocated
assert len(s.children) == 0
def test_strategybase_allocate():
s = StrategyBase('s')
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
data['c1'][dts[0]] = 100
data['c2'][dts[0]] = 95
s.setup(data)
i = 0
s.update(dts[i])
s.adjust(1000)
s.allocate(100, 'c1')
c1 = s['c1']
assert c1.position == 1
assert c1.value == 100
assert s.value == 1000
def test_strategybase_close():
s = StrategyBase('s')
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
s.setup(data)
i = 0
s.update(dts[i])
s.adjust(1000)
s.allocate(100, 'c1')
c1 = s['c1']
assert c1.position == 1
assert c1.value == 100
assert s.value == 1000
s.close('c1')
assert c1.position == 0
assert c1.value == 0
assert s.value == 1000
def test_strategybase_flatten():
s = StrategyBase('s')
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
s.setup(data)
i = 0
s.update(dts[i])
s.adjust(1000)
s.allocate(100, 'c1')
c1 = s['c1']
s.allocate(100, 'c2')
c2 = s['c2']
assert c1.position == 1
assert c1.value == 100
assert c2.position == 1
assert c2.value == 100
assert s.value == 1000
s.flatten()
assert c1.position == 0
assert c1.value == 0
assert s.value == 1000
def test_strategybase_multiple_calls():
s = StrategyBase('s')
dts = pd.date_range('2010-01-01', periods=5)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
data.c2[dts[0]] = 95
data.c1[dts[1]] = 95
data.c2[dts[2]] = 95
data.c2[dts[3]] = 95
data.c2[dts[4]] = 95
data.c1[dts[4]] = 105
s.setup(data)
# define strategy logic
def algo(target):
# close out any open positions
target.flatten()
# get stock w/ lowest price
c = target.universe.ix[target.now].idxmin()
# allocate all capital to that stock
target.allocate(target.value, c)
# replace run logic
s.run = algo
# start w/ 1000
s.adjust(1000)
# loop through dates manually
i = 0
# update t0
s.update(dts[i])
assert len(s.children) == 0
assert s.value == 1000
# run t0
s.run(s)
assert len(s.children) == 1
assert s.value == 1000
assert s.capital == 50
c2 = s['c2']
assert c2.value == 950
assert c2.weight == 950.0 / 1000
assert c2.price == 95
# update out t0
s.update(dts[i])
c2 == s['c2']
assert len(s.children) == 1
assert s.value == 1000
assert s.capital == 50
assert c2.value == 950
assert c2.weight == 950.0 / 1000
assert c2.price == 95
# update t1
i = 1
s.update(dts[i])
assert s.value == 1050
assert s.capital == 50
assert len(s.children) == 1
assert 'c2' in s.children
c2 == s['c2']
assert c2.value == 1000
assert c2.weight == 1000.0 / 1050.0
assert c2.price == 100
# run t1 - close out c2, open c1
s.run(s)
assert len(s.children) == 2
assert s.value == 1050
assert s.capital == 5
c1 = s['c1']
assert c1.value == 1045
assert c1.weight == 1045.0 / 1050
assert c1.price == 95
assert c2.value == 0
assert c2.weight == 0
assert c2.price == 100
# update out t1
s.update(dts[i])
assert len(s.children) == 2
assert s.value == 1050
assert s.capital == 5
assert c1 == s['c1']
assert c1.value == 1045
assert c1.weight == 1045.0 / 1050
assert c1.price == 95
assert c2.value == 0
assert c2.weight == 0
assert c2.price == 100
# update t2
i = 2
s.update(dts[i])
assert len(s.children) == 2
assert s.value == 1105
assert s.capital == 5
assert c1.value == 1100
assert c1.weight == 1100.0 / 1105
assert c1.price == 100
assert c2.value == 0
assert c2.weight == 0
assert c2.price == 95
# run t2
s.run(s)
assert len(s.children) == 2
assert s.value == 1105
assert s.capital == 60
assert c1.value == 0
assert c1.weight == 0
assert c1.price == 100
assert c2.value == 1045
assert c2.weight == 1045.0 / 1105
assert c2.price == 95
# update out t2
s.update(dts[i])
assert len(s.children) == 2
assert s.value == 1105
assert s.capital == 60
assert c1.value == 0
assert c1.weight == 0
assert c1.price == 100
assert c2.value == 1045
assert c2.weight == 1045.0 / 1105
assert c2.price == 95
# update t3
i = 3
s.update(dts[i])
assert len(s.children) == 2
assert s.value == 1105
assert s.capital == 60
assert c1.value == 0
assert c1.weight == 0
assert c1.price == 100
assert c2.value == 1045
assert c2.weight == 1045.0 / 1105
assert c2.price == 95
# run t3
s.run(s)
assert len(s.children) == 2
assert s.value == 1105
assert s.capital == 60
assert c1.value == 0
assert c1.weight == 0
assert c1.price == 100
assert c2.value == 1045
assert c2.weight == 1045.0 / 1105
assert c2.price == 95
# update out t3
s.update(dts[i])
assert len(s.children) == 2
assert s.value == 1105
assert s.capital == 60
assert c1.value == 0
assert c1.weight == 0
assert c1.price == 100
assert c2.value == 1045
assert c2.weight == 1045.0 / 1105
assert c2.price == 95
# update t4
i = 4
s.update(dts[i])
assert len(s.children) == 2
assert s.value == 1105
assert s.capital == 60
assert c1.value == 0
assert c1.weight == 0
# accessing price should refresh - this child has been idle for a while -
# must make sure we can still have a fresh prices
assert c1.price == 105
assert len(c1.prices) == 5
assert c2.value == 1045
assert c2.weight == 1045.0 / 1105
assert c2.price == 95
# run t4
s.run(s)
assert len(s.children) == 2
assert s.value == 1105
assert s.capital == 60
assert c1.value == 0
assert c1.weight == 0
assert c1.price == 105
assert c2.value == 1045
assert c2.weight == 1045.0 / 1105
assert c2.price == 95
# update out t4
s.update(dts[i])
assert len(s.children) == 2
assert s.value == 1105
assert s.capital == 60
assert c1.value == 0
assert c1.weight == 0
assert c1.price == 105
assert c2.value == 1045
assert c2.weight == 1045.0 / 1105
assert c2.price == 95
def test_strategybase_multiple_calls_preset_secs():
c1 = SecurityBase('c1')
c2 = SecurityBase('c2')
s = StrategyBase('s', [c1, c2])
c1 = s['c1']
c2 = s['c2']
dts = pd.date_range('2010-01-01', periods=5)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
data.c2[dts[0]] = 95
data.c1[dts[1]] = 95
data.c2[dts[2]] = 95
data.c2[dts[3]] = 95
data.c2[dts[4]] = 95
data.c1[dts[4]] = 105
s.setup(data)
# define strategy logic
def algo(target):
# close out any open positions
target.flatten()
# get stock w/ lowest price
c = target.universe.ix[target.now].idxmin()
# allocate all capital to that stock
target.allocate(target.value, c)
# replace run logic
s.run = algo
# start w/ 1000
s.adjust(1000)
# loop through dates manually
i = 0
# update t0
s.update(dts[i])
assert len(s.children) == 2
assert s.value == 1000
# run t0
s.run(s)
assert len(s.children) == 2
assert s.value == 1000
assert s.capital == 50
assert c2.value == 950
assert c2.weight == 950.0 / 1000
assert c2.price == 95
# update out t0
s.update(dts[i])
assert len(s.children) == 2
assert s.value == 1000
assert s.capital == 50
assert c2.value == 950
assert c2.weight == 950.0 / 1000
assert c2.price == 95
# update t1
i = 1
s.update(dts[i])
assert s.value == 1050
assert s.capital == 50
assert len(s.children) == 2
assert c2.value == 1000
assert c2.weight == 1000.0 / 1050.
assert c2.price == 100
# run t1 - close out c2, open c1
s.run(s)
assert c1.value == 1045
assert c1.weight == 1045.0 / 1050
assert c1.price == 95
assert c2.value == 0
assert c2.weight == 0
assert c2.price == 100
assert len(s.children) == 2
assert s.value == 1050
assert s.capital == 5
# update out t1
s.update(dts[i])
assert len(s.children) == 2
assert s.value == 1050
assert s.capital == 5
assert c1.value == 1045
assert c1.weight == 1045.0 / 1050
assert c1.price == 95
assert c2.value == 0
assert c2.weight == 0
assert c2.price == 100
# update t2
i = 2
s.update(dts[i])
assert len(s.children) == 2
assert s.value == 1105
assert s.capital == 5
assert c1.value == 1100
assert c1.weight == 1100.0 / 1105
assert c1.price == 100
assert c2.value == 0
assert c2.weight == 0
assert c2.price == 95
# run t2
s.run(s)
assert len(s.children) == 2
assert s.value == 1105
assert s.capital == 60
assert c1.value == 0
assert c1.weight == 0
assert c1.price == 100
assert c2.value == 1045
assert c2.weight == 1045.0 / 1105
assert c2.price == 95
# update out t2
s.update(dts[i])
assert len(s.children) == 2
assert s.value == 1105
assert s.capital == 60
assert c1.value == 0
assert c1.weight == 0
assert c1.price == 100
assert c2.value == 1045
assert c2.weight == 1045.0 / 1105
assert c2.price == 95
# update t3
i = 3
s.update(dts[i])
assert len(s.children) == 2
assert s.value == 1105
assert s.capital == 60
assert c1.value == 0
assert c1.weight == 0
assert c1.price == 100
assert c2.value == 1045
assert c2.weight == 1045.0 / 1105
assert c2.price == 95
# run t3
s.run(s)
assert len(s.children) == 2
assert s.value == 1105
assert s.capital == 60
assert c1.value == 0
assert c1.weight == 0
assert c1.price == 100
assert c2.value == 1045
assert c2.weight == 1045.0 / 1105
assert c2.price == 95
# update out t3
s.update(dts[i])
assert len(s.children) == 2
assert s.value == 1105
assert s.capital == 60
assert c1.value == 0
assert c1.weight == 0
assert c1.price == 100
assert c2.value == 1045
assert c2.weight == 1045.0 / 1105
assert c2.price == 95
# update t4
i = 4
s.update(dts[i])
assert len(s.children) == 2
assert s.value == 1105
assert s.capital == 60
assert c1.value == 0
assert c1.weight == 0
# accessing price should refresh - this child has been idle for a while -
# must make sure we can still have a fresh prices
assert c1.price == 105
assert len(c1.prices) == 5
assert c2.value == 1045
assert c2.weight == 1045.0 / 1105
assert c2.price == 95
# run t4
s.run(s)
assert len(s.children) == 2
assert s.value == 1105
assert s.capital == 60
assert c1.value == 0
assert c1.weight == 0
assert c1.price == 105
assert c2.value == 1045
assert c2.weight == 1045.0 / 1105
assert c2.price == 95
# update out t4
s.update(dts[i])
assert len(s.children) == 2
assert s.value == 1105
assert s.capital == 60
assert c1.value == 0
assert c1.weight == 0
assert c1.price == 105
assert c2.value == 1045
assert c2.weight == 1045.0 / 1105
assert c2.price == 95
def test_strategybase_multiple_calls_no_post_update():
s = StrategyBase('s')
s.set_commissions(lambda q, p: 1)
dts = pd.date_range('2010-01-01', periods=5)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
data.c2[dts[0]] = 95
data.c1[dts[1]] = 95
data.c2[dts[2]] = 95
data.c2[dts[3]] = 95
data.c2[dts[4]] = 95
data.c1[dts[4]] = 105
s.setup(data)
# define strategy logic
def algo(target):
# close out any open positions
target.flatten()
# get stock w/ lowest price
c = target.universe.ix[target.now].idxmin()
# allocate all capital to that stock
target.allocate(target.value, c)
# replace run logic
s.run = algo
# start w/ 1000
s.adjust(1000)
# loop through dates manually
i = 0
# update t0
s.update(dts[i])
assert len(s.children) == 0
assert s.value == 1000
# run t0
s.run(s)
assert len(s.children) == 1
assert s.value == 999
assert s.capital == 49
c2 = s['c2']
assert c2.value == 950
assert c2.weight == 950.0 / 999
assert c2.price == 95
# update t1
i = 1
s.update(dts[i])
assert s.value == 1049
assert s.capital == 49
assert len(s.children) == 1
assert 'c2' in s.children
c2 == s['c2']
assert c2.value == 1000
assert c2.weight == 1000.0 / 1049.0
assert c2.price == 100
# run t1 - close out c2, open c1
s.run(s)
assert len(s.children) == 2
assert s.value == 1047
assert s.capital == 2
c1 = s['c1']
assert c1.value == 1045
assert c1.weight == 1045.0 / 1047
assert c1.price == 95
assert c2.value == 0
assert c2.weight == 0
assert c2.price == 100
# update t2
i = 2
s.update(dts[i])
assert len(s.children) == 2
assert s.value == 1102
assert s.capital == 2
assert c1.value == 1100
assert c1.weight == 1100.0 / 1102
assert c1.price == 100
assert c2.value == 0
assert c2.weight == 0
assert c2.price == 95
# run t2
s.run(s)
assert len(s.children) == 2
assert s.value == 1100
assert s.capital == 55
assert c1.value == 0
assert c1.weight == 0
assert c1.price == 100
assert c2.value == 1045
assert c2.weight == 1045.0 / 1100
assert c2.price == 95
# update t3
i = 3
s.update(dts[i])
assert len(s.children) == 2
assert s.value == 1100
assert s.capital == 55
assert c1.value == 0
assert c1.weight == 0
assert c1.price == 100
assert c2.value == 1045
assert c2.weight == 1045.0 / 1100
assert c2.price == 95
# run t3
s.run(s)
assert len(s.children) == 2
assert s.value == 1098
assert s.capital == 53
assert c1.value == 0
assert c1.weight == 0
assert c1.price == 100
assert c2.value == 1045
assert c2.weight == 1045.0 / 1098
assert c2.price == 95
# update t4
i = 4
s.update(dts[i])
assert len(s.children) == 2
assert s.value == 1098
assert s.capital == 53
assert c1.value == 0
assert c1.weight == 0
# accessing price should refresh - this child has been idle for a while -
# must make sure we can still have a fresh prices
assert c1.price == 105
assert len(c1.prices) == 5
assert c2.value == 1045
assert c2.weight == 1045.0 / 1098
assert c2.price == 95
# run t4
s.run(s)
assert len(s.children) == 2
assert s.value == 1096
assert s.capital == 51
assert c1.value == 0
assert c1.weight == 0
assert c1.price == 105
assert c2.value == 1045
assert c2.weight == 1045.0 / 1096
assert c2.price == 95
def test_strategybase_prices():
dts = pd.date_range('2010-01-01', periods=21)
rawd = [13.555, 13.75, 14.16, 13.915, 13.655,
13.765, 14.02, 13.465, 13.32, 14.65,
14.59, 14.175, 13.865, 13.865, 13.89,
13.85, 13.565, 13.47, 13.225, 13.385,
12.89]
data = pd.DataFrame(index=dts, data=rawd, columns=['a'])
s = StrategyBase('s')
s.set_commissions(lambda q, p: 1)
s.setup(data)
# buy 100 shares on day 1 - hold until end
# just enough to buy 100 shares + 1$ commission
s.adjust(1356.50)
s.update(dts[0])
# allocate all capital to child a
# a should be dynamically created and should have
# 100 shares allocated. s.capital should be 0
s.allocate(s.value, 'a')
assert s.capital == 0
assert s.value == 1355.50
assert len(s.children) == 1
aae(s.price, 99.92628, 5)
a = s['a']
assert a.position == 100
assert a.value == 1355.50
assert a.weight == 1
assert a.price == 13.555
assert len(a.prices) == 1
# update through all dates and make sure price is ok
s.update(dts[1])
aae(s.price, 101.3638, 4)
s.update(dts[2])
aae(s.price, 104.3863, 4)
s.update(dts[3])
aae(s.price, 102.5802, 4)
# finish updates and make sure ok at end
for i in range(4, 21):
s.update(dts[i])
assert len(s.prices) == 21
aae(s.prices[-1], 95.02396, 5)
aae(s.prices[-2], 98.67306, 5)
def test_fail_if_root_value_negative():
    """A negative value on the ROOT strategy flags bankruptcy.

    A negative adjustment on a child alone must not trip the flag; only
    when the root's total value goes negative does ``bankrupt`` become
    True on update.
    """
    s = StrategyBase('s')
    dts = pd.date_range('2010-01-01', periods=3)
    data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
    data['c1'][dts[0]] = 100
    data['c2'][dts[0]] = 95
    s.setup(data)
    # negative capital at the root -> bankrupt on next update
    s.adjust(-100)
    # trigger update
    s.update(dts[0])
    assert s.bankrupt
    # make sure only triggered if root negative
    c1 = StrategyBase('c1')
    s = StrategyBase('s', children=[c1])
    c1 = s['c1']
    s.setup(data)
    s.adjust(1000)
    # child goes negative but root total (1000 - 100) stays positive
    c1.adjust(-100)
    s.update(dts[0])
    # now make it trigger
    c1.adjust(-1000)
    # trigger update
    s.update(dts[0])
    assert s.bankrupt
def test_fail_if_0_base_in_return_calc():
    """Updating from a zero value base must raise a helpful error.

    When a node's value hits 0 and then changes, the return calculation
    would divide by zero; the library should raise ZeroDivisionError with
    a message containing 'Could not update'.
    """
    dts = pd.date_range('2010-01-01', periods=3)
    data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
    data['c1'][dts[0]] = 100
    data['c2'][dts[0]] = 95
    # must setup tree because if not negative root error pops up first
    c1 = StrategyBase('c1')
    s = StrategyBase('s', children=[c1])
    c1 = s['c1']
    s.setup(data)
    s.adjust(1000)
    c1.adjust(100)
    s.update(dts[0])
    # drive the child's value to exactly 0
    c1.adjust(-100)
    s.update(dts[1])
    try:
        # any further change from a 0 base must fail loudly
        c1.adjust(-100)
        s.update(dts[1])
        assert False
    except ZeroDivisionError as e:
        if 'Could not update' not in str(e):
            assert False
def test_strategybase_tree_rebalance():
    """Rebalancing a child to a target weight buys the right quantity.

    With a $1 fixed commission, rebalancing c1 to 50% of a 1000 value
    strategy buys 4 shares at 100 (400 + 1 commission = 401 outlay).
    """
    c1 = SecurityBase('c1')
    c2 = SecurityBase('c2')
    s = StrategyBase('p', [c1, c2])
    s.set_commissions(lambda q, p: 1)
    c1 = s['c1']
    c2 = s['c2']
    dts = pd.date_range('2010-01-01', periods=3)
    data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
    data['c1'][dts[1]] = 105
    data['c2'][dts[1]] = 95
    s.setup(data)
    i = 0
    s.update(dts[i], data.ix[dts[i]])
    s.adjust(1000)
    assert s.value == 1000
    assert s.capital == 1000
    assert c1.value == 0
    assert c2.value == 0
    # now rebalance c1
    s.rebalance(0.5, 'c1')
    # 4 shares x 100 = 400; the 5th share would exceed the 500 target
    # once the commission is included
    assert c1.position == 4
    assert c1.value == 400
    assert s.capital == 1000 - 401
    assert s.value == 999
    assert c1.weight == 400.0 / 999
    assert c2.weight == 0
def test_strategybase_tree_decimal_position_rebalance():
    """With integer positions disabled, rebalances hit targets exactly.

    Fractional share quantities mean 42%/58% targets are met to the
    penny, and the two child values sum back to the total capital.
    """
    c1 = SecurityBase('c1')
    c2 = SecurityBase('c2')
    s = StrategyBase('p', [c1, c2])
    # allow fractional share quantities
    s.use_integer_positions(False)
    c1 = s['c1']
    c2 = s['c2']
    dts = pd.date_range('2010-01-01', periods=3)
    data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
    s.setup(data)
    i = 0
    s.update(dts[i], data.ix[dts[i]])
    s.adjust(1000.2)
    s.rebalance(0.42, 'c1')
    s.rebalance(0.58, 'c2')
    aae(c1.value, 420.084)
    aae(c2.value, 580.116)
    aae(c1.value + c2.value, 1000.2)
def test_rebalance_child_not_in_tree():
    """Rebalancing a nonexistent child to 0 is a silent no-op.

    The strategy should not create the child, trade, or change its
    capital/value when asked to flatten a name it does not hold.
    """
    s = StrategyBase('p')
    dts = pd.date_range('2010-01-01', periods=3)
    data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
    data['c1'][dts[1]] = 105
    data['c2'][dts[1]] = 95
    s.setup(data)
    i = 0
    s.update(dts[i])
    s.adjust(1000)
    # rebalance to 0 w/ child that is not present - should ignore
    s.rebalance(0, 'c2')
    assert s.value == 1000
    assert s.capital == 1000
    assert len(s.children) == 0
def test_strategybase_tree_rebalance_to_0():
    """Rebalancing a child to 0 fully closes out its position.

    First rebalances c1 to 50% (no commissions here, so exactly 5 shares
    at 100), then rebalances it back to 0 and checks the capital is
    fully restored.
    """
    c1 = SecurityBase('c1')
    c2 = SecurityBase('c2')
    s = StrategyBase('p', [c1, c2])
    c1 = s['c1']
    c2 = s['c2']
    dts = pd.date_range('2010-01-01', periods=3)
    data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
    data['c1'][dts[1]] = 105
    data['c2'][dts[1]] = 95
    s.setup(data)
    i = 0
    s.update(dts[i], data.ix[dts[i]])
    s.adjust(1000)
    assert s.value == 1000
    assert s.capital == 1000
    assert c1.value == 0
    assert c2.value == 0
    # now rebalance c1
    s.rebalance(0.5, 'c1')
    assert c1.position == 5
    assert c1.value == 500
    assert s.capital == 1000 - 500
    assert s.value == 1000
    assert c1.weight == 500.0 / 1000
    assert c2.weight == 0
    # now rebalance c1
    s.rebalance(0, 'c1')
    assert c1.position == 0
    assert c1.value == 0
    assert s.capital == 1000
    assert s.value == 1000
    assert c1.weight == 0
    assert c2.weight == 0
def test_strategybase_tree_rebalance_level2():
    """Rebalance semantics across a two-level strategy tree.

    Tree: m -> (s1, s2) -> securities. Verifies that rebalancing a child
    strategy moves capital without allocating to grandchildren when they
    have zero weight, that a later rebalance scales the grandchildren
    proportionally, and that rebalancing to 0 closes the whole branch.
    """
    c1 = SecurityBase('c1')
    c12 = copy.deepcopy(c1)
    c2 = SecurityBase('c2')
    c22 = copy.deepcopy(c2)
    s1 = StrategyBase('s1', [c1, c2])
    s2 = StrategyBase('s2', [c12, c22])
    m = StrategyBase('m', [s1, s2])
    # re-fetch node handles: the tree re-parents/copies children on build
    s1 = m['s1']
    s2 = m['s2']
    c1 = s1['c1']
    c2 = s1['c2']
    c12 = s2['c1']
    c22 = s2['c2']
    dts = pd.date_range('2010-01-01', periods=3)
    data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
    data['c1'][dts[1]] = 105
    data['c2'][dts[1]] = 95
    m.setup(data)
    i = 0
    m.update(dts[i], data.ix[dts[i]])
    m.adjust(1000)
    assert m.value == 1000
    assert m.capital == 1000
    assert s1.value == 0
    assert s2.value == 0
    assert c1.value == 0
    assert c2.value == 0
    # now rebalance child s1 - since its children are 0, no waterfall alloc
    m.rebalance(0.5, 's1')
    assert s1.value == 500
    assert m.capital == 1000 - 500
    assert m.value == 1000
    assert s1.weight == 500.0 / 1000
    assert s2.weight == 0
    # now allocate directly to child of child
    s1.rebalance(0.4, 'c1')
    assert s1.value == 500
    assert s1.capital == 500 - 200
    assert c1.value == 200
    assert c1.weight == 200.0 / 500
    assert c1.position == 2
    assert m.capital == 1000 - 500
    assert m.value == 1000
    assert s1.weight == 500.0 / 1000
    assert s2.weight == 0
    # s2's copy of c1 must be unaffected by s1's trade
    assert c12.value == 0
    # now rebalance child s1 again and make sure c1 also gets proportional
    # increase
    m.rebalance(0.8, 's1')
    assert s1.value == 800
    aae(m.capital, 200, 1)
    assert m.value == 1000
    assert s1.weight == 800 / 1000
    assert s2.weight == 0
    assert c1.value == 300.0
    assert c1.weight == 300.0 / 800
    assert c1.position == 3
    # now rebalance child s1 to 0 - should close out s1 and c1 as well
    m.rebalance(0, 's1')
    assert s1.value == 0
    assert m.capital == 1000
    assert m.value == 1000
    assert s1.weight == 0
    assert s2.weight == 0
    assert c1.weight == 0
def test_strategybase_tree_rebalance_base():
    """The ``base`` argument pins rebalance targets to a fixed value.

    Without ``base``, each rebalance uses the strategy's CURRENT value,
    so two successive 50% rebalances size against different totals
    (commissions shrink the value between calls). With a fixed ``base``,
    both rebalances size against the same reference value.
    """
    c1 = SecurityBase('c1')
    c2 = SecurityBase('c2')
    s = StrategyBase('p', [c1, c2])
    s.set_commissions(lambda q, p: 1)
    c1 = s['c1']
    c2 = s['c2']
    dts = pd.date_range('2010-01-01', periods=3)
    data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
    data['c1'][dts[1]] = 105
    data['c2'][dts[1]] = 95
    s.setup(data)
    i = 0
    s.update(dts[i], data.ix[dts[i]])
    s.adjust(1000)
    assert s.value == 1000
    assert s.capital == 1000
    assert c1.value == 0
    assert c2.value == 0
    # check that 2 rebalances of equal weight lead to two different allocs
    # since value changes after first call
    s.rebalance(0.5, 'c1')
    assert c1.position == 4
    assert c1.value == 400
    assert s.capital == 1000 - 401
    assert s.value == 999
    assert c1.weight == 400.0 / 999
    assert c2.weight == 0
    s.rebalance(0.5, 'c2')
    assert c2.position == 4
    assert c2.value == 400
    assert s.capital == 1000 - 401 - 401
    assert s.value == 998
    assert c2.weight == 400.0 / 998
    assert c1.weight == 400.0 / 998
    # close out everything
    s.flatten()
    # adjust to get back to 1000
    s.adjust(4)
    assert s.value == 1000
    assert s.capital == 1000
    assert c1.value == 0
    assert c2.value == 0
    # now rebalance but set fixed base
    base = s.value
    s.rebalance(0.5, 'c1', base=base)
    assert c1.position == 4
    assert c1.value == 400
    assert s.capital == 1000 - 401
    assert s.value == 999
    assert c1.weight == 400.0 / 999
    assert c2.weight == 0
    s.rebalance(0.5, 'c2', base=base)
    assert c2.position == 4
    assert c2.value == 400
    assert s.capital == 1000 - 401 - 401
    assert s.value == 998
    assert c2.weight == 400.0 / 998
    assert c1.weight == 400.0 / 998
def test_algo_stack():
    """AlgoStack short-circuits at the first algo that returns False,
    except that algos carrying a ``run_always`` attribute execute even
    after an earlier failure."""

    def make_algos(always_run_last):
        """Build (pass, fail, pass) mocks; optionally keep run_always on
        the last one (deleting the attribute makes hasattr() False)."""
        passing = mock.MagicMock(return_value=True)
        failing = mock.MagicMock(return_value=False)
        trailing = mock.MagicMock(return_value=True)
        del passing.run_always
        del failing.run_always
        if not always_run_last:
            del trailing.run_always
        return passing, failing, trailing

    # no run_always for now: the stack stops at the failing algo
    a1, a2, a3 = make_algos(always_run_last=False)
    stack = AlgoStack(a1, a2, a3)
    target = mock.MagicMock()
    assert not stack(target)
    assert a1.called
    assert a2.called
    assert not a3.called

    # now test that run_always marked algos are run despite the failure
    a1, a2, a3 = make_algos(always_run_last=True)
    stack = AlgoStack(a1, a2, a3)
    target = mock.MagicMock()
    assert not stack(target)
    assert a1.called
    assert a2.called
    assert a3.called
def test_set_commissions():
    """set_commissions can be swapped at runtime and applies per trade.

    With a flat $1 commission, buying 500 leaves 1000 - 400 - 1 = 599;
    after switching to zero commission, selling 400 restores to 999.
    """
    s = StrategyBase('s')
    dts = pd.date_range('2010-01-01', periods=3)
    data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
    s.set_commissions(lambda x, y: 1.0)
    s.setup(data)
    s.update(dts[0])
    s.adjust(1000)
    s.allocate(500, 'c1')
    assert s.capital == 599
    # switch to a free commission model mid-stream
    s.set_commissions(lambda x, y: 0.0)
    s.allocate(-400, 'c1')
    assert s.capital == 999
def test_strategy_tree_proper_return_calcs():
    """Allocations within the tree must not move the master's price.

    Shifting capital between the master and its child strategies (and on
    to grandchildren) is an internal transfer: total value stays 1000
    and every node's price stays at the 100 base.

    Bug fix: the original used ``data.loc['c1', dts[1]] = 105`` —
    ``.loc`` is row-label first, and the rows are dates, so this
    silently *enlarged* the frame with a bogus 'c1' row and a dts[1]
    column instead of setting the dts[1] prices. Rewritten in the same
    column-first style the sibling tests use.
    """
    s1 = StrategyBase('s1')
    s2 = StrategyBase('s2')
    m = StrategyBase('m', [s1, s2])
    s1 = m['s1']
    s2 = m['s2']
    dts = pd.date_range('2010-01-01', periods=3)
    data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
    # set the day-2 prices (column-first, matching the other tests)
    data['c1'][dts[1]] = 105
    data['c2'][dts[1]] = 95
    m.setup(data)
    i = 0
    m.update(dts[i], data.ix[dts[i]])
    m.adjust(1000)
    # since children have w == 0 this should stay in s
    m.allocate(1000)
    assert m.value == 1000
    assert m.capital == 1000
    assert m.price == 100
    assert s1.value == 0
    assert s2.value == 0
    # now allocate directly to child
    s1.allocate(500)
    assert m.capital == 500
    assert m.value == 1000
    assert m.price == 100
    assert s1.value == 500
    assert s1.weight == 500.0 / 1000
    assert s1.price == 100
    assert s2.weight == 0
    # allocate to child2 via master method
    m.allocate(500, 's2')
    assert m.capital == 0
    assert m.value == 1000
    assert m.price == 100
    assert s1.value == 500
    assert s1.weight == 500.0 / 1000
    assert s1.price == 100
    assert s2.value == 500
    assert s2.weight == 500.0 / 1000
    assert s2.price == 100
    # now allocate and incur commission fee
    s1.allocate(500, 'c1')
    assert m.capital == 0
    assert m.value == 1000
    assert m.price == 100
    assert s1.value == 500
    assert s1.weight == 500.0 / 1000
    assert s1.price == 100
    assert s2.value == 500
    assert s2.weight == 500.0 / 1000.0
    assert s2.price == 100
def test_strategy_tree_proper_universes():
    """Each strategy node sees only its own declared universe.

    The master trades ('c1', 'a') while the child strategy trades
    ('b', 'c'); setup should filter the shared data frame accordingly.
    """
    def do_nothing(x):
        return True
    # child's universe is b/c; master's is the child node itself plus 'a'
    child1 = Strategy('c1', [do_nothing], ['b', 'c'])
    master = Strategy('m', [do_nothing], [child1, 'a'])
    child1 = master['c1']
    dts = pd.date_range('2010-01-01', periods=3)
    data = pd.DataFrame(
        {'a': pd.Series(data=1, index=dts, name='a'),
         'b': pd.Series(data=2, index=dts, name='b'),
         'c': pd.Series(data=3, index=dts, name='c')})
    master.setup(data)
    assert len(master.children) == 2
    assert 'c1' in master.children
    assert 'a' in master.children
    assert len(master._universe.columns) == 2
    assert 'c1' in master._universe.columns
    assert 'a' in master._universe.columns
    assert len(child1._universe.columns) == 2
    assert 'b' in child1._universe.columns
    assert 'c' in child1._universe.columns
def test_strategy_tree_paper():
    """Paper-trading child strategies track price without holding value.

    The child s paper-trades (never receives capital), so its value
    stays 0 while its price follows what the strategy WOULD have earned:
    the SelectWhere signal fires at 101, so price moves with 102/101.
    """
    dts = pd.date_range('2010-01-01', periods=3)
    data = pd.DataFrame(index=dts, columns=['a'], data=100.)
    data['a'].ix[dts[1]] = 101
    data['a'].ix[dts[2]] = 102
    s = Strategy('s',
                 [bt.algos.SelectWhere(data > 100),
                  bt.algos.WeighEqually(),
                  bt.algos.Rebalance()])
    m = Strategy('m', [], [s])
    s = m['s']
    m.setup(data)
    m.update(dts[0])
    m.run()
    assert m.price == 100
    assert s.price == 100
    # s holds no capital, so it should be in paper-trade mode
    assert s._paper_trade
    assert s._paper.price == 100
    s.update(dts[1])
    m.run()
    assert m.price == 100
    assert m.value == 0
    assert s.value == 0
    assert s.price == 100
    s.update(dts[2])
    m.run()
    assert m.price == 100
    assert m.value == 0
    assert s.value == 0
    # paper price reflects the hypothetical 101 -> 102 return
    assert np.allclose(s.price, 100. * (102 / 101.))
def test_outlays():
    """The per-security 'outlay' data series records signed trade cost.

    Buys record positive outlay (quantity * price); sells record
    negative outlay. Cleanup: removed a commented-out debug print.
    """
    c1 = SecurityBase('c1')
    c2 = SecurityBase('c2')
    s = StrategyBase('p', [c1, c2])
    c1 = s['c1']
    c2 = s['c2']
    dts = pd.date_range('2010-01-01', periods=3)
    data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
    data['c1'][dts[0]] = 105
    data['c2'][dts[0]] = 95
    s.setup(data)
    i = 0
    s.update(dts[i], data.ix[dts[i]])
    # allocate 1000 to strategy
    s.adjust(1000)
    # now let's see what happens when we allocate 500 to each child
    c1.allocate(500)
    c2.allocate(500)
    # out update
    s.update(dts[i])
    # 500 buys 4 shares at 105 and 5 shares at 95 respectively
    assert c1.data['outlay'][dts[0]] == (4 * 105)
    assert c2.data['outlay'][dts[0]] == (5 * 95)
    i = 1
    s.update(dts[i], data.ix[dts[i]])
    c1.allocate(-400)
    c2.allocate(100)
    # out update
    s.update(dts[i])
    # sells show up as negative outlay at the day-2 price of 100
    assert c1.data['outlay'][dts[1]] == (-4 * 100)
    assert c2.data['outlay'][dts[1]] == 100
def test_child_weight_above_1():
    """A fully-allocated child's weight must never exceed 1.

    Allocating the strategy's entire capital to one child should produce
    a weight <= 1 regardless of prices.
    """
    # check for child weights not exceeding 1
    s = StrategyBase('s')
    dts = pd.date_range('2010-01-01', periods=3)
    # NOTE(review): unseeded RNG — prices vary per run, but the invariant
    # under test (weight <= 1) should hold for any price level near 100
    data = pd.DataFrame(np.random.randn(3, 2) + 100,
                        index=dts, columns=['c1', 'c2'])
    s.setup(data)
    i = 0
    s.update(dts[i])
    s.adjust(1e6)
    s.allocate(1e6, 'c1')
    c1 = s['c1']
    assert c1.weight <= 1
def test_fixed_commissions():
    """Fixed per-trade commissions shape quantities on both sides.

    With a $1 flat fee: allocations round DOWN so total outlay (price *
    qty + fee) never exceeds the allocated amount, full closes always go
    through, and raises (sells/shorts) round UP so at least the requested
    cash is actually raised.
    """
    c1 = SecurityBase('c1')
    c2 = SecurityBase('c2')
    s = StrategyBase('p', [c1, c2])
    # fixed $1 commission per transaction
    s.set_commissions(lambda q, p: 1)
    c1 = s['c1']
    c2 = s['c2']
    dts = pd.date_range('2010-01-01', periods=3)
    data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
    s.setup(data)
    i = 0
    s.update(dts[i], data.ix[dts[i]])
    # allocate 1000 to strategy
    s.adjust(1000)
    # now let's see what happens when we allocate 500 to each child
    c1.allocate(500)
    c2.allocate(500)
    # out update
    s.update(dts[i])
    assert c1.value == 400
    assert c2.value == 400
    assert s.capital == 198
    # de-alloc 100 from c1. This should force c1 to sell 2 units to raise at
    # least 100 (because of commissions)
    c1.allocate(-100)
    s.update(dts[i])
    assert c1.value == 200
    assert s.capital == 198 + 199
    # allocate 100 to c2. This should leave things unchanged, since c2 cannot
    # buy one unit since the commission will cause total outlay to exceed
    # allocation
    c2.allocate(100)
    s.update(dts[i])
    assert c2.value == 400
    assert s.capital == 198 + 199
    # ok try again w/ 101 allocation. This time, it should work
    c2.allocate(101)
    s.update(dts[i])
    assert c2.value == 500
    assert s.capital == 198 + 199 - 101
    # ok now let's close the whole position. Since we are closing, we expect
    # the allocation to go through, even though the outlay > amount
    c2.allocate(-500)
    s.update(dts[i])
    assert c2.value == 0
    assert s.capital == 198 + 199 - 101 + 499
    # now we are going to go short c2
    # we want to 'raise' 100 dollars. Since we need at a minimum 100, but we
    # also have commissions, we will actually short 2 units in order to raise
    # at least 100
    c2.allocate(-100)
    s.update(dts[i])
    assert c2.value == -200
    assert s.capital == 198 + 199 - 101 + 499 + 199
def test_degenerate_shorting():
    """Shorting must fail fast when commissions exceed the share price.

    At $1/share commission against a $0.01 price, every extra short unit
    LOSES money, so the outlay can never approach the requested amount;
    the library should raise rather than loop/short infinitely.
    """
    # can have situation where you short infinitely if commission/share > share
    # price
    c1 = SecurityBase('c1')
    s = StrategyBase('p', [c1])
    # $1/share commission
    s.set_commissions(lambda q, p: abs(q) * 1)
    c1 = s['c1']
    dts = pd.date_range('2010-01-01', periods=3)
    # c1 trades at 0.01
    data = pd.DataFrame(index=dts, columns=['c1'], data=0.01)
    s.setup(data)
    i = 0
    s.update(dts[i], data.ix[dts[i]])
    s.adjust(1000)
    try:
        c1.allocate(-10)
        assert False
    except Exception as e:
        assert 'full_outlay should always be approaching amount' in str(e)
def test_securitybase_allocate():
    """With fractional positions, full outlay equals the allocated amount.

    Regression test for float precision in the allocation loop: the
    requested quantity must not be trimmed when the computed outlay
    matches the amount to machine precision.
    """
    c1 = SecurityBase('c1')
    s = StrategyBase('p', [c1])
    c1 = s['c1']
    dts = pd.date_range('2010-01-01', periods=3)
    data = pd.DataFrame(index=dts, columns=['c1'], data=100.)
    # set the price
    data['c1'][dts[0]] = 91.40246706608193
    s.setup(data)
    i = 0
    s.update(dts[i], data.ix[dts[i]])
    # allocate 100000 to strategy
    original_capital = 100000.
    s.adjust(original_capital)
    # not integer positions
    c1.integer_positions = False
    # set the full_outlay and amount
    # (these two differ only in the last float bit)
    full_outlay = 1999.693706988672
    amount = 1999.6937069886717
    c1.allocate(amount)
    # the results that we want to be true
    assert np.isclose(full_outlay ,amount,rtol=0.)
    # check that the quantity wasn't decreased and the full_outlay == amount
    # we can get the full_outlay that was calculated by
    # original capital - current capital
    assert np.isclose(full_outlay, original_capital - s._capital, rtol=0.)
def test_securitybase_allocate_commisions():
    """Run a long/short backtest with proportional commissions end-to-end.

    Builds a 4-asset price panel over business days, derives long/short
    target weights from daily price direction, and runs a bt.Backtest
    with a 1% of-notional commission model — primarily a smoke test that
    allocation with commissions completes without error.
    """
    date_span = pd.DatetimeIndex(start='10/1/2017', end='10/11/2017', freq='B')
    numper = len(date_span.values)
    # 1% of traded notional
    comms = 0.01
    data = [[10, 15, 20, 25, 30, 35, 40, 45],
            [10, 10, 10, 10, 20, 20, 20, 20],
            [20, 20, 20, 30, 30, 30, 40, 40],
            [20, 10, 20, 10, 20, 10, 20, 10]]
    data = [[row[i] for row in data] for i in range(len(data[0]))]  # Transpose
    price = pd.DataFrame(data=data, index=date_span)
    price.columns = ['a', 'b', 'c', 'd']
    # price = price[['a', 'b']]
    sig1 = pd.DataFrame(price['a'] >= price['b'] + 10, columns=['a'])
    sig2 = pd.DataFrame(price['a'] < price['b'] + 10, columns=['b'])
    signal = sig1.join(sig2)
    # short what went up yesterday, long what went down
    signal1 = price.diff(1) > 0
    signal2 = price.diff(1) < 0
    tw = price.copy()
    tw.loc[:,:] = 0 # Initialize Set everything to 0
    tw[signal1] = -1.0
    tw[signal2] = 1.0
    s1 = bt.Strategy('long_short', [bt.algos.WeighTarget(tw),
                                    bt.algos.RunDaily(),
                                    bt.algos.Rebalance()])
    ####now we create the Backtest , commissions=(lambda q, p: abs(p * q) * comms)
    t = bt.Backtest(s1, price, initial_capital=1000000, commissions=(lambda q, p: abs(p * q) * comms), progress_bar=False)
    ####and let's run it!
    res = bt.run(t)
    ########################
| [
"bt.algos.WeighEqually",
"bt.core.AlgoStack",
"copy.deepcopy",
"pandas.date_range",
"bt.core.Node",
"nose.tools.assert_almost_equal",
"bt.algos.Rebalance",
"bt.core.SecurityBase",
"pandas.DataFrame",
"bt.algos.RunDaily",
"numpy.allclose",
"pandas.DatetimeIndex",
"unittest.mock.MagicMock",
... | [((345, 355), 'bt.core.Node', 'Node', (['"""c1"""'], {}), "('c1')\n", (349, 355), False, 'from bt.core import Node, StrategyBase, SecurityBase, AlgoStack, Strategy\n'), ((365, 375), 'bt.core.Node', 'Node', (['"""c2"""'], {}), "('c2')\n", (369, 375), False, 'from bt.core import Node, StrategyBase, SecurityBase, AlgoStack, Strategy\n'), ((384, 412), 'bt.core.Node', 'Node', (['"""p"""'], {'children': '[c1, c2]'}), "('p', children=[c1, c2])\n", (388, 412), False, 'from bt.core import Node, StrategyBase, SecurityBase, AlgoStack, Strategy\n'), ((602, 625), 'bt.core.Node', 'Node', (['"""m"""'], {'children': '[p]'}), "('m', children=[p])\n", (606, 625), False, 'from bt.core import Node, StrategyBase, SecurityBase, AlgoStack, Strategy\n'), ((947, 965), 'bt.core.SecurityBase', 'SecurityBase', (['"""s1"""'], {}), "('s1')\n", (959, 965), False, 'from bt.core import Node, StrategyBase, SecurityBase, AlgoStack, Strategy\n'), ((975, 993), 'bt.core.SecurityBase', 'SecurityBase', (['"""s2"""'], {}), "('s2')\n", (987, 993), False, 'from bt.core import Node, StrategyBase, SecurityBase, AlgoStack, Strategy\n'), ((1002, 1029), 'bt.core.StrategyBase', 'StrategyBase', (['"""p"""', '[s1, s2]'], {}), "('p', [s1, s2])\n", (1014, 1029), False, 'from bt.core import Node, StrategyBase, SecurityBase, AlgoStack, Strategy\n'), ((1246, 1264), 'bt.core.SecurityBase', 'SecurityBase', (['"""s1"""'], {}), "('s1')\n", (1258, 1264), False, 'from bt.core import Node, StrategyBase, SecurityBase, AlgoStack, Strategy\n'), ((1274, 1292), 'bt.core.SecurityBase', 'SecurityBase', (['"""s2"""'], {}), "('s2')\n", (1286, 1292), False, 'from bt.core import Node, StrategyBase, SecurityBase, AlgoStack, Strategy\n'), ((1301, 1328), 'bt.core.StrategyBase', 'StrategyBase', (['"""p"""', '[s1, s2]'], {}), "('p', [s1, s2])\n", (1313, 1328), False, 'from bt.core import Node, StrategyBase, SecurityBase, AlgoStack, Strategy\n'), ((1679, 1697), 'bt.core.SecurityBase', 'SecurityBase', (['"""s1"""'], {}), "('s1')\n", 
(1691, 1697), False, 'from bt.core import Node, StrategyBase, SecurityBase, AlgoStack, Strategy\n'), ((1707, 1725), 'bt.core.SecurityBase', 'SecurityBase', (['"""s2"""'], {}), "('s2')\n", (1719, 1725), False, 'from bt.core import Node, StrategyBase, SecurityBase, AlgoStack, Strategy\n'), ((1734, 1761), 'bt.core.StrategyBase', 'StrategyBase', (['"""p"""', '[s1, s2]'], {}), "('p', [s1, s2])\n", (1746, 1761), False, 'from bt.core import Node, StrategyBase, SecurityBase, AlgoStack, Strategy\n'), ((2021, 2039), 'bt.core.SecurityBase', 'SecurityBase', (['"""c1"""'], {}), "('c1')\n", (2033, 2039), False, 'from bt.core import Node, StrategyBase, SecurityBase, AlgoStack, Strategy\n'), ((2049, 2067), 'bt.core.SecurityBase', 'SecurityBase', (['"""c2"""'], {}), "('c2')\n", (2061, 2067), False, 'from bt.core import Node, StrategyBase, SecurityBase, AlgoStack, Strategy\n'), ((2076, 2103), 'bt.core.StrategyBase', 'StrategyBase', (['"""p"""', '[c1, c2]'], {}), "('p', [c1, c2])\n", (2088, 2103), False, 'from bt.core import Node, StrategyBase, SecurityBase, AlgoStack, Strategy\n'), ((2150, 2188), 'pandas.date_range', 'pd.date_range', (['"""2010-01-01"""'], {'periods': '(3)'}), "('2010-01-01', periods=3)\n", (2163, 2188), True, 'import pandas as pd\n'), ((2200, 2255), 'pandas.DataFrame', 'pd.DataFrame', ([], {'index': 'dts', 'columns': "['c1', 'c2']", 'data': '(100)'}), "(index=dts, columns=['c1', 'c2'], data=100)\n", (2212, 2255), True, 'import pandas as pd\n'), ((2590, 2608), 'bt.core.SecurityBase', 'SecurityBase', (['"""c1"""'], {}), "('c1')\n", (2602, 2608), False, 'from bt.core import Node, StrategyBase, SecurityBase, AlgoStack, Strategy\n'), ((2618, 2636), 'bt.core.SecurityBase', 'SecurityBase', (['"""c2"""'], {}), "('c2')\n", (2630, 2636), False, 'from bt.core import Node, StrategyBase, SecurityBase, AlgoStack, Strategy\n'), ((2645, 2672), 'bt.core.StrategyBase', 'StrategyBase', (['"""p"""', '[c1, c2]'], {}), "('p', [c1, c2])\n", (2657, 2672), False, 'from bt.core import Node, 
StrategyBase, SecurityBase, AlgoStack, Strategy\n'), ((2718, 2756), 'pandas.date_range', 'pd.date_range', (['"""2010-01-01"""'], {'periods': '(3)'}), "('2010-01-01', periods=3)\n", (2731, 2756), True, 'import pandas as pd\n'), ((2768, 2823), 'pandas.DataFrame', 'pd.DataFrame', ([], {'index': 'dts', 'columns': "['c1', 'c2']", 'data': '(100)'}), "(index=dts, columns=['c1', 'c2'], data=100)\n", (2780, 2823), True, 'import pandas as pd\n'), ((3174, 3192), 'bt.core.SecurityBase', 'SecurityBase', (['"""c1"""'], {}), "('c1')\n", (3186, 3192), False, 'from bt.core import Node, StrategyBase, SecurityBase, AlgoStack, Strategy\n'), ((3202, 3220), 'bt.core.SecurityBase', 'SecurityBase', (['"""c2"""'], {}), "('c2')\n", (3214, 3220), False, 'from bt.core import Node, StrategyBase, SecurityBase, AlgoStack, Strategy\n'), ((3229, 3256), 'bt.core.StrategyBase', 'StrategyBase', (['"""p"""', '[c1, c2]'], {}), "('p', [c1, c2])\n", (3241, 3256), False, 'from bt.core import Node, StrategyBase, SecurityBase, AlgoStack, Strategy\n'), ((3303, 3341), 'pandas.date_range', 'pd.date_range', (['"""2010-01-01"""'], {'periods': '(3)'}), "('2010-01-01', periods=3)\n", (3316, 3341), True, 'import pandas as pd\n'), ((3353, 3408), 'pandas.DataFrame', 'pd.DataFrame', ([], {'index': 'dts', 'columns': "['c1', 'c2']", 'data': '(100)'}), "(index=dts, columns=['c1', 'c2'], data=100)\n", (3365, 3408), True, 'import pandas as pd\n'), ((3812, 3830), 'bt.core.SecurityBase', 'SecurityBase', (['"""c1"""'], {}), "('c1')\n", (3824, 3830), False, 'from bt.core import Node, StrategyBase, SecurityBase, AlgoStack, Strategy\n'), ((3840, 3858), 'bt.core.SecurityBase', 'SecurityBase', (['"""c2"""'], {}), "('c2')\n", (3852, 3858), False, 'from bt.core import Node, StrategyBase, SecurityBase, AlgoStack, Strategy\n'), ((3867, 3894), 'bt.core.StrategyBase', 'StrategyBase', (['"""p"""', '[c1, c2]'], {}), "('p', [c1, c2])\n", (3879, 3894), False, 'from bt.core import Node, StrategyBase, SecurityBase, AlgoStack, Strategy\n'), 
((3906, 3944), 'pandas.date_range', 'pd.date_range', (['"""2010-01-01"""'], {'periods': '(3)'}), "('2010-01-01', periods=3)\n", (3919, 3944), True, 'import pandas as pd\n'), ((3956, 4011), 'pandas.DataFrame', 'pd.DataFrame', ([], {'index': 'dts', 'columns': "['c1', 'c2']", 'data': '(100)'}), "(index=dts, columns=['c1', 'c2'], data=100)\n", (3968, 4011), True, 'import pandas as pd\n'), ((4315, 4333), 'bt.core.SecurityBase', 'SecurityBase', (['"""c1"""'], {}), "('c1')\n", (4327, 4333), False, 'from bt.core import Node, StrategyBase, SecurityBase, AlgoStack, Strategy\n'), ((4343, 4361), 'bt.core.SecurityBase', 'SecurityBase', (['"""c2"""'], {}), "('c2')\n", (4355, 4361), False, 'from bt.core import Node, StrategyBase, SecurityBase, AlgoStack, Strategy\n'), ((4370, 4397), 'bt.core.StrategyBase', 'StrategyBase', (['"""p"""', '[c1, c2]'], {}), "('p', [c1, c2])\n", (4382, 4397), False, 'from bt.core import Node, StrategyBase, SecurityBase, AlgoStack, Strategy\n'), ((4409, 4447), 'pandas.date_range', 'pd.date_range', (['"""2010-01-01"""'], {'periods': '(3)'}), "('2010-01-01', periods=3)\n", (4422, 4447), True, 'import pandas as pd\n'), ((4459, 4514), 'pandas.DataFrame', 'pd.DataFrame', ([], {'index': 'dts', 'columns': "['c1', 'c2']", 'data': '(100)'}), "(index=dts, columns=['c1', 'c2'], data=100)\n", (4471, 4514), True, 'import pandas as pd\n'), ((4930, 4948), 'bt.core.SecurityBase', 'SecurityBase', (['"""c1"""'], {}), "('c1')\n", (4942, 4948), False, 'from bt.core import Node, StrategyBase, SecurityBase, AlgoStack, Strategy\n'), ((4960, 4998), 'pandas.date_range', 'pd.date_range', (['"""2010-01-01"""'], {'periods': '(3)'}), "('2010-01-01', periods=3)\n", (4973, 4998), True, 'import pandas as pd\n'), ((5010, 5059), 'pandas.DataFrame', 'pd.DataFrame', ([], {'index': 'dts', 'columns': "['c1']", 'data': '(100)'}), "(index=dts, columns=['c1'], data=100)\n", (5022, 5059), True, 'import pandas as pd\n'), ((5810, 5828), 'bt.core.SecurityBase', 'SecurityBase', (['"""c1"""'], {}), 
"('c1')\n", (5822, 5828), False, 'from bt.core import Node, StrategyBase, SecurityBase, AlgoStack, Strategy\n'), ((5838, 5856), 'bt.core.SecurityBase', 'SecurityBase', (['"""c2"""'], {}), "('c2')\n", (5850, 5856), False, 'from bt.core import Node, StrategyBase, SecurityBase, AlgoStack, Strategy\n'), ((5865, 5892), 'bt.core.StrategyBase', 'StrategyBase', (['"""p"""', '[c1, c2]'], {}), "('p', [c1, c2])\n", (5877, 5892), False, 'from bt.core import Node, StrategyBase, SecurityBase, AlgoStack, Strategy\n'), ((5939, 5977), 'pandas.date_range', 'pd.date_range', (['"""2010-01-01"""'], {'periods': '(3)'}), "('2010-01-01', periods=3)\n", (5952, 5977), True, 'import pandas as pd\n'), ((5989, 6044), 'pandas.DataFrame', 'pd.DataFrame', ([], {'index': 'dts', 'columns': "['c1', 'c2']", 'data': '(100)'}), "(index=dts, columns=['c1', 'c2'], data=100)\n", (6001, 6044), True, 'import pandas as pd\n'), ((6683, 6701), 'bt.core.SecurityBase', 'SecurityBase', (['"""c1"""'], {}), "('c1')\n", (6695, 6701), False, 'from bt.core import Node, StrategyBase, SecurityBase, AlgoStack, Strategy\n'), ((6711, 6729), 'bt.core.SecurityBase', 'SecurityBase', (['"""c2"""'], {}), "('c2')\n", (6723, 6729), False, 'from bt.core import Node, StrategyBase, SecurityBase, AlgoStack, Strategy\n'), ((6738, 6765), 'bt.core.StrategyBase', 'StrategyBase', (['"""p"""', '[c1, c2]'], {}), "('p', [c1, c2])\n", (6750, 6765), False, 'from bt.core import Node, StrategyBase, SecurityBase, AlgoStack, Strategy\n'), ((6811, 6849), 'pandas.date_range', 'pd.date_range', (['"""2010-01-01"""'], {'periods': '(3)'}), "('2010-01-01', periods=3)\n", (6824, 6849), True, 'import pandas as pd\n'), ((6861, 6916), 'pandas.DataFrame', 'pd.DataFrame', ([], {'index': 'dts', 'columns': "['c1', 'c2']", 'data': '(100)'}), "(index=dts, columns=['c1', 'c2'], data=100)\n", (6873, 6916), True, 'import pandas as pd\n'), ((7535, 7553), 'bt.core.SecurityBase', 'SecurityBase', (['"""c1"""'], {}), "('c1')\n", (7547, 7553), False, 'from bt.core import 
Node, StrategyBase, SecurityBase, AlgoStack, Strategy\n'), ((7564, 7581), 'copy.deepcopy', 'copy.deepcopy', (['c1'], {}), '(c1)\n', (7577, 7581), False, 'import copy\n'), ((7591, 7609), 'bt.core.SecurityBase', 'SecurityBase', (['"""c2"""'], {}), "('c2')\n", (7603, 7609), False, 'from bt.core import Node, StrategyBase, SecurityBase, AlgoStack, Strategy\n'), ((7620, 7637), 'copy.deepcopy', 'copy.deepcopy', (['c2'], {}), '(c2)\n', (7633, 7637), False, 'import copy\n'), ((7647, 7675), 'bt.core.StrategyBase', 'StrategyBase', (['"""s1"""', '[c1, c2]'], {}), "('s1', [c1, c2])\n", (7659, 7675), False, 'from bt.core import Node, StrategyBase, SecurityBase, AlgoStack, Strategy\n'), ((7685, 7715), 'bt.core.StrategyBase', 'StrategyBase', (['"""s2"""', '[c12, c22]'], {}), "('s2', [c12, c22])\n", (7697, 7715), False, 'from bt.core import Node, StrategyBase, SecurityBase, AlgoStack, Strategy\n'), ((7724, 7751), 'bt.core.StrategyBase', 'StrategyBase', (['"""m"""', '[s1, s2]'], {}), "('m', [s1, s2])\n", (7736, 7751), False, 'from bt.core import Node, StrategyBase, SecurityBase, AlgoStack, Strategy\n'), ((7873, 7911), 'pandas.date_range', 'pd.date_range', (['"""2010-01-01"""'], {'periods': '(3)'}), "('2010-01-01', periods=3)\n", (7886, 7911), True, 'import pandas as pd\n'), ((7923, 7978), 'pandas.DataFrame', 'pd.DataFrame', ([], {'index': 'dts', 'columns': "['c1', 'c2']", 'data': '(100)'}), "(index=dts, columns=['c1', 'c2'], data=100)\n", (7935, 7978), True, 'import pandas as pd\n'), ((9005, 9023), 'bt.core.SecurityBase', 'SecurityBase', (['"""c1"""'], {}), "('c1')\n", (9017, 9023), False, 'from bt.core import Node, StrategyBase, SecurityBase, AlgoStack, Strategy\n'), ((9033, 9051), 'bt.core.SecurityBase', 'SecurityBase', (['"""c2"""'], {}), "('c2')\n", (9045, 9051), False, 'from bt.core import Node, StrategyBase, SecurityBase, AlgoStack, Strategy\n'), ((9060, 9087), 'bt.core.StrategyBase', 'StrategyBase', (['"""p"""', '[c1, c2]'], {}), "('p', [c1, c2])\n", (9072, 9087), False, 
'from bt.core import Node, StrategyBase, SecurityBase, AlgoStack, Strategy\n'), ((9134, 9172), 'pandas.date_range', 'pd.date_range', (['"""2010-01-01"""'], {'periods': '(3)'}), "('2010-01-01', periods=3)\n", (9147, 9172), True, 'import pandas as pd\n'), ((9184, 9239), 'pandas.DataFrame', 'pd.DataFrame', ([], {'index': 'dts', 'columns': "['c1', 'c2']", 'data': '(100)'}), "(index=dts, columns=['c1', 'c2'], data=100)\n", (9196, 9239), True, 'import pandas as pd\n'), ((10198, 10216), 'bt.core.SecurityBase', 'SecurityBase', (['"""c1"""'], {}), "('c1')\n", (10210, 10216), False, 'from bt.core import Node, StrategyBase, SecurityBase, AlgoStack, Strategy\n'), ((10226, 10244), 'bt.core.SecurityBase', 'SecurityBase', (['"""c2"""'], {}), "('c2')\n", (10238, 10244), False, 'from bt.core import Node, StrategyBase, SecurityBase, AlgoStack, Strategy\n'), ((10253, 10280), 'bt.core.StrategyBase', 'StrategyBase', (['"""p"""', '[c1, c2]'], {}), "('p', [c1, c2])\n", (10265, 10280), False, 'from bt.core import Node, StrategyBase, SecurityBase, AlgoStack, Strategy\n'), ((10327, 10365), 'pandas.date_range', 'pd.date_range', (['"""2010-01-01"""'], {'periods': '(3)'}), "('2010-01-01', periods=3)\n", (10340, 10365), True, 'import pandas as pd\n'), ((10377, 10432), 'pandas.DataFrame', 'pd.DataFrame', ([], {'index': 'dts', 'columns': "['c1', 'c2']", 'data': '(100)'}), "(index=dts, columns=['c1', 'c2'], data=100)\n", (10389, 10432), True, 'import pandas as pd\n'), ((11104, 11131), 'numpy.allclose', 'np.allclose', (['s.price', '(102.5)'], {}), '(s.price, 102.5)\n', (11115, 11131), True, 'import numpy as np\n'), ((11176, 11193), 'bt.core.StrategyBase', 'StrategyBase', (['"""s"""'], {}), "('s')\n", (11188, 11193), False, 'from bt.core import Node, StrategyBase, SecurityBase, AlgoStack, Strategy\n'), ((11205, 11243), 'pandas.date_range', 'pd.date_range', (['"""2010-01-01"""'], {'periods': '(3)'}), "('2010-01-01', periods=3)\n", (11218, 11243), True, 'import pandas as pd\n'), ((11255, 11310), 
'pandas.DataFrame', 'pd.DataFrame', ([], {'index': 'dts', 'columns': "['c1', 'c2']", 'data': '(100)'}), "(index=dts, columns=['c1', 'c2'], data=100)\n", (11267, 11310), True, 'import pandas as pd\n'), ((11722, 11739), 'bt.core.StrategyBase', 'StrategyBase', (['"""s"""'], {}), "('s')\n", (11734, 11739), False, 'from bt.core import Node, StrategyBase, SecurityBase, AlgoStack, Strategy\n'), ((11751, 11789), 'pandas.date_range', 'pd.date_range', (['"""2010-01-01"""'], {'periods': '(3)'}), "('2010-01-01', periods=3)\n", (11764, 11789), True, 'import pandas as pd\n'), ((11801, 11856), 'pandas.DataFrame', 'pd.DataFrame', ([], {'index': 'dts', 'columns': "['c1', 'c2']", 'data': '(100)'}), "(index=dts, columns=['c1', 'c2'], data=100)\n", (11813, 11856), True, 'import pandas as pd\n'), ((12152, 12169), 'bt.core.StrategyBase', 'StrategyBase', (['"""s"""'], {}), "('s')\n", (12164, 12169), False, 'from bt.core import Node, StrategyBase, SecurityBase, AlgoStack, Strategy\n'), ((12181, 12219), 'pandas.date_range', 'pd.date_range', (['"""2010-01-01"""'], {'periods': '(3)'}), "('2010-01-01', periods=3)\n", (12194, 12219), True, 'import pandas as pd\n'), ((12231, 12286), 'pandas.DataFrame', 'pd.DataFrame', ([], {'index': 'dts', 'columns': "['c1', 'c2']", 'data': '(100)'}), "(index=dts, columns=['c1', 'c2'], data=100)\n", (12243, 12286), True, 'import pandas as pd\n'), ((12627, 12644), 'bt.core.StrategyBase', 'StrategyBase', (['"""s"""'], {}), "('s')\n", (12639, 12644), False, 'from bt.core import Node, StrategyBase, SecurityBase, AlgoStack, Strategy\n'), ((12656, 12694), 'pandas.date_range', 'pd.date_range', (['"""2010-01-01"""'], {'periods': '(3)'}), "('2010-01-01', periods=3)\n", (12669, 12694), True, 'import pandas as pd\n'), ((12706, 12761), 'pandas.DataFrame', 'pd.DataFrame', ([], {'index': 'dts', 'columns': "['c1', 'c2']", 'data': '(100)'}), "(index=dts, columns=['c1', 'c2'], data=100)\n", (12718, 12761), True, 'import pandas as pd\n'), ((13205, 13222), 'bt.core.StrategyBase', 
'StrategyBase', (['"""s"""'], {}), "('s')\n", (13217, 13222), False, 'from bt.core import Node, StrategyBase, SecurityBase, AlgoStack, Strategy\n'), ((13234, 13272), 'pandas.date_range', 'pd.date_range', (['"""2010-01-01"""'], {'periods': '(5)'}), "('2010-01-01', periods=5)\n", (13247, 13272), True, 'import pandas as pd\n'), ((13284, 13339), 'pandas.DataFrame', 'pd.DataFrame', ([], {'index': 'dts', 'columns': "['c1', 'c2']", 'data': '(100)'}), "(index=dts, columns=['c1', 'c2'], data=100)\n", (13296, 13339), True, 'import pandas as pd\n'), ((18324, 18342), 'bt.core.SecurityBase', 'SecurityBase', (['"""c1"""'], {}), "('c1')\n", (18336, 18342), False, 'from bt.core import Node, StrategyBase, SecurityBase, AlgoStack, Strategy\n'), ((18352, 18370), 'bt.core.SecurityBase', 'SecurityBase', (['"""c2"""'], {}), "('c2')\n", (18364, 18370), False, 'from bt.core import Node, StrategyBase, SecurityBase, AlgoStack, Strategy\n'), ((18379, 18406), 'bt.core.StrategyBase', 'StrategyBase', (['"""s"""', '[c1, c2]'], {}), "('s', [c1, c2])\n", (18391, 18406), False, 'from bt.core import Node, StrategyBase, SecurityBase, AlgoStack, Strategy\n'), ((18453, 18491), 'pandas.date_range', 'pd.date_range', (['"""2010-01-01"""'], {'periods': '(5)'}), "('2010-01-01', periods=5)\n", (18466, 18491), True, 'import pandas as pd\n'), ((18503, 18558), 'pandas.DataFrame', 'pd.DataFrame', ([], {'index': 'dts', 'columns': "['c1', 'c2']", 'data': '(100)'}), "(index=dts, columns=['c1', 'c2'], data=100)\n", (18515, 18558), True, 'import pandas as pd\n'), ((23419, 23436), 'bt.core.StrategyBase', 'StrategyBase', (['"""s"""'], {}), "('s')\n", (23431, 23436), False, 'from bt.core import Node, StrategyBase, SecurityBase, AlgoStack, Strategy\n'), ((23486, 23524), 'pandas.date_range', 'pd.date_range', (['"""2010-01-01"""'], {'periods': '(5)'}), "('2010-01-01', periods=5)\n", (23499, 23524), True, 'import pandas as pd\n'), ((23536, 23591), 'pandas.DataFrame', 'pd.DataFrame', ([], {'index': 'dts', 'columns': "['c1', 
'c2']", 'data': '(100)'}), "(index=dts, columns=['c1', 'c2'], data=100)\n", (23548, 23591), True, 'import pandas as pd\n'), ((27089, 27128), 'pandas.date_range', 'pd.date_range', (['"""2010-01-01"""'], {'periods': '(21)'}), "('2010-01-01', periods=21)\n", (27102, 27128), True, 'import pandas as pd\n'), ((27358, 27407), 'pandas.DataFrame', 'pd.DataFrame', ([], {'index': 'dts', 'data': 'rawd', 'columns': "['a']"}), "(index=dts, data=rawd, columns=['a'])\n", (27370, 27407), True, 'import pandas as pd\n'), ((27417, 27434), 'bt.core.StrategyBase', 'StrategyBase', (['"""s"""'], {}), "('s')\n", (27429, 27434), False, 'from bt.core import Node, StrategyBase, SecurityBase, AlgoStack, Strategy\n'), ((27899, 27924), 'nose.tools.assert_almost_equal', 'aae', (['s.price', '(99.92628)', '(5)'], {}), '(s.price, 99.92628, 5)\n', (27902, 27924), True, 'from nose.tools import assert_almost_equal as aae\n'), ((28167, 28192), 'nose.tools.assert_almost_equal', 'aae', (['s.price', '(101.3638)', '(4)'], {}), '(s.price, 101.3638, 4)\n', (28170, 28192), True, 'from nose.tools import assert_almost_equal as aae\n'), ((28219, 28244), 'nose.tools.assert_almost_equal', 'aae', (['s.price', '(104.3863)', '(4)'], {}), '(s.price, 104.3863, 4)\n', (28222, 28244), True, 'from nose.tools import assert_almost_equal as aae\n'), ((28271, 28296), 'nose.tools.assert_almost_equal', 'aae', (['s.price', '(102.5802)', '(4)'], {}), '(s.price, 102.5802, 4)\n', (28274, 28296), True, 'from nose.tools import assert_almost_equal as aae\n'), ((28431, 28461), 'nose.tools.assert_almost_equal', 'aae', (['s.prices[-1]', '(95.02396)', '(5)'], {}), '(s.prices[-1], 95.02396, 5)\n', (28434, 28461), True, 'from nose.tools import assert_almost_equal as aae\n'), ((28466, 28496), 'nose.tools.assert_almost_equal', 'aae', (['s.prices[-2]', '(98.67306)', '(5)'], {}), '(s.prices[-2], 98.67306, 5)\n', (28469, 28496), True, 'from nose.tools import assert_almost_equal as aae\n'), ((28547, 28564), 'bt.core.StrategyBase', 'StrategyBase', 
(['"""s"""'], {}), "('s')\n", (28559, 28564), False, 'from bt.core import Node, StrategyBase, SecurityBase, AlgoStack, Strategy\n'), ((28575, 28613), 'pandas.date_range', 'pd.date_range', (['"""2010-01-01"""'], {'periods': '(3)'}), "('2010-01-01', periods=3)\n", (28588, 28613), True, 'import pandas as pd\n'), ((28625, 28680), 'pandas.DataFrame', 'pd.DataFrame', ([], {'index': 'dts', 'columns': "['c1', 'c2']", 'data': '(100)'}), "(index=dts, columns=['c1', 'c2'], data=100)\n", (28637, 28680), True, 'import pandas as pd\n'), ((28899, 28917), 'bt.core.StrategyBase', 'StrategyBase', (['"""c1"""'], {}), "('c1')\n", (28911, 28917), False, 'from bt.core import Node, StrategyBase, SecurityBase, AlgoStack, Strategy\n'), ((28926, 28958), 'bt.core.StrategyBase', 'StrategyBase', (['"""s"""'], {'children': '[c1]'}), "('s', children=[c1])\n", (28938, 28958), False, 'from bt.core import Node, StrategyBase, SecurityBase, AlgoStack, Strategy\n'), ((29223, 29261), 'pandas.date_range', 'pd.date_range', (['"""2010-01-01"""'], {'periods': '(3)'}), "('2010-01-01', periods=3)\n", (29236, 29261), True, 'import pandas as pd\n'), ((29273, 29328), 'pandas.DataFrame', 'pd.DataFrame', ([], {'index': 'dts', 'columns': "['c1', 'c2']", 'data': '(100)'}), "(index=dts, columns=['c1', 'c2'], data=100)\n", (29285, 29328), True, 'import pandas as pd\n'), ((29467, 29485), 'bt.core.StrategyBase', 'StrategyBase', (['"""c1"""'], {}), "('c1')\n", (29479, 29485), False, 'from bt.core import Node, StrategyBase, SecurityBase, AlgoStack, Strategy\n'), ((29494, 29526), 'bt.core.StrategyBase', 'StrategyBase', (['"""s"""'], {'children': '[c1]'}), "('s', children=[c1])\n", (29506, 29526), False, 'from bt.core import Node, StrategyBase, SecurityBase, AlgoStack, Strategy\n'), ((29900, 29918), 'bt.core.SecurityBase', 'SecurityBase', (['"""c1"""'], {}), "('c1')\n", (29912, 29918), False, 'from bt.core import Node, StrategyBase, SecurityBase, AlgoStack, Strategy\n'), ((29928, 29946), 'bt.core.SecurityBase', 
'SecurityBase', (['"""c2"""'], {}), "('c2')\n", (29940, 29946), False, 'from bt.core import Node, StrategyBase, SecurityBase, AlgoStack, Strategy\n'), ((29955, 29982), 'bt.core.StrategyBase', 'StrategyBase', (['"""p"""', '[c1, c2]'], {}), "('p', [c1, c2])\n", (29967, 29982), False, 'from bt.core import Node, StrategyBase, SecurityBase, AlgoStack, Strategy\n'), ((30067, 30105), 'pandas.date_range', 'pd.date_range', (['"""2010-01-01"""'], {'periods': '(3)'}), "('2010-01-01', periods=3)\n", (30080, 30105), True, 'import pandas as pd\n'), ((30117, 30172), 'pandas.DataFrame', 'pd.DataFrame', ([], {'index': 'dts', 'columns': "['c1', 'c2']", 'data': '(100)'}), "(index=dts, columns=['c1', 'c2'], data=100)\n", (30129, 30172), True, 'import pandas as pd\n'), ((30723, 30741), 'bt.core.SecurityBase', 'SecurityBase', (['"""c1"""'], {}), "('c1')\n", (30735, 30741), False, 'from bt.core import Node, StrategyBase, SecurityBase, AlgoStack, Strategy\n'), ((30751, 30769), 'bt.core.SecurityBase', 'SecurityBase', (['"""c2"""'], {}), "('c2')\n", (30763, 30769), False, 'from bt.core import Node, StrategyBase, SecurityBase, AlgoStack, Strategy\n'), ((30778, 30805), 'bt.core.StrategyBase', 'StrategyBase', (['"""p"""', '[c1, c2]'], {}), "('p', [c1, c2])\n", (30790, 30805), False, 'from bt.core import Node, StrategyBase, SecurityBase, AlgoStack, Strategy\n'), ((30887, 30925), 'pandas.date_range', 'pd.date_range', (['"""2010-01-01"""'], {'periods': '(3)'}), "('2010-01-01', periods=3)\n", (30900, 30925), True, 'import pandas as pd\n'), ((30937, 30992), 'pandas.DataFrame', 'pd.DataFrame', ([], {'index': 'dts', 'columns': "['c1', 'c2']", 'data': '(100)'}), "(index=dts, columns=['c1', 'c2'], data=100)\n", (30949, 30992), True, 'import pandas as pd\n'), ((31144, 31166), 'nose.tools.assert_almost_equal', 'aae', (['c1.value', '(420.084)'], {}), '(c1.value, 420.084)\n', (31147, 31166), True, 'from nose.tools import assert_almost_equal as aae\n'), ((31171, 31193), 'nose.tools.assert_almost_equal', 
'aae', (['c2.value', '(580.116)'], {}), '(c2.value, 580.116)\n', (31174, 31193), True, 'from nose.tools import assert_almost_equal as aae\n'), ((31198, 31230), 'nose.tools.assert_almost_equal', 'aae', (['(c1.value + c2.value)', '(1000.2)'], {}), '(c1.value + c2.value, 1000.2)\n', (31201, 31230), True, 'from nose.tools import assert_almost_equal as aae\n'), ((31281, 31298), 'bt.core.StrategyBase', 'StrategyBase', (['"""p"""'], {}), "('p')\n", (31293, 31298), False, 'from bt.core import Node, StrategyBase, SecurityBase, AlgoStack, Strategy\n'), ((31310, 31348), 'pandas.date_range', 'pd.date_range', (['"""2010-01-01"""'], {'periods': '(3)'}), "('2010-01-01', periods=3)\n", (31323, 31348), True, 'import pandas as pd\n'), ((31360, 31415), 'pandas.DataFrame', 'pd.DataFrame', ([], {'index': 'dts', 'columns': "['c1', 'c2']", 'data': '(100)'}), "(index=dts, columns=['c1', 'c2'], data=100)\n", (31372, 31415), True, 'import pandas as pd\n'), ((31780, 31798), 'bt.core.SecurityBase', 'SecurityBase', (['"""c1"""'], {}), "('c1')\n", (31792, 31798), False, 'from bt.core import Node, StrategyBase, SecurityBase, AlgoStack, Strategy\n'), ((31808, 31826), 'bt.core.SecurityBase', 'SecurityBase', (['"""c2"""'], {}), "('c2')\n", (31820, 31826), False, 'from bt.core import Node, StrategyBase, SecurityBase, AlgoStack, Strategy\n'), ((31835, 31862), 'bt.core.StrategyBase', 'StrategyBase', (['"""p"""', '[c1, c2]'], {}), "('p', [c1, c2])\n", (31847, 31862), False, 'from bt.core import Node, StrategyBase, SecurityBase, AlgoStack, Strategy\n'), ((31909, 31947), 'pandas.date_range', 'pd.date_range', (['"""2010-01-01"""'], {'periods': '(3)'}), "('2010-01-01', periods=3)\n", (31922, 31947), True, 'import pandas as pd\n'), ((31959, 32014), 'pandas.DataFrame', 'pd.DataFrame', ([], {'index': 'dts', 'columns': "['c1', 'c2']", 'data': '(100)'}), "(index=dts, columns=['c1', 'c2'], data=100)\n", (31971, 32014), True, 'import pandas as pd\n'), ((32768, 32786), 'bt.core.SecurityBase', 'SecurityBase', 
(['"""c1"""'], {}), "('c1')\n", (32780, 32786), False, 'from bt.core import Node, StrategyBase, SecurityBase, AlgoStack, Strategy\n'), ((32797, 32814), 'copy.deepcopy', 'copy.deepcopy', (['c1'], {}), '(c1)\n', (32810, 32814), False, 'import copy\n'), ((32824, 32842), 'bt.core.SecurityBase', 'SecurityBase', (['"""c2"""'], {}), "('c2')\n", (32836, 32842), False, 'from bt.core import Node, StrategyBase, SecurityBase, AlgoStack, Strategy\n'), ((32853, 32870), 'copy.deepcopy', 'copy.deepcopy', (['c2'], {}), '(c2)\n', (32866, 32870), False, 'import copy\n'), ((32880, 32908), 'bt.core.StrategyBase', 'StrategyBase', (['"""s1"""', '[c1, c2]'], {}), "('s1', [c1, c2])\n", (32892, 32908), False, 'from bt.core import Node, StrategyBase, SecurityBase, AlgoStack, Strategy\n'), ((32918, 32948), 'bt.core.StrategyBase', 'StrategyBase', (['"""s2"""', '[c12, c22]'], {}), "('s2', [c12, c22])\n", (32930, 32948), False, 'from bt.core import Node, StrategyBase, SecurityBase, AlgoStack, Strategy\n'), ((32957, 32984), 'bt.core.StrategyBase', 'StrategyBase', (['"""m"""', '[s1, s2]'], {}), "('m', [s1, s2])\n", (32969, 32984), False, 'from bt.core import Node, StrategyBase, SecurityBase, AlgoStack, Strategy\n'), ((33107, 33145), 'pandas.date_range', 'pd.date_range', (['"""2010-01-01"""'], {'periods': '(3)'}), "('2010-01-01', periods=3)\n", (33120, 33145), True, 'import pandas as pd\n'), ((33157, 33212), 'pandas.DataFrame', 'pd.DataFrame', ([], {'index': 'dts', 'columns': "['c1', 'c2']", 'data': '(100)'}), "(index=dts, columns=['c1', 'c2'], data=100)\n", (33169, 33212), True, 'import pandas as pd\n'), ((34303, 34325), 'nose.tools.assert_almost_equal', 'aae', (['m.capital', '(200)', '(1)'], {}), '(m.capital, 200, 1)\n', (34306, 34325), True, 'from nose.tools import assert_almost_equal as aae\n'), ((34820, 34838), 'bt.core.SecurityBase', 'SecurityBase', (['"""c1"""'], {}), "('c1')\n", (34832, 34838), False, 'from bt.core import Node, StrategyBase, SecurityBase, AlgoStack, Strategy\n'), ((34848, 
34866), 'bt.core.SecurityBase', 'SecurityBase', (['"""c2"""'], {}), "('c2')\n", (34860, 34866), False, 'from bt.core import Node, StrategyBase, SecurityBase, AlgoStack, Strategy\n'), ((34875, 34902), 'bt.core.StrategyBase', 'StrategyBase', (['"""p"""', '[c1, c2]'], {}), "('p', [c1, c2])\n", (34887, 34902), False, 'from bt.core import Node, StrategyBase, SecurityBase, AlgoStack, Strategy\n'), ((34987, 35025), 'pandas.date_range', 'pd.date_range', (['"""2010-01-01"""'], {'periods': '(3)'}), "('2010-01-01', periods=3)\n", (35000, 35025), True, 'import pandas as pd\n'), ((35037, 35092), 'pandas.DataFrame', 'pd.DataFrame', ([], {'index': 'dts', 'columns': "['c1', 'c2']", 'data': '(100)'}), "(index=dts, columns=['c1', 'c2'], data=100)\n", (35049, 35092), True, 'import pandas as pd\n'), ((36638, 36671), 'unittest.mock.MagicMock', 'mock.MagicMock', ([], {'return_value': '(True)'}), '(return_value=True)\n', (36652, 36671), False, 'from unittest import mock\n'), ((36681, 36715), 'unittest.mock.MagicMock', 'mock.MagicMock', ([], {'return_value': '(False)'}), '(return_value=False)\n', (36695, 36715), False, 'from unittest import mock\n'), ((36725, 36758), 'unittest.mock.MagicMock', 'mock.MagicMock', ([], {'return_value': '(True)'}), '(return_value=True)\n', (36739, 36758), False, 'from unittest import mock\n'), ((36867, 36888), 'bt.core.AlgoStack', 'AlgoStack', (['a1', 'a2', 'a3'], {}), '(a1, a2, a3)\n', (36876, 36888), False, 'from bt.core import Node, StrategyBase, SecurityBase, AlgoStack, Strategy\n'), ((36902, 36918), 'unittest.mock.MagicMock', 'mock.MagicMock', ([], {}), '()\n', (36916, 36918), False, 'from unittest import mock\n'), ((37072, 37105), 'unittest.mock.MagicMock', 'mock.MagicMock', ([], {'return_value': '(True)'}), '(return_value=True)\n', (37086, 37105), False, 'from unittest import mock\n'), ((37115, 37149), 'unittest.mock.MagicMock', 'mock.MagicMock', ([], {'return_value': '(False)'}), '(return_value=False)\n', (37129, 37149), False, 'from unittest import 
mock\n'), ((37159, 37192), 'unittest.mock.MagicMock', 'mock.MagicMock', ([], {'return_value': '(True)'}), '(return_value=True)\n', (37173, 37192), False, 'from unittest import mock\n'), ((37281, 37302), 'bt.core.AlgoStack', 'AlgoStack', (['a1', 'a2', 'a3'], {}), '(a1, a2, a3)\n', (37290, 37302), False, 'from bt.core import Node, StrategyBase, SecurityBase, AlgoStack, Strategy\n'), ((37316, 37332), 'unittest.mock.MagicMock', 'mock.MagicMock', ([], {}), '()\n', (37330, 37332), False, 'from unittest import mock\n'), ((37463, 37480), 'bt.core.StrategyBase', 'StrategyBase', (['"""s"""'], {}), "('s')\n", (37475, 37480), False, 'from bt.core import Node, StrategyBase, SecurityBase, AlgoStack, Strategy\n'), ((37492, 37530), 'pandas.date_range', 'pd.date_range', (['"""2010-01-01"""'], {'periods': '(3)'}), "('2010-01-01', periods=3)\n", (37505, 37530), True, 'import pandas as pd\n'), ((37542, 37597), 'pandas.DataFrame', 'pd.DataFrame', ([], {'index': 'dts', 'columns': "['c1', 'c2']", 'data': '(100)'}), "(index=dts, columns=['c1', 'c2'], data=100)\n", (37554, 37597), True, 'import pandas as pd\n'), ((37905, 37923), 'bt.core.StrategyBase', 'StrategyBase', (['"""s1"""'], {}), "('s1')\n", (37917, 37923), False, 'from bt.core import Node, StrategyBase, SecurityBase, AlgoStack, Strategy\n'), ((37933, 37951), 'bt.core.StrategyBase', 'StrategyBase', (['"""s2"""'], {}), "('s2')\n", (37945, 37951), False, 'from bt.core import Node, StrategyBase, SecurityBase, AlgoStack, Strategy\n'), ((37960, 37987), 'bt.core.StrategyBase', 'StrategyBase', (['"""m"""', '[s1, s2]'], {}), "('m', [s1, s2])\n", (37972, 37987), False, 'from bt.core import Node, StrategyBase, SecurityBase, AlgoStack, Strategy\n'), ((38034, 38072), 'pandas.date_range', 'pd.date_range', (['"""2010-01-01"""'], {'periods': '(3)'}), "('2010-01-01', periods=3)\n", (38047, 38072), True, 'import pandas as pd\n'), ((38084, 38139), 'pandas.DataFrame', 'pd.DataFrame', ([], {'index': 'dts', 'columns': "['c1', 'c2']", 'data': '(100)'}), 
"(index=dts, columns=['c1', 'c2'], data=100)\n", (38096, 38139), True, 'import pandas as pd\n'), ((39530, 39570), 'bt.core.Strategy', 'Strategy', (['"""c1"""', '[do_nothing]', "['b', 'c']"], {}), "('c1', [do_nothing], ['b', 'c'])\n", (39538, 39570), False, 'from bt.core import Node, StrategyBase, SecurityBase, AlgoStack, Strategy\n'), ((39584, 39626), 'bt.core.Strategy', 'Strategy', (['"""m"""', '[do_nothing]', "[child1, 'a']"], {}), "('m', [do_nothing], [child1, 'a'])\n", (39592, 39626), False, 'from bt.core import Node, StrategyBase, SecurityBase, AlgoStack, Strategy\n'), ((39665, 39703), 'pandas.date_range', 'pd.date_range', (['"""2010-01-01"""'], {'periods': '(3)'}), "('2010-01-01', periods=3)\n", (39678, 39703), True, 'import pandas as pd\n'), ((40333, 40371), 'pandas.date_range', 'pd.date_range', (['"""2010-01-01"""'], {'periods': '(3)'}), "('2010-01-01', periods=3)\n", (40346, 40371), True, 'import pandas as pd\n'), ((40383, 40433), 'pandas.DataFrame', 'pd.DataFrame', ([], {'index': 'dts', 'columns': "['a']", 'data': '(100.0)'}), "(index=dts, columns=['a'], data=100.0)\n", (40395, 40433), True, 'import pandas as pd\n'), ((40663, 40685), 'bt.core.Strategy', 'Strategy', (['"""m"""', '[]', '[s]'], {}), "('m', [], [s])\n", (40671, 40685), False, 'from bt.core import Node, StrategyBase, SecurityBase, AlgoStack, Strategy\n'), ((41120, 41163), 'numpy.allclose', 'np.allclose', (['s.price', '(100.0 * (102 / 101.0))'], {}), '(s.price, 100.0 * (102 / 101.0))\n', (41131, 41163), True, 'import numpy as np\n'), ((41193, 41211), 'bt.core.SecurityBase', 'SecurityBase', (['"""c1"""'], {}), "('c1')\n", (41205, 41211), False, 'from bt.core import Node, StrategyBase, SecurityBase, AlgoStack, Strategy\n'), ((41221, 41239), 'bt.core.SecurityBase', 'SecurityBase', (['"""c2"""'], {}), "('c2')\n", (41233, 41239), False, 'from bt.core import Node, StrategyBase, SecurityBase, AlgoStack, Strategy\n'), ((41248, 41275), 'bt.core.StrategyBase', 'StrategyBase', (['"""p"""', '[c1, c2]'], 
{}), "('p', [c1, c2])\n", (41260, 41275), False, 'from bt.core import Node, StrategyBase, SecurityBase, AlgoStack, Strategy\n'), ((41322, 41360), 'pandas.date_range', 'pd.date_range', (['"""2010-01-01"""'], {'periods': '(3)'}), "('2010-01-01', periods=3)\n", (41335, 41360), True, 'import pandas as pd\n'), ((41372, 41427), 'pandas.DataFrame', 'pd.DataFrame', ([], {'index': 'dts', 'columns': "['c1', 'c2']", 'data': '(100)'}), "(index=dts, columns=['c1', 'c2'], data=100)\n", (41384, 41427), True, 'import pandas as pd\n'), ((42202, 42219), 'bt.core.StrategyBase', 'StrategyBase', (['"""s"""'], {}), "('s')\n", (42214, 42219), False, 'from bt.core import Node, StrategyBase, SecurityBase, AlgoStack, Strategy\n'), ((42231, 42269), 'pandas.date_range', 'pd.date_range', (['"""2010-01-01"""'], {'periods': '(3)'}), "('2010-01-01', periods=3)\n", (42244, 42269), True, 'import pandas as pd\n'), ((42560, 42578), 'bt.core.SecurityBase', 'SecurityBase', (['"""c1"""'], {}), "('c1')\n", (42572, 42578), False, 'from bt.core import Node, StrategyBase, SecurityBase, AlgoStack, Strategy\n'), ((42588, 42606), 'bt.core.SecurityBase', 'SecurityBase', (['"""c2"""'], {}), "('c2')\n", (42600, 42606), False, 'from bt.core import Node, StrategyBase, SecurityBase, AlgoStack, Strategy\n'), ((42615, 42642), 'bt.core.StrategyBase', 'StrategyBase', (['"""p"""', '[c1, c2]'], {}), "('p', [c1, c2])\n", (42627, 42642), False, 'from bt.core import Node, StrategyBase, SecurityBase, AlgoStack, Strategy\n'), ((42769, 42807), 'pandas.date_range', 'pd.date_range', (['"""2010-01-01"""'], {'periods': '(3)'}), "('2010-01-01', periods=3)\n", (42782, 42807), True, 'import pandas as pd\n'), ((42819, 42874), 'pandas.DataFrame', 'pd.DataFrame', ([], {'index': 'dts', 'columns': "['c1', 'c2']", 'data': '(100)'}), "(index=dts, columns=['c1', 'c2'], data=100)\n", (42831, 42874), True, 'import pandas as pd\n'), ((44633, 44651), 'bt.core.SecurityBase', 'SecurityBase', (['"""c1"""'], {}), "('c1')\n", (44645, 44651), False, 
'from bt.core import Node, StrategyBase, SecurityBase, AlgoStack, Strategy\n'), ((44660, 44683), 'bt.core.StrategyBase', 'StrategyBase', (['"""p"""', '[c1]'], {}), "('p', [c1])\n", (44672, 44683), False, 'from bt.core import Node, StrategyBase, SecurityBase, AlgoStack, Strategy\n'), ((44786, 44824), 'pandas.date_range', 'pd.date_range', (['"""2010-01-01"""'], {'periods': '(3)'}), "('2010-01-01', periods=3)\n", (44799, 44824), True, 'import pandas as pd\n'), ((44860, 44910), 'pandas.DataFrame', 'pd.DataFrame', ([], {'index': 'dts', 'columns': "['c1']", 'data': '(0.01)'}), "(index=dts, columns=['c1'], data=0.01)\n", (44872, 44910), True, 'import pandas as pd\n'), ((45202, 45220), 'bt.core.SecurityBase', 'SecurityBase', (['"""c1"""'], {}), "('c1')\n", (45214, 45220), False, 'from bt.core import Node, StrategyBase, SecurityBase, AlgoStack, Strategy\n'), ((45229, 45252), 'bt.core.StrategyBase', 'StrategyBase', (['"""p"""', '[c1]'], {}), "('p', [c1])\n", (45241, 45252), False, 'from bt.core import Node, StrategyBase, SecurityBase, AlgoStack, Strategy\n'), ((45282, 45320), 'pandas.date_range', 'pd.date_range', (['"""2010-01-01"""'], {'periods': '(3)'}), "('2010-01-01', periods=3)\n", (45295, 45320), True, 'import pandas as pd\n'), ((45332, 45383), 'pandas.DataFrame', 'pd.DataFrame', ([], {'index': 'dts', 'columns': "['c1']", 'data': '(100.0)'}), "(index=dts, columns=['c1'], data=100.0)\n", (45344, 45383), True, 'import pandas as pd\n'), ((45855, 45896), 'numpy.isclose', 'np.isclose', (['full_outlay', 'amount'], {'rtol': '(0.0)'}), '(full_outlay, amount, rtol=0.0)\n', (45865, 45896), True, 'import numpy as np\n'), ((46081, 46145), 'numpy.isclose', 'np.isclose', (['full_outlay', '(original_capital - s._capital)'], {'rtol': '(0.0)'}), '(full_outlay, original_capital - s._capital, rtol=0.0)\n', (46091, 46145), True, 'import numpy as np\n'), ((46209, 46272), 'pandas.DatetimeIndex', 'pd.DatetimeIndex', ([], {'start': '"""10/1/2017"""', 'end': '"""10/11/2017"""', 'freq': 
'"""B"""'}), "(start='10/1/2017', end='10/11/2017', freq='B')\n", (46225, 46272), True, 'import pandas as pd\n'), ((46602, 46642), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': 'data', 'index': 'date_span'}), '(data=data, index=date_span)\n', (46614, 46642), True, 'import pandas as pd\n'), ((46728, 46786), 'pandas.DataFrame', 'pd.DataFrame', (["(price['a'] >= price['b'] + 10)"], {'columns': "['a']"}), "(price['a'] >= price['b'] + 10, columns=['a'])\n", (46740, 46786), True, 'import pandas as pd\n'), ((46798, 46855), 'pandas.DataFrame', 'pd.DataFrame', (["(price['a'] < price['b'] + 10)"], {'columns': "['b']"}), "(price['a'] < price['b'] + 10, columns=['b'])\n", (46810, 46855), True, 'import pandas as pd\n'), ((47496, 47505), 'bt.run', 'bt.run', (['t'], {}), '(t)\n', (47502, 47505), False, 'import bt\n'), ((39743, 39781), 'pandas.Series', 'pd.Series', ([], {'data': '(1)', 'index': 'dts', 'name': '"""a"""'}), "(data=1, index=dts, name='a')\n", (39752, 39781), True, 'import pandas as pd\n'), ((39797, 39835), 'pandas.Series', 'pd.Series', ([], {'data': '(2)', 'index': 'dts', 'name': '"""b"""'}), "(data=2, index=dts, name='b')\n", (39806, 39835), True, 'import pandas as pd\n'), ((39851, 39889), 'pandas.Series', 'pd.Series', ([], {'data': '(3)', 'index': 'dts', 'name': '"""c"""'}), "(data=3, index=dts, name='c')\n", (39860, 39889), True, 'import pandas as pd\n'), ((40536, 40568), 'bt.algos.SelectWhere', 'bt.algos.SelectWhere', (['(data > 100)'], {}), '(data > 100)\n', (40556, 40568), False, 'import bt\n'), ((40588, 40611), 'bt.algos.WeighEqually', 'bt.algos.WeighEqually', ([], {}), '()\n', (40609, 40611), False, 'import bt\n'), ((40631, 40651), 'bt.algos.Rebalance', 'bt.algos.Rebalance', ([], {}), '()\n', (40649, 40651), False, 'import bt\n'), ((42294, 42315), 'numpy.random.randn', 'np.random.randn', (['(3)', '(2)'], {}), '(3, 2)\n', (42309, 42315), True, 'import numpy as np\n'), ((47110, 47134), 'bt.algos.WeighTarget', 'bt.algos.WeighTarget', (['tw'], {}), '(tw)\n', 
(47130, 47134), False, 'import bt\n'), ((47172, 47191), 'bt.algos.RunDaily', 'bt.algos.RunDaily', ([], {}), '()\n', (47189, 47191), False, 'import bt\n'), ((47229, 47249), 'bt.algos.Rebalance', 'bt.algos.Rebalance', ([], {}), '()\n', (47247, 47249), False, 'import bt\n')] |
import dask.array as da
import numpy as np
import uuid
def initialize_weights(xds, data_col, weight_col_name, sigma_col_name):
    """Build the weight array for a dataset.

    Three cases are handled:
      * a sigma column is given: weights are computed from it blockwise
        as 1/sigma**2 (zero sigma maps to zero weight),
      * otherwise, a weight column is given: it is used as-is,
      * neither is given: unity weights are generated.

    Inputs:
        xds: xarray.Dataset on which the weight/sigma columns live.
        data_col: Chunked dask.array containing the data; its shape and
            chunking determine those of the output.
        weight_col_name: String name of the input weight column, or a
            falsy value if absent.
        sigma_col_name: String name of the input sigma column, or a
            falsy value if absent. Takes precedence over the weight
            column when both are given.

    Outputs:
        weight_col: A chunked dask.array containing the weights, always
            carrying a frequency axis (broadcast in when the source
            column lacks one).
    """
    if sigma_col_name:
        # Derive weights from the sigma column, block by block.
        weight_col = da.map_blocks(sigma_to_weight, xds[sigma_col_name].data)
    elif weight_col_name:
        weight_col = xds[weight_col_name].data
    else:
        # No weight or sigma column provided - fall back to unity weights
        # matching the data's shape and chunking.
        weight_col = da.ones(data_col.shape,
                            chunks=data_col.chunks,
                            name="weights-" + uuid.uuid4().hex,
                            dtype=np.float32)

    # A 2D weight column has no frequency axis - broadcast one in so the
    # result always matches the shape of the data.
    if weight_col.ndim == 2:
        weight_col = da.broadcast_to(weight_col[:, None, :],
                                      data_col.shape,
                                      chunks=data_col.chunks)

    return weight_col
def sigma_to_weight(sigma_col):
    """Convert sigma values into weights via 1/sigma**2.

    Entries where sigma is exactly zero get zero weight instead of
    producing a division-by-zero inf.
    """
    result = np.zeros_like(sigma_col)
    nonzero = sigma_col != 0
    result[nonzero] = 1 / np.square(sigma_col[nonzero])
    return result
| [
"dask.array.broadcast_to",
"dask.array.map_blocks",
"numpy.zeros_like",
"uuid.uuid4"
] | [((1514, 1538), 'numpy.zeros_like', 'np.zeros_like', (['sigma_col'], {}), '(sigma_col)\n', (1527, 1538), True, 'import numpy as np\n'), ((1289, 1368), 'dask.array.broadcast_to', 'da.broadcast_to', (['weight_col[:, None, :]', 'data_col.shape'], {'chunks': 'data_col.chunks'}), '(weight_col[:, None, :], data_col.shape, chunks=data_col.chunks)\n', (1304, 1368), True, 'import dask.array as da\n'), ((1017, 1073), 'dask.array.map_blocks', 'da.map_blocks', (['sigma_to_weight', 'xds[sigma_col_name].data'], {}), '(sigma_to_weight, xds[sigma_col_name].data)\n', (1030, 1073), True, 'import dask.array as da\n'), ((906, 918), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (916, 918), False, 'import uuid\n')] |
__author__ = "@tino-michael"
import logging
import numpy as np
from astropy import units as u
from numba import jit
from ctapipe.instrument import CameraGeometry
logger = logging.getLogger(__name__)
__all__ = [
"convert_geometry_hex1d_to_rect2d",
"convert_geometry_rect2d_back_to_hexe1d"
]
def unskew_hex_pixel_grid(pix_x, pix_y, cam_angle=0 * u.deg,
                          base_angle=60 * u.deg):
    r"""transform the pixel coordinates of a hexagonal image into an
    orthogonal image

    The correction is a rotation R (by ``cam_angle``) followed by a sheer
    S along the y-axis (by ``base_angle``), applied as r' = S . R . r:

    .. math::
        \begin{pmatrix} x' \\ y' \end{pmatrix}
        =
        \begin{pmatrix}
        \cos & -\sin \\
        \sin-\cos/\tan & \sin/\tan+\cos
        \end{pmatrix}
        \cdot
        \begin{pmatrix} x \\ y \end{pmatrix}

    Parameters
    ----------
    pix_x, pix_y : 1D numpy arrays
        the list of x and y coordinates of the hexagonal pixel grid
    cam_angle : astropy.Quantity (default: 0 degrees)
        The skewing is performed along the y-axis, therefore, one of the
        slanted base-vectors needs to be parallel to the y-axis.
        Some camera grids are rotated in a way that this is not the case.
        This needs to be corrected.
    base_angle : astropy.Quantity (default: 60 degrees)
        the skewing angle of the hex-grid. should be 60° for regular hexagons

    Returns
    -------
    pix_x, pix_y : 1D numpy arrays
        the list of x and y coordinates of the slanted, orthogonal pixel grid
    """
    tan_base = np.tan(base_angle)

    if cam_angle != 0 * u.deg:
        # Camera is rotated: combine the de-rotation with the sheer in a
        # single 2x2 transformation matrix.
        sin_c = np.sin(cam_angle)
        cos_c = np.cos(cam_angle)
        transform = np.array(
            [[cos_c, -sin_c],
             [sin_c - cos_c / tan_base, sin_c / tan_base + cos_c]])
    else:
        # No camera rotation: apply the sheer only.
        transform = np.array([[1, 0], [-1 / tan_base, 1]])

    skewed = np.dot(transform, [pix_x.value, pix_y.value])
    return skewed[0] * pix_x.unit, skewed[1] * pix_x.unit
def reskew_hex_pixel_grid(pix_x, pix_y, cam_angle=0 * u.deg,
                          base_angle=60 * u.deg):
    r"""skews the orthogonal coordinates back to the hexagonal ones

    This inverts :func:`unskew_hex_pixel_grid` by applying the inverse
    sheer S' followed by the inverse rotation R', i.e. r = R' . S' . r':

    .. math::
        \begin{pmatrix} x \\ y \end{pmatrix}
        =
        \begin{pmatrix}
        \cos+\sin/\tan & \sin \\
        \cos/\tan-\sin & \cos
        \end{pmatrix}
        \cdot
        \begin{pmatrix} x' \\ y' \end{pmatrix}

    Parameters
    ----------
    pix_x, pix_y : 1D numpy arrays
        the list of x and y coordinates of the slanted, orthogonal pixel grid
    cam_angle : astropy.Quantity (default: 0 degrees)
        The skewing is performed along the y-axis, therefore, one of the
        slanted base-vectors needs to be parallel to the y-axis.
        Some camera grids are rotated in a way that this is not the case.
        This needs to be corrected.
    base_angle : astropy.Quantity (default: 60 degrees)
        the skewing angle of the hex-grid. should be 60° for regular hexagons

    Returns
    -------
    pix_x, pix_y : 1D numpy arrays
        the list of x and y coordinates of the hexagonal pixel grid
    """
    tan_base = np.tan(base_angle)

    if cam_angle != 0 * u.deg:
        # Camera was rotated: apply the combined inverse (un-sheer then
        # re-rotate) as one 2x2 transformation matrix.
        sin_c = np.sin(cam_angle)
        cos_c = np.cos(cam_angle)
        transform = np.array(
            [[cos_c + sin_c / tan_base, sin_c],
             [cos_c / tan_base - sin_c, cos_c]])
    else:
        # No camera rotation: invert the sheer only.
        transform = np.array([[1, 0], [1 / tan_base, 1]])

    unskewed = np.dot(transform, [pix_x.value, pix_y.value])
    return unskewed[0] * pix_x.unit, unskewed[1] * pix_x.unit
@jit
def reskew_hex_pixel_from_orthogonal_edges(x_edges, y_edges, square_mask):
    """extracts and skews the pixel coordinates from a 2D orthogonal
    histogram (i.e. the bin-edges) and skews them into the hexagonal
    image while selecting only the pixel that are selected by the
    given mask

    Parameters
    ----------
    x_edges, y_edges : 1darrays
        the bin edges of the 2D histogram
    square_mask : 2darray
        mask that selects the pixels actually belonging to the camera

    Returns
    -------
    unrot_x, unrot_y : 1darrays
        pixel coordinated reskewed into the hexagonal camera grid
    """
    hex_x, hex_y = [], []
    # Bin centres of the orthogonal grid; each masked-in centre is
    # mapped back onto the hexagonal grid.
    # NOTE(review): reskew_hex_pixel_grid accesses `.value`/`.unit` on its
    # inputs -- presumably these centres carry astropy units; confirm.
    centres_x = (x_edges[:-1] + x_edges[1:]) / 2
    centres_y = (y_edges[:-1] + y_edges[1:]) / 2
    for i, cx in enumerate(centres_x):
        for j, cy in enumerate(centres_y):
            if not square_mask[i][j]:
                continue
            ux, uy = reskew_hex_pixel_grid(cx, cy)
            hex_x.append(ux)
            hex_y.append(uy)
    return hex_x, hex_y
@jit
def get_orthogonal_grid_edges(pix_x, pix_y, scale_aspect=True):
    """calculate the bin edges of the slanted, orthogonal pixel grid to
    resample the pixel signals with np.histogramdd right after.

    Parameters
    ----------
    pix_x, pix_y : 1D numpy arrays
        the list of x and y coordinates of the slanted, orthogonal pixel grid.
        Note: when `scale_aspect` is True, `pix_x` is rescaled *in place*
        by the returned `x_scale` factor.
    scale_aspect : boolean (default: True)
        if True, rescales the x-coordinates to create square pixels
        (instead of rectangular ones)

    Returns
    --------
    x_edges, y_edges : 1D numpy arrays
        the bin edges for the slanted, orthogonal pixel grid
    x_scale : float
        factor by which the x-coordinates have been scaled
    """

    # finding the size of the square patches: scan all pixels against the
    # first one; near-horizontal neighbours set the x-pitch, near-vertical
    # neighbours set the y-pitch
    d_x = 99 * u.meter  # TODO: @jit may have troubles interpreting astropy.Quantities
    d_y = 99 * u.meter
    x_base = pix_x[0]
    y_base = pix_y[0]
    for x, y in zip(pix_x, pix_y):
        if abs(y - y_base) < abs(x - x_base):
            d_x = min(d_x, abs(x - x_base))
        if abs(y - y_base) > abs(x - x_base):
            d_y = min(d_y, abs(y - y_base))

    x_scale = 1
    if scale_aspect:
        # stretch the x-axis (in place) so the resampled pixels are square
        x_scale = d_y / d_x
        pix_x *= x_scale
        d_x = d_y

    # with the maximal extension of the axes and the size of the pixels,
    # determine the number of bins in each direction
    n_bins_x = (np.around(abs(max(pix_x) - min(pix_x)) / d_x) + 2).astype(int)
    n_bins_y = (np.around(abs(max(pix_y) - min(pix_y)) / d_y) + 2).astype(int)
    x_edges = np.linspace(min(pix_x).value, max(pix_x).value, n_bins_x)
    y_edges = np.linspace(min(pix_y).value, max(pix_y).value, n_bins_y)

    return x_edges, y_edges, x_scale
rot_buffer = {}  # cache: key -> (hex geometry, rect geometry, hex-to-rect transfer map)


def convert_geometry_hex1d_to_rect2d(geom, signal, key=None, add_rot=0):
    """converts the geometry object of a camera with a hexagonal grid into
    a square grid by slanting and stretching the 1D arrays of pixel x
    and y positions and signal intensities are converted to 2D
    arrays. If the signal array contains a time-dimension it is
    conserved.

    Parameters
    ----------
    geom : CameraGeometry object
        geometry object of hexagonal cameras
    signal : ndarray
        1D (no timing) or 2D (with timing) array of the pmt signals
    key : (default: None)
        arbitrary key (float, string) to store the transformed geometry in a buffer
        The geometries (hex and rect) will be stored in a buffer.
        The key is necessary to make the conversion back from rect to hex.
    add_rot : int/float (default: 0)
        parameter to apply an additional rotation of `add_rot` times 60°

    Returns
    -------
    new_geom : CameraGeometry object
        geometry object of the slanted picture now with a rectangular
        grid and a 2D grid for the pixel positions. contains now a 2D
        masking array signifying which of the pixels came from the
        original geometry and which are simply fillers from the
        rectangular grid
    rot_img : ndarray 2D (no timing) or 3D (with timing)
        the rectangular signal image

    Examples
    --------
    camera = event.inst.subarray.tel[tel_id].camera
    image = event.r0.tel[tel_id].image[0]
    key = camera.cam_id
    square_geom, square_image = convert_geometry_hex1d_to_rect2d(camera, image, key=key)
    """
    if key in rot_buffer:
        # if the conversion with this key was done before and stored,
        # just read it in
        (geom, new_geom, hex_to_rect_map) = rot_buffer[key]
    else:
        # otherwise, we have to do the conversion first now,
        # skew all the coordinates of the original geometry
        # extra_rot is the angle to get back to aligned hexagons with flat
        # tops. Note that the pixel rotation angle brings the camera so that
        # hexagons have a point at the top, so need to go 30deg back to
        # make them flat
        extra_rot = geom.pix_rotation - 30 * u.deg
        # total rotation angle:
        rot_angle = (add_rot * 60 * u.deg) - extra_rot
        logger.debug("geom={}".format(geom))
        logger.debug("rot={}, extra={}".format(rot_angle, extra_rot))
        rot_x, rot_y = unskew_hex_pixel_grid(geom.pix_x, geom.pix_y,
                                              cam_angle=rot_angle)
        # with all the coordinate points, we can define the bin edges
        # of a 2D histogram
        x_edges, y_edges, x_scale = get_orthogonal_grid_edges(rot_x, rot_y)
        # this histogram will introduce bins that do not correspond to
        # any pixel from the original geometry. so we create a mask to
        # remember the true camera pixels by simply throwing all pixel
        # positions into numpy.histogramdd: proper pixels contain the
        # value 1, false pixels the value 0.
        square_mask = np.histogramdd([rot_y, rot_x],
                                      bins=(y_edges, x_edges))[0].astype(bool)
        # to be consistent with the pixel intensity, instead of saving
        # only the rotated positions of the true pixels (rot_x and
        # rot_y), create 2D arrays of all x and y positions (also the
        # false ones).
        grid_x, grid_y = np.meshgrid((x_edges[:-1] + x_edges[1:]) / 2.,
                                     (y_edges[:-1] + y_edges[1:]) / 2.)
        ids = []
        # instead of blindly enumerating all pixels, let's instead
        # store a list of all valid -- i.e. picked by the mask -- 2D
        # indices
        for i, row in enumerate(square_mask):
            for j, val in enumerate(row):
                # BUGFIX: was `if val is True:`, which never matches numpy
                # bool scalars (np.bool_(True) is not the `True` singleton),
                # so `ids` always stayed empty; a truthiness test is correct
                if val:
                    ids.append((i, j))
        # the area of the pixels (note that this is still a deformed
        # image)
        pix_area = (np.ones_like(grid_x)
                    * (x_edges[1] - x_edges[0])
                    * (y_edges[1] - y_edges[0]))
        # creating a new geometry object with the attributes we just determined
        new_geom = CameraGeometry(
            cam_id=geom.cam_id + "_rect",
            pix_id=ids,  # this is a list of all the valid coordinate pairs now
            pix_x=grid_x * u.meter,
            pix_y=grid_y * u.meter,
            pix_area=pix_area * u.meter ** 2,
            neighbors=geom.neighbors,
            pix_type='rectangular', apply_derotation=False)
        # storing the pixel mask for later use
        new_geom.mask = square_mask
        # create a transfer map by enumerating all pixel positions in a 2D histogram
        hex_to_rect_map = np.histogramdd([rot_y, rot_x],
                                       bins=(y_edges, x_edges),
                                       weights=np.arange(len(signal)))[
            0].astype(int)
        # bins that do not correspond to the original image get an entry of `-1`
        hex_to_rect_map[~square_mask] = -1
        if signal.ndim > 1:
            # time dimension present: build one offset copy of the map per
            # time slice so that the flattened input can be indexed directly
            long_map = []
            for i in range(signal.shape[-1]):
                tmp_map = hex_to_rect_map + i * (np.max(hex_to_rect_map) + 1)
                tmp_map[~square_mask] = -1
                long_map.append(tmp_map)
            hex_to_rect_map = np.array(long_map)
        if key is not None:
            # if a key is given, store the essential objects in a buffer
            rot_buffer[key] = (geom, new_geom, hex_to_rect_map)
    # done `if key in rot_buffer`
    # create the rotated rectangular image by applying `hex_to_rect_map` to the flat,
    # extended input image
    # `input_img_ext` is the flattened input image extended by one entry that contains NaN
    # since `hex_to_rect_map` contains `-1` for "fake" pixels, it maps this extra NaN
    # value at the last array position to any bin that does not correspond to a pixel of
    # the original image
    input_img_ext = np.full(np.prod(signal.shape) + 1, np.nan)
    # the way the map is produced, it has the time dimension as axis=0;
    # but `signal` has it as axis=-1, so we need to roll the axes back and forth a bit.
    # if there is no time dimension, `signal` is a 1d array and `rollaxis` has no effect.
    input_img_ext[:-1] = np.rollaxis(signal, axis=-1, start=0).ravel()
    # now apply the transfer map
    rot_img = input_img_ext[hex_to_rect_map]
    # if there is a time dimension, roll the time axis back to the last position
    try:
        rot_img = np.rollaxis(rot_img, 0, 3)
    except ValueError:
        pass
    return new_geom, rot_img
def convert_geometry_rect2d_back_to_hexe1d(geom, signal, key=None,
                                           add_rot=None):
    """reverts the geometry distortion performed by convert_geometry_hexe1d_to_rect_2d
    back to a hexagonal grid stored in 1D arrays

    Parameters
    ----------
    geom : CameraGeometry
        geometry object where pixel positions are stored in a 2D
        rectangular camera grid
    signal : ndarray
        pixel intensity stored in a 2D rectangular camera grid
    key:
        key to retrieve buffered geometry information
        (see `convert_geometry_hex1d_to_rect2d`)
    add_rot:
        not used -- only here for backwards compatibility

    Returns
    -------
    old_geom : CameraGeometry
        the original geometry of the image
    signal : ndarray
        1D (no timing) or 2D (with timing) array of the pmt signals

    Raises
    ------
    ValueError
        if `key` is unknown and the original `CameraGeometry` cannot be
        deduced from `geom.cam_id`

    Notes
    -----
    The back-conversion works with an internal buffer to store the transfer map (which
    was produced in the first conversion). If `key` is not found in said buffer, this
    function tries to perform a mock conversion. For this, it needs a `CameraGeometry`
    instance of the original camera layout, which it tries to load by name (i.e.
    the `cam_id`). The function assumes the original `cam_id` can be inferred from the
    given, modified one by: `geom.cam_id.split('_')[0]`.

    Examples
    --------
    camera = event.inst.subarray.tel[tel_id].camera
    image = event.r0.tel[tel_id].image[0]
    key = camera.cam_id
    square_geom, square_image = convert_geometry_hex1d_to_rect2d(camera, image, key=key)
    hex_geom, hex_image = convert_geometry_rect2d_back_to_hexe1d(square_geom,
                                                                 square_image, key = key)
    """
    if key not in rot_buffer:
        # if the key is not in the buffer from the initial conversion (maybe
        # because you did it in another process?), perform a mock conversion
        # here ATTENTION assumes the original cam_id can be inferred from the
        # given, modified one by by `geom.cam_id.split('_')[0]`
        try:
            orig_geom = CameraGeometry.from_name(geom.cam_id.split('_')[0])
        except Exception as exc:
            # BUGFIX: was a bare `except:` which also swallowed
            # KeyboardInterrupt/SystemExit; chain the cause for debuggability
            raise ValueError(
                "could not deduce `CameraGeometry` from given `geom`...\n"
                "please provide a `geom`, so that "
                "`geom.cam_id.split('_')[0]` is a known `cam_id`") from exc
        orig_signal = np.zeros(len(orig_geom.pix_x))
        # BUGFIX: `add_rot` defaults to None here, but the forward conversion
        # multiplies it by an angle (None * 60 * u.deg -> TypeError); pass the
        # neutral value 0 instead
        convert_geometry_hex1d_to_rect2d(geom=orig_geom, signal=orig_signal,
                                         key=key,
                                         add_rot=0 if add_rot is None else add_rot)
    (old_geom, new_geom, hex_square_map) = rot_buffer[key]
    # the output image has as many entries as there are non-negative values in the
    # transfer map (this accounts for time as well)
    unrot_img = np.zeros(np.count_nonzero(hex_square_map >= 0))
    # rearrange input `signal` according to the mask and map
    # (the dots in the brackets expand the mask to account for a possible time dimension)
    # `atleast_3d` ensures that there is a third axis that we can roll to the front
    # even if there is no time; if we'd use `axis=-1` instead, in cas of no time
    # dimensions, we would rotate the x and y axes, resulting in a mirrored image
    # `squeeze` reduces the added axis again in the no-time-slices cases
    unrot_img[hex_square_map[..., new_geom.mask]] = \
        np.squeeze(np.rollaxis(np.atleast_3d(signal), 2, 0))[..., new_geom.mask]
    # if `signal` has a third dimension, that is the time
    # and we need to roll some axes again...
    if signal.ndim == 3:
        # unrot_img[hex_square_map[..., new_geom.mask]] = \
        #     np.rollaxis(signal, -1, 0)[..., new_geom.mask]
        # reshape the image so that the time is the first axis
        # and then roll the time to the back
        unrot_img = unrot_img.reshape((signal.shape[2],
                                       np.count_nonzero(new_geom.mask)))
        unrot_img = np.rollaxis(unrot_img, -1, 0)
    # else:
    #     unrot_img[hex_square_map[new_geom.mask]] = \
    #         signal[new_geom.mask]
    return old_geom, unrot_img
| [
"logging.getLogger",
"numpy.prod",
"numpy.ones_like",
"ctapipe.instrument.CameraGeometry",
"numpy.tan",
"numpy.histogramdd",
"numpy.rollaxis",
"numpy.max",
"numpy.count_nonzero",
"numpy.array",
"numpy.dot",
"numpy.cos",
"numpy.sin",
"numpy.meshgrid",
"numpy.atleast_3d"
] | [((175, 202), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (192, 202), False, 'import logging\n'), ((2183, 2201), 'numpy.tan', 'np.tan', (['base_angle'], {}), '(base_angle)\n', (2189, 2201), True, 'import numpy as np\n'), ((3087, 3130), 'numpy.dot', 'np.dot', (['rot_mat', '[pix_x.value, pix_y.value]'], {}), '(rot_mat, [pix_x.value, pix_y.value])\n', (3093, 3130), True, 'import numpy as np\n'), ((5096, 5114), 'numpy.tan', 'np.tan', (['base_angle'], {}), '(base_angle)\n', (5102, 5114), True, 'import numpy as np\n'), ((5970, 6013), 'numpy.dot', 'np.dot', (['rot_mat', '[pix_x.value, pix_y.value]'], {}), '(rot_mat, [pix_x.value, pix_y.value])\n', (5976, 6013), True, 'import numpy as np\n'), ((2368, 2385), 'numpy.sin', 'np.sin', (['cam_angle'], {}), '(cam_angle)\n', (2374, 2385), True, 'import numpy as np\n'), ((2406, 2423), 'numpy.cos', 'np.cos', (['cam_angle'], {}), '(cam_angle)\n', (2412, 2423), True, 'import numpy as np\n'), ((2791, 2903), 'numpy.array', 'np.array', (['[[cos_angle, -sin_angle], [sin_angle - cos_angle / tan_angle, sin_angle /\n tan_angle + cos_angle]]'], {}), '([[cos_angle, -sin_angle], [sin_angle - cos_angle / tan_angle, \n sin_angle / tan_angle + cos_angle]])\n', (2799, 2903), True, 'import numpy as np\n'), ((3032, 3071), 'numpy.array', 'np.array', (['[[1, 0], [-1 / tan_angle, 1]]'], {}), '([[1, 0], [-1 / tan_angle, 1]])\n', (3040, 3071), True, 'import numpy as np\n'), ((5281, 5298), 'numpy.sin', 'np.sin', (['cam_angle'], {}), '(cam_angle)\n', (5287, 5298), True, 'import numpy as np\n'), ((5319, 5336), 'numpy.cos', 'np.cos', (['cam_angle'], {}), '(cam_angle)\n', (5325, 5336), True, 'import numpy as np\n'), ((5690, 5800), 'numpy.array', 'np.array', (['[[cos_angle + sin_angle / tan_angle, sin_angle], [cos_angle / tan_angle -\n sin_angle, cos_angle]]'], {}), '([[cos_angle + sin_angle / tan_angle, sin_angle], [cos_angle /\n tan_angle - sin_angle, cos_angle]])\n', (5698, 5800), True, 'import numpy as np\n'), ((5916, 
5954), 'numpy.array', 'np.array', (['[[1, 0], [1 / tan_angle, 1]]'], {}), '([[1, 0], [1 / tan_angle, 1]])\n', (5924, 5954), True, 'import numpy as np\n'), ((12421, 12509), 'numpy.meshgrid', 'np.meshgrid', (['((x_edges[:-1] + x_edges[1:]) / 2.0)', '((y_edges[:-1] + y_edges[1:]) / 2.0)'], {}), '((x_edges[:-1] + x_edges[1:]) / 2.0, (y_edges[:-1] + y_edges[1:]\n ) / 2.0)\n', (12432, 12509), True, 'import numpy as np\n'), ((13196, 13417), 'ctapipe.instrument.CameraGeometry', 'CameraGeometry', ([], {'cam_id': "(geom.cam_id + '_rect')", 'pix_id': 'ids', 'pix_x': '(grid_x * u.meter)', 'pix_y': '(grid_y * u.meter)', 'pix_area': '(pix_area * u.meter ** 2)', 'neighbors': 'geom.neighbors', 'pix_type': '"""rectangular"""', 'apply_derotation': '(False)'}), "(cam_id=geom.cam_id + '_rect', pix_id=ids, pix_x=grid_x * u.\n meter, pix_y=grid_y * u.meter, pix_area=pix_area * u.meter ** 2,\n neighbors=geom.neighbors, pix_type='rectangular', apply_derotation=False)\n", (13210, 13417), False, 'from ctapipe.instrument import CameraGeometry\n'), ((15559, 15585), 'numpy.rollaxis', 'np.rollaxis', (['rot_img', '(0)', '(3)'], {}), '(rot_img, 0, 3)\n', (15570, 15585), True, 'import numpy as np\n'), ((18454, 18491), 'numpy.count_nonzero', 'np.count_nonzero', (['(hex_square_map >= 0)'], {}), '(hex_square_map >= 0)\n', (18470, 18491), True, 'import numpy as np\n'), ((19604, 19633), 'numpy.rollaxis', 'np.rollaxis', (['unrot_img', '(-1)', '(0)'], {}), '(unrot_img, -1, 0)\n', (19615, 19633), True, 'import numpy as np\n'), ((14361, 14379), 'numpy.array', 'np.array', (['long_map'], {}), '(long_map)\n', (14369, 14379), True, 'import numpy as np\n'), ((15014, 15035), 'numpy.prod', 'np.prod', (['signal.shape'], {}), '(signal.shape)\n', (15021, 15035), True, 'import numpy as np\n'), ((15325, 15362), 'numpy.rollaxis', 'np.rollaxis', (['signal'], {'axis': '(-1)', 'start': '(0)'}), '(signal, axis=-1, start=0)\n', (15336, 15362), True, 'import numpy as np\n'), ((12978, 12998), 'numpy.ones_like', 
'np.ones_like', (['grid_x'], {}), '(grid_x)\n', (12990, 12998), True, 'import numpy as np\n'), ((19050, 19071), 'numpy.atleast_3d', 'np.atleast_3d', (['signal'], {}), '(signal)\n', (19063, 19071), True, 'import numpy as np\n'), ((19550, 19581), 'numpy.count_nonzero', 'np.count_nonzero', (['new_geom.mask'], {}), '(new_geom.mask)\n', (19566, 19581), True, 'import numpy as np\n'), ((12055, 12110), 'numpy.histogramdd', 'np.histogramdd', (['[rot_y, rot_x]'], {'bins': '(y_edges, x_edges)'}), '([rot_y, rot_x], bins=(y_edges, x_edges))\n', (12069, 12110), True, 'import numpy as np\n'), ((14218, 14241), 'numpy.max', 'np.max', (['hex_to_rect_map'], {}), '(hex_to_rect_map)\n', (14224, 14241), True, 'import numpy as np\n')] |
#!/usr/bin/env python
# coding: utf-8
# Design and Programming by Lead TA: <NAME> @ Data Analytics Lab - UWaterloo.ca
# COURSE: CS 486/686 - Artificial Intelligence - University of Waterloo - Spring 2020 - Alice Gao
# Please let me know if you find any bugs in the code: <EMAIL>
# The code will be available at https://github.com/mojivalipour/nnscratch
# Version: 0.9.0
# Implement a neural network from scratch
''' Sources:
- http://neuralnetworksanddeeplearning.com/chap2.html
'''
print('Life is easy, you just need to do your best to find your place!')
# Libraries
import numpy as np
import matplotlib.pyplot as plt
from tqdm import tqdm
from sklearn import datasets
from sklearn.manifold import TSNE # visualization for data with more than two features
from os import path
import pandas as pd
import csv
import copy
import random
# Helper functions
def fixSeed(seed=1010):
    """Make runs reproducible by seeding both pseudo-random generators.

    Parameters
    ----------
    seed : int (default: 1010)
        value used to seed Python's `random` module and NumPy's global RNG
    """
    random.seed(seed)
    np.random.seed(seed)
# The hyper-parameters for the neural network
# (these module-level names are read by the training script below)
nSamples = None # use None if you want to use full sample size
# frogsSmall is the same dataset in Q1 that you have to use for comparision
dataset = '2moons' # 2moons/frogsSmall/frogs
noise = 0.05 # Noise in artificial datasets
visNumSamples = 500 # number of samples to visualize
# for regression, we use mean squared error.
# for classification, we use cross entropy.
# for now only mse is supported!
lossFunction = 'mse'
gdMethod = 'batch' # batch gradient descent method
batchSize = 64 # only for minibatch gradient descent
numEpochs = 200 # number of epochs
learningRate = [0.5,0.05,0.005] # learning rates (one training sweep per entry)
# for now only relu and sigmoid is supported
lastActivationFunc = 'sigmoid' # relu/sigmoid/softmax
# last layer activation function, this one is important
# because we need to use it for classification later
crossValidationFlag = True # if you like to run cross validation, set this flag to True
kFold = 3 # k-fold cross validation, at least need to be 2
seed = 6565 # Do not change the seed for Assignment
fixSeed(seed=seed) # fix the seed of random generator to make sure comparision is possible
# Some Useful Notes for those students who are interested to know more:
'''
- Neural networks are prone to overfitting. Increasing the number of parameters
could lead to models that have complexity bigger than data.
- Regularization, Normalization and Dropout are popular solutions to overfitting!
- In a neural network, we usually use the softmax function as last layer
activation for multi-class classification and sigmoid for single class
classification.
- For regression problems, we usually use Relu as last layer activation function
and MSE as the loss function that we want to minimize.
- Cross-entropy is the most useful loss function for multi-class classification.
- Sometimes we need to use multiple neurons in the output layer, which means
that we consider a neuron for each class. In this case, we need to use
one-hot vectors to encode the labels.
- Weight initialization is important! Gradient descent is not robust to
weight initialization! Xavier initialization is the most popular method
to initialize weights in neural networks.
'''
# Load data
# one plotting colour per class label (index = class id)
colorBox = ['#377eb8','#FA0000','#344AA7', '#1EFA39','#00FBFF','#C500FF','#000000','#FFB600']
if dataset == '2moons':
    nSamples = 1000 if nSamples is None else nSamples
    X,y = datasets.make_moons(n_samples=nSamples, noise=noise, random_state=seed)
    numSamples, numFeatures, numClasses = X.shape[0], X.shape[1], 2
    # shuffle X,y
    idxList = list(range(nSamples))
    random.shuffle(idxList) # inplace
    X, y = X[idxList,:], y[idxList]
elif dataset == 'frogsSmall' or dataset == 'frogs':
    if dataset == 'frogs':
        # original dataset
        name = 'Frogs_MFCCs.csv'
    else:
        # a small subset of frogs original dataset, same as A2Q1
        name = 'frogs-small.csv'
    # check if we already have the file in the directory
    if not path.isfile(name):
        # otherwise ask user to upload it
        print("Please put this {} file in the current directory using choose files ...".format(name))
    # just load the csv file
    X = pd.read_csv(name, sep=',')
    X["Family"] = X["Family"].astype('category')
    X["FamilyCat"] = X["Family"].cat.codes # added to the last column
    # first 22 columns are the features, last column the integer class label
    X, y = X.iloc[:,0:22].to_numpy(), X.iloc[:,-1].to_numpy()
    nSamples = X.shape[0] if nSamples is None else nSamples
    X, y = X[:nSamples,:], y[:nSamples] # filter number of samples
    numSamples, numFeatures, numClasses = X.shape[0], X.shape[1], len(np.unique(y))
print('#INFO: N (Number of Samples): {}, D (Number of Features): {}, C (Number of Classes): {}'.format(numSamples, numFeatures, numClasses))
plt.figure()
# if y min is not zero, make it zero
y = y - y.min()
assert y.min() == 0
# sample required sample for visualization
indices = list(range(numSamples))
selectedIndices = np.random.choice(indices, visNumSamples)
colors = [colorBox[y[idx]] for idx in selectedIndices]
if numFeatures == 2:
    XR = X[selectedIndices, :]
else:
    # use tsne to reduce dimensionality for visualization
    XR = TSNE(n_components=2).fit_transform(X[selectedIndices,:])
plt.scatter(XR[:, 0], XR[:, 1], s=10, color=colors)
plt.savefig('dataset.png')
if len(y.shape) < 2:
    y = np.expand_dims(y,-1) # shape of y should be N x 1
# Define the network structure
# each entry: Layer Name -> [[input size, output size], use bias?, activation name]
# # 2-Layer Network
# config = {
#     # Layer Name: [Number of Nodes (in and out), Bias, Activation Function]
#     'Hidden Layer 0': [[numFeatures, 30], True, 'relu'], # w1
#     'Fully Connected': [[30, 1], True, lastActivationFunc] # w2
# }
# overfit network example (deliberately over-parameterized hidden layer)
config = {
    # Layer Name: [Number of Nodes (in and out), Bias, Activation Function]
    'Hidden Layer 0': [[numFeatures, 1000], True, 'sigmoid'], # w1
    'Fully Connected': [[1000, 1], True, lastActivationFunc] # w2
}
# 3-Layer Network
# config = {
#     # Layer Name: [Number of Nodes (in and out), Bias, Activation Function]
#     'Hidden Layer 0': [[numFeatures, 3], True, 'sigmoid'], # w1
#     'Hidden Layer 1': [[3, 5], True, 'sigmoid'], # w2
#     'Fully Connected': [[5, 1], True, lastActivationFunc] # w2
# }
# 4-layer Network
# config = {
#     # Layer Name: [Number of Nodes (in and out), Bias, Activation Function]
#     'Hidden Layer 0': [[numFeatures, 100], True, 'relu'], # w1
#     'Hidden Layer 1': [[100, 50], True, 'relu'], # w2
#     'Hidden Layer 2': [[50, 5], True, 'relu'], # w3
#     'Fully Connected': [[5, 1], True, lastActivationFunc] # w4
# }
# Fully Connected Neural Network Class
class neuralNetwork():
    """A fully connected feed-forward neural network trained with
    mini-batch gradient descent and back-propagation.

    The layout is given by `config`, a dict mapping a layer name to
    ``[[inSize, outSize], useBias, activationName]`` (see the module-level
    examples above).  Only the 'mse' loss and the 'sigmoid'/'relu'
    activations are implemented.
    """

    # initializing network
    def __init__(self, config=None, numClass=2, learningRate=0.005,
                 numEpochs=10, batchSize=64, lossFunction='mse'):
        self.config = config
        self.configKeyList = list(self.config.keys())
        self.lossFunction = lossFunction
        self.numLayers = len(self.config)
        self.layers = {}
        self.layerShapes = {}
        self.learningRate = learningRate
        self.numEpochs = numEpochs
        self.loss = []
        self.lossT = []
        self.acc = []
        self.accT = []
        self.batchSize = batchSize
        self.numClass = numClass
        self.initWeights()

    # random init
    def initWeights(self):
        """(Re-)initialize all weights/biases with scaled Gaussian noise and
        reset the loss/accuracy histories."""
        self.loss = []
        self.lossT = []
        self.acc = []
        self.accT = []
        if self.config is not None:
            # BUGFIX: iterate self.config, not the module-level global `config`
            for key in self.config:
                # W is the weight matrix, b the bias vector, a the activation name
                self.layers[key] = {
                    'W': np.random.randn(self.config[key][0][0],
                                         self.config[key][0][1]) / np.sqrt(self.config[key][0][1]),
                    'b': np.random.randn(self.config[key][0][1],
                                         ) if self.config[key][1] == True else [],
                    'a': self.config[key][2]}
                # keep track of shape only for better understanding
                self.layerShapes[key] = {'IS': self.config[key][0][0],
                                         'OS': self.config[key][0][1],
                                         'NP': np.prod(self.layers[key]['W'].shape) + len(self.layers[key]['b'])}
        else:
            # BUGFIX: `raise <str>` is a TypeError in Python 3 -- raise a real exception
            raise ValueError('#Err: Make sure you set a configuration correctly!')

    # activation functions
    def relu(self, X):
        """Element-wise rectified linear unit."""
        return np.maximum(0, X)

    def sigmoid(self, X):
        """Element-wise logistic function.

        The argument is clipped to [-500, 500] so `np.exp` can never
        overflow (fixes the overflow TODO of the original implementation
        while returning numerically identical values)."""
        return 1. / (1. + np.exp(-np.clip(X, -500, 500)))

    def activationFunc(self, X, type='sigmoid'):
        """Apply the activation named by `type` to X."""
        if type == 'sigmoid':
            return self.sigmoid(X)
        elif type == 'relu':
            return self.relu(X)
        elif type == 'None':
            return X  # identity, do nothing
        else:
            raise NotImplementedError('#Err: Not implemented activation function!')

    # objective/loss/cost functions
    def mse(self, y, yPred):
        """Mean squared error between targets and predictions."""
        return np.mean(np.power(y - yPred, 2))

    def lossFunc(self, y, yPred, type='mse'):
        """Dispatch to the loss named by `type` (only 'mse' is supported)."""
        if type == 'mse':
            return self.mse(y, yPred)
        else:
            raise NotImplementedError('#Err: Not implemented objective function!')

    # back-propagation learning
    # forward pass
    def forward(self, X):
        """Propagate X through all layers, caching each layer's input ('i'),
        pre-activation ('z') and output ('o') for the backward pass.
        Returns the network output (yPred)."""
        # apply a(W.T x X + b) for each layer
        # BUGFIX: iterate self.config, not the module-level global `config`
        for key in self.config:
            # save input of each layer for backward pass
            self.layers[key]['i'] = X
            z = np.dot(X, self.layers[key]['W'])
            z = z + self.layers[key]['b'] if len(self.layers[key]['b']) != 0 else z
            # save middle calculation for backward pass
            self.layers[key]['z'] = z
            X = self.activationFunc(z, type=self.layers[key]['a'])
            # save middle calculation for backward pass
            self.layers[key]['o'] = X
        return X  # yPred

    # backward pass
    def backward(self, y, yPred):
        """One gradient-descent update of every weight matrix and bias vector
        from the error between `y` and `yPred` (back-propagation).
        Must be called after `forward` (relies on the cached 'i'/'o')."""
        # derivative of sigmoid
        def sigmoidPrime(x):
            return self.sigmoid(x) * (1 - self.sigmoid(x))

        # derivative of relu
        def reluPrime(x):
            return np.where(x <= 0, 0, 1)

        def identity(x):
            return x

        # TODO: It's not necessary to use double for,
        # it is possible to implement faster and more efficient version
        # for each parameter (weights and bias) in each layer
        # BUGFIX: iterate self.config, not the module-level global `config`
        for idx, key in enumerate(self.config):
            # calculate derivatives
            if self.layers[key]['a'] == 'sigmoid':
                fPrime = sigmoidPrime
            elif self.layers[key]['a'] == 'relu':
                fPrime = reluPrime
            elif self.layers[key]['a'] == 'softmax':
                # BUGFIX: `softmaxPrime` was never defined (NameError);
                # fail explicitly instead
                raise NotImplementedError('#Err: softmax derivative is not implemented!')
            else:  # None
                fPrime = identity
            deWRTdyPred = -(y - yPred) if self.lossFunction == 'mse' else 1  # de/dyPred
            # dyPred/dyPredBeforeActivation # in case of sigmoid g(x) x (1-g(x))
            dyPredWRTdyPredPre = fPrime(self.layers[self.configKeyList[-1]]['o'])
            # element wise multiplication/ hadamard product
            delta = np.multiply(deWRTdyPred, dyPredWRTdyPredPre)
            for idxW in range(len(self.config), idx, -1):  # reverse
                if idxW - 1 == idx:
                    # calculating the derivative for the last one is different
                    # because it is respected to that specific weight
                    deltaB = delta
                    dxWRTdW = self.layers[key]['i'].T  # dxWRTdW
                    delta = np.dot(dxWRTdW, delta)
                else:
                    # this loop is depended to the number of layers in the configuration
                    # the weights of current layer
                    # how fast the cost is changing as a function of the output activation
                    dxWRTdh = self.layers[self.configKeyList[idxW - 1]]['W'].T  # dxPreWRTdx-1
                    # the output of previous layer
                    # how fast the activation function is changing
                    dhWRTdhPre = fPrime(self.layers[self.configKeyList[idxW - 1 - 1]]['o'])  # dx-1WRTdx-1Pre
                    delta = np.dot(delta, dxWRTdh) * dhWRTdhPre
            # sanity check: Numerical Gradient Checking
            # f'(x) = lim (f(x+deltax)-f(x))/deltax when deltax -> 0
            # update parameters
            # W = W - Gamma * dL/dW
            self.layers[key]['djWRTdw'] = delta
            self.layers[key]['W'] = self.layers[key]['W'] - self.learningRate / y.shape[0] * delta
            # b = b - Gamma * dL/db
            self.layers[key]['djWRTdb'] = deltaB
            if len(self.layers[key]['b']) != 0:
                self.layers[key]['b'] = self.layers[key]['b'] - self.learningRate / y.shape[0] * np.sum(deltaB, axis=0)

    # Utility Functions
    def summary(self, space=20):
        """Print a table of layer names, shapes and parameter counts."""
        print('{: <{}} | {: <{}} | {: <{}} | {: <{}}'.format("Layer Name", space,
                                                             "Input Shape", space,
                                                             "Output Shape", space,
                                                             "Number of Parameters", space))
        # BUGFIX: iterate self.config, not the module-level global `config`
        for key in self.config:
            print('{: <{}} | {: <{}} | {: <{}} | {: <{}}'.format(key, space,
                                                                 self.layerShapes[key]['IS'], space,
                                                                 self.layerShapes[key]['OS'], space,
                                                                 self.layerShapes[key]['NP'], space))

    def fit(self, X, y, XT=None, yT=None, method='batch', batchSize=None, numEpochs=None,
            learningRate=None, initialState=None):
        """Train with mini-batch gradient descent.

        X, y : training data (y of shape N x 1); XT, yT : optional held-out
        data whose loss/accuracy are tracked per epoch.  `method` must be
        'batch'; the per-epoch curves are appended to self.loss/acc(T)."""
        if numEpochs is None:  # overwrite
            numEpochs = self.numEpochs
        if learningRate is not None:
            self.learningRate = learningRate
        if batchSize is not None:
            self.batchSize = batchSize
        # if initialState is not None:
        #     # use the given initial parameters (weights and bias)
        #     self.layers = initialState
        if method == 'batch':
            # this is infact mini-batch gradient descent, just for consistency in course material
            # same as batched gradient descent in class to make it easier for you
            pBar = tqdm(range(numEpochs))
            for edx in pBar:
                for idx in range(0, X.shape[0], self.batchSize):
                    start = idx
                    end = start + self.batchSize
                    end = end if end < X.shape[0] else X.shape[0]
                    # TODO: Support variable batchsize
                    if end - start != self.batchSize:
                        continue
                    x_, y_ = X[start:end, :], y[start:end, :]
                    yPred = self.forward(x_)
                    loss = self.lossFunc(y_, yPred, type=self.lossFunction)
                    self.backward(y_, yPred)
                yPred, yPredOrig = self.predict(X)
                loss = self.lossFunc(y, yPredOrig, type=self.lossFunction)
                self.loss.append(loss)
                acc = self.accuracy(y, yPred)
                self.acc.append(acc)
                if XT is not None:
                    yPred, yPredOrig = self.predict(XT)
                    loss = self.lossFunc(yT, yPredOrig, type=self.lossFunction)
                    self.lossT.append(loss)
                    acc = self.accuracy(yT, yPred)
                    self.accT.append(acc)
        else:
            # BUGFIX: original raised a plain string (TypeError in Python 3)
            raise NotImplementedError(
                '#Err: {} Gradient Descent Method is Not implemented!'.format(method))

    def predict(self, X):
        """Forward X and turn the raw output into class labels.
        Returns (class predictions, raw network output)."""
        yPred = self.forward(X)
        yPredOrigin = copy.deepcopy(yPred)
        # last layer activation function, class prediction should be single
        # and the output is between zero and one
        if self.config[self.configKeyList[-1]][-1] == 'sigmoid':
            yPred[yPred < 0.5] = 0
            yPred[yPred >= 0.5] = 1
        # multi-class problem
        elif self.config[self.configKeyList[-1]][-1] == 'softmax':
            raise NotImplementedError('#Err: Prediction is not supported for softmax yet!')
        # single/multi class problem, single node and it can be anything greater than 0
        elif self.config[self.configKeyList[-1]][-1] == 'relu':
            yPred = np.round(yPred)
            yPred = np.clip(yPred, 0, self.numClass - 1)  # sanity check
        return yPred, yPredOrigin

    def error(self, y, yPred):
        """Loss between targets and predictions (see `lossFunc`)."""
        return self.lossFunc(y, yPred, type=self.lossFunction)

    def accuracy(self, y, yPred):
        """Percentage of exactly matching predictions."""
        return 100 * np.sum(y == yPred) / y.shape[0]

    def plotLoss(self, loss=None, ax=None):
        """Plot a loss history, either on a fresh figure or into `ax`."""
        if loss is None:
            loss = self.loss
        if ax is None:
            plt.plot(loss)
            plt.xlabel("Epochs")
            plt.ylabel("Loss")
            plt.title("Loss Per Epoch")
            plt.show()
        else:
            ax.plot(loss)
            ax.set_xlabel("Epochs")
            ax.set_ylabel("Loss")
            ax.set_title("Loss Per Epoch")

    def crossValidationIndices(self, index, k=5):
        """Split `index` (a list of indices) into `k` (train, test) folds."""
        # index is a list of indexes
        cvList = []
        for idx in range(k):  # iterate over k-folds
            interval = int(len(index) / k)
            start = idx * interval
            end = start + interval
            testIndexes = list(range(start, end))
            trainIndexes = list(range(0, start)) + list(range(end, len(index)))
            cvList.append((trainIndexes, testIndexes))
        return cvList
# Sweep over all learning rates; for each one either run k-fold cross
# validation or a single training run, collecting all plots in one figure.
if crossValidationFlag:
    if len(learningRate) == 1:
        fig, ax = plt.subplots(3,len(learningRate),figsize=(8,15))
    else:
        fig, ax = plt.subplots(3,len(learningRate),figsize=(30,3*(len(learningRate)+2)))
else:
    fig, ax = plt.subplots(1,1+len(learningRate),figsize=(30,1+len(learningRate)))
for ldx, lr in enumerate(learningRate):
    nn = neuralNetwork(config=config, numClass=numClasses, numEpochs=numEpochs,
                       learningRate=lr, lossFunction=lossFunction)
    # Initialize the network and the weights
    nn.initWeights()
    if crossValidationFlag:
        indexes = list(range(X.shape[0]))
        cvIndices = nn.crossValidationIndices(indexes, k=kFold)
        accList = []
        accTList = []
        lossList = []
        lossTList = []
        for k in range(kFold):
            # fresh weights for every fold so the folds are independent
            nn.initWeights()
            XTrain, yTrain = X[cvIndices[k][0],:], y[cvIndices[k][0],:]
            XTest, yTest = X[cvIndices[k][1],:], y[cvIndices[k][1],:]
            # Train the network
            nn.fit(XTrain, yTrain, XTest, yTest, method=gdMethod, batchSize=batchSize,
                   numEpochs=numEpochs, learningRate=lr)
            accList.append(nn.acc)
            accTList.append(nn.accT)
            lossList.append(nn.loss)
            lossTList.append(nn.lossT)
        # average the per-epoch curves over the k folds
        acc = np.mean(accList, axis=0)
        accT = np.mean(accTList, axis=0)
        loss = np.mean(lossList, axis=0)
        lossT = np.mean(lossTList, axis=0)
        # print the network structure
        nn.summary()
        yPred, yPredOrig = nn.predict(X)
        print('#INFO: Mean squared error is {}'.format(nn.error(y,yPred)))
        colors = [colorBox[int(yPred[idx])] for idx in selectedIndices]
        if len(learningRate) == 1:
            # a single learning rate -> `ax` is one-dimensional
            ax[2].scatter(XR[:, 0], XR[:, 1], s=10, color=colors)
            ax[2].set_xlabel("X1")
            ax[2].set_ylabel("X2")
            ax[2].set_title("Data, LR: {}".format(lr))
            ax[0].plot(acc)
            ax[0].plot(accT)
            ax[0].legend(['Train','Test'])
            ax[0].set_xlabel("Epochs")
            ax[0].set_ylabel("Accuracy")
            ax[0].set_title("Accuracy Per Epoch"+", LR: {}".format(lr))
            ax[1].plot(loss)
            ax[1].plot(lossT)
            ax[1].legend(['Train','Test'])
            ax[1].set_xlabel("Epochs")
            ax[1].set_ylabel("Loss")
            ax[1].set_title("Loss Per Epoch"+", LR: {}".format(lr))
        else:
            # several learning rates -> `ax` is 2D, one column per rate
            ax[2,ldx].scatter(XR[:, 0], XR[:, 1], s=10, color=colors)
            ax[2,ldx].set_xlabel("X1")
            ax[2,ldx].set_ylabel("X2")
            ax[2,ldx].set_title("Data, LR: {}".format(lr))
            ax[0,ldx].plot(acc)
            ax[0,ldx].plot(accT)
            ax[0,ldx].legend(['Train','Test'])
            ax[0,ldx].set_xlabel("Epochs")
            ax[0,ldx].set_ylabel("Accuracy")
            ax[0,ldx].set_title("Accuracy Per Epoch"+", LR: {}".format(lr))
            ax[1,ldx].plot(loss)
            ax[1,ldx].plot(lossT)
            ax[1,ldx].legend(['Train','Test'])
            ax[1,ldx].set_xlabel("Epochs")
            ax[1,ldx].set_ylabel("Loss")
            ax[1,ldx].set_title("Loss Per Epoch"+", LR: {}".format(lr))
    else:
        # Perform a single run for visualization.
        nn.fit(X, y, method=gdMethod, batchSize=batchSize, numEpochs=numEpochs,
               learningRate=lr)
        # print the network structure
        nn.summary()
        yPred, yPredOrig = nn.predict(X)
        print('#INFO: Mean squared error is {}'.format(nn.error(y,yPred)))
        colors = [colorBox[int(yPred[idx])] for idx in selectedIndices]
        ax[ldx+1].scatter(XR[:, 0], XR[:, 1], s=10, color=colors)
        ax[ldx+1].set_xlabel("X1")
        ax[ldx+1].set_ylabel("X2")
        ax[ldx+1].set_title("LR: {}".format(lr))
        # Plot the mean squared error with respect to the nu
        nn.plotLoss(ax=ax[0])
        # train accuracy
        acc = nn.accuracy(y.squeeze(-1),yPred.squeeze(-1))
        print('#INFO: Train Accuracy is {}'.format(acc))
if not crossValidationFlag:
    ax[0].legend(["LR: "+str(lr) for lr in learningRate])
# please feel free to save subplots for a better report
fig.savefig('results.png')
| [
"numpy.clip",
"numpy.prod",
"numpy.sqrt",
"pandas.read_csv",
"matplotlib.pyplot.ylabel",
"copy.deepcopy",
"numpy.mean",
"numpy.multiply",
"numpy.where",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"sklearn.manifold.TSNE",
"numpy.exp",
"numpy.dot",
"numpy.random.seed",
"matplo... | [((4674, 4686), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (4684, 4686), True, 'import matplotlib.pyplot as plt\n'), ((4857, 4897), 'numpy.random.choice', 'np.random.choice', (['indices', 'visNumSamples'], {}), '(indices, visNumSamples)\n', (4873, 4897), True, 'import numpy as np\n'), ((5130, 5181), 'matplotlib.pyplot.scatter', 'plt.scatter', (['XR[:, 0]', 'XR[:, 1]'], {'s': '(10)', 'color': 'colors'}), '(XR[:, 0], XR[:, 1], s=10, color=colors)\n', (5141, 5181), True, 'import matplotlib.pyplot as plt\n'), ((5182, 5208), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""dataset.png"""'], {}), "('dataset.png')\n", (5193, 5208), True, 'import matplotlib.pyplot as plt\n'), ((886, 906), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (900, 906), True, 'import numpy as np\n'), ((911, 928), 'random.seed', 'random.seed', (['seed'], {}), '(seed)\n', (922, 928), False, 'import random\n'), ((3372, 3443), 'sklearn.datasets.make_moons', 'datasets.make_moons', ([], {'n_samples': 'nSamples', 'noise': 'noise', 'random_state': 'seed'}), '(n_samples=nSamples, noise=noise, random_state=seed)\n', (3391, 3443), False, 'from sklearn import datasets\n'), ((3564, 3587), 'random.shuffle', 'random.shuffle', (['idxList'], {}), '(idxList)\n', (3578, 3587), False, 'import random\n'), ((5236, 5257), 'numpy.expand_dims', 'np.expand_dims', (['y', '(-1)'], {}), '(y, -1)\n', (5250, 5257), True, 'import numpy as np\n'), ((4116, 4142), 'pandas.read_csv', 'pd.read_csv', (['name'], {'sep': '""","""'}), "(name, sep=',')\n", (4127, 4142), True, 'import pandas as pd\n'), ((8279, 8295), 'numpy.maximum', 'np.maximum', (['(0)', 'X'], {}), '(0, X)\n', (8289, 8295), True, 'import numpy as np\n'), ((15653, 15673), 'copy.deepcopy', 'copy.deepcopy', (['yPred'], {}), '(yPred)\n', (15666, 15673), False, 'import copy\n'), ((18633, 18657), 'numpy.mean', 'np.mean', (['accList'], {'axis': '(0)'}), '(accList, axis=0)\n', (18640, 18657), True, 'import numpy as np\n'), 
((18669, 18694), 'numpy.mean', 'np.mean', (['accTList'], {'axis': '(0)'}), '(accTList, axis=0)\n', (18676, 18694), True, 'import numpy as np\n'), ((18706, 18731), 'numpy.mean', 'np.mean', (['lossList'], {'axis': '(0)'}), '(lossList, axis=0)\n', (18713, 18731), True, 'import numpy as np\n'), ((18744, 18770), 'numpy.mean', 'np.mean', (['lossTList'], {'axis': '(0)'}), '(lossTList, axis=0)\n', (18751, 18770), True, 'import numpy as np\n'), ((3927, 3944), 'os.path.isfile', 'path.isfile', (['name'], {}), '(name)\n', (3938, 3944), False, 'from os import path\n'), ((5073, 5093), 'sklearn.manifold.TSNE', 'TSNE', ([], {'n_components': '(2)'}), '(n_components=2)\n', (5077, 5093), False, 'from sklearn.manifold import TSNE\n'), ((8864, 8886), 'numpy.power', 'np.power', (['(y - yPred)', '(2)'], {}), '(y - yPred, 2)\n', (8872, 8886), True, 'import numpy as np\n'), ((9410, 9442), 'numpy.dot', 'np.dot', (['X', "self.layers[key]['W']"], {}), "(X, self.layers[key]['W'])\n", (9416, 9442), True, 'import numpy as np\n'), ((10061, 10083), 'numpy.where', 'np.where', (['(x <= 0)', '(0)', '(1)'], {}), '(x <= 0, 0, 1)\n', (10069, 10083), True, 'import numpy as np\n'), ((11104, 11148), 'numpy.multiply', 'np.multiply', (['deWRTdyPred', 'dyPredWRTdyPredPre'], {}), '(deWRTdyPred, dyPredWRTdyPredPre)\n', (11115, 11148), True, 'import numpy as np\n'), ((16706, 16720), 'matplotlib.pyplot.plot', 'plt.plot', (['loss'], {}), '(loss)\n', (16714, 16720), True, 'import matplotlib.pyplot as plt\n'), ((16729, 16749), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Epochs"""'], {}), "('Epochs')\n", (16739, 16749), True, 'import matplotlib.pyplot as plt\n'), ((16758, 16776), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Loss"""'], {}), "('Loss')\n", (16768, 16776), True, 'import matplotlib.pyplot as plt\n'), ((16785, 16812), 'matplotlib.pyplot.title', 'plt.title', (['"""Loss Per Epoch"""'], {}), "('Loss Per Epoch')\n", (16794, 16812), True, 'import matplotlib.pyplot as plt\n'), ((16821, 16831), 
'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (16829, 16831), True, 'import matplotlib.pyplot as plt\n'), ((4518, 4530), 'numpy.unique', 'np.unique', (['y'], {}), '(y)\n', (4527, 4530), True, 'import numpy as np\n'), ((8408, 8418), 'numpy.exp', 'np.exp', (['(-X)'], {}), '(-X)\n', (8414, 8418), True, 'import numpy as np\n'), ((16555, 16573), 'numpy.sum', 'np.sum', (['(y == yPred)'], {}), '(y == yPred)\n', (16561, 16573), True, 'import numpy as np\n'), ((11533, 11555), 'numpy.dot', 'np.dot', (['dxWRTdW', 'delta'], {}), '(dxWRTdW, delta)\n', (11539, 11555), True, 'import numpy as np\n'), ((16274, 16289), 'numpy.round', 'np.round', (['yPred'], {}), '(yPred)\n', (16282, 16289), True, 'import numpy as np\n'), ((16308, 16344), 'numpy.clip', 'np.clip', (['yPred', '(0)', '(self.numClass - 1)'], {}), '(yPred, 0, self.numClass - 1)\n', (16315, 16344), True, 'import numpy as np\n'), ((7523, 7586), 'numpy.random.randn', 'np.random.randn', (['self.config[key][0][0]', 'self.config[key][0][1]'], {}), '(self.config[key][0][0], self.config[key][0][1])\n', (7538, 7586), True, 'import numpy as np\n'), ((7628, 7659), 'numpy.sqrt', 'np.sqrt', (['self.config[key][0][1]'], {}), '(self.config[key][0][1])\n', (7635, 7659), True, 'import numpy as np\n'), ((7702, 7741), 'numpy.random.randn', 'np.random.randn', (['self.config[key][0][1]'], {}), '(self.config[key][0][1])\n', (7717, 7741), True, 'import numpy as np\n'), ((8059, 8095), 'numpy.prod', 'np.prod', (["self.layers[key]['W'].shape"], {}), "(self.layers[key]['W'].shape)\n", (8066, 8095), True, 'import numpy as np\n'), ((12293, 12315), 'numpy.dot', 'np.dot', (['delta', 'dxWRTdh'], {}), '(delta, dxWRTdh)\n', (12299, 12315), True, 'import numpy as np\n'), ((12949, 12971), 'numpy.sum', 'np.sum', (['deltaB'], {'axis': '(0)'}), '(deltaB, axis=0)\n', (12955, 12971), True, 'import numpy as np\n')] |
import logging
import sys
import numpy
from processing_library.image.operations import create_empty_image_like
from rascil.processing_components.image.operations import export_image_to_fits, import_image_from_fits
import matplotlib.pyplot as plt
# Console logging: everything at INFO to stdout, but silence matplotlib's
# chatty DEBUG/INFO output.
log = logging.getLogger()
log.setLevel(logging.INFO)
log.addHandler(logging.StreamHandler(sys.stdout))
mpl_logger = logging.getLogger("matplotlib")
mpl_logger.setLevel(logging.WARNING)
from scipy import interpolate
# Elevations at which measured voltage patterns exist, and the denser grid
# they are interpolated onto (1-degree steps from 15 up to 89 degrees).
elevations_in = numpy.array([15, 45, 90], dtype='float')
elevations_out = numpy.arange(15.0, 90, 1.0)
# Index into elevations_in used as the reference pattern (1 -> 45 degrees).
default = 1
nchan = 1
npol = 4
ny = 1024
nx = 1024
# One cube per real/imag part: axes are [chan, pol, y, x, input elevation].
array_in = numpy.zeros([nchan, npol, ny, nx, len(elevations_in)])
# Input/output FITS file name templates; {el} is elevation in whole degrees,
# {type} is 'real' or 'imag'.
im_in = "../B1_{el:d}_0565_{type}.fits"
im_out = "B1_{el:d}_0565_{type}_interpolated.fits"
im_diff_out = "B1_{el:d}_0565_{type}_interpolated_difference.fits"
im_template = None
for part in ['real', 'imag']:
    # Load the measured patterns for this part.
    for iel, el in enumerate(elevations_in):
        print("Reading elevation %s part elevation %.0f" % (part, el))
        im_in_file = im_in.format(el=int(el), type=part)
        im = import_image_from_fits(im_in_file)
        array_in[..., iel] = im.data
        if im_template is None:
            im_template = create_empty_image_like(im)
    # Quadratic interpolation along the elevation axis.
    f = interpolate.interp1d(elevations_in, array_in, axis=4, kind='quadratic')
    array_out = f(elevations_out)
    rms_vp = []
    max_vp = []
    min_vp = []
    rms_diff = []
    max_diff = []
    min_diff = []
    for iel, el in enumerate(elevations_out):
        print("Writing elevation %s part %.0f" % (part, el))
        im_template.data = array_out[..., iel]
        im_out_file = im_out.format(el=int(el), type=part)
        export_image_to_fits(im_template, im_out_file)
        rms_vp.append(numpy.std(im_template.data[0,0:1,...]))
        max_vp.append(numpy.max(im_template.data[0,0:1,...]))
        min_vp.append(numpy.min(im_template.data[0,0:1,...]))
        # Difference with respect to the reference input elevation.
        im_template.data -= array_in[..., default]
        im_diff_out_file = im_diff_out.format(el=int(el), type=part)
        export_image_to_fits(im_template, im_diff_out_file)
        rms_diff.append(numpy.std(im_template.data[0,0:1,...]))
        max_diff.append(numpy.max(im_template.data[0,0:1,...]))
        min_diff.append(numpy.min(im_template.data[0,0:1,...]))
    # Summary statistics per elevation, one figure per part.
    plt.clf()
    plt.plot(elevations_out, rms_vp, '-', color='r', label='VP rms')
    if part == 'imag':
        plt.plot(elevations_out, max_vp, '.', color='g', label='VP max')
    plt.plot(elevations_out, min_vp, '-', color='b', label='VP min')
    plt.plot(elevations_out, rms_diff, '.', color='r', label='VP diff rms')
    plt.plot(elevations_out, max_diff, '.', color='g', label='VP diff max')
    plt.plot(elevations_out, min_diff, '.', color='b', label='VP diff min')
    plt.xlabel('Elevation')
    plt.ylabel('Value')
    plt.title('Statistics in %s part of 565MHz voltage pattern' % part)
    plt.legend()
    plt.savefig('%s_vp_statistics.png' % part)
plt.show(block=False) | [
"logging.getLogger",
"logging.StreamHandler",
"matplotlib.pyplot.ylabel",
"scipy.interpolate.interp1d",
"numpy.array",
"numpy.arange",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"processing_library.image.operations.create_empty_image_like",
"numpy.max",
"pprint.PrettyPrinter",
"nump... | [((256, 275), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (273, 275), False, 'import logging\n'), ((366, 397), 'logging.getLogger', 'logging.getLogger', (['"""matplotlib"""'], {}), "('matplotlib')\n", (383, 397), False, 'import logging\n'), ((456, 478), 'pprint.PrettyPrinter', 'pprint.PrettyPrinter', ([], {}), '()\n', (476, 478), False, 'import pprint\n'), ((759, 799), 'numpy.array', 'numpy.array', (['[15, 45, 90]'], {'dtype': '"""float"""'}), "([15, 45, 90], dtype='float')\n", (770, 799), False, 'import numpy\n'), ((817, 914), 'numpy.array', 'numpy.array', (['[15, 20, 25, 30, 35, 40, 45, 50, 55, 60, 65, 70, 75, 80, 85, 90]'], {'dtype': '"""float"""'}), "([15, 20, 25, 30, 35, 40, 45, 50, 55, 60, 65, 70, 75, 80, 85, 90\n ], dtype='float')\n", (828, 914), False, 'import numpy\n'), ((927, 954), 'numpy.arange', 'numpy.arange', (['(15.0)', '(90)', '(1.0)'], {}), '(15.0, 90, 1.0)\n', (939, 954), False, 'import numpy\n'), ((318, 351), 'logging.StreamHandler', 'logging.StreamHandler', (['sys.stdout'], {}), '(sys.stdout)\n', (339, 351), False, 'import logging\n'), ((1708, 1779), 'scipy.interpolate.interp1d', 'interpolate.interp1d', (['elevations_in', 'array_in'], {'axis': '(4)', 'kind': '"""quadratic"""'}), "(elevations_in, array_in, axis=4, kind='quadratic')\n", (1728, 1779), False, 'from scipy import interpolate\n'), ((2750, 2759), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (2757, 2759), True, 'import matplotlib.pyplot as plt\n'), ((2764, 2828), 'matplotlib.pyplot.plot', 'plt.plot', (['elevations_out', 'rms_vp', '"""-"""'], {'color': '"""r"""', 'label': '"""VP rms"""'}), "(elevations_out, rms_vp, '-', color='r', label='VP rms')\n", (2772, 2828), True, 'import matplotlib.pyplot as plt\n'), ((2929, 2993), 'matplotlib.pyplot.plot', 'plt.plot', (['elevations_out', 'min_vp', '"""-"""'], {'color': '"""b"""', 'label': '"""VP min"""'}), "(elevations_out, min_vp, '-', color='b', label='VP min')\n", (2937, 2993), True, 'import matplotlib.pyplot 
as plt\n'), ((2998, 3069), 'matplotlib.pyplot.plot', 'plt.plot', (['elevations_out', 'rms_diff', '"""."""'], {'color': '"""r"""', 'label': '"""VP diff rms"""'}), "(elevations_out, rms_diff, '.', color='r', label='VP diff rms')\n", (3006, 3069), True, 'import matplotlib.pyplot as plt\n'), ((3074, 3145), 'matplotlib.pyplot.plot', 'plt.plot', (['elevations_out', 'max_diff', '"""."""'], {'color': '"""g"""', 'label': '"""VP diff max"""'}), "(elevations_out, max_diff, '.', color='g', label='VP diff max')\n", (3082, 3145), True, 'import matplotlib.pyplot as plt\n'), ((3150, 3221), 'matplotlib.pyplot.plot', 'plt.plot', (['elevations_out', 'min_diff', '"""."""'], {'color': '"""b"""', 'label': '"""VP diff min"""'}), "(elevations_out, min_diff, '.', color='b', label='VP diff min')\n", (3158, 3221), True, 'import matplotlib.pyplot as plt\n'), ((3226, 3249), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Elevation"""'], {}), "('Elevation')\n", (3236, 3249), True, 'import matplotlib.pyplot as plt\n'), ((3254, 3273), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Value"""'], {}), "('Value')\n", (3264, 3273), True, 'import matplotlib.pyplot as plt\n'), ((3278, 3345), 'matplotlib.pyplot.title', 'plt.title', (["('Statistics in %s part of 565MHz voltage pattern' % type)"], {}), "('Statistics in %s part of 565MHz voltage pattern' % type)\n", (3287, 3345), True, 'import matplotlib.pyplot as plt\n'), ((3350, 3362), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (3360, 3362), True, 'import matplotlib.pyplot as plt\n'), ((3367, 3409), 'matplotlib.pyplot.savefig', 'plt.savefig', (["('%s_vp_statistics.png' % type)"], {}), "('%s_vp_statistics.png' % type)\n", (3378, 3409), True, 'import matplotlib.pyplot as plt\n'), ((3414, 3435), 'matplotlib.pyplot.show', 'plt.show', ([], {'block': '(False)'}), '(block=False)\n', (3422, 3435), True, 'import matplotlib.pyplot as plt\n'), ((1537, 1571), 'rascil.processing_components.image.operations.import_image_from_fits', 
'import_image_from_fits', (['im_in_file'], {}), '(im_in_file)\n', (1559, 1571), False, 'from rascil.processing_components.image.operations import export_image_to_fits, import_image_from_fits\n'), ((2140, 2186), 'rascil.processing_components.image.operations.export_image_to_fits', 'export_image_to_fits', (['im_template', 'im_out_file'], {}), '(im_template, im_out_file)\n', (2160, 2186), False, 'from rascil.processing_components.image.operations import export_image_to_fits, import_image_from_fits\n'), ((2501, 2552), 'rascil.processing_components.image.operations.export_image_to_fits', 'export_image_to_fits', (['im_template', 'im_diff_out_file'], {}), '(im_template, im_diff_out_file)\n', (2521, 2552), False, 'from rascil.processing_components.image.operations import export_image_to_fits, import_image_from_fits\n'), ((2860, 2924), 'matplotlib.pyplot.plot', 'plt.plot', (['elevations_out', 'max_vp', '"""."""'], {'color': '"""g"""', 'label': '"""VP max"""'}), "(elevations_out, max_vp, '.', color='g', label='VP max')\n", (2868, 2924), True, 'import matplotlib.pyplot as plt\n'), ((1667, 1694), 'processing_library.image.operations.create_empty_image_like', 'create_empty_image_like', (['im'], {}), '(im)\n', (1690, 1694), False, 'from processing_library.image.operations import create_empty_image_like\n'), ((2209, 2249), 'numpy.std', 'numpy.std', (['im_template.data[0, 0:1, ...]'], {}), '(im_template.data[0, 0:1, ...])\n', (2218, 2249), False, 'import numpy\n'), ((2271, 2311), 'numpy.max', 'numpy.max', (['im_template.data[0, 0:1, ...]'], {}), '(im_template.data[0, 0:1, ...])\n', (2280, 2311), False, 'import numpy\n'), ((2333, 2373), 'numpy.min', 'numpy.min', (['im_template.data[0, 0:1, ...]'], {}), '(im_template.data[0, 0:1, ...])\n', (2342, 2373), False, 'import numpy\n'), ((2577, 2617), 'numpy.std', 'numpy.std', (['im_template.data[0, 0:1, ...]'], {}), '(im_template.data[0, 0:1, ...])\n', (2586, 2617), False, 'import numpy\n'), ((2641, 2681), 'numpy.max', 'numpy.max', 
(['im_template.data[0, 0:1, ...]'], {}), '(im_template.data[0, 0:1, ...])\n', (2650, 2681), False, 'import numpy\n'), ((2705, 2745), 'numpy.min', 'numpy.min', (['im_template.data[0, 0:1, ...]'], {}), '(im_template.data[0, 0:1, ...])\n', (2714, 2745), False, 'import numpy\n')] |
from dolo.numeric.decision_rules_states import CDR
import numpy as np
from numpy import column_stack, row_stack, eye, zeros
from numpy import dot
def approximate_controls(model, return_dr=True):
    """First-order approximation of the decision rule around the steady state.

    Parameters
    ----------
    model : dolo-style model exposing ``calibration`` (steady-state values)
        and ``functions`` with 'transition' (g) and 'arbitrage' (f), both
        supporting ``derivs=True``.
    return_dr : bool
        Kept for backward compatibility; a CDR is always returned.

    Returns
    -------
    CDR
        Constant decision rule x = x_bar + C (s - s_bar), with the linear
        state dynamics attached as ``dr.A``, ``dr.B`` and ``dr.sigma``.
    """
    from numpy.linalg import solve

    # Steady-state calibration, reshaped to column vectors.
    p = model.calibration['parameters']
    sigma = model.calibration['covariances']
    s = model.calibration['states'][:,None]
    x = model.calibration['controls'][:,None]
    e = model.calibration['shocks'][:,None]

    g = model.functions['transition']
    f = model.functions['arbitrage']

    # First derivatives of the transition function at the steady state.
    l = g(s,x,e,p, derivs=True)
    [junk, g_s, g_x, g_e] = [el[...,0] for el in l]

    # Arbitrage derivatives; 'fg2' models take the current shock explicitly.
    if model.model_type == "fg2":
        l = f(s,x,e,s,x,p, derivs=True)
        [res, f_s, f_x, f_e, f_S, f_X] = [el[...,0] for el in l]
    else:
        l = f(s,x,s,x,p, derivs=True)
        [res, f_s, f_x, f_S, f_X] = [el[...,0] for el in l]

    n_s = g_s.shape[0] # number of states
    n_x = g_x.shape[1] # number of controls

    # Generalized eigenvalue problem A z_{t+1} = B z_t for z = (s, x).
    A = row_stack([
        column_stack( [ eye(n_s), zeros((n_s,n_x)) ] ),
        column_stack( [ -f_S , -f_X ] )
    ])
    B = row_stack([
        column_stack( [ g_s, g_x ] ),
        column_stack( [ f_s, f_x ] )
    ])

    from dolo.numeric.extern.qz import qzordered
    [S,T,Q,Z,eigval] = qzordered(A,B,n_s)
    # Discard any spurious imaginary parts from the QZ decomposition.
    Q = Q.real
    Z = Z.real
    Z11 = Z[:n_s,:n_s]
    Z21 = Z[n_s:,:n_s]
    S11 = S[:n_s,:n_s]
    T11 = T[:n_s,:n_s]

    # First-order solution: controls respond to states through C,
    # states evolve (in deviations) through P.
    C = solve(Z11.T, Z21.T).T
    P = np.dot(solve(S11.T, Z11.T).T , solve(Z11.T, T11.T).T )
    Q = g_e

    s = s.ravel()
    x = x.ravel()

    # Linearized state dynamics: s' = A (s - s_bar) + B e.
    A = g_s + dot( g_x, C )
    B = g_e

    dr = CDR([s, x, C])
    dr.A = A
    dr.B = B
    dr.sigma = sigma
    return dr
| [
"numpy.eye",
"numpy.linalg.solve",
"dolo.numeric.extern.qz.qzordered",
"numpy.column_stack",
"numpy.dot",
"numpy.zeros",
"dolo.numeric.decision_rules_states.CDR"
] | [((1352, 1372), 'dolo.numeric.extern.qz.qzordered', 'qzordered', (['A', 'B', 'n_s'], {}), '(A, B, n_s)\n', (1361, 1372), False, 'from dolo.numeric.extern.qz import qzordered\n'), ((1788, 1802), 'dolo.numeric.decision_rules_states.CDR', 'CDR', (['[s, x, C]'], {}), '([s, x, C])\n', (1791, 1802), False, 'from dolo.numeric.decision_rules_states import CDR\n'), ((1603, 1622), 'numpy.linalg.solve', 'solve', (['Z11.T', 'Z21.T'], {}), '(Z11.T, Z21.T)\n', (1608, 1622), False, 'from numpy.linalg import solve\n'), ((1752, 1763), 'numpy.dot', 'dot', (['g_x', 'C'], {}), '(g_x, C)\n', (1755, 1763), False, 'from numpy import dot\n'), ((1122, 1148), 'numpy.column_stack', 'column_stack', (['[-f_S, -f_X]'], {}), '([-f_S, -f_X])\n', (1134, 1148), False, 'from numpy import column_stack, row_stack, eye, zeros\n'), ((1205, 1229), 'numpy.column_stack', 'column_stack', (['[g_s, g_x]'], {}), '([g_s, g_x])\n', (1217, 1229), False, 'from numpy import column_stack, row_stack, eye, zeros\n'), ((1243, 1267), 'numpy.column_stack', 'column_stack', (['[f_s, f_x]'], {}), '([f_s, f_x])\n', (1255, 1267), False, 'from numpy import column_stack, row_stack, eye, zeros\n'), ((1640, 1659), 'numpy.linalg.solve', 'solve', (['S11.T', 'Z11.T'], {}), '(S11.T, Z11.T)\n', (1645, 1659), False, 'from numpy.linalg import solve\n'), ((1664, 1683), 'numpy.linalg.solve', 'solve', (['Z11.T', 'T11.T'], {}), '(Z11.T, T11.T)\n', (1669, 1683), False, 'from numpy.linalg import solve\n'), ((1082, 1090), 'numpy.eye', 'eye', (['n_s'], {}), '(n_s)\n', (1085, 1090), False, 'from numpy import column_stack, row_stack, eye, zeros\n'), ((1092, 1109), 'numpy.zeros', 'zeros', (['(n_s, n_x)'], {}), '((n_s, n_x))\n', (1097, 1109), False, 'from numpy import column_stack, row_stack, eye, zeros\n')] |
import os
import cv2
import numpy as np
import random
import torch
import torchvision.transforms as transforms
import matplotlib.pyplot as plt
import torch.tensor as Tensor
import dl_modules.transforms as trf
import dl_modules.realsr as realsr
import cm_modules.utils as utils
from torch.utils.data import Dataset as BaseDataset
from torch.utils.data import Subset
# import torch.nn.functional as F
# import dl_modules.loss as loss
# import time
def imshow(img: Tensor) -> None:
    """Denormalize a [-1, 1] image tensor and display it with matplotlib."""
    if len(img.shape) > 3:
        # Drop a leading batch dimension (and any other size-1 axes).
        img = img.squeeze()
    # Undo the (x - 0.5) / 0.5 normalization, clamping into [0, 1].
    denorm = torch.clamp(img * 0.5 + 0.5, 0, 1)
    # CHW tensor -> HWC numpy array for plt.imshow.
    hwc = np.transpose(denorm.cpu().numpy(), (1, 2, 0))
    plt.imshow(hwc)
    plt.show()
class Dataset(BaseDataset):
    """Super-resolution training dataset.

    Reads HR images from a folder, optionally crops/augments them, and
    returns normalized (LR input, HR ground truth) tensor pairs.

    Args:
        images_dir (str): path to the HR images folder
        scale (int): SR downscaling factor
        normalization (callable): tensor normalization; defaults to
            ``get_normalization()``
        transform (callable): ground-truth transform (typically a crop)
        augmentation (callable): LR-side augmentation
        downscaling (str): one of 'bicubic', 'kernel', 'kernel_even', 'none'
        aspect_ratio (float): pixel aspect ratio change for the LR image
        extra_scale (float): additional LR scaling for non-integer SR factors
    """
    def __init__(
        self,
        images_dir,
        scale,
        normalization=None,
        transform=None,
        augmentation=None,
        downscaling='bicubic',
        aspect_ratio=1.0,
        extra_scale=1.0
    ):
        image_exts = ('.png', '.jpg', '.jpeg', '.gif', '.bmp')
        # Keep only recognized image files, in deterministic (sorted) order.
        self.ids = sorted(
            fname for fname in os.listdir(images_dir)
            if fname.lower().endswith(image_exts)
        )
        self.images_fps = [os.path.join(images_dir, fname) for fname in self.ids]
        self.transform = transform
        self.augmentation = augmentation
        self.normalization = (get_normalization() if normalization is None
                              else normalization)
        self.scale = scale
        self.downscaling = downscaling
        self.ar = aspect_ratio
        self.es = extra_scale

    def random_n_samples(self, count: int):
        """Draw `count` random samples and stack them into two batch tensors."""
        pairs = [self[random.randrange(0, len(self))] for _ in range(count)]
        batch_in = torch.stack([pair[0] for pair in pairs])
        batch_gt = torch.stack([pair[1] for pair in pairs])
        return batch_in, batch_gt

    def __getitem__(self, i):
        """Load sample i and return its (LR input, HR ground truth) pair."""
        raw = cv2.cvtColor(cv2.imread(self.images_fps[i]), cv2.COLOR_BGR2RGB)
        if self.transform is not None:
            raw = self.transform(image=raw, uid=self.ids[i])["image"]
        # Both tensors start from the same (possibly cropped) HR image.
        in_image = self.normalization(raw)
        gt = self.normalization(raw)
        # Derive the LR input according to the configured method.
        if self.downscaling == 'bicubic':
            in_image = utils.scale(in_image, aspect_ratio=self.ar,
                                    extra_scale=self.es / self.scale)
        elif self.downscaling == 'kernel':
            in_image = utils.scale(in_image, aspect_ratio=self.ar,
                                    extra_scale=self.es)
            in_image = realsr.apply_kernel(in_image, kernel_storage)
        elif self.downscaling == 'kernel_even':
            in_image = utils.scale(in_image, aspect_ratio=self.ar,
                                    extra_scale=self.es, even_rounding=True)
            in_image = realsr.apply_kernel(in_image, kernel_storage)
        if self.augmentation is not None:
            # Augmentation works on 8-bit RGB arrays, so round-trip through cv2.
            as_uint8 = cv2.cvtColor(utils.convert_to_cv_8bit(in_image),
                                    cv2.COLOR_BGR2RGB)
            in_image = self.augmentation(image=as_uint8)["image"]
            in_image = self.normalization(in_image)
        return in_image, gt

    def __len__(self):
        return len(self.ids)
class ValidDataset(BaseDataset):
    """Paired HR/LR validation dataset.

    Reads matching file names from `hr_dir` and `lr_dir` (pairs share the
    same file name) and returns normalized (LR, HR) tensor pairs.

    Args:
        hr_dir (str): path to HR images folder
        lr_dir (str): path to LR images folder
        normalization (callable): image normalization; defaults to
            ``get_normalization()``
        transform (callable): ground truth transform
    """
    def __init__(
        self,
        hr_dir,
        lr_dir,
        normalization=None,
        transform=None
    ):
        # Sort for a deterministic sample order: os.listdir order is
        # arbitrary, and Dataset sorts too — keep the two consistent.
        self.ids = sorted(os.listdir(hr_dir))
        self.hr_fps = [os.path.join(hr_dir, image_id) for image_id in self.ids]
        self.lr_fps = [os.path.join(lr_dir, image_id) for image_id in self.ids]
        self.transform = transform
        if normalization is None:
            self.normalization = get_normalization()
        else:
            self.normalization = normalization

    def __getitem__(self, i):
        """Return the (LR input, HR ground truth) pair for sample i."""
        gt = cv2.imread(self.hr_fps[i])
        gt = cv2.cvtColor(gt, cv2.COLOR_BGR2RGB)
        in_image = cv2.imread(self.lr_fps[i])
        in_image = cv2.cvtColor(in_image, cv2.COLOR_BGR2RGB)
        if self.transform is not None:
            gt = self.transform(image=gt)["image"]
        gt = self.normalization(gt)
        in_image = self.normalization(in_image)
        return in_image, gt

    def __len__(self):
        return len(self.ids)
def get_normalization() -> transforms.Compose:
    """Return the default preprocessing pipeline.

    Converts an HWC uint8 image to a CHW float tensor in [0, 1] and
    normalizes each channel from [0, 1] to [-1, 1] (mean 0.5, std 0.5).
    Note: ``transforms.Compose`` is a plain callable, not a
    ``torch.nn.Module``, so the annotation reflects that.
    """
    return transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
    ])
def init_data():
    """Build all module-level datasets and loaders from the configuration
    constants below (directories, batch sizes, crop/scale parameters).

    Populates the module globals: train_set/loader, valid_set/loader,
    noise_set, kernel_storage, predict_set/loader.
    """
    global train_set, train_loader, valid_set, valid_loader, \
        noise_set, kernel_storage, predict_set, predict_loader
    # Training set: kernel-based downscaling with even rounding, plus
    # content-aware cropping and LR-side augmentation.
    train_set = Dataset(train_dir, scale=scale,
                        transform=trf.get_training_transform(crop_size, crop_kernel_size, bg_prob),
                        augmentation=trf.get_input_image_augmentation(),
                        downscaling='kernel_even',
                        aspect_ratio=aspect_ratio,
                        extra_scale=extra_scale)
    # A size of 0 means "use the whole set".
    if train_set_size != 0:
        train_set = Subset(train_set, list(range(train_set_size)))
    train_loader = torch.utils.data.DataLoader(train_set, batch_size=train_batch_size,
                                                shuffle=True, num_workers=2)
    valid_set = ValidDataset(hr_dir=valid_hr_dir, lr_dir=valid_lr_dir)
    if valid_set_size != 0:
        valid_set = Subset(valid_set, list(range(valid_set_size)))
    valid_loader = torch.utils.data.DataLoader(valid_set, batch_size=valid_batch_size,
                                                shuffle=False, num_workers=0)
    # Noise patches are sampled at the LR resolution of a training crop.
    noise_patch_size = utils.even_round(crop_size * extra_scale * aspect_ratio,
                                     crop_size * extra_scale)
    noise_patch_size[0] //= scale
    noise_patch_size[1] //= scale
    noise_set = Dataset(noise_train_dir, scale=scale,
                        normalization=realsr.get_noise_normalization(),
                        transform=trf.get_training_noise_transform(*noise_patch_size),
                        downscaling='none')
    # Estimated downscaling kernels used by the 'kernel*' modes above.
    kernel_storage = realsr.Kernels(kernel_train_dir, scale=scale, count=realsr.kernel_count)
    predict_set = Dataset(predict_dir, scale=scale,
                          transform=trf.get_predict_transform(*predict_res),
                          downscaling='none')
    predict_loader = torch.utils.data.DataLoader(predict_set, batch_size=valid_batch_size,
                                                shuffle=False, num_workers=0)
# Look at images we have
# not_trf_set = Dataset(train_dir, scale=scale,
# augmentation=get_input_image_augmentation())
#
# image_in, image_out = not_trf_set[0] # get some sample
# imshow(image_in)
# imshow(image_out)
# Visualize augmented images
# n_img = 3
# # idxs = [51, 484, 488]
# idxs = [random.randrange(len(train_set)) for _ in range(n_img)]
# start = time.perf_counter()
# for i in range(n_img * 2):
# image_in, image_out = train_set[idxs[i % n_img]]
# # image_in = realsr.inject_noise(image_in.unsqueeze(0), noise_set)
# image_in = image_in.unsqueeze(0)
# utils.imwrite(
# SAVE_DIR + 'data/output/%d_lr_scaled.png' % i,
# F.interpolate(
# image_in, size=(crop_size // scale, crop_size // scale), mode='bicubic', align_corners=True
# )
# )
# utils.imwrite(
# SAVE_DIR + 'data/output/%d_lr.png' % i,
# image_in
# )
# utils.imwrite(
# SAVE_DIR + 'data/output/%d_hr.png' % i,
# image_out
# )
# if i % n_img == n_img - 1:
# print(time.perf_counter() - start)
# start = time.perf_counter()
# edge_loss = loss.EdgeLoss()
# for i in range(19, 20):
# image_in, image_out = train_set[random.randrange(len(train_set))]
# lr = F.interpolate(
# image_in.unsqueeze(0), size=(crop_size, crop_size), mode='bicubic', align_corners=True
# )
# utils.imwrite(
# SAVE_DIR + 'data/output/%d_lr.png' % i,
# lr
# )
# utils.imwrite(
# SAVE_DIR + 'data/output/%d_hr.png' % i,
# image_out
# )
# print(edge_loss(lr, image_out.unsqueeze(0)))
# Root for datasets and outputs (alternatives kept for other environments).
# SAVE_DIR = ''
SAVE_DIR = '../drive/MyDrive/'
# SAVE_DIR = '/cache/shipilov/'
# train_dir = os.path.join(SAVE_DIR, 'data/Cossette/Cossette_train_HR')
# valid_hr_dir = os.path.join(SAVE_DIR, 'data/Cossette/Cossette_valid_HR')
# valid_lr_dir = os.path.join(SAVE_DIR, 'data/Cossette/Cossette_valid_LR')
train_dir = os.path.join(SAVE_DIR, 'data/Bakemonogatari_1000/Bakemonogatari_train_HR')
valid_hr_dir = os.path.join(SAVE_DIR, 'data/Bakemonogatari_1000/Bakemonogatari_valid_HR')
valid_lr_dir = os.path.join(SAVE_DIR, 'data/Bakemonogatari_1000/Bakemonogatari_valid_LR')
kernel_train_dir = os.path.join(SAVE_DIR, 'data/AniBoters/SoulTaker_train_kernel')
kernel_valid_dir = os.path.join(SAVE_DIR, 'data/AniBoters/SoulTaker_valid_kernel')
noise_train_dir = os.path.join(SAVE_DIR, 'data/AniBoters/Filtered/SoulTaker_train_noise')
noise_valid_dir = os.path.join(SAVE_DIR, 'data/AniBoters/Filtered/SoulTaker_valid_noise')
predict_dir = os.path.join(SAVE_DIR, 'data/predict')
# Load datasets
train_batch_size = 32
valid_batch_size = 1 # Better leave it 1, otherwise many things won't work
crop_size = 64 # Training crop HR size
scale = 2 # General SR upscaling parameter
extra_scale = 480 / (1080 / 2) # Extra downscaling in training
aspect_ratio = (712 / 480) / (16 / 9) # Aspect ratio change (anamorphic encoding)
predict_res = (1920 // scale, 1080 // scale) # Prediction resolution
# predict_res = (712, 480)
crop_kernel_size = 61 # Content-wise crop parameter, larger value - more distributed crop
bg_prob = 0.0 # Content crop probability of background
# 0 means "use the full dataset" (see init_data).
train_set_size = 0
valid_set_size = 0
# Placeholders for the datasets/loaders; populated by init_data().
train_set = None
train_loader = None
valid_set = None
valid_loader = None
noise_set = None
kernel_storage = None
predict_set = None
predict_loader = None
| [
"cm_modules.utils.even_round",
"dl_modules.realsr.Kernels",
"dl_modules.transforms.get_predict_transform",
"os.listdir",
"cm_modules.utils.convert_to_cv_8bit",
"dl_modules.realsr.apply_kernel",
"dl_modules.transforms.get_training_transform",
"cm_modules.utils.scale",
"torchvision.transforms.ToTensor... | [((9955, 10029), 'os.path.join', 'os.path.join', (['SAVE_DIR', '"""data/Bakemonogatari_1000/Bakemonogatari_train_HR"""'], {}), "(SAVE_DIR, 'data/Bakemonogatari_1000/Bakemonogatari_train_HR')\n", (9967, 10029), False, 'import os\n'), ((10045, 10119), 'os.path.join', 'os.path.join', (['SAVE_DIR', '"""data/Bakemonogatari_1000/Bakemonogatari_valid_HR"""'], {}), "(SAVE_DIR, 'data/Bakemonogatari_1000/Bakemonogatari_valid_HR')\n", (10057, 10119), False, 'import os\n'), ((10135, 10209), 'os.path.join', 'os.path.join', (['SAVE_DIR', '"""data/Bakemonogatari_1000/Bakemonogatari_valid_LR"""'], {}), "(SAVE_DIR, 'data/Bakemonogatari_1000/Bakemonogatari_valid_LR')\n", (10147, 10209), False, 'import os\n'), ((10229, 10292), 'os.path.join', 'os.path.join', (['SAVE_DIR', '"""data/AniBoters/SoulTaker_train_kernel"""'], {}), "(SAVE_DIR, 'data/AniBoters/SoulTaker_train_kernel')\n", (10241, 10292), False, 'import os\n'), ((10312, 10375), 'os.path.join', 'os.path.join', (['SAVE_DIR', '"""data/AniBoters/SoulTaker_valid_kernel"""'], {}), "(SAVE_DIR, 'data/AniBoters/SoulTaker_valid_kernel')\n", (10324, 10375), False, 'import os\n'), ((10395, 10466), 'os.path.join', 'os.path.join', (['SAVE_DIR', '"""data/AniBoters/Filtered/SoulTaker_train_noise"""'], {}), "(SAVE_DIR, 'data/AniBoters/Filtered/SoulTaker_train_noise')\n", (10407, 10466), False, 'import os\n'), ((10486, 10557), 'os.path.join', 'os.path.join', (['SAVE_DIR', '"""data/AniBoters/Filtered/SoulTaker_valid_noise"""'], {}), "(SAVE_DIR, 'data/AniBoters/Filtered/SoulTaker_valid_noise')\n", (10498, 10557), False, 'import os\n'), ((10572, 10610), 'os.path.join', 'os.path.join', (['SAVE_DIR', '"""data/predict"""'], {}), "(SAVE_DIR, 'data/predict')\n", (10584, 10610), False, 'import os\n'), ((547, 579), 'torch.clamp', 'torch.clamp', (['(img / 2 + 0.5)', '(0)', '(1)'], {}), '(img / 2 + 0.5, 0, 1)\n', (558, 579), False, 'import torch\n'), ((661, 671), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', 
(669, 671), True, 'import matplotlib.pyplot as plt\n'), ((6413, 6514), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['train_set'], {'batch_size': 'train_batch_size', 'shuffle': '(True)', 'num_workers': '(2)'}), '(train_set, batch_size=train_batch_size, shuffle\n =True, num_workers=2)\n', (6440, 6514), False, 'import torch\n'), ((6743, 6845), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['valid_set'], {'batch_size': 'valid_batch_size', 'shuffle': '(False)', 'num_workers': '(0)'}), '(valid_set, batch_size=valid_batch_size, shuffle\n =False, num_workers=0)\n', (6770, 6845), False, 'import torch\n'), ((6912, 6997), 'cm_modules.utils.even_round', 'utils.even_round', (['(crop_size * extra_scale * aspect_ratio)', '(crop_size * extra_scale)'], {}), '(crop_size * extra_scale * aspect_ratio, crop_size *\n extra_scale)\n', (6928, 6997), True, 'import cm_modules.utils as utils\n'), ((7380, 7452), 'dl_modules.realsr.Kernels', 'realsr.Kernels', (['kernel_train_dir'], {'scale': 'scale', 'count': 'realsr.kernel_count'}), '(kernel_train_dir, scale=scale, count=realsr.kernel_count)\n', (7394, 7452), True, 'import dl_modules.realsr as realsr\n'), ((7650, 7753), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['predict_set'], {'batch_size': 'valid_batch_size', 'shuffle': '(False)', 'num_workers': '(0)'}), '(predict_set, batch_size=valid_batch_size,\n shuffle=False, num_workers=0)\n', (7677, 7753), False, 'import torch\n'), ((625, 655), 'numpy.transpose', 'np.transpose', (['npimg', '(1, 2, 0)'], {}), '(npimg, (1, 2, 0))\n', (637, 655), True, 'import numpy as np\n'), ((2876, 2906), 'cv2.imread', 'cv2.imread', (['self.images_fps[i]'], {}), '(self.images_fps[i])\n', (2886, 2906), False, 'import cv2\n'), ((2920, 2955), 'cv2.cvtColor', 'cv2.cvtColor', (['gt', 'cv2.COLOR_BGR2RGB'], {}), '(gt, cv2.COLOR_BGR2RGB)\n', (2932, 2955), False, 'import cv2\n'), ((4736, 4754), 'os.listdir', 'os.listdir', (['hr_dir'], {}), '(hr_dir)\n', (4746, 4754), 
False, 'import os\n'), ((5163, 5189), 'cv2.imread', 'cv2.imread', (['self.hr_fps[i]'], {}), '(self.hr_fps[i])\n', (5173, 5189), False, 'import cv2\n'), ((5203, 5238), 'cv2.cvtColor', 'cv2.cvtColor', (['gt', 'cv2.COLOR_BGR2RGB'], {}), '(gt, cv2.COLOR_BGR2RGB)\n', (5215, 5238), False, 'import cv2\n'), ((5259, 5285), 'cv2.imread', 'cv2.imread', (['self.lr_fps[i]'], {}), '(self.lr_fps[i])\n', (5269, 5285), False, 'import cv2\n'), ((5305, 5346), 'cv2.cvtColor', 'cv2.cvtColor', (['in_image', 'cv2.COLOR_BGR2RGB'], {}), '(in_image, cv2.COLOR_BGR2RGB)\n', (5317, 5346), False, 'import cv2\n'), ((2099, 2133), 'os.path.join', 'os.path.join', (['images_dir', 'image_id'], {}), '(images_dir, image_id)\n', (2111, 2133), False, 'import os\n'), ((2774, 2793), 'torch.stack', 'torch.stack', (['inputs'], {}), '(inputs)\n', (2785, 2793), False, 'import torch\n'), ((2795, 2811), 'torch.stack', 'torch.stack', (['gts'], {}), '(gts)\n', (2806, 2811), False, 'import torch\n'), ((3238, 3315), 'cm_modules.utils.scale', 'utils.scale', (['in_image'], {'aspect_ratio': 'self.ar', 'extra_scale': '(self.es / self.scale)'}), '(in_image, aspect_ratio=self.ar, extra_scale=self.es / self.scale)\n', (3249, 3315), True, 'import cm_modules.utils as utils\n'), ((4778, 4808), 'os.path.join', 'os.path.join', (['hr_dir', 'image_id'], {}), '(hr_dir, image_id)\n', (4790, 4808), False, 'import os\n'), ((4858, 4888), 'os.path.join', 'os.path.join', (['lr_dir', 'image_id'], {}), '(lr_dir, image_id)\n', (4870, 4888), False, 'import os\n'), ((5689, 5710), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (5708, 5710), True, 'import torchvision.transforms as transforms\n'), ((5720, 5774), 'torchvision.transforms.Normalize', 'transforms.Normalize', (['(0.5, 0.5, 0.5)', '(0.5, 0.5, 0.5)'], {}), '((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))\n', (5740, 5774), True, 'import torchvision.transforms as transforms\n'), ((6009, 6073), 'dl_modules.transforms.get_training_transform', 'trf.get_training_transform', 
(['crop_size', 'crop_kernel_size', 'bg_prob'], {}), '(crop_size, crop_kernel_size, bg_prob)\n', (6035, 6073), True, 'import dl_modules.transforms as trf\n'), ((6112, 6146), 'dl_modules.transforms.get_input_image_augmentation', 'trf.get_input_image_augmentation', ([], {}), '()\n', (6144, 6146), True, 'import dl_modules.transforms as trf\n'), ((7194, 7226), 'dl_modules.realsr.get_noise_normalization', 'realsr.get_noise_normalization', ([], {}), '()\n', (7224, 7226), True, 'import dl_modules.realsr as realsr\n'), ((7262, 7313), 'dl_modules.transforms.get_training_noise_transform', 'trf.get_training_noise_transform', (['*noise_patch_size'], {}), '(*noise_patch_size)\n', (7294, 7313), True, 'import dl_modules.transforms as trf\n'), ((7542, 7581), 'dl_modules.transforms.get_predict_transform', 'trf.get_predict_transform', (['*predict_res'], {}), '(*predict_res)\n', (7567, 7581), True, 'import dl_modules.transforms as trf\n'), ((1758, 1780), 'os.listdir', 'os.listdir', (['images_dir'], {}), '(images_dir)\n', (1768, 1780), False, 'import os\n'), ((3417, 3481), 'cm_modules.utils.scale', 'utils.scale', (['in_image'], {'aspect_ratio': 'self.ar', 'extra_scale': 'self.es'}), '(in_image, aspect_ratio=self.ar, extra_scale=self.es)\n', (3428, 3481), True, 'import cm_modules.utils as utils\n'), ((3540, 3585), 'dl_modules.realsr.apply_kernel', 'realsr.apply_kernel', (['in_image', 'kernel_storage'], {}), '(in_image, kernel_storage)\n', (3559, 3585), True, 'import dl_modules.realsr as realsr\n'), ((3925, 3959), 'cm_modules.utils.convert_to_cv_8bit', 'utils.convert_to_cv_8bit', (['in_image'], {}), '(in_image)\n', (3949, 3959), True, 'import cm_modules.utils as utils\n'), ((3657, 3745), 'cm_modules.utils.scale', 'utils.scale', (['in_image'], {'aspect_ratio': 'self.ar', 'extra_scale': 'self.es', 'even_rounding': '(True)'}), '(in_image, aspect_ratio=self.ar, extra_scale=self.es,\n even_rounding=True)\n', (3668, 3745), True, 'import cm_modules.utils as utils\n'), ((3800, 3845), 
'dl_modules.realsr.apply_kernel', 'realsr.apply_kernel', (['in_image', 'kernel_storage'], {}), '(in_image, kernel_storage)\n', (3819, 3845), True, 'import dl_modules.realsr as realsr\n')] |
"from pdb structure extract feature include: ca mask, ca distance, angle, hsea, hseb, residue depth. save .npy file "
#from Bio.PDB.MMCIF2Dict import MMCIF2Dict
#from Bio.PDB.MMCIFParser import MMCIFParser
#from Bio import SeqIO
from Bio.PDB.PDBParser import PDBParser
import numpy as np
#import math
import sys
import time
import Bio
from module import *
t_dic={'ALA':'A','VAL':'V','LEU':'L','ILE':'I','PHE':'F','TRP':'W','MET':'M','PRO':'P',\
'GLY':'G','SER':'S','THR':'T','CYS':'C','TYR':'Y','ASN':'N','GLN':'Q','HIS':'H',\
'LYS':'K','ARG':'R','ASP':'D','GLU':'E'}
path = "/home/cxy/旧电脑/PycharmProjects/gsf/pdb_/"
pdb_list_file = "file/cullpdb_pc25_res2.0_R0.25_d181126_chains9311"
if __name__ == '__main__':
p = PDBParser(PERMISSIVE=0)
pdb_id, pdb_chain = get_id_chain_name(pdb_list_file)
for i in range(len(pdb_id)):
if len(pdb_id[i]) !=4:
continue
pdb_name=path + "pdb"+pdb_id[i].lower()+'.ent'
print(pdb_name)
try:
s = p.get_structure("1",pdb_name) #read pdb struture
s = s[0][pdb_chain[i]] #choose chain
res_list = PDB.Selection.unfold_entities(s, 'R') #read aminoacid
except:
print("read %s fail! " % pdb_name)
continue
aa_list = get_aa_list(res_list)
aa_list_full = check_aa_id(aa_list)
if not aa_list_full:
print("aa_list error!")
continue
dps = cal_depth(s, aa_list_full)
hse_a, hse_b = cal_hseab(s, aa_list_full)
seq_list = get_seq(aa_list_full)
ca_list = get_atom_list(aa_list_full,'CA')
cb_list = get_atom_list(aa_list_full,'CB')
c_list = get_atom_list(aa_list_full,'C')
n_list = get_atom_list(aa_list_full,'N')
ca_dist = cal_dist(ca_list)
mask = get_mask(ca_list)
ids=ca_dist==None
ca_dist[ids]=100 #算不出来距离的设置为100
ca_dist_cs=[]
angle_cs=[]
num_cs=[]
for j in range(len(ca_dist)):
t = ca_dist[j]
s=t.argsort()
aa_num24 = s[1:25]
ca_dist_cs.append(t[s[1:25]])
angle_d = get_angle5_ceshi(aa_num24, ca_list, cb_list, n_list, c_list, j)
angle_d = np.array(list(angle_d))
angle_cs.append(angle_d)
#angle_cs.append(angle_d[j][s[1:17]])
#print(angle_d[j][s[1:17]])
num_cs.append(s[1:25])
dic_r={}
dic_r['dis']=ca_dist_cs #距离
dic_r['angle']=angle_cs #角度
dic_r['mask']=mask #标记ca原子,1有,0无
dic_r['ids']=num_cs # 氨基酸序号
dic_r['seq']=seq_list #序列
dic_r['dps']=dps #氨基酸深度
dic_r['hsea']=hse_a #裸球暴露面积
dic_r['hseb']=hse_b
out_name='pdb_other_cb/'+pdb_id[i].lower()+pdb_chain[i]+'_all_c.npy'
np.save(out_name,dic_r)
print("cal finish!")
| [
"Bio.PDB.PDBParser.PDBParser",
"numpy.save"
] | [((738, 761), 'Bio.PDB.PDBParser.PDBParser', 'PDBParser', ([], {'PERMISSIVE': '(0)'}), '(PERMISSIVE=0)\n', (747, 761), False, 'from Bio.PDB.PDBParser import PDBParser\n'), ((2889, 2913), 'numpy.save', 'np.save', (['out_name', 'dic_r'], {}), '(out_name, dic_r)\n', (2896, 2913), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
"""
Created 2018/12 by Shintaro
Modified 2021/02 by Hermann for usage at Wodan; look for "HE:"
"""
from qcodes import Instrument, validators as vals
from qcodes.instrument.channel import InstrumentChannel, ChannelList
from qcodes.utils.validators import Validator
from qcodes.instrument.parameter import ArrayParameter
from typing import List, Dict, Callable, Union
from nifpga import Session
from nifpga import nifpga
import time
import numpy as np
import logging
log = logging.getLogger(__name__)  # module-level logger

# Path to the compiled LabVIEW FPGA bitfile loaded when a session is opened.
bit_file = '..\\tools\\drivers\\fpgabatchhewodan_sbRIO9612RIO0_hewodan_kUFBPXPrLOs.lvbitx'
# Default IP address of the NI sbRIO-9612 board (can be checked in NI MAX).
ip_address = '192.168.0.3'
# Number of DAC channels on each panel/bus.
channels_per_panel = 8
"""-------------------------
Utility functions
-------------------------"""
def split_number(a, size = 32):
    """
    Split an unsigned integer into its upper and lower halves.
    For example a 32 bit uint becomes two 16 bit uints.
    Bits at positions >= size are ignored, as in the original bit-loop
    implementation; this version uses a single shift/mask instead of
    iterating over every bit.
    Args:
        a: Input number (int or numpy unsigned scalar)
        size: bit size of the input number (64, 32 or 16 yield numpy
              uint32/uint16/uint8 pairs; any other size yields plain ints)
    Returns:
        b: upper half (bits size//2 .. size-1)
        c: lower half (bits 0 .. size//2 - 1)
    """
    half = size // 2
    # Mask off anything above 'size' bits, then split at 'half'.
    b = (int(a) >> half) & ((1 << (size - half)) - 1)
    c = int(a) & ((1 << half) - 1)
    if size == 64:
        b = np.uint32(b)
        c = np.uint32(c)
    elif size == 32:
        b = np.uint16(b)
        c = np.uint16(c)
    elif size == 16:
        b = np.uint8(b)
        c = np.uint8(c)
    return b, c
def join_numbers(a, b, final_size=32):
    """
    Join 2 numbers into one of double bit size.
    Each input is masked to final_size//2 bits (matching the original
    behaviour of casting through the half-size numpy unsigned type).
    Args:
        a: input1 (becomes the upper bits)
        b: input2 (becomes the lower bits)
        final_size: bit size of the returned number (64, 32 or 16)
    Returns:
        c: joined number as np.uint64 / np.uint32 / np.uint16
    Raises:
        ValueError: if final_size is not 16, 32 or 64 (the original
        silently hit a NameError in that case).
    """
    dtypes = {64: np.uint64, 32: np.uint32, 16: np.uint16}
    if final_size not in dtypes:
        raise ValueError('final_size must be 16, 32 or 64')
    half = final_size // 2
    mask = (1 << half) - 1
    joined = ((int(a) & mask) << half) | (int(b) & mask)
    return dtypes[final_size](joined)
return c
def join_8_8bit264bit(a, b, c, d, e, f, g, h):
    """
    Pack 8 unsigned 8-bit values into one unsigned 64-bit integer.
    Argument 'a' ends up in the most significant byte, 'h' in the least.
    Args:
        a..h: 8-bit unsigned integers
    Returns:
        64-bit unsigned integer built by pairwise joining
    """
    upper = join_numbers(join_numbers(a, b, 16), join_numbers(c, d, 16), 32)
    lower = join_numbers(join_numbers(e, f, 16), join_numbers(g, h, 16), 32)
    return join_numbers(upper, lower, 64)
def ms2FS_divider(ms: Union[int, float] = 3.0) -> int:
    """
    Convert a pulse duration (ms) into the FPGA fast-sequence divider value.
    Calibration points: 3 ms -> 6661 ticks, 20 ms -> 44439 ticks.
    Args:
        ms (float): duration between trigger pulses for ramp mode
                    (trigger 1, active when it is off).
    Return:
        divider (int); a negative input falls back to 220 (about 100 us).
    """
    if ms < 0:
        # Negative request: clamp to the minimum (~100 us).
        return 220
    if ms < 10.0:
        return int(ms / 3 * 6661)
    return int(ms / 20 * 44439)
"""----------------
Define classes
------------------"""
class NEEL_DAC_channel(InstrumentChannel):
    """
    This class holds information about each DAC channel.
    Exposes one qcodes parameter 'v' (the DAC output voltage).
    Args:
        parent (InstrumentChannel): NEEL_DAC_Bus
        name (str): name of the channel
        channel (int): channel number (0 ~ 7)
        value (float): initial cached output value of the DAC.
        vmax (float): upper validator bound for 'v' (V).
        vmin (float): lower validator bound for 'v' (V).
        alias (str): optional short-cut name for this channel.
    """
    def __init__(self,
                 parent: InstrumentChannel,
                 name:str,
                 channel:int,
                 value:float=-0.0003,
                 vmax:float=5.0,
                 vmin:float=-5.0,
                 alias:str=None,
                 **kwargs) -> None:
        super().__init__(parent, name, **kwargs)
        # Root NEEL_DAC instrument (the bus stores it in .dac).
        self.dac = self._parent.dac
        # Panel (bus) number and channel number identify this output on the FPGA.
        self.panel = self._parent.bus_number
        self.channel = channel
        # Cached output value; the hardware is not read back.
        self.val = value
        self.alias = alias
        self.add_parameter('v',
                           label='Value',
                           unit='V',
                           scale = 1.0,
                           get_cmd = self.get_value,
                           set_cmd = self.set_value,
                           get_parser = float,
                           set_parser = float,
                           vals = vals.Numbers(vmin, vmax),
                           )
    def get_value(self):
        # Return the cached value (no hardware read-back).
        return self.val
    def set_value(self, val:float):
        #print(self.panel,self.channel,val)
        # Set DAC value if it is not np.nan (nan means "leave unchanged").
        if not np.isnan(val):
            self.dac.DAC_set_value(panel_channel={'panel':self.panel, 'channel':self.channel},
                                   DAC_goto_value=val)
            #self.dac.move() # HE: let it move when set.
            self.val = val
class NEEL_DAC_Bus(InstrumentChannel):
    """
    This class holds information about a bus (panel) containing 8 DAC channels,
    exposed as submodules c0..c7.
    Args:
        parent (Instrument): NEEL_DAC
        name (str): name of the bus
        bus_number (int): bus number (typically 0 ~ 4, max 7)
    """
    def __init__(self, parent: Instrument, name:str, bus_number:int, **kwargs) -> None:
        super().__init__(parent, name, **kwargs)
        # Reference to the root NEEL_DAC instrument (used by the channels).
        self.dac = self._parent
        self.bus_number = bus_number
        # Add dummy parameter since we get error with snapshot without it.
        self.add_parameter('dummy',
                           label='dummy',
                           get_cmd = self.get_dummy,
                           get_parser = int,
                           )
        # Create the 8 channels of this bus as submodules c0..c7.
        for channel in range(8):
            s = 'c{:d}'.format(channel)
            channel_instance = NEEL_DAC_channel(self, s, channel)
            self.add_submodule(s, channel_instance)
    def get_dummy(self):
        # Constant getter backing the snapshot-only 'dummy' parameter.
        return 0
class NEEL_DAC(Instrument):
    """
    This is the qcodes driver for NEEL DAC controlled by National Instruments single board RIO 9612.
    Args:
        name (str): name of the instrument
        bitFilePath(str): path to the bit file
        address (str): IP address of NI sbrio9612 (can be checked by NI MAX)
        LI_frequency (float): lock-in frequency
        LI_amplitude (float): lock-in amplitude
        LI_channel (list): [panel, channel] pair for the lock-in output
        LI_status (bool): status of lock-in (On: True, Off: False)
        used_buses (List[int]): list of DAC buses to be used
        ms2wait (int): wait time between each DAC bit movement
        v (dict): dictionary of short-cut-references to NEEL_DAC_CHANNELs via alias-name
        FS_divider (Union[float, int]): For fast sequence ramp mode it determines time between each DAC step (ms). (trigger from DIO1/panel 9)
                                For fast sequence mode it determines time of pulse from DIO1/panel 9.
        FS_ramp (bool): ramp mode (True) or not (False)
        FS_pulse_len (int): Length of trigger (check minimum trigger length of each instrument, which accept the trigger.)
        FS_chan_list (List[int]): List of fast sequence channels. Panel = N // 8, channel = N % 8, Dummy = 255
        FS_status (bool): whether fast sequence is running (True) or not (False).
        FS_sample_count (int): Length of the fast sequence slot
        FS_move_limit (List[float, float]): minimum and maximum for the dac movement for fast ramp and sequence.
        init_zero (bool): (True) initialize all DAC channels to zero or (False) keep the current configuration
    """
    def __init__(self, name:str,
                 bitFilePath:str=bit_file,
                 address:str=ip_address,
                 LI_frequency:float=23.3,
                 LI_amplitude:float=0.0,
                 # LI_channel:int=0,
                 LI_channel:list=[1,0], # HE
                 LI_status:bool=False,
                 used_buses:List[int]=[1,2,4,6],
                 ms2wait:int=1,
                 FS_divider:Union[int, float]=3,
                 FS_ramp:bool=True,
                 FS_pulse_len:int=100,
                 FS_chan_list:List[int]=list(range(16)),
                 FS_status:bool=False,
                 FS_sample_count:int=10,
                 FS_move_limit:List[float]=[-0.5, 0.3],
                 init_zero:bool=False,
                 **kwargs) -> None:
        """Open the FPGA session, cache all settings, declare the qcodes
        parameters and create the bus submodules for every used bus."""
        super().__init__(name, **kwargs)
        # Address information
        self.bitFilePath = bitFilePath
        self.address =address
        # Define reference to access FPGA (opened immediately below).
        self.ref = None
        self.openRef()
        # lock-in related parameters (cached; the FPGA is write-only here)
        self._LI_status = LI_status
        self._LI_frequency = LI_frequency
        self._LI_amplitude = LI_amplitude
        self._LI_channel = LI_channel
        # DAC related parameters
        self._used_buses = used_buses
        self._ms2wait = ms2wait
        self.v = dict()
        # Fast sequence related parameters
        self._FS_divider = FS_divider
        self._FS_ramp = FS_ramp
        self._FS_pulse_len = FS_pulse_len
        self._FS_chan_list = FS_chan_list
        self._FS_status = FS_status
        self._FS_sample_count = FS_sample_count
        self._FS_move_limit = FS_move_limit
        # Default fast-sequence slot array: 2 x 10, first column [101, 0],
        # last column [103, 9] (slot opcodes; middle columns stay zero).
        seq = np.zeros((2,10), dtype=float)
        seq[:, 0] = [101, 0]
        seq[:, 9] = [103, 9]
        self._FS_slots = seq
        if init_zero:
            self.initialise()
        self.add_parameter('LI_status',
                           label='Lock-in status',
                           get_cmd=self.get_lock_in_status,
                           set_cmd=self.set_lock_in_status,
                           initial_value=LI_status,
                           )
        self.add_parameter('LI_frequency',
                           label='Lock-in frequency',
                           unit='Hz',
                           get_cmd=self.get_lock_in_frequency,
                           set_cmd=self.set_lock_in_frequency,
                           get_parser=float,
                           set_parser=float,
                           post_delay=0.45, # HE: wait after move such that the lock-in-detector can follow
                           vals=vals.Numbers(0.0, 50000.0),
                           initial_value=LI_frequency,
                           )
        self.add_parameter('LI_amplitude',
                           label='Lock-in amplitude',
                           unit='V',
                           get_cmd=self.get_lock_in_amplitude,
                           set_cmd=self.set_lock_in_amplitude,
                           get_parser=float,
                           set_parser=float,
                           post_delay=0.45, # HE: wait after move such that the lock-in-detector can follow
                           vals=vals.Numbers(0.0, 2.0),
                           initial_value=LI_amplitude,
                           )
        # self.add_parameter('LI_channel',
        #                    label='Lock-in channel',
        #                    get_cmd=self.get_lock_in_channel,
        #                    set_cmd=self.set_lock_in_channel,
        #                    get_parser=int,
        #                    set_parser=int,
        #                    vals=vals.Ints(0, 63),
        #                    initial_value=LI_channel,
        #                    )
        self.add_parameter('LI_channel', # HE
                           label='Lock-in channel',
                           get_cmd=self.get_lock_in_channel,
                           set_cmd=self.set_lock_in_channel,
                           get_parser=list,
                           set_parser=list,
                           vals=vals.Lists(vals.Ints(0,7)),
                           initial_value=LI_channel,
                           )
        self.add_parameter('used_buses',
                           label='Used DAC buses',
                           get_cmd=self.get_used_buses,
                           set_cmd=self.set_used_buses,
                           initial_value=used_buses,
                           )
        self.add_parameter('ms2wait',
                           label='Wait time of DAC bit movement',
                           unit = 'ms',
                           get_cmd=self.get_ms2wait,
                           set_cmd=self.set_ms2wait,
                           get_parser=int,
                           set_parser=int,
                           vals=vals.Ints(0,5),
                           initial_value=ms2wait,
                           )
        self.add_parameter('FS_divider',
                           label='Fast sequence divider',
                           unit = 'ms',
                           get_cmd = self.get_FS_divider,
                           set_cmd = self.set_FS_divider,
                           get_parser=float,
                           set_parser=float,
                           vals=vals.Numbers(4.6e-4, 450),
                           initial_value=FS_divider,
                           )
        self.add_parameter('FS_ramp',
                           label='Fast sequence ramp mode',
                           get_cmd = self.get_FS_ramp,
                           set_cmd = self.set_FS_ramp,
                           get_parser = bool,
                           set_parser = bool,
                           initial_value=FS_ramp,
                           )
        self.add_parameter('FS_pulse_len',
                           label='Fast sequence pulse length',
                           get_cmd = self.get_FS_pulse_len,
                           set_cmd = self.set_FS_pulse_len,
                           get_parser = int,
                           set_parser = int,
                           vals=vals.Ints(100, 10000),
                           initial_value=FS_pulse_len,
                           )
        self.add_parameter('FS_chan_list',
                           label='Fast sequence channel list',
                           get_cmd = self.get_FS_chan_list,
                           set_cmd = self.set_FS_chan_list,
                           initial_value=FS_chan_list,
                           )
        self.add_parameter('FS_status',
                           label='Fast sequence status',
                           get_cmd = self.get_FS_status,
                           set_cmd = self.set_FS_status,
                           get_parser=bool,
                           set_parser=bool,
                           initial_value=FS_status,
                           )
        self.add_parameter('FS_sample_count',
                           label='Fast sequence sample count',
                           get_cmd = self.get_FS_sample_count,
                           set_cmd = self.set_FS_sample_count,
                           get_parser=int,
                           set_parser=int,
                           vals=vals.Ints(1, 100000),
                           initial_value=FS_sample_count,
                           )
        self.add_parameter('FS_move_limit',
                           label='Fast sequence DAC move limit',
                           unit = 'V',
                           get_cmd = self.get_FS_move_limit,
                           set_cmd = self.set_FS_move_limit,
                           initial_value=FS_move_limit,
                           )
        self.add_parameter('FS_slots',
                           label = 'Fast sequence slots',
                           get_cmd = self.get_FS_slots,
                           set_cmd = self.set_FS_slots,
                           snapshot_get = False,
                           snapshot_value = False,
                           )
        # Initialize used buses on the FPGA side.
        self.set_used_buses(used_buses)
        self.set_ms2wait(ms2wait)
        # Define Buses as submodules p<bus_number>.
        for n in self._used_buses:
            if 0 <= n <=7:
                s = 'p{:d}'.format(n)
                bus = NEEL_DAC_Bus(self, s, n)
                self.add_submodule(s, bus)
def get_lock_in_status(self):
return self._LI_status
def set_lock_in_status(self, val: bool):
self._LI_status = val
self.lock_in_send_order(order=3,
inhibate = not val)
def get_lock_in_frequency(self):
return self._LI_frequency
def set_lock_in_frequency(self, val: float):
self._LI_frequency = val
if self._LI_status:
# If lock-in is running, once stop it and restart after change.
self.set_lock_in_status(False)
self.lock_in_send_order(order=0,
frequency = val)
self.set_lock_in_status(True)
else:
self.lock_in_send_order(order=0,
frequency = val)
def get_lock_in_amplitude(self):
return self._LI_amplitude
def set_lock_in_amplitude(self, val: float):
self._LI_amplitude = np.abs(val)
if self._LI_status:
# If lock-in is running, once stop it and restart after change.
self.set_lock_in_status(False)
self.lock_in_send_order(order=2,
amplitude = val)
self.set_lock_in_status(True)
else:
self.lock_in_send_order(order=2,
amplitude = val)
def get_lock_in_channel(self):
return self._LI_channel
# def set_lock_in_channel(self, val: int):
# self._LI_channel = val
# panel = val // 8
# channel = val % 8
# LI_panel_channel = {'panel':panel, 'channel':channel}
# if self._LI_status:
# # If lock-in is running, once stop it and restart after change.
# self.set_lock_in_status(False)
# self.lock_in_send_order(order=1, panel_channel=LI_panel_channel)
# self.set_lock_in_status(True)
# else:
# self.lock_in_send_order(order=1, panel_channel=LI_panel_channel)
def set_lock_in_channel(self, val: int): #HE
panel = val[0]
channel = val[1]
LI_panel_channel = {'panel':panel, 'channel':channel}
if self._LI_status:
# If lock-in is running, once stop it and restart after change.
self.set_lock_in_status(False)
self.lock_in_send_order(order=1, panel_channel=LI_panel_channel)
self.set_lock_in_status(True)
else:
self.lock_in_send_order(order=1, panel_channel=LI_panel_channel)
def get_used_buses(self):
return self._used_buses
def set_used_buses(self, val: List[int]):
self._used_buses = val
busses_to_use = [False]*8
for n in val:
if n > 7:
print('Bus{:d} is out of range.'.format(n))
else:
busses_to_use[n] = True
self.DAC_send_order(order=1,
busses_to_use=busses_to_use)
def get_ms2wait(self):
return self._ms2wait
def set_ms2wait(self, val: int):
self._ms2wait = val
self.DAC_send_order(order=2,
delay_between_steps_ms = val)
def get_FS_divider(self):
return self._FS_divider
def set_FS_divider(self, val: Union[int, float]):
if self._FS_status:
# stop fast sequence if running.
self.set_FS_status(False)
self._FS_divider = val
self.fastseq_set_orders(order = 1,
divider = ms2FS_divider(val))
def get_FS_ramp(self):
return self._FS_ramp
def set_FS_ramp(self, val: bool):
if self._FS_status:
# stop fast sequence if running.
self.set_FS_status(False)
self._FS_ramp = val
if val:
# When ramp mode, unset stop count.
self.fastseq_set_orders(order=3)
else:
# When fast cycle mode ('start'), unset ramp.
self.fastseq_set_orders(order=2)
def get_FS_pulse_len(self):
return self._FS_pulse_len
def set_FS_pulse_len(self, val:int):
if self._FS_status:
# stop fast sequence if running.
self.set_FS_status(False)
self._FS_pulse_len = val
self.fastseq_set_orders(order=4,
pulse_length=val)
def get_FS_chan_list(self):
return self._FS_chan_list
def set_FS_chan_list(self, val:List[int]):
if self._FS_status:
# stop fast sequence if running.
self.set_FS_status(False)
self._FS_chan_list = val
size = len(val)
# for i in range(16):
for i in range(32): # HE 32
if i < size:
v = val[i]
if 0 <= v < 64:
panel = v // 8
channel = v % 8
self.fastseq_set_fastChannel(fast_chan_number=i,
panel_channel={'panel':panel, 'channel':channel},
is_dummy = False)
else:
# set dummy
self.fastseq_set_fastChannel(fast_chan_number=i,
panel_channel={'panel':0, 'channel':0},
is_dummy = True)
else:
self.fastseq_set_fastChannel(fast_chan_number=i,
panel_channel={'panel':0, 'channel':0},
is_dummy = True)
def get_FS_status(self):
return self._FS_status
def set_FS_status(self, val:bool, sample_count=True):
# Control start and stop of fast sequence.
# When we start the fast sequence, each time we have to set sample count.
# Therefore I include it from the beggining.
if val:
if sample_count:
# Set sample count.
self.FS_sample_count(self.FS_sample_count())
# Start fast sequence
self.fastseq_set_orders(order=6)
else:
# Stop fast sequence
self.fastseq_set_orders(order=0)
self._FS_status = val
def get_FS_sample_count(self):
return self._FS_sample_count
def set_FS_sample_count(self, val:int):
if self._FS_status:
# stop fast sequence if running.
self.set_FS_status(False)
self._FS_sample_count = val
if self._FS_ramp:
# Ramp mode
#- For ramp mode we add trigger count +2 (make sure that ADC obtain enough amount of trigger pulse.)
self.fastseq_set_orders(order=5,
sample_count=val+2)
else:
# Fast cycle mode
self.DAC_set_stop_sample_count(sample_count = val)
def get_FS_move_limit(self):
return self._FS_move_limit
def set_FS_move_limit(self, val:List[float]):
self._FS_move_limit = val
def get_FS_slots(self):
return self._FS_slots
def set_FS_slots(self, val:np.ndarray, store_seq2meta=True):
shape = val.shape
# Check shape of the input variable
if (not len(shape) == 2) or (not shape[0]==2):
raise ValueError('Shape of fast sequence array is invalid.')
self.fast_seq_set_slots(val)
if store_seq2meta:
self.FS_slots.metadata['fast_seq'] = [list(val[0,:]), list(val[1,:])]
self._FS_slots = val
def get_DAC_values(self, mode:int=1, fill_modules:bool = False):
"""
Get all the DAC values from FPGA.
Args:
mode (int): 0: returns 8 by 8 array,
1: returns information only for used buses
fill_modules (bool): whether we set obtained values to sub-modules or not
It is useful when we first define the instrument.
"""
dac_values = self.DAC_current_values()
if mode==1:
a = np.zeros((len(self._used_buses), 8), dtype=float)
for i, n in enumerate(self._used_buses):
a[i,:] = dac_values[n,:]
dac_values = a
# Set values to submodules
if fill_modules:
for n in self._used_buses:
panel = getattr(self, 'p{:d}'.format(n))
for c in range(8):
ch = getattr(panel, 'c{:d}'.format(c))
ch.v(dac_values[n,c])
return dac_values
"""-----------------------
Control functions
------------------------"""
    def DAC_start_movement(self):
        """
        Start DAC movement
        (sends order 0, 'start movement', to the DAC sub-system).
        """
        self.DAC_send_order(order=0)
    def init(self, value:float=0.0):
        """
        Initialize all the DAC values in the used buses to "value".
        For the procedure once move all the DAC to -0.01 V and come back
        to the given "value".
        NOTE(review): relies on self.move_all_to, which is defined elsewhere
        in this class -- confirm it exists in the full file.
        """
        self.move_all_to(-0.01)
        self.move_all_to(value)
    # Convenience aliases for init().
    initialize=init; initialise=init; DAC_init_values=init
"""===================================
FPGA control functions from LabVIEW
==================================="""
def openRef(self):
# Open FPGA reference and return it.
self.ref = Session(bitfile=self.bitFilePath, resource='rio://'+self.address+'/RIO0')
# if not (self.ref.fpga_vi_state==nifpga.FpgaViState.Running):
# # If not run, run.
# self.ref.run()
# perform lock-in-configure
self.lock_in_configure_analysis()
def close(self):
# Close FPGA reference
self.ref.close()
"""---------------------
Lock-in related functions
------------------------"""
def lock_in_configure_analysis(self):
"""
Function to setup FPGA at the beggining.
"""
# Data set to host
self.lock_in_send_analysis(order = {'NULL':0, 'Data_sent_to_host':1, 'dt/tau':2, 'Voltage_range':3}['Data_sent_to_host'],
voltage_range = {'10V':0, '5V':1, '1V':2}['10V'],
dt_over_tau = 0.0,
data_sent_back = {'LI':0, 'average':1}['average'],
)
# dt/tau
self.lock_in_send_analysis(order = {'NULL':0, 'Data_sent_to_host':1, 'dt/tau':2, 'Voltage_range':3}['dt/tau'],
voltage_range = {'10V':0, '5V':1, '1V':2}['10V'],
dt_over_tau = 8.00006091594696044921875000000000E-6,
data_sent_back = {'LI':0, 'average':1}['average'],
)
    def lock_in_send_analysis(self,
                              order = {'NULL':0, 'Data_sent_to_host':1, 'dt/tau':2, 'Voltage_range':3}['Data_sent_to_host'],
                              voltage_range = {'10V':0, '5V':1, '1V':2}['10V'],
                              dt_over_tau = 0.0,
                              data_sent_back = {'LI':0, 'average':1}['average'],
                              ):
        """
        Send a setup order to the FPGA analysis sub-system, mirroring the
        frame sequence of the original LabVIEW program.
        Args:
            order (int): selection of operation (0: NULL, 1: data sent to host,
                2: dt/tau, 3: voltage range)
            voltage_range (int): voltage range code (0: 10V, 1: 5V, 2: 1V)
            dt_over_tau (float): filter time-constant ratio; converted below to
                a 32-bit fixed-point value by multiplying with 2**32
            data_sent_back (int): 0: lock-in signal, 1: averaged signal
        """
        # 1st frame of LabVIEW program: build the 64-bit order word
        # (family code 3 occupies the most significant byte).
        if order == 0:
            # NULL
            order_number = join_8_8bit264bit(3,0,0,0,0,0,0,0)
        elif order == 1:
            # Data set to host
            order_number = join_8_8bit264bit(3,1,0,0,0,0,0,data_sent_back)
        elif order == 2:
            # dt/tau
            dt_over_tau = dt_over_tau * (2**32) # Convert Fixed point to 32 bit integer
            order_number = join_numbers(3,2,16)
            order_number = join_numbers(order_number, 0, 32)
            order_number = join_numbers(order_number, dt_over_tau, 64)
        elif order == 3:
            # Voltage range
            order_number = join_8_8bit264bit(3,3,0,0,0,0,0,voltage_range)
        # 2nd frame of LabVIEW program: write the order and raise the handshake flag.
        order_in = self.ref.registers['order in']
        order_in.write(np.uint64(order_number))
        orderXmitted = self.ref.registers['order Xmitted']
        orderXmitted.write(True)
        # 3rd frame of LabVIEW program: hold the flag for 10 ms, then clear it.
        time.sleep(0.01)
        orderXmitted.write(False)
        # 4th frame of LabVIEW program
        if order == 2:
            # dt/tau
            # Busy-wait until move bus gets ready.
            move_bus_ready = self.ref.registers['move bus ready'].read()
            while move_bus_ready == False:
                move_bus_ready = self.ref.registers['move bus ready'].read()
    def lock_in_send_order(self,
                           order = {'frequency':0, 'channel':1, 'amplitude':2, 'inhibate':3}['inhibate'],
                           frequency = 0.0,
                           amplitude = 0.0,
                           inhibate = False,
                           panel_channel = {'panel':0, 'channel':0},
                           ):
        """
        Send an order to the lock-in sub-system (family code 2).
        Args:
            order (int): 0: frequency, 1: channel, 2: amplitude, 3: inhibate
            frequency (float): excitation frequency in Hz (used by order 0);
                converted to a clock divider 25000/frequency, clamped to
                [1, 4e9].  NOTE(review): frequency == 0 raises
                ZeroDivisionError here -- confirm callers never pass 0.
            amplitude (float): excitation amplitude in V (order 2), clamped
                to [-5, 5] then scaled by 2**16/10 (HE change from the
                original 2**16/5 -- confirm against hardware scaling).
            inhibate (bool): True disables the output (order 3).
            panel_channel (dict): target {'panel', 'channel'} (order 1).
        """
        if order == 0:
            # Frequency (Hz): convert to a 32-bit clock divider and split
            # into 4 bytes for the order word.
            f = 25000/frequency
            if f < 1:
                f = 1
            elif f > 4e9:
                f = 4e9
            f = np.uint32(f)
            a,b = split_number(f, size=32)
            c,d = split_number(a, size=16)
            e,f = split_number(b, size=16)
            order_number = join_8_8bit264bit(2,4,0,0,c,d,e,f)
        elif order == 1:
            # channel
            order_number = join_8_8bit264bit(2,1,0,0,0,0,panel_channel['panel'],panel_channel['channel'])
        elif order == 2:
            # Amplitude: clamp to +/-5 V, scale to a 16-bit code.
            if amplitude < -5:
                amplitude = -5
            elif amplitude > 5:
                amplitude = 5
            # a = amplitude/5.0*(2**16)
            a = amplitude/10.0*(2**16)
            a = np.uint16(a)
            b,c = split_number(a, 16)
            order_number = join_8_8bit264bit(2,2,0,0,0,0,b,c)
        elif order == 3:
            # Inhibate: 1 disables the output, 0 enables it.
            if inhibate:
                v = 1
            else:
                v = 0
            order_number = join_8_8bit264bit(2,3,0,0,0,0,0,v)
        self.DAC_Xmit_order(order = order_number)
def DAC_lock_in_init(self,
frequency = 0.0,
amplitude = 0.0,
inhibate = True,
panel_channel = {'panel':0, 'channel':0},
):
"""
Initialize lock-in
"""
# Stop lock-in before changing the setup.
self.lock_in_send_order(order = {'frequency':0, 'channel':1, 'amplitude':2, 'inhibate':3}['inhibate'],
frequency = frequency,
amplitude = amplitude,
inhibate = True,
panel_channel = panel_channel,
)
# Set panel and channel
self.lock_in_send_order(order = {'frequency':0, 'channel':1, 'amplitude':2, 'inhibate':3}['channel'],
frequency = frequency,
amplitude = amplitude,
inhibate = inhibate,
panel_channel = panel_channel,
)
# Set frequency
self.lock_in_send_order(order = {'frequency':0, 'channel':1, 'amplitude':2, 'inhibate':3}['frequency'],
frequency = frequency,
amplitude = amplitude,
inhibate = inhibate,
panel_channel = panel_channel,
)
# Set amplitude
self.lock_in_send_order(order = {'frequency':0, 'channel':1, 'amplitude':2, 'inhibate':3}['amplitude'],
frequency = frequency,
amplitude = amplitude,
inhibate = inhibate,
panel_channel = panel_channel,
)
# Start or not
self.lock_in_send_order(order = {'frequency':0, 'channel':1, 'amplitude':2, 'inhibate':3}['inhibate'],
frequency = frequency,
amplitude = amplitude,
inhibate = inhibate,
panel_channel = panel_channel,
)
"""===================
DAC related functions
==================="""
def DAC_set_use_buses(self,
busses_to_use = [False]*8,
delay_between_steps_ms = 2,
):
if True in busses_to_use:
# Buses to use
self.DAC_send_order(order = {'start movement':0, 'busses to use':1, 'delay':2, 'value':3, 'stop':4}['busses to use'],
busses_to_use = busses_to_use,
panel_channel = {'panel':0, 'channel':0},
DAC_goto_value = 0.0,
delay_between_steps_ms = delay_between_steps_ms,
)
# delay between each DAC movement
self.DAC_send_order(order = {'start movement':0, 'busses to use':1, 'delay':2, 'value':3, 'stop':4}['delay'],
busses_to_use = busses_to_use,
panel_channel = {'panel':0, 'channel':0},
DAC_goto_value = 0.0,
delay_between_steps_ms = delay_between_steps_ms,
)
    def DAC_send_order(self,
                       order = {'start movement':0, 'busses to use':1, 'delay':2, 'value':3, 'stop':4}['busses to use'],
                       busses_to_use = [False]*8,
                       panel_channel = {'panel':0, 'channel':0},
                       DAC_goto_value = 0.0,
                       delay_between_steps_ms = 2,
                       ):
        """
        This function is used to send an order to DAC.
        Security for DAC go to value will be implemented at different location.

        Args:
            order: order code (0=start movement, 1=busses to use, 2=delay,
                3=value, 4=stop); selects which 64-bit order word is built.
            busses_to_use: 8 booleans, bit-packed into one byte for order 1.
            panel_channel: target DAC as {'panel': p, 'channel': c} (order 3).
            DAC_goto_value: target voltage for order 3, mapped from +/-5 V
                onto an unsigned 16-bit code centred at 32768.
            delay_between_steps_ms: per-step delay transmitted by order 2.
        """
        if order == 0:
            # Start movement
            order_number = join_8_8bit264bit(1,2,0,0,0,0,0,0)
        elif order == 1:
            # buses to use: pack the 8 booleans into one byte (bit i = bus i)
            bus = 0
            for i, b in enumerate(busses_to_use):
                if b:
                    bus += 2**i
            order_number = join_8_8bit264bit(1,1,0,0,0,0,0,bus)
        elif order == 2:
            # delay
            order_number = join_8_8bit264bit(1,3,0,0,0,0,0,delay_between_steps_ms)
        elif order == 3:
            # value: scale +/-5 V onto a 16-bit code centred at 32768
            # NOTE(review): np.int16 overflows for inputs at exactly +5.0 V --
            # confirm upstream clamping keeps DAC_goto_value strictly below +5 V.
            value = np.int16(DAC_goto_value/5.0*32768) + 32768
            a,b = split_number(value, size=16)
            order_number = join_8_8bit264bit(1,4,0,0,panel_channel['panel'],panel_channel['channel'],a,b)
        elif order == 4:
            # stop
            order_number = join_8_8bit264bit(1,5,0,0,0,0,0,0)
        # NOTE(review): an order code outside 0-4 leaves order_number unbound
        # and the next line raises NameError.
        self.DAC_Xmit_order(order=order_number)
def DAC_Xmit_order(self,
order=0):
"""
Main program to send an order to FPGA.
Arg:
order: uint64
"""
order_in = self.ref.registers['order in']
order_Xmitted = self.ref.registers['order Xmitted']
order_in.write(order)
order_Xmitted.write(True)
i=0
while order_Xmitted.read()==True:
i+=1
def DAC_set_value(self,
panel_channel = {'panel':0, 'channel':0},
DAC_goto_value = 0.0,
):
"""
Set goto value of DAC.
Note:
Meanwhile I do not implement safety check here since for QuCoDeS there is another safety chaeck.
"""
self.DAC_send_order(order = {'start movement':0, 'busses to use':1, 'delay':2, 'value':3, 'stop':4}['value'],
busses_to_use = [False]*8,
panel_channel = panel_channel,
DAC_goto_value = DAC_goto_value,
delay_between_steps_ms = 2,
)
def DAC_wait_end_of_move(self):
"""
Wait until all the DAC movement finishes.
"""
move_bus_ready = self.ref.registers['move bus ready']
i=0
while move_bus_ready.read()==False:
i += 1
    def move(self):
        """Trigger the staged DAC movement and block until it completes."""
        self.DAC_start_movement()
        self.DAC_wait_end_of_move()
    DAC_move=move  # backward-compatible alias for move()
def move_all_to(self, value:float=0.0):
"""
Move all DAC values in the used buses to "value".
"""
for i in self._used_buses:
for j in range(8):
self.DAC_set_value(panel_channel={'panel':i, 'channel':j},
DAC_goto_value=value)
self.move()
    def DAC_current_values(self,precision=4):
        """
        Get current values of DAC.

        Reads back all 64 DAC codes (8 panels x 8 channels) from the FPGA via
        a get/got register handshake and converts them to volts.

        Args:
            precision: number of decimals kept in the returned array.

        Returns:
            (8, 8) float array of DAC voltages, indexed [panel, channel].
        """
        # Get rid of an eventual unfinished retrieving sequence
        get_DAC_value = self.ref.registers['get DAC value']
        got_DAC_value = self.ref.registers['got DAC value']
        got_DAC_value.write(True)
        while get_DAC_value.read()==True:
            got_DAC_value.write(True)
        # Read values
        values = np.zeros((8,8),dtype=float)
        DAC_to_retrieve = self.ref.registers['DAC to retrieve']
        DAC_data = self.ref.registers['DAC data']
        for i in range(64):
            DAC_to_retrieve.write(i)
            got_DAC_value.write(True)
            get_DAC_value.write(True)
            # busy-wait until the FPGA latches the requested value
            j=0
            while got_DAC_value.read()==True:
                j+=1
            data = DAC_data.read()
            # split the 64-bit word into a linear DAC index and the raw code
            # (bit order is defined by split_number)
            panel_channel, value = split_number(data, size=32)
            panel = int(panel_channel)//8
            channel = int(panel_channel) % 8
            value = (value - 32768)/32768*5.0 # Convert to real unit
            values[panel, channel] = value
            #print(panel,channel,value)
            got_DAC_value.write(True)
        return np.round(values,precision)
values = get_DAC_values
"""========================================
Fast sequence related functions
========================================"""
    def fastseq_set_orders(self,
                           order={'stop':0, 'set divider':1, 'unset ramp mode':2, 'unset stop count':3, 'set pulse length':4, 'set ramp':5, 'start':6}['stop'],
                           divider = 6661,
                           pulse_length=0,
                           sample_count = 0,
                           ):
        """
        Program to send an order to fast sequence sub-system.

        Args:
            order: order code (0=stop, 1=set divider, 2=unset ramp mode,
                3=unset stop count, 4=set pulse length, 5=set ramp, 6=start).
            divider: clock divider (used by order 1 only).
            pulse_length: pulse length (used by order 4 only).
            sample_count: ramp sample count (used by order 5 only).
        """
        if order == 0:
            # stop
            order_number = join_8_8bit264bit(5,1,0,0,0,0,0,0)
        elif order == 1:
            # set divider
            order_number = join_numbers(5,7, final_size=16)
            order_number = join_numbers(order_number, 0, final_size=32)
            order_number = join_numbers(order_number, divider, final_size=64)
        elif order == 2:
            # unset ramp mode
            # NOTE(review): this word starts with family code 6 while every
            # other order here starts with 5 -- confirm this is intended.
            order_number = join_8_8bit264bit(6,9,0,0,0,0,0,0)
        elif order == 3:
            # unset stop count
            order_number = join_8_8bit264bit(5,6,0,0,0,0,0,0)
        elif order == 4:
            # set pulse length
            order_number = join_numbers(5, 10, final_size=16)
            order_number = join_numbers(order_number, 0, final_size=32)
            pulse_length = join_numbers(0, pulse_length, final_size=32)
            order_number = join_numbers(order_number, pulse_length, final_size=64)
        elif order == 5:
            # set ramp
            order_number = join_numbers(5, 8, final_size=16)
            order_number = join_numbers(order_number, 0, final_size=32)
            sample_count = join_numbers(0, sample_count, final_size=32)
            order_number = join_numbers(order_number, sample_count, final_size=64)
        elif order == 6:
            # start
            order_number = join_8_8bit264bit(5,2,0,0,0,0,0,0)
        # NOTE(review): an order code outside 0-6 leaves order_number unbound.
        self.DAC_Xmit_order(order = order_number)
# def fastseq_set_fastChannel(self,
# fast_chan_number=0,
# panel_channel = {'panel':0, 'channel':0},
# is_dummy = False,
# ):
# """
# Allocate DAC panel_channel to fast sequence channels (up to 16 DACs).
# """
# panel = panel_channel['panel']
# if is_dummy:
# # Dummy channel is 255.
# channel = 255
# else:
# channel = panel_channel['channel']
# # Check whether fast_chan_number is out of range or not.
# if fast_chan_number < 0:
# fast_chan_number = 0
# print('fast channel number is out of range and cast to closest available value.')
# elif fast_chan_number > 15:
# fast_chan_number = 15
# print('fast channel number is out of range and cast to closest available value.')
# order_number = join_8_8bit264bit(5,3,0,0,fast_chan_number,0,panel,channel)
# self.DAC_Xmit_order(order = order_number)
def fastseq_set_fastChannel(self,
fast_chan_number=0,
panel_channel = {'panel':0, 'channel':0},
is_dummy = False,
):
"""
Allocate DAC panel_channel to fast sequence channels (up to 32 DACs). # HE 32
"""
panel = panel_channel['panel']
if is_dummy:
# Dummy channel is 255.
channel = 255
else:
channel = panel_channel['channel']
# Check whether fast_chan_number is out of range or not.
if fast_chan_number < 0:
fast_chan_number = 0
print('fast channel number is out of range and cast to closest available value.')
elif fast_chan_number > 31:
fast_chan_number = 31
print('fast channel number is out of range and cast to closest available value.')
order_number = join_8_8bit264bit(5,3,0,0,fast_chan_number,0,panel,channel)
self.DAC_Xmit_order(order = order_number)
    def fastseq_set_slot(self,
                         choice={'DAC':0, 'timing':1, 'triggers':2, 'jump':3}['DAC'],
                         slot_number=0,
                         fast_chan_number=0,
                         DAC_Offset = 0.0,
                         time_ms = 0.0,
                         trigger = {'trig1_ramp':False, 'trig2':False, 'trig3':False, 'trig4':False, 'stop':False},
                         jump2 = 0,
                         ):
        """
        Set fast sequence slot.

        Encodes one sequence slot into a 64-bit order word and transmits it.

        Args:
            choice: slot type (0=DAC offset, 1=timing, 2=triggers, 3=jump).
            slot_number: index of the slot to program.
            fast_chan_number: fast channel index, clamped to [0, 31] (choice 0).
            DAC_Offset: DAC offset, clamped to [-5, +5] (choice 0).
            time_ms: wait time in ms, encoded as mantissa + exponent (choice 1).
            trigger: dict of trigger flags to bit-pack (choice 2).
            jump2: slot index to jump to (choice 3).
        """
        if choice == 0:
            #DAC
            # clamp the fast channel index into [0, 31]
            if fast_chan_number < 0:
                fast_chan_number = 0
            # elif fast_chan_number > (2**4-1):
            #     fast_chan_number = (2**4-1)
            # val = fast_chan_number + (choice << 4)
            elif fast_chan_number > (2**5-1): # HE 32
                fast_chan_number = (2**5-1)
            val = fast_chan_number + (choice << 4)
            print(val) # HE
            # NOTE(review): the print above looks like leftover debug output.
            # order_number = join_numbers(5,4,final_size=16)
            order_number = join_numbers(5,4,final_size=16) # HE 32
            val = join_numbers(val, 0, final_size=16)
            order_number = join_numbers(order_number, val, final_size=32)
            # detailed safe check will be performed elsewhere
            # here we only check the value is smaller than |5|.
            if DAC_Offset < -5.0:
                DAC_Offset = -5.0
                print('DAC offset input value is not normal. Please check it.')
            elif DAC_Offset > 5.0:
                DAC_Offset = 5.0
                print('DAC offset input value is not normal. Please check it.')
            # scale the offset to a 16-bit code (5 V full scale)
            DAC_Offset = DAC_Offset/5.0 * 32768
            # clamp the slot number to 16 bit
            if slot_number < 0:
                slot_number = 0
            elif slot_number > (2**16-1):
                slot_number = 65535
            val = join_numbers(slot_number, DAC_Offset, final_size=32)
            order_number = join_numbers(order_number, val, final_size=64)
        elif choice == 1:
            # Timing
            val = (choice << 4)
            order_number = join_numbers(5,4,final_size=16)
            val = join_numbers(val,0,final_size=16)
            order_number = join_numbers(order_number, val, final_size=32)
            # Convert time to us
            time_ms = np.abs(time_ms*1000.0)
            if time_ms < 1:
                # Force wait time above 1 us.
                time_ms = 1.0
            # encode the wait time as an 11-bit mantissa plus a power-of-two
            # exponent stored in the bits above it
            val = np.int64(np.floor(np.log2(time_ms))) - 10
            if val < 0:
                val = 0
            time_ms = np.floor(time_ms * (2.0**(-val)))
            if time_ms > ((2**11)-1):
                # Time(ms) is casted to 11bit in LabVIEW program
                # so I will do the same.
                time_ms = ((2**11)-1)
            val = time_ms + (val << 11)
            val = join_numbers(slot_number, val, final_size=32)
            order_number = join_numbers(order_number, val, final_size=64)
        elif choice == 2:
            # triggers
            val = (choice << 4)
            order_number = join_numbers(5,4,final_size=16)
            val = join_numbers(val,0,final_size=16)
            order_number = join_numbers(order_number, val, final_size=32)
            # bit-pack the trigger flags: bits 0..3 = trig1..trig4, bit 15 = stop
            val = 0
            if trigger['trig1_ramp']:
                val += 2**0
            if trigger['trig2']:
                val += 2**1
            if trigger['trig3']:
                val += 2**2
            if trigger['trig4']:
                val += 2**3
            if trigger['stop']:
                val += 2**15
            val = join_numbers(slot_number, val, final_size=32)
            order_number = join_numbers(order_number, val, final_size=64)
        elif choice == 3:
            # jump
            val = (choice << 4)
            order_number = join_numbers(5,4,final_size=16)
            val = join_numbers(val,0,final_size=16)
            order_number = join_numbers(order_number, val, final_size=32)
            val = join_numbers(slot_number, jump2, final_size=32)
            order_number = join_numbers(order_number, val, final_size=64)
        # NOTE(review): a choice outside 0-3 leaves order_number unbound.
        self.DAC_Xmit_order(order = order_number)
    def fast_seq_set_slots(self,
                           seq_array: np.ndarray):
        """
        This function sets the slots of the fast sequence from the given array.
        Args:
            seq_array: (2,N) dimensional array
                       [Limitation for N: 1<= N <= 4096
                       (0,:) is parameter (0 ~ 31: fast channels, 101: trigger,
                       102: timing (ms), 103: jump; any other value raises
                       ValueError)
                       (1,:) is values. (DAC = value offset,
                       trigger = bit wise value for each trigger (1~4, stop)
                       timing = ms to wait, jump = # of slot to jump)]
        """
        # Check array size and cut down if it is too large.
        if seq_array.shape[1] > 4096:
            seq_array = seq_array[:,0:4096]
        N = seq_array.shape[1]
        for i in range(N):
            tp = int(seq_array[0,i])
            value = seq_array[1,i]
            # if tp < 16:
            if tp < 32:
                # DAC shift: clamp to the configured compliance window
                dac_move_min = min(self._FS_move_limit[0], self._FS_move_limit[1])
                dac_move_max = max(self._FS_move_limit[0], self._FS_move_limit[1])
                # Limit check
                if value < dac_move_min:
                    value = dac_move_min
                    print('Compliance is applied and dac move value is cast to {:f}'.format(dac_move_min))
                if value > dac_move_max:
                    value = dac_move_max
                    print('Compliance is applied and dac move value is cast to {:f}'.format(dac_move_max))
                self.fastseq_set_slot(choice=0,
                                      slot_number=i,
                                      fast_chan_number=tp,
                                      DAC_Offset = value)
            elif tp == 101:
                # Trigger control: decode the bit mask into individual flags
                trigger = {'trig1_ramp':False, 'trig2':False, 'trig3':False, 'trig4':False, 'stop':False}
                value = int(value)
                if not (value & 2**0)==0:
                    trigger['trig1_ramp']=True
                if not (value & 2**1)==0:
                    trigger['trig2']=True
                if not (value & 2**2)==0:
                    trigger['trig3']=True
                if not (value & 2**3)==0:
                    trigger['trig4']=True
                # NOTE(review): 'stop' is read from bit 4 here, but
                # fastseq_set_slot packs it into bit 15 -- confirm the mapping.
                if not (value & 2**4)==0:
                    trigger['stop']=True
                self.fastseq_set_slot(choice=2,
                                      slot_number=i,
                                      trigger = trigger)
            elif tp == 102:
                # Timing (wait) (ms)
                self.fastseq_set_slot(choice=1,
                                      slot_number=i,
                                      time_ms = value)
            elif tp == 103:
                # Jump to slot given by the value
                self.fastseq_set_slot(choice=3,
                                      slot_number=i,
                                      jump2 = np.uint16(value))
            else:
                raise ValueError('fast sequence contains undefined type number.')
def DAC_set_stop_sample_count(self,
sample_count=0,
):
order_number = join_numbers(5,5,final_size=16)
order_number = join_numbers(order_number,0,final_size=32)
val = join_numbers(0,sample_count,final_size=32)
order_number = join_numbers(order_number, val, final_size=64)
self.DAC_Xmit_order(order = order_number)
# """ FUNCTIONS TO CONTROL SHORT-CUT REFERENCE TO NEEL_DAC_CHANNEL """
#
# def configure(self, settings = None):
# """
# This function applies a list of settings on various NEEL_DAC_CHANNELS.
#
# settings (list): list of dictionaries for different channels.
# Example:
# settings = [
# { 'channel': [1,0], 'alias': 'right barrier', 'voltage': -0.1, 'range': [-5.0,+0.3], 'label': r'$V_{\rm BR}$'},
# { 'channel': [2,0], 'alias': 'left barrier', 'voltage': -0.2, 'range': [-5.0,+0.3], 'label': r'$V_{\rm BL}$'},
# ...
# ]
# """
# for setting in settings:
# panel = 'p{:d}'.format(setting['channel'][0])
# channel = 'c{:d}'.format(setting['channel'][1])
# self.v[setting['alias']] = self.submodules[panel].submodules[channel].v
# # transform range-attribute for QCoDeS:
# setting['vals'] = vals.Numbers( np.min(setting['range']), np.max(setting['range']) )
# # set voltage:
# self.v[setting['alias']].set(setting['voltage'])
# # set channel attributes:
# for key, item in setting.items():
# try:
# setattr(self.v[setting['alias']], key, item)
# except:
# #print(key,'not found!') # for testing of code
# pass
# def clear_v(self, aliases = None):
if __name__=='__main__':
    # Hardware smoke test: instantiate the driver and play a fast-sequence ramp.
    dac = NEEL_DAC('dac')

    #------------------------
    # Test DAC movement
    #------------------------
    # dac.p0.c0.v(-0.0)
    # dac.DAC_start_movement()
    # dac.DAC_wait_end_of_move()
    #
    # # Test lock-in
    # dac.LI_status(False)
    # dac.LI_frequency(20.0)
    # dac.LI_amplitude(0.2)
    # dac.LI_channel(0)
    # dac.LI_status(False)

    #------------------------
    # Test fast sequence
    #------------------------
    ramp = True
    divider = 6661
    sample_count = 403
    # Stop fast sequence
    dac.FS_status(False)
    # Set fast sequence divider
    dac.FS_divider(divider)
    # set operation mode ('ramp' or 'start')
    dac.FS_ramp(ramp)
    # Set fast sequence channels
    dac.FS_chan_list(list(range(16)))
    # Set pulse length
    dac.FS_pulse_len(1000)
    # Set fast sequence:
    # slot 0 = trigger word 0; slots 1..N-2 = ramp 0.0 -> -0.5 on fast channel 0;
    # last slot = type 103 (jump) targeting itself.
    seq_array = np.zeros((2,sample_count))
    seq_array[:,0] = [101,0]
    seq_array[1,1:sample_count-1] = np.linspace(0.0, -0.5,num=sample_count-2)
    seq_array[:,sample_count-1] = [103, sample_count-1]
    dac.FS_slots(seq_array)
    # Set sample count
    size = seq_array.shape[1]
    dac.FS_sample_count(size)

    dac.FS_status(True)
    # sleep long enough for the sequence to finish
    # NOTE(review): the 4.5e-7 per-tick factor is not derived here -- confirm
    # against the FPGA clock rate.
    sleep_time = 4.5e-7*divider*sample_count+5
    time.sleep(sleep_time)
    dac.FS_status(False)

    dac.close()
| [
"logging.getLogger",
"numpy.abs",
"numpy.uint8",
"numpy.log2",
"qcodes.validators.Numbers",
"numpy.int16",
"numpy.floor",
"time.sleep",
"numpy.zeros",
"numpy.uint32",
"numpy.linspace",
"numpy.uint64",
"nifpga.Session",
"numpy.isnan",
"qcodes.validators.Ints",
"numpy.uint16",
"numpy.r... | [((501, 528), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (518, 528), False, 'import logging\n'), ((53997, 54024), 'numpy.zeros', 'np.zeros', (['(2, sample_count)'], {}), '((2, sample_count))\n', (54005, 54024), True, 'import numpy as np\n'), ((54089, 54133), 'numpy.linspace', 'np.linspace', (['(0.0)', '(-0.5)'], {'num': '(sample_count - 2)'}), '(0.0, -0.5, num=sample_count - 2)\n', (54100, 54133), True, 'import numpy as np\n'), ((54400, 54422), 'time.sleep', 'time.sleep', (['sleep_time'], {}), '(sleep_time)\n', (54410, 54422), False, 'import time\n'), ((1240, 1252), 'numpy.uint32', 'np.uint32', (['b'], {}), '(b)\n', (1249, 1252), True, 'import numpy as np\n'), ((1265, 1277), 'numpy.uint32', 'np.uint32', (['c'], {}), '(c)\n', (1274, 1277), True, 'import numpy as np\n'), ((1809, 1821), 'numpy.uint32', 'np.uint32', (['a'], {}), '(a)\n', (1818, 1821), True, 'import numpy as np\n'), ((1834, 1846), 'numpy.uint32', 'np.uint32', (['b'], {}), '(b)\n', (1843, 1846), True, 'import numpy as np\n'), ((1885, 1897), 'numpy.uint64', 'np.uint64', (['c'], {}), '(c)\n', (1894, 1897), True, 'import numpy as np\n'), ((9412, 9442), 'numpy.zeros', 'np.zeros', (['(2, 10)'], {'dtype': 'float'}), '((2, 10), dtype=float)\n', (9420, 9442), True, 'import numpy as np\n'), ((17334, 17345), 'numpy.abs', 'np.abs', (['val'], {}), '(val)\n', (17340, 17345), True, 'import numpy as np\n'), ((26060, 26137), 'nifpga.Session', 'Session', ([], {'bitfile': 'self.bitFilePath', 'resource': "('rio://' + self.address + '/RIO0')"}), "(bitfile=self.bitFilePath, resource='rio://' + self.address + '/RIO0')\n", (26067, 26137), False, 'from nifpga import Session\n'), ((29179, 29195), 'time.sleep', 'time.sleep', (['(0.01)'], {}), '(0.01)\n', (29189, 29195), False, 'import time\n'), ((38563, 38592), 'numpy.zeros', 'np.zeros', (['(8, 8)'], {'dtype': 'float'}), '((8, 8), dtype=float)\n', (38571, 38592), True, 'import numpy as np\n'), ((39357, 39384), 'numpy.round', 
'np.round', (['values', 'precision'], {}), '(values, precision)\n', (39365, 39384), True, 'import numpy as np\n'), ((1311, 1323), 'numpy.uint16', 'np.uint16', (['b'], {}), '(b)\n', (1320, 1323), True, 'import numpy as np\n'), ((1336, 1348), 'numpy.uint16', 'np.uint16', (['c'], {}), '(c)\n', (1345, 1348), True, 'import numpy as np\n'), ((1937, 1949), 'numpy.uint16', 'np.uint16', (['a'], {}), '(a)\n', (1946, 1949), True, 'import numpy as np\n'), ((1962, 1974), 'numpy.uint16', 'np.uint16', (['b'], {}), '(b)\n', (1971, 1974), True, 'import numpy as np\n'), ((2013, 2025), 'numpy.uint32', 'np.uint32', (['c'], {}), '(c)\n', (2022, 2025), True, 'import numpy as np\n'), ((4754, 4767), 'numpy.isnan', 'np.isnan', (['val'], {}), '(val)\n', (4762, 4767), True, 'import numpy as np\n'), ((29006, 29029), 'numpy.uint64', 'np.uint64', (['order_number'], {}), '(order_number)\n', (29015, 29029), True, 'import numpy as np\n'), ((30209, 30221), 'numpy.uint32', 'np.uint32', (['f'], {}), '(f)\n', (30218, 30221), True, 'import numpy as np\n'), ((1382, 1393), 'numpy.uint8', 'np.uint8', (['b'], {}), '(b)\n', (1390, 1393), True, 'import numpy as np\n'), ((1406, 1417), 'numpy.uint8', 'np.uint8', (['c'], {}), '(c)\n', (1414, 1417), True, 'import numpy as np\n'), ((2065, 2076), 'numpy.uint8', 'np.uint8', (['a'], {}), '(a)\n', (2073, 2076), True, 'import numpy as np\n'), ((2089, 2100), 'numpy.uint8', 'np.uint8', (['b'], {}), '(b)\n', (2097, 2100), True, 'import numpy as np\n'), ((2138, 2150), 'numpy.uint16', 'np.uint16', (['c'], {}), '(c)\n', (2147, 2150), True, 'import numpy as np\n'), ((4497, 4521), 'qcodes.validators.Numbers', 'vals.Numbers', (['vmin', 'vmax'], {}), '(vmin, vmax)\n', (4509, 4521), True, 'from qcodes import Instrument, validators as vals\n'), ((10391, 10417), 'qcodes.validators.Numbers', 'vals.Numbers', (['(0.0)', '(50000.0)'], {}), '(0.0, 50000.0)\n', (10403, 10417), True, 'from qcodes import Instrument, validators as vals\n'), ((11011, 11033), 'qcodes.validators.Numbers', 
'vals.Numbers', (['(0.0)', '(2.0)'], {}), '(0.0, 2.0)\n', (11023, 11033), True, 'from qcodes import Instrument, validators as vals\n'), ((12702, 12717), 'qcodes.validators.Ints', 'vals.Ints', (['(0)', '(5)'], {}), '(0, 5)\n', (12711, 12717), True, 'from qcodes import Instrument, validators as vals\n'), ((13183, 13209), 'qcodes.validators.Numbers', 'vals.Numbers', (['(0.00046)', '(450)'], {}), '(0.00046, 450)\n', (13195, 13209), True, 'from qcodes import Instrument, validators as vals\n'), ((14037, 14058), 'qcodes.validators.Ints', 'vals.Ints', (['(100)', '(10000)'], {}), '(100, 10000)\n', (14046, 14058), True, 'from qcodes import Instrument, validators as vals\n'), ((15214, 15234), 'qcodes.validators.Ints', 'vals.Ints', (['(1)', '(100000)'], {}), '(1, 100000)\n', (15223, 15234), True, 'from qcodes import Instrument, validators as vals\n'), ((46044, 46068), 'numpy.abs', 'np.abs', (['(time_ms * 1000.0)'], {}), '(time_ms * 1000.0)\n', (46050, 46068), True, 'import numpy as np\n'), ((46301, 46332), 'numpy.floor', 'np.floor', (['(time_ms * 2.0 ** -val)'], {}), '(time_ms * 2.0 ** -val)\n', (46309, 46332), True, 'import numpy as np\n'), ((11930, 11945), 'qcodes.validators.Ints', 'vals.Ints', (['(0)', '(7)'], {}), '(0, 7)\n', (11939, 11945), True, 'from qcodes import Instrument, validators as vals\n'), ((30833, 30845), 'numpy.uint16', 'np.uint16', (['a'], {}), '(a)\n', (30842, 30845), True, 'import numpy as np\n'), ((35816, 35854), 'numpy.int16', 'np.int16', (['(DAC_goto_value / 5.0 * 32768)'], {}), '(DAC_goto_value / 5.0 * 32768)\n', (35824, 35854), True, 'import numpy as np\n'), ((46207, 46223), 'numpy.log2', 'np.log2', (['time_ms'], {}), '(time_ms)\n', (46214, 46223), True, 'import numpy as np\n'), ((51024, 51040), 'numpy.uint16', 'np.uint16', (['value'], {}), '(value)\n', (51033, 51040), True, 'import numpy as np\n')] |
########## Script 1 ###################
import sys
from RK_IO_model import RK_IO_methods
from Generalized_RK_Framework import generalized_RK_framework
import pdb #for debugging
import numpy as np
import pyomo.environ as pyo
from pyomo.opt import SolverFactory
from pyomo.opt import SolverStatus, TerminationCondition
import pyomo.mpec as pyompec #for the complementarity
import math
from scipy.io import savemat, loadmat
import pandas
import time
import matplotlib.pyplot as plt
import pickle
import networkx as nx
################### Step 1: Generating Data ######################
##################### Sioux Falls ###################################
##### Thanks to the Github Jupyter code that came with this
##### and the pandas documentation (plus this site https://www.geeksforgeeks.org/indexing-and-selecting-data-with-pandas/)
# TNTP link-flow table: each row is one arc with 1-based "From " / "To " node ids.
sioux_falls_network = pandas.read_csv("SiouxFalls_flow.tntp",\
                        sep="\t")

incidence_matrix = np.zeros((24,76))
# Build the 24-node x 76-arc node-arc incidence matrix:
# +1 at the arc's head ("To ") node, -1 at its tail ("From ") node.
for i in range(0,76):
    end = sioux_falls_network.loc[i,"To "]
    start = sioux_falls_network.loc[i,"From "]
    incidence_matrix[end-1,i] = 1
    incidence_matrix[start-1,i] = -1


###################################################################################

################### Step 2: Setting up Object and Saving Matlab #############################
name_of_grid = "Sioux_Falls"
GRKF_Object = generalized_RK_framework(num_nodes=24,num_arcs=76,num_players=int(sys.argv[2]),num_trials=10,\
                                         node_arc_incidence_matrix=incidence_matrix,\
                                         name_of_graph=name_of_grid)

# sys.argv[1] selects the alpha rule; sys.argv[2] is the number of players.
alpha_flag = int(sys.argv[1])

if alpha_flag == 1:
    alpha = float(sys.argv[2])*0.5
elif alpha_flag == 2:
    alpha = float(sys.argv[2])
# NOTE(review): alpha is undefined when alpha_flag is neither 1 nor 2, so the
# next call would raise NameError -- confirm argv[1] is always 1 or 2.

GRKF_Object.saving_for_matlab_files_randomized_costs(lowerbound_c=1,upperbound_c=5,\
                                                     lowerbound_chat=5,upperbound_chat=20,\
                                                     alpha=alpha,if_different_costs=0)

################### Step 3: Saving the Object #################################
#https://www.datacamp.com/community/tutorials/pickle-python-tutorial

# Pickle the configured framework object for later scripts to reload.
name_of_file = "class_object_1"
test = open(name_of_file,'wb')
pickle.dump(GRKF_Object,test)
test.close()
| [
"numpy.zeros",
"pickle.dump",
"pandas.read_csv"
] | [((885, 934), 'pandas.read_csv', 'pandas.read_csv', (['"""SiouxFalls_flow.tntp"""'], {'sep': '"""\t"""'}), "('SiouxFalls_flow.tntp', sep='\\t')\n", (900, 934), False, 'import pandas\n'), ((994, 1012), 'numpy.zeros', 'np.zeros', (['(24, 76)'], {}), '((24, 76))\n', (1002, 1012), True, 'import numpy as np\n'), ((2290, 2320), 'pickle.dump', 'pickle.dump', (['GRKF_Object', 'test'], {}), '(GRKF_Object, test)\n', (2301, 2320), False, 'import pickle\n')] |
# -*- coding: utf-8 -*-
__author__ = "<NAME>"
import time
import numpy as np
import argparse
import json
import os
import sys
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
sys.path.append(os.path.abspath('../'))
from keras.models import Sequential, Model
from keras.layers import Layer, Dense, Activation, LSTM, Input, Lambda, BatchNormalization, LayerNormalization, Conv1D, Bidirectional
from keras import activations
import keras.backend as K
import tensorflow as tf
from loaders.feature_generator import feature_generator
from utils.mat_helpers import *
from algorithms.audio_processing import *
from utils.keras_helpers import *
from ops.complex_ops import *
from utils.matplotlib_helpers import *
from modules.beamforming_td import beamforming
from modules.identification_td import identification
np.set_printoptions(precision=3, threshold=3, edgeitems=3)  # compact numpy printing
tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.ERROR)  # silence TF info/warning spam
#-------------------------------------------------------------------------
#-------------------------------------------------------------------------
class bssd(object):

    def __init__(self, config, set='train'):
        """Assemble the BSSD model (time-domain beamforming + speaker
        identification) and restore weights/history from disk if present.

        Args:
            config: parsed JSON configuration dict.
            set: feature-generator split, e.g. 'train'.
        """
        self.config = config
        self.fgen = feature_generator(config, set)
        self.nsrc = config['nsrc']                          # number of concurrent speakers

        self.filename = os.path.basename(__file__)
        self.name = self.filename[:-3] + '_' + config['rir_type']
        # mtime of this source file at startup; used by check_date() to stop
        # training when the file is edited on disk
        self.creation_date = os.path.getmtime(self.filename)
        self.weights_file = self.config['weights_path'] + self.name + '.h5'
        self.predictions_file = self.config['predictions_path'] + self.name + '.mat'
        self.logger = Logger(self.name)

        self.samples = self.fgen.samples                    # number of samples per utterance
        self.nmic = self.fgen.nmic                          # number of microphones
        self.ndoa = self.fgen.ndoa                          # number of DOA vectors on the sphere
        self.nbin = 500                                     # latent space H
        self.wlen = 200                                     # convolution kernel filter length
        self.shift = self.wlen//4                           # convolution stride
        self.ndim = 100                                     # embedding dimension E

        self.beamforming = beamforming(self.fgen)
        self.identification = identification(self.fgen)
        self.create_model()

        # training history, restored from the predictions file if it exists
        self.si_sdr = []
        self.eer = []
        self.epoch = 0
        data = load_numpy_from_mat(self.predictions_file)
        if data is not None:
            if 'epoch' in data.keys():
                self.epoch = data['epoch']
                self.si_sdr = data['si_sdr']
                self.eer = data['eer']



    #---------------------------------------------------------
    def create_model(self):
        """Build and compile the Keras graph, then try to restore weights."""
        print('*** creating model: %s' % self.name)

        Z = Input(shape=(self.samples, self.nmic), dtype=tf.float32)     # shape = (nbatch, nsamples, nmic)
        R = Input(shape=(self.samples,), dtype=tf.float32)               # shape = (nbatch, nsamples)
        pid = Input(shape=(1,), dtype=tf.int32)                          # shape = (nbatch,)
        sid = Input(shape=(1,), dtype=tf.int32)                          # shape = (nbatch, 1)

        [Py, Y, cost_bf] = self.beamforming.model([Z, R, pid])
        [E, cost_id] = self.identification.model([Py, sid])

        # compile model: beamforming loss plus a small identification term
        self.model = Model(inputs=[Z, R, pid, sid], outputs=[Y, E])
        self.model.add_loss(cost_bf + 0.01*cost_id)
        self.model.compile(loss=None, optimizer='adam')
        print(self.model.summary())

        try:
            self.model.load_weights(self.weights_file)
        except:
            # NOTE(review): bare except hides the actual error type
            print('error loading weights file: %s' % self.weights_file)



    #---------------------------------------------------------
    def save_weights(self):
        """Persist the current model weights to the .h5 weights file."""
        self.model.save_weights(self.weights_file)

        return



    #---------------------------------------------------------
    def train(self):
        """Train until config['epochs'] is reached or this source file is
        edited on disk (see check_date)."""
        print('train the model')
        while (self.epoch<self.config['epochs']) and self.check_date():

            # triplet sampling: 20 speakers x 3 utterances per batch
            sid0 = self.fgen.generate_triplet_indices(speakers=20, utterances_per_speaker=3)
            z, r, sid, pid = self.fgen.generate_multichannel_mixtures(nsrc=self.nsrc, sid=sid0)
            self.model.fit([z, r, pid[:,0], sid[:,0]], None, batch_size=len(sid0), epochs=1, verbose=0, shuffle=False, callbacks=[self.logger])
            self.epoch += 1

            # checkpoint and validate every 100 epochs
            if (self.epoch%100)==0:
                self.save_weights()
                self.validate()



    #---------------------------------------------------------
    def validate(self):
        """Evaluate SI-SDR and EER on freshly generated mixtures and append
        the results to the predictions .mat file."""
        sid = self.fgen.generate_triplet_indices(speakers=self.fgen.nspk, utterances_per_speaker=3)
        z, r, sid, pid = self.fgen.generate_multichannel_mixtures(nsrc=self.nsrc, sid=sid)
        y, E = self.model.predict([z, r, pid[:,0], sid[:,0]], batch_size=50)

        si_sdr = self.beamforming.si_sdr(r, y)
        far, frr, eer = self.identification.calc_eer(E, sid[:,0])
        print('SI-SDR:', si_sdr)
        print('EER:', eer)
        self.si_sdr = np.append(self.si_sdr, si_sdr)
        self.eer = np.append(self.eer, eer)

        data = {
            'z': z[0,:,0],
            'r': r[0,:],
            'y': y[0,:],
            'E': E,
            'pid': pid,
            'sid': sid,
            'far': far,
            'frr': frr,
            'si_sdr': self.si_sdr,
            'eer': self.eer,
            'epoch': self.epoch,
        }
        save_numpy_to_mat(self.predictions_file, data)



    #---------------------------------------------------------
    def plot(self):
        """Extract each speaker from one mixture and save a spectrogram plot
        of the mixture and all extracted signals."""
        z, r, sid, pid = self.fgen.generate_multichannel_mixtures(nsrc=self.nsrc)

        data = []
        # log-magnitude spectrogram of the (normalized) mixture
        z0 = z[0,:,0]/np.amax(np.abs(z[0,:,0]))
        data.append( 20*np.log10(np.abs(mstft(z0))) )
        for c in range(self.nsrc):
            y, E = self.model.predict([z, r, pid[:,c], sid[:,c]])
            y0 = y[0,:]/np.amax(np.abs(y[0,:]))
            data.append( 20*np.log10(np.abs(mstft(y0))) )

        legend = ['mixture z(t)', 'extracted speaker y1(t)', 'extracted speaker y2(t)', 'extracted speaker y3(t)', 'extracted speaker y4(t)']
        filename = self.config['predictions_path'] + self.name + '_spectrogram.png'
        draw_subpcolor(data, legend, filename)



    #---------------------------------------------------------
    def check_date(self):
        """Return True while this source file is unchanged on disk since
        startup; editing the file makes this False and ends training."""
        if (self.creation_date == os.path.getmtime(self.filename)):
            return True
        else:
            return False
#---------------------------------------------------------
#---------------------------------------------------------
if __name__ == "__main__":

    # parse command line args
    parser = argparse.ArgumentParser(description='speaker separation')
    parser.add_argument('--config_file', help='name of json configuration file', default='shoebox_c2.json')
    parser.add_argument('mode', help='mode: [train, valid, plot]', nargs='?', choices=('train', 'valid', 'plot'), default='train')
    args = parser.parse_args()

    # load config file; catch only file/JSON errors instead of a bare except
    try:
        print('*** loading config file: %s' % args.config_file)
        with open(args.config_file, 'r') as f:
            config = json.load(f)
    except (OSError, json.JSONDecodeError):
        print('*** could not load config file: %s' % args.config_file)
        sys.exit(0)

    # Instantiate once and do NOT shadow the class name with the instance
    # (the original rebound the name 'bssd' to the instance).
    model = bssd(config)
    if args.mode == 'train':
        model.train()
    elif args.mode == 'valid':
        model.validate()
    elif args.mode == 'plot':
        model.plot()
| [
"numpy.abs",
"argparse.ArgumentParser",
"tensorflow.compat.v1.logging.set_verbosity",
"loaders.feature_generator.feature_generator",
"numpy.append",
"json.load",
"modules.identification_td.identification",
"keras.layers.Input",
"os.path.basename",
"modules.beamforming_td.beamforming",
"keras.mod... | [((802, 860), 'numpy.set_printoptions', 'np.set_printoptions', ([], {'precision': '(3)', 'threshold': '(3)', 'edgeitems': '(3)'}), '(precision=3, threshold=3, edgeitems=3)\n', (821, 860), True, 'import numpy as np\n'), ((861, 923), 'tensorflow.compat.v1.logging.set_verbosity', 'tf.compat.v1.logging.set_verbosity', (['tf.compat.v1.logging.ERROR'], {}), '(tf.compat.v1.logging.ERROR)\n', (895, 923), True, 'import tensorflow as tf\n'), ((185, 207), 'os.path.abspath', 'os.path.abspath', (['"""../"""'], {}), "('../')\n", (200, 207), False, 'import os\n'), ((6872, 6929), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""speaker separation"""'}), "(description='speaker separation')\n", (6895, 6929), False, 'import argparse\n'), ((1195, 1225), 'loaders.feature_generator.feature_generator', 'feature_generator', (['config', 'set'], {}), '(config, set)\n', (1212, 1225), False, 'from loaders.feature_generator import feature_generator\n'), ((1339, 1365), 'os.path.basename', 'os.path.basename', (['__file__'], {}), '(__file__)\n', (1355, 1365), False, 'import os\n'), ((1461, 1492), 'os.path.getmtime', 'os.path.getmtime', (['self.filename'], {}), '(self.filename)\n', (1477, 1492), False, 'import os\n'), ((2309, 2331), 'modules.beamforming_td.beamforming', 'beamforming', (['self.fgen'], {}), '(self.fgen)\n', (2320, 2331), False, 'from modules.beamforming_td import beamforming\n'), ((2362, 2387), 'modules.identification_td.identification', 'identification', (['self.fgen'], {}), '(self.fgen)\n', (2376, 2387), False, 'from modules.identification_td import identification\n'), ((2900, 2956), 'keras.layers.Input', 'Input', ([], {'shape': '(self.samples, self.nmic)', 'dtype': 'tf.float32'}), '(shape=(self.samples, self.nmic), dtype=tf.float32)\n', (2905, 2956), False, 'from keras.layers import Layer, Dense, Activation, LSTM, Input, Lambda, BatchNormalization, LayerNormalization, Conv1D, Bidirectional\n'), ((3019, 3065), 'keras.layers.Input', 
'Input', ([], {'shape': '(self.samples,)', 'dtype': 'tf.float32'}), '(shape=(self.samples,), dtype=tf.float32)\n', (3024, 3065), False, 'from keras.layers import Layer, Dense, Activation, LSTM, Input, Lambda, BatchNormalization, LayerNormalization, Conv1D, Bidirectional\n'), ((3134, 3167), 'keras.layers.Input', 'Input', ([], {'shape': '(1,)', 'dtype': 'tf.int32'}), '(shape=(1,), dtype=tf.int32)\n', (3139, 3167), False, 'from keras.layers import Layer, Dense, Activation, LSTM, Input, Lambda, BatchNormalization, LayerNormalization, Conv1D, Bidirectional\n'), ((3238, 3271), 'keras.layers.Input', 'Input', ([], {'shape': '(1,)', 'dtype': 'tf.int32'}), '(shape=(1,), dtype=tf.int32)\n', (3243, 3271), False, 'from keras.layers import Layer, Dense, Activation, LSTM, Input, Lambda, BatchNormalization, LayerNormalization, Conv1D, Bidirectional\n'), ((3500, 3546), 'keras.models.Model', 'Model', ([], {'inputs': '[Z, R, pid, sid]', 'outputs': '[Y, E]'}), '(inputs=[Z, R, pid, sid], outputs=[Y, E])\n', (3505, 3546), False, 'from keras.models import Sequential, Model\n'), ((5225, 5255), 'numpy.append', 'np.append', (['self.si_sdr', 'si_sdr'], {}), '(self.si_sdr, si_sdr)\n', (5234, 5255), True, 'import numpy as np\n'), ((5275, 5299), 'numpy.append', 'np.append', (['self.eer', 'eer'], {}), '(self.eer, eer)\n', (5284, 5299), True, 'import numpy as np\n'), ((6580, 6611), 'os.path.getmtime', 'os.path.getmtime', (['self.filename'], {}), '(self.filename)\n', (6596, 6611), False, 'import os\n'), ((7367, 7379), 'json.load', 'json.load', (['f'], {}), '(f)\n', (7376, 7379), False, 'import json\n'), ((5899, 5917), 'numpy.abs', 'np.abs', (['z[0, :, 0]'], {}), '(z[0, :, 0])\n', (5905, 5917), True, 'import numpy as np\n'), ((6105, 6120), 'numpy.abs', 'np.abs', (['y[0, :]'], {}), '(y[0, :])\n', (6111, 6120), True, 'import numpy as np\n')] |
import unittest
import numpy as np
from image_processing.contour import create_contour_from_points
from image_processing.scale import scale_point
class ScalePointTestCase(unittest.TestCase):
    """Smoke test: scale every point of a rectangular ROI contour."""

    def test_yolo(self):
        corner_points = np.array([[30, 88], [118, 88], [118, 20], [30, 20]])
        contour = create_contour_from_points(corner_points)
        scaled = []
        # Each contour entry is a 1x2 array: unpack to the point coordinates.
        for [[px, py]] in contour:
            scaled.append(scale_point(300, 300, 330, 330, px, py))
        print(contour)
        print(scaled)
print(new_contour)
| [
"image_processing.scale.scale_point",
"numpy.array",
"image_processing.contour.create_contour_from_points"
] | [((240, 292), 'numpy.array', 'np.array', (['[[30, 88], [118, 88], [118, 20], [30, 20]]'], {}), '([[30, 88], [118, 88], [118, 20], [30, 20]])\n', (248, 292), True, 'import numpy as np\n'), ((315, 353), 'image_processing.contour.create_contour_from_points', 'create_contour_from_points', (['roi_points'], {}), '(roi_points)\n', (341, 353), False, 'from image_processing.contour import create_contour_from_points\n'), ((378, 415), 'image_processing.scale.scale_point', 'scale_point', (['(300)', '(300)', '(330)', '(330)', 'x', 'y'], {}), '(300, 300, 330, 330, x, y)\n', (389, 415), False, 'from image_processing.scale import scale_point\n')] |
import numpy as np
from opendp.smartnoise_t.sql.privacy import Privacy
class Odometer:
    """
    Implements k-folds homogeneous composition from Kairouz, et al
    Theorem 3.4
    https://arxiv.org/pdf/1311.0776.pdf
    """
    def __init__(self, privacy: Privacy):
        # Number of folds (queries) spent so far.
        self.k = 0
        self.privacy = privacy
        if not self.privacy.delta:
            self.privacy.delta = 0.0
        # Slack term (delta-tilde in the paper) used by the advanced bounds.
        self.tol = self.privacy.delta / 2
    def spend(self, k=1):
        """Record ``k`` additional queries against the budget."""
        self.k += k
    def reset(self):
        """Forget all recorded spends."""
        self.k = 0
    @property
    def spent(self):
        """Return the (epsilon, delta) privacy cost of the ``k`` spends.

        Epsilon is the minimum of basic composition and the two optimal
        bounds of Theorem 3.4; delta follows the 1-(1-tol)(1-delta)^k form.
        """
        epsilon = self.privacy.epsilon
        delta = self.privacy.delta
        tol = self.tol
        if self.k == 0:
            return (0.0, 0.0)
        basic = self.k * epsilon
        optimal_left_side = ((np.exp(epsilon) - 1) * epsilon * self.k) / (np.exp(epsilon) + 1)
        # Theorem 3.4 uses log(e + sqrt(k*eps^2)/tol) with Euler's e, matching
        # OdometerHeterogeneous below; the original mistakenly used `epsilon`
        # as the additive constant inside the log.
        optimal_a = optimal_left_side + epsilon * np.sqrt(
            2 * self.k * np.log(np.exp(1) + (np.sqrt(self.k * epsilon * epsilon) / tol)))
        # The original omitted the log around 1/tol (compare the heterogeneous
        # version, which uses np.log(1/self.tol)).
        optimal_b = optimal_left_side + epsilon * np.sqrt(2 * self.k * np.log(1 / tol))
        delta = 1 - (1 - delta) ** self.k
        # Composed delta is 1 - (1 - tol)(1 - delta)^k; the original multiplied
        # by (1 - delta) instead of (1 - tol).
        delta = delta * (1 - tol) + tol
        return tuple([min(basic, optimal_a, optimal_b), delta])
class OdometerHeterogeneous:
    """
    Implements k-folds heterogeneous composition from Kairouz, et al
    Theorem 3.5
    https://arxiv.org/pdf/1311.0776.pdf
    """
    def __init__(self, privacy: Privacy = None):
        # Each spend is recorded as an (epsilon, delta) tuple.
        self.steps = []
        self.privacy = privacy
        # Slack term (delta-tilde); lazily initialized on first spend when no
        # default privacy object is supplied.
        self.tol = None
        if privacy:
            if not self.privacy.delta:
                self.privacy.delta = 0.0
            self.tol = self.privacy.delta / 2
    def spend(self, privacy: Privacy = None):
        """Record one query at the given privacy cost (or the default one).

        Raises ValueError when neither a per-call nor a default Privacy
        object is available.
        """
        if privacy:
            if not self.tol:
                self.tol = privacy.delta / 2
            # NOTE(review): this clamps tol down to the smallest delta seen so
            # far — presumably to keep the slack no larger than any single
            # step's delta; confirm against Theorem 3.5's requirements.
            if self.tol > privacy.delta:
                self.tol = privacy.delta
            self.steps.append((privacy.epsilon, privacy.delta))
        elif self.privacy:
            self.steps.append((self.privacy.epsilon, self.privacy.delta))
        else:
            raise ValueError("No privacy information passed in")
    def reset(self):
        # Forget all recorded spends (tol is intentionally kept as-is).
        self.steps = []
    @property
    def k(self):
        # Number of composed queries.
        return len(self.steps)
    @property
    def spent(self):
        """Return the composed (epsilon, delta) over all recorded steps.

        Epsilon is the minimum of basic composition and the two optimal
        bounds of Theorem 3.5; delta is 1 - (1 - tol) * prod(1 - delta_i).
        """
        k = len(self.steps)
        basic = np.sum([eps for eps, _ in self.steps])
        optimal_left_side = np.sum([((np.exp(eps) - 1) * eps) / ((np.exp(eps) + 1)) for eps, _ in self.steps])
        sq = np.sum([eps * eps for eps, _ in self.steps])
        sqsq = np.sum([2 * eps * eps for eps, _ in self.steps])
        optimal_a = optimal_left_side + np.sqrt(sqsq * np.log(np.exp(1) + (np.sqrt(sq)/self.tol)))
        optimal_b = optimal_left_side + np.sqrt(sqsq * np.log(1/self.tol))
        delta = 1 - (1 - self.tol) * np.prod([(1 - delta) for _, delta in self.steps])
        return tuple([min(basic, optimal_a, optimal_b), delta])
| [
"numpy.prod",
"numpy.sqrt",
"numpy.log",
"numpy.exp",
"numpy.sum"
] | [((2291, 2329), 'numpy.sum', 'np.sum', (['[eps for eps, _ in self.steps]'], {}), '([eps for eps, _ in self.steps])\n', (2297, 2329), True, 'import numpy as np\n'), ((2454, 2500), 'numpy.sum', 'np.sum', (['[(eps * eps) for eps, _ in self.steps]'], {}), '([(eps * eps) for eps, _ in self.steps])\n', (2460, 2500), True, 'import numpy as np\n'), ((2514, 2564), 'numpy.sum', 'np.sum', (['[(2 * eps * eps) for eps, _ in self.steps]'], {}), '([(2 * eps * eps) for eps, _ in self.steps])\n', (2520, 2564), True, 'import numpy as np\n'), ((812, 827), 'numpy.exp', 'np.exp', (['epsilon'], {}), '(epsilon)\n', (818, 827), True, 'import numpy as np\n'), ((1011, 1042), 'numpy.sqrt', 'np.sqrt', (['(2 * self.k * (1 / tol))'], {}), '(2 * self.k * (1 / tol))\n', (1018, 1042), True, 'import numpy as np\n'), ((2774, 2823), 'numpy.prod', 'np.prod', (['[(1 - delta) for _, delta in self.steps]'], {}), '([(1 - delta) for _, delta in self.steps])\n', (2781, 2823), True, 'import numpy as np\n'), ((2717, 2737), 'numpy.log', 'np.log', (['(1 / self.tol)'], {}), '(1 / self.tol)\n', (2723, 2737), True, 'import numpy as np\n'), ((770, 785), 'numpy.exp', 'np.exp', (['epsilon'], {}), '(epsilon)\n', (776, 785), True, 'import numpy as np\n'), ((2396, 2407), 'numpy.exp', 'np.exp', (['eps'], {}), '(eps)\n', (2402, 2407), True, 'import numpy as np\n'), ((2368, 2379), 'numpy.exp', 'np.exp', (['eps'], {}), '(eps)\n', (2374, 2379), True, 'import numpy as np\n'), ((2625, 2634), 'numpy.exp', 'np.exp', (['(1)'], {}), '(1)\n', (2631, 2634), True, 'import numpy as np\n'), ((2638, 2649), 'numpy.sqrt', 'np.sqrt', (['sq'], {}), '(sq)\n', (2645, 2649), True, 'import numpy as np\n'), ((922, 957), 'numpy.sqrt', 'np.sqrt', (['(self.k * epsilon * epsilon)'], {}), '(self.k * epsilon * epsilon)\n', (929, 957), True, 'import numpy as np\n')] |
import numpy as np
import math
from matplotlib import pyplot as plt
# Use SimHei so the Chinese labels in the plots below render correctly,
# and keep the minus sign displayable under that font.
plt.rcParams['font.sans-serif'] = ['SimHei']
plt.rcParams['axes.unicode_minus'] = False
# Conjugate gradient method
def cg(A, b, x, r, d, epsilon):
    """Solve A x = b with the conjugate-gradient iteration.

    ``x`` is updated in place (callers read the solution from it);
    ``r`` and ``d`` must be initialized to b - A x and the first search
    direction.  Iterates until ||r|| < epsilon.
    """
    while np.linalg.norm(r) >= epsilon:
        rs_old = np.linalg.norm(r) ** 2
        # Step length along the current search direction d.
        step = rs_old / np.dot(np.dot(d.T, A), d)
        x += step * d
        r = b - np.dot(A, x)
        # Fletcher-Reeves style update of the search direction.
        d = r + (np.linalg.norm(r) ** 2 / rs_old) * d
# xi, yi are the given data points; p is the degree of the fitted polynomial.
def my_polyfit(xi, yi, p):
    """Least-squares polynomial fit of degree p through (xi, yi).

    Builds the normal equations, solves them with the conjugate-gradient
    routine ``cg``, prints the coefficients and the fit error, and plots
    the fitted curve against the data (blocks on plt.show()).
    """
    # Build the normal-equation system G c = y.
    m = len(xi)
    n = p + 1
    err = 0
    G = np.zeros((n, n))
    for i in range(n):
        for j in range(n):
            for k in range(m):
                G[i][j] += pow(xi[k], i + j)
    y = np.zeros(n)
    for i in range(n):
        for k in range(m):
            y[i] += pow(xi[k], i) * yi[k]
    # Solve the normal equations with the conjugate-gradient method.
    c0 = np.zeros(n)
    c0 = c0.reshape(-1, 1)
    c = c0
    y = y.reshape(-1, 1)
    r0 = y - np.dot(G, c0)
    r = r0
    d = r0
    cg(G, y, c, r, d, 1e-8)
    # Print the fitted polynomial coefficients.
    print('拟合多项式的各项系数为:')
    for i in range(len(c)):
        print('c', i, '= ', c[i], sep='')
    # Compute the RMS-style fit error of the polynomial.
    for i in range(m):
        temp = 0
        for j in range(n):
            temp += c[j] * pow(xi[i], j)
        err += pow(temp - yi[i], 2)
    err = math.sqrt(err)
    print('拟合多项式的误差E=', err)
    # Plot the fitted polynomial curve over a dense grid.
    # NOTE(review): the chart title says "4th-degree polynomial" but the
    # degree is whatever `p` is (the __main__ driver passes 3) — confirm.
    xt = np.linspace(xi[0], xi[-1], len(xi) * 20)
    yt = 0
    for i in range(len(c)):
        yt += pow(xt, i) * c[i]
    plt.title("最小二乘拟合四次多项式曲线图")
    plt.xlabel("x")
    plt.ylabel("y")
    plt.plot(xi, yi, '*')
    plt.plot(xt, yt)
    plt.show()
if __name__ == "__main__":
    # Sample data points to fit.
    xi = [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9]
    yi = [5.1234, 5.3057, 5.5687, 5.9375, 6.4370, 7.0978, 7.9493, 9.0253, 10.3627]
    # Fit a degree-3 least-squares polynomial to (xi, yi) and plot it.
    my_polyfit(xi, yi, 3)
| [
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"math.sqrt",
"numpy.zeros",
"numpy.dot",
"numpy.linalg.norm",
"matplotlib.pyplot.title",
"matplotlib.pyplot.show"
] | [((637, 653), 'numpy.zeros', 'np.zeros', (['(n, n)'], {}), '((n, n))\n', (645, 653), True, 'import numpy as np\n'), ((795, 806), 'numpy.zeros', 'np.zeros', (['n'], {}), '(n)\n', (803, 806), True, 'import numpy as np\n'), ((935, 946), 'numpy.zeros', 'np.zeros', (['n'], {}), '(n)\n', (943, 946), True, 'import numpy as np\n'), ((1399, 1413), 'math.sqrt', 'math.sqrt', (['err'], {}), '(err)\n', (1408, 1413), False, 'import math\n'), ((1594, 1621), 'matplotlib.pyplot.title', 'plt.title', (['"""最小二乘拟合四次多项式曲线图"""'], {}), "('最小二乘拟合四次多项式曲线图')\n", (1603, 1621), True, 'from matplotlib import pyplot as plt\n'), ((1627, 1642), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""x"""'], {}), "('x')\n", (1637, 1642), True, 'from matplotlib import pyplot as plt\n'), ((1648, 1663), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""y"""'], {}), "('y')\n", (1658, 1663), True, 'from matplotlib import pyplot as plt\n'), ((1669, 1690), 'matplotlib.pyplot.plot', 'plt.plot', (['xi', 'yi', '"""*"""'], {}), "(xi, yi, '*')\n", (1677, 1690), True, 'from matplotlib import pyplot as plt\n'), ((1696, 1712), 'matplotlib.pyplot.plot', 'plt.plot', (['xt', 'yt'], {}), '(xt, yt)\n', (1704, 1712), True, 'from matplotlib import pyplot as plt\n'), ((1718, 1728), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1726, 1728), True, 'from matplotlib import pyplot as plt\n'), ((219, 236), 'numpy.linalg.norm', 'np.linalg.norm', (['r'], {}), '(r)\n', (233, 236), True, 'import numpy as np\n'), ((305, 319), 'numpy.dot', 'np.dot', (['d.T', 'A'], {}), '(d.T, A)\n', (311, 319), True, 'import numpy as np\n'), ((337, 353), 'numpy.dot', 'np.dot', (['alpha', 'd'], {}), '(alpha, d)\n', (343, 353), True, 'import numpy as np\n'), ((1029, 1042), 'numpy.dot', 'np.dot', (['G', 'c0'], {}), '(G, c0)\n', (1035, 1042), True, 'import numpy as np\n'), ((265, 282), 'numpy.linalg.norm', 'np.linalg.norm', (['r'], {}), '(r)\n', (279, 282), True, 'import numpy as np\n'), ((425, 437), 'numpy.dot', 'np.dot', (['A', 'x'], {}), '(A, 
x)\n', (431, 437), True, 'import numpy as np\n'), ((454, 471), 'numpy.linalg.norm', 'np.linalg.norm', (['r'], {}), '(r)\n', (468, 471), True, 'import numpy as np\n')] |
import numpy as np
class Activator(object):
    # Base interface for activation functions.
    def forward(self, z):
        """Compute the activation a = f(z). Overridden by subclasses."""
        pass
    def backward(self, z, a, delta):
        """Return (dz, da): upstream gradient `delta` times the local
        gradient, and the local gradient itself. Overridden by subclasses."""
        pass
class Identity(Activator):
    # Identity activation: f(z) = z.
    def forward(self, z):
        return z
    def backward(self, z, a, delta):
        # NOTE(review): returns `a` in the local-gradient slot, whereas the
        # sibling activators return f'(z) (which would be all-ones here) —
        # confirm callers rely on this before changing it.
        return delta, a
class Sigmoid(Activator):
    """Logistic sigmoid: f(z) = 1 / (1 + e^-z), f'(z) = a(1 - a)."""
    def forward(self, z):
        return 1.0 / (np.exp(-z) + 1.0)
    def backward(self, z, a, delta):
        local_grad = np.multiply(a, 1 - a)
        return np.multiply(delta, local_grad), local_grad
class Tanh(Activator):
    """Hyperbolic tangent via the logistic form: f(z) = 2/(1+e^-2z) - 1."""
    def forward(self, z):
        exp_term = np.exp(-2 * z)
        return 2.0 / (1.0 + exp_term) - 1
    def backward(self, z, a, delta):
        local_grad = 1 - np.multiply(a, a)
        return np.multiply(delta, local_grad), local_grad
class Relu(Activator):
    """Rectified linear unit: f(z) = max(z, 0)."""
    def forward(self, z):
        return np.maximum(z, 0)
    def backward(self, z, a, delta):
        # Local gradient is 1 where the pre-activation was positive, else 0.
        mask = (z > 0).astype(float)
        return mask * delta, mask
class BenIdentity(Activator):
    """Bent identity: f(z) = (sqrt(z^2 + 1) - 1)/2 + z."""
    def forward(self, z):
        root = np.sqrt(np.multiply(z, z) + 1)
        return (root - 1) / 2 + z
    def backward(self, z, a, delta):
        # f'(z) = z / (2 sqrt(z^2 + 1)) + 1
        local_grad = z / (2 * np.sqrt(z ** 2 + 1)) + 1
        return np.multiply(local_grad, delta), local_grad
class Elu(Activator):
    """Exponential linear unit: f(z) = z for z > 0, alpha*(e^z - 1) otherwise."""
    def __init__(self, alpha):
        self.alpha = alpha
    def forward(self, z):
        # Element-wise over a 1-D iterable of pre-activations.
        return np.array([x if x > 0 else self.alpha * (np.exp(x) - 1) for x in z])
    def backward(self, z, a, delta):
        # f'(z) = 1 for z > 0, else alpha*e^z.  For z <= 0 the activation is
        # a = alpha*(e^z - 1), so alpha*e^z == a + alpha — the standard ELU
        # identity.  The original computed alpha*exp(a) over the activations,
        # which is not the ELU derivative.  (a > 0 iff z > 0 for alpha > 0,
        # so the sign test on `a` is still valid.)
        da = np.array([1 if x > 0 else x + self.alpha for x in a])
        dz = np.multiply(delta, da)
        return dz, da
class LeakyRelu(Activator):
    """Leaky ReLU: f(z) = z for z > 0, alpha*z otherwise."""
    def __init__(self, alpha):
        self.alpha = alpha
    def forward(self, z):
        return np.array([v if v > 0 else self.alpha * v for v in z])
    def backward(self, z, a, delta):
        # Slope is 1 on the positive side, alpha on the negative side
        # (a and z share the same sign for alpha > 0).
        slopes = np.array([1 if v > 0 else self.alpha for v in a])
        return np.multiply(delta, slopes), slopes
class SoftPlus(Activator):
    """Softplus: f(z) = ln(1 + e^z); f'(z) is the logistic sigmoid."""
    def forward(self, z):
        return np.log(1 + np.exp(z))
    def backward(self, z, a, delta):
        ez = np.exp(z)
        local_grad = ez / (1 + ez)
        return np.multiply(delta, local_grad), local_grad
class Step(Activator):
    """Heaviside-style step activation with a configurable threshold."""
    def __init__(self, threshold):
        self.threshold = threshold
    def forward(self, z):
        return np.array([1 if v > self.threshold else 0 for v in z])
    def backward(self, z, a, delta):
        # The step function has zero gradient almost everywhere.
        zero_grad = np.zeros(a.shape)
        return zero_grad, zero_grad
| [
"numpy.multiply",
"numpy.sqrt",
"numpy.exp",
"numpy.array",
"numpy.zeros",
"numpy.maximum"
] | [((439, 460), 'numpy.multiply', 'np.multiply', (['a', '(1 - a)'], {}), '(a, 1 - a)\n', (450, 460), True, 'import numpy as np\n'), ((475, 497), 'numpy.multiply', 'np.multiply', (['delta', 'da'], {}), '(delta, da)\n', (486, 497), True, 'import numpy as np\n'), ((715, 737), 'numpy.multiply', 'np.multiply', (['delta', 'da'], {}), '(delta, da)\n', (726, 737), True, 'import numpy as np\n'), ((829, 845), 'numpy.maximum', 'np.maximum', (['z', '(0)'], {}), '(z, 0)\n', (839, 845), True, 'import numpy as np\n'), ((918, 935), 'numpy.zeros', 'np.zeros', (['z.shape'], {}), '(z.shape)\n', (926, 935), True, 'import numpy as np\n'), ((1123, 1140), 'numpy.multiply', 'np.multiply', (['z', 'z'], {}), '(z, z)\n', (1134, 1140), True, 'import numpy as np\n'), ((1155, 1170), 'numpy.sqrt', 'np.sqrt', (['(p1 + 1)'], {}), '(p1 + 1)\n', (1162, 1170), True, 'import numpy as np\n'), ((1321, 1343), 'numpy.multiply', 'np.multiply', (['da', 'delta'], {}), '(da, delta)\n', (1332, 1343), True, 'import numpy as np\n'), ((1697, 1719), 'numpy.multiply', 'np.multiply', (['delta', 'da'], {}), '(delta, da)\n', (1708, 1719), True, 'import numpy as np\n'), ((1881, 1936), 'numpy.array', 'np.array', (['[(x if x > 0 else self.alpha * x) for x in z]'], {}), '([(x if x > 0 else self.alpha * x) for x in z])\n', (1889, 1936), True, 'import numpy as np\n'), ((1989, 2040), 'numpy.array', 'np.array', (['[(1 if x > 0 else self.alpha) for x in a]'], {}), '([(1 if x > 0 else self.alpha) for x in a])\n', (1997, 2040), True, 'import numpy as np\n'), ((2053, 2075), 'numpy.multiply', 'np.multiply', (['delta', 'da'], {}), '(delta, da)\n', (2064, 2075), True, 'import numpy as np\n'), ((2264, 2273), 'numpy.exp', 'np.exp', (['z'], {}), '(z)\n', (2270, 2273), True, 'import numpy as np\n'), ((2314, 2336), 'numpy.multiply', 'np.multiply', (['delta', 'da'], {}), '(delta, da)\n', (2325, 2336), True, 'import numpy as np\n'), ((2502, 2557), 'numpy.array', 'np.array', (['[(1 if x > self.threshold else 0) for x in z]'], {}), '([(1 
if x > self.threshold else 0) for x in z])\n', (2510, 2557), True, 'import numpy as np\n'), ((2628, 2645), 'numpy.zeros', 'np.zeros', (['a.shape'], {}), '(a.shape)\n', (2636, 2645), True, 'import numpy as np\n'), ((683, 700), 'numpy.multiply', 'np.multiply', (['a', 'a'], {}), '(a, a)\n', (694, 700), True, 'import numpy as np\n'), ((373, 383), 'numpy.exp', 'np.exp', (['(-z)'], {}), '(-z)\n', (379, 383), True, 'import numpy as np\n'), ((2182, 2191), 'numpy.exp', 'np.exp', (['z'], {}), '(z)\n', (2188, 2191), True, 'import numpy as np\n'), ((605, 619), 'numpy.exp', 'np.exp', (['(-2 * z)'], {}), '(-2 * z)\n', (611, 619), True, 'import numpy as np\n'), ((1282, 1301), 'numpy.sqrt', 'np.sqrt', (['(z ** 2 + 1)'], {}), '(z ** 2 + 1)\n', (1289, 1301), True, 'import numpy as np\n'), ((1660, 1669), 'numpy.exp', 'np.exp', (['x'], {}), '(x)\n', (1666, 1669), True, 'import numpy as np\n'), ((1539, 1548), 'numpy.exp', 'np.exp', (['x'], {}), '(x)\n', (1545, 1548), True, 'import numpy as np\n')] |
import influxdb
import pandas as pd
import numpy as np
import kubernetes
import os
import math
import signal
import socket
import multiprocessing
import datetime as dt
from utils import Timer
import scipy.optimize as opt
from sklearn.externals import joblib
from sklearn.ensemble import GradientBoostingRegressor
import time
from sklearn.preprocessing import MinMaxScaler
import keras
from keras import optimizers
from keras.layers import LSTM,Dense,Activation,Dropout,Input,concatenate
from keras.models import Sequential,Model
from keras.callbacks import EarlyStopping, ModelCheckpoint,LearningRateScheduler,ReduceLROnPlateau
from keras.utils.vis_utils import plot_model
import matplotlib.pyplot as plt
import time
import json
import re
# from fps import vggfpmodel,resfpmodel,res2fpmodel,xcefpmodel,denfpmodel
from TimeoutException import Myhandler,TimeoutError
# def load_task(params_dict,template_id):
# if template_id == 1:
# try:
# batch_size,flops,params = vggfpmodel.vggfp(**params_dict)
# except:
# print("报错")
# elif template_id == 2:
# try:
# batch_size,flops,params = resfpmodel.resfp(**params_dict)
# except Exception as e:
# print(e)
# elif template_id == 3:
# try:
# batch_size,flops,params = res2fpmodel.res2fp(**params_dict)
# except Exception as e:
# print(e)
# else:
# try:
# batch_size,flops,params = xcefpmodel.xcefp(**params_dict)
# except Exception as e:
# print(e)
#
# return batch_size,flops,params
def save_config(config, name):
    """Serialize ``config`` to ``<name>.json`` as pretty-printed UTF-8 JSON.

    Args:
        config: mapping of settings to persist.
        name: output path, without the ``.json`` suffix.
    """
    filename = "%s.json" % name
    # Shallow copy so later mutation of `config` cannot race the write.
    config_content = dict(config)
    # `with` guarantees the handle is closed even if serialization fails
    # (the original opened/closed manually and leaked on exception).
    with open(filename, 'w', encoding='utf-8') as fw:
        # ensure_ascii=False keeps non-ASCII characters readable in the file.
        json.dump(config_content, fw, ensure_ascii=False, indent=4)
def load_config(config_file):
    """Read a UTF-8 JSON config file and return the parsed object.

    Args:
        config_file: path to the JSON file.
    Returns:
        The deserialized JSON content (typically a dict).
    """
    # `with` closes the handle even when parsing raises (the original
    # opened/closed manually, leaking the handle on a parse error).
    with open(config_file, encoding='utf-8') as f:
        return json.load(f)
def select_node(client, measure_s):
    """Return the lowest-numbered worker node found in the measurement.

    Queries the series grouped by the `nodes` tag and picks the smallest
    `workerN` index.
    """
    query = "select * from " + measure_s + " group by nodes order by desc limit 10"
    series_keys = client.query(query).keys()
    indices = []
    for _, tags in series_keys:
        # Tag values look like 'workerN'; strip the 'worker' prefix.
        indices.append(int(tags['nodes'][6:]))
    return 'worker%d' % min(indices)
def load_data(min_steps,length,measure,db="PREDICT",host='192.168.128.10',first=True):
    """Block until a training job has produced enough loss samples, then return them.

    Polls the InfluxDB measurement '<job>S<id>' for per-step loss records and
    the Kubernetes API for the job's pods.  When first=True it waits until at
    least `min_steps` steps are visible (rescaling the target step count from
    '<job>T<id>'/'<job>U<id>' while waiting) and then reads all rows;
    otherwise it reads the most recent `length` rows.

    Returns (loss_list, max_loss, flag) with flag=1 when the job is near its
    final step ('jieshu' ~ finished), or ([], 0) when the job disappeared or
    ended abnormally ('yichang' ~ abnormal).  Side effects: rewrites the
    '<job>S<id>.json' config via save_config, may update the job's allow
    config on NFS, and may write a rescaled step count back to InfluxDB.
    """
    # measure,db="PREDICT",host='192.168.128.10'
    aToken = '<KEY>'
    aConfiguration = kubernetes.client.Configuration()
    aConfiguration.host = "https://192.168.128.10:6443"
    aConfiguration.verify_ssl = False
    aConfiguration.api_key = {"authorization": "Bearer " + aToken}
    aApiClient = kubernetes.client.ApiClient(aConfiguration)
    v1 = kubernetes.client.CoreV1Api(aApiClient)
    print("Start for db load data")
    client = influxdb.InfluxDBClient(host=host,port=8086,username='admin',password='<PASSWORD>',database=db)
    # `measure` is "<JOB> ... <id>"; derived measurement names: S=samples,
    # T=training-step config, W=write/modulate flag, U=update/retry info.
    pre_list = measure.split(" ")
    measure_s = pre_list[0]+'S'+pre_list[-1]
    measure_t = pre_list[0]+'T'+pre_list[-1]
    measure_write = pre_list[0]+'W'+pre_list[-1]
    measure_up = pre_list[0] + 'U' + pre_list[-1]
    print(measure_s)
    catched_job = pre_list[0]
    catched_job = catched_job.lower()
    jieshu = False
    if catched_job == 'xce':
        aim_ns = 'xception-' + pre_list[-1] + '-' + pre_list[-1]
    else:
        aim_ns = catched_job + "-" + pre_list[-1] + "-" + pre_list[-1]
    if first:
        min_steps2 = min_steps
        yichang = False
        countt00 = 0
        # Poll until the job has logged at least min_steps2 steps.
        while True:
            # selected_node = select_node(client,measure_s)
            res = client.query("select * from " + measure_s + " where nodes='worker0' order by desc limit 10")
            print("select * from " + measure_s + " where nodes='worker0' order by desc limit 10")
            keys = res.keys()
            print(keys[:])
            # Wait until the measurement has any series at all.
            while True:
                if keys:
                    break
                else:
                    time.sleep(10)
                    res = client.query("select * from " + measure_s + " where nodes='worker0' order by desc limit 10")
                    keys = res.keys()
                    print(keys[:])
            msg_inter = list(res[keys[0]])
            step_now = int(msg_inter[0]['step'])
            print(step_now)
            len_msg = len(msg_inter)
            # Average per-step wall time over the last few samples.
            interval_step = 0
            for i in range(len_msg):
                interval_step += msg_inter[i]['time_d']
            interval_step = (interval_step / len_msg)
            if step_now >= min_steps2:
                break
            else:
                ns_list = get_ns(v1)
                write_ss = client.query("select * from " + measure_write + " order by desc limit 1")
                key_write = write_ss.keys()
                print(key_write[:])
                write_inter = write_ss[key_write[0]]
                write_items = list(write_inter)
                print(write_items[:])
                write_now = int(write_items[0]['modulate'])
                # Namespace gone and no modulation in flight: job aborted.
                if aim_ns not in ns_list and (write_now==0):
                    yichang = True
                    break
                pod_status = [i.status.phase for i in v1.list_namespaced_pod(aim_ns).items]
                print(pod_status)
                print("going on")
                print(measure)
                # print(math.ceil(step_to_train * 0.75))
                # print(step_now)
                write_ss = client.query("select * from " + measure_write + " order by desc limit 1")
                key_write = write_ss.keys()
                print(key_write[:])
                write_inter = write_ss[key_write[0]]
                write_items = list(write_inter)
                print(write_items[:])
                write_now = int(write_items[0]['modulate'])
                # Tolerate a few transient Succeeded/Failed readings before
                # declaring the job ended.
                if ('Succeeded' in pod_status or 'Failed' in pod_status) and (write_now==0):
                    if countt00 <= 3:
                        countt00+=1
                    else:
                        print("Job is ended")
                        yichang = True
                        break
                div_num = min_steps2 - step_now + 1
                sleep_last = interval_step * div_num
                print(sleep_last)
                print(div_num)
                print(interval_step)
                result = client.query("select * from " + measure_t + " order by desc limit 1")
                key = result.keys()
                result_inter = result[key[0]]
                result_items = list(result_inter)
                trains_step = int(result_items[0]['training_step'])
                if step_now >= math.ceil(trains_step*0.85):
                    jieshu = True
                    break
                if step_now >= trains_step - 3:
                    print("This process is ended!!")
                    jieshu = True
                    break
                # allow path!!!
                # allow_path = "/tfdata/k8snfs/%s/%s.json" % (aim_ns, measure_t)
                allow_path = '/tfdata/k8snfs/setad2/%s/%s.json' % (aim_ns, measure_t)
                retry_now = int(result_items[0]['retry'])
                allow_read = load_config(allow_path)
                print("Reload success!!")
                allow_read['retry'] = retry_now
                ps_now = int(result_items[0]['ps'])
                worker_now = int(result_items[0]['worker'])
                allow_read['worker'] = worker_now
                allow_read['ps'] = ps_now
                save_config2(allow_read, allow_path)
                print("save success!!")
                result2 = client.query("select * from " + measure_up + " order by desc limit 1")
                key2 = result2.keys()
                # print(key2)
                result_inter2 = result2[key2[0]]
                result_items2 = list(result_inter2)
                # print(result_items2)
                retry_top = int(result_items2[0]['retry'])
                # On a retry with a new worker count, rescale the target step
                # count proportionally and persist it everywhere.
                if retry_top != retry_now:
                    new_ps = int(result_items2[0]['ps'])
                    new_worker = int(result_items2[0]['worker'])
                    trains_step = math.ceil(trains_step * worker_now / new_worker)
                    allow_read = load_config(allow_path)
                    allow_read['retry'] = retry_top
                    allow_read['ps'] = new_ps
                    allow_read['worker'] = new_worker
                    save_config2(allow_read, allow_path)
                    print("saved successful!!")
                    # print(trains_step)
                    step_items = [
                        {
                            'measurement': measure_t,
                            'tags': {
                                'task': int(pre_list[-1]),
                                'runtimes': int(pre_list[-1]),
                                'retry': int(retry_top)
                            },
                            'fields': {
                                'training_step': int(trains_step),
                                'ps': int(allow_read['ps']),
                                'worker': int(allow_read['worker'])
                            }
                        }
                    ]
                    print("saved in db")
                    client.write_points(step_items, time_precision="ms", database="PREDICT")
                    print("Writed in db")
                    min_steps2 = trains_step*0.2
                time.sleep(float(interval_step))
        if yichang:
            return [],0
        # selected_node = select_node(client,measure_s)
        result = client.query("select * from " + measure_s + " where nodes='worker0' order by desc")
    else:
        # selected_node = select_node(client,measure_s)
        result = client.query("select * from " + measure_s + " where nodes='worker0' order by desc limit "+str(length))
        print("select * from " + measure_s + " where nodes='worker0' order by desc limit "+str(length))
    keys = result.keys()
    print(keys)
    msg_raw = list(result[keys[0]])
    print(msg_raw)
    print(first)
    print("Catched raw data")
    # msg = {}
    # tmp_step = []
    # Group the raw samples by step, then average the losses per step.
    tmp_loss = {}
    for i in range(len(msg_raw)):
        # tmp_step.append(int(msg_raw[i]['step']))
        tmp = int(msg_raw[i]['step'])
        # tmp_loss.append(msg_raw[i]['loss'])
        if tmp in tmp_loss:
            tmp_loss[tmp].append(msg_raw[i]['loss'])
        else:
            tmp_loss[tmp] = [msg_raw[i]['loss']]
    steps = list(tmp_loss.keys())
    loss = []
    steps.sort()
    for i in steps:
        loss_per_step = np.mean(tmp_loss[i])
        loss.append(loss_per_step)
    step_high = steps[-1]
    step_low = steps[0]
    # Persist the observed step range (and, on first call, the max loss used
    # later for normalization) in '<job>S<id>.json'.
    if first:
        config = {}
        loss_max = max(loss)
        config['high'] = step_high
        config['low'] = step_low
        config['loss_max'] = loss_max
        save_config(config,measure_s)
    else:
        filename = '%s.json' % measure_s
        config = load_config(filename)
        config['high'] = step_high
        config['low'] = step_low
        save_config(config,measure_s)
    print("saved config")
    max_loss = config['loss_max']
    print(loss)
    if jieshu:
        return loss, max_loss, 1
    else:
        return loss, max_loss, 0
    # tmp_step.reverse()
    # tmp_loss.reverse()
    # msg['step'] = tmp_step
    # msg['loss'] = tmp_loss
# step_set = set(tmp_step)
def predict_nnls(data_in, step_x):
    """Fit non-negative least squares and return the coefficient vector.

    The residual returned by scipy is intentionally discarded.
    """
    coeffs, _residual = opt.nnls(data_in, step_x)
    return coeffs
def predict_step_nnls(data_in,step_x,measure,top_step,low_step,threshold=0.01):
    """Estimate the training step at which the fitted loss drops below threshold.

    The loss curve is modeled as loss(step) ~= w0/step + w1 with non-negative
    coefficients fitted by NNLS.  Scans forward from the last observed step
    (read from the '<job>S<id>.json' config written by load_data) until the
    modeled loss falls under `threshold`; the result is clamped to
    [low_step + 2, top_step].
    """
    pre_list = measure.split(" ")
    measure_s = pre_list[0] + 'S' + pre_list[-1]
    filename = '%s.json' % measure_s
    config = load_config(filename)
    step_now = config['high'] + 1
    w = predict_nnls(data_in, step_x)
    step_to_train = step_now
    tiaochu = False  # 'tiaochu' ~ "broke out": threshold was crossed
    while step_now <= top_step + 1:
        fed_in = [1 / step_now, 1]
        predict_result = float(np.array(fed_in).dot(w))
        if predict_result < threshold:
            # BUG FIX: the original stored the predicted *loss* value here and
            # later returned it as if it were a step count; callers need the
            # step index at which the threshold is crossed.
            step_to_train = step_now
            tiaochu = True
            break
        step_now += 1
    if tiaochu:
        if step_now <= low_step + 1:
            step_to_train = low_step + 2
        return step_to_train
    else:
        # Threshold never reached within the scan range: train to the cap.
        return top_step
def load_data_nnls(min_steps,length,measure,db="PREDICT",host='192.168.128.10',first=True):
    """NNLS variant of load_data: wait for enough loss samples, return them all.

    Same polling/rescaling behavior as load_data, but the non-first branch
    also reads the full history (no LIMIT) because the NNLS fit uses every
    observed step.  Returns (loss_list, max_loss, flag) with flag=1 when the
    job is near its final step, or ([], 0) on abnormal termination.
    """
    aToken = '<KEY>'
    aConfiguration = kubernetes.client.Configuration()
    aConfiguration.host = "https://192.168.128.10:6443"
    aConfiguration.verify_ssl = False
    aConfiguration.api_key = {"authorization": "Bearer " + aToken}
    aApiClient = kubernetes.client.ApiClient(aConfiguration)
    v1 = kubernetes.client.CoreV1Api(aApiClient)
    print("Start for db load data")
    client = influxdb.InfluxDBClient(host=host, port=8086, username='admin', password='<PASSWORD>', database=db)
    # Derived measurement names: S=samples, T=training-step config,
    # W=write/modulate flag, U=update/retry info.
    pre_list = measure.split(" ")
    measure_s = pre_list[0] + 'S' + pre_list[-1]
    measure_t = pre_list[0] + 'T' + pre_list[-1]
    measure_write = pre_list[0] + 'W' + pre_list[-1]
    measure_up = pre_list[0] + 'U' + pre_list[-1]
    print(measure_s)
    catched_job = pre_list[0]
    catched_job = catched_job.lower()
    jieshu = False
    if catched_job == 'xce':
        aim_ns = 'xception-' + pre_list[-1] + '-' + pre_list[-1]
    else:
        aim_ns = catched_job + "-" + pre_list[-1] + "-" + pre_list[-1]
    if first:
        min_steps2 = min_steps
        yichang = False
        countt00 = 0
        # Poll until the job has logged at least min_steps2 steps.
        while True:
            # selected_node = select_node(client, measure_s)
            res = client.query(
                "select * from " + measure_s + " where nodes='worker0' order by desc limit 10")
            print("select * from " + measure_s + " where nodes='worker0' order by desc limit 10")
            keys = res.keys()
            print(keys[:])
            # Wait until the measurement has any series at all.
            while True:
                if keys:
                    break
                else:
                    time.sleep(10)
                    res = client.query(
                        "select * from " + measure_s + " where nodes='worker0' order by desc limit 10")
                    keys = res.keys()
                    print(keys[:])
            msg_inter = list(res[keys[0]])
            step_now = int(msg_inter[0]['step'])
            print(step_now)
            len_msg = len(msg_inter)
            # Average per-step wall time over the last few samples.
            interval_step = 0
            for i in range(len_msg):
                interval_step += msg_inter[i]['time_d']
            interval_step = (interval_step / len_msg)
            if step_now >= min_steps2:
                break
            else:
                ns_list = get_ns(v1)
                write_ss = client.query("select * from " + measure_write + " order by desc limit 1")
                key_write = write_ss.keys()
                print(key_write[:])
                write_inter = write_ss[key_write[0]]
                write_items = list(write_inter)
                print(write_items[:])
                write_now = int(write_items[0]['modulate'])
                # Namespace gone and no modulation in flight: job aborted.
                if aim_ns not in ns_list and (write_now == 0):
                    yichang = True
                    break
                pod_status = [i.status.phase for i in v1.list_namespaced_pod(aim_ns).items]
                print(pod_status)
                print("going on")
                print(measure)
                # print(math.ceil(step_to_train * 0.75))
                # print(step_now)
                write_ss = client.query("select * from " + measure_write + " order by desc limit 1")
                key_write = write_ss.keys()
                print(key_write[:])
                write_inter = write_ss[key_write[0]]
                write_items = list(write_inter)
                print(write_items[:])
                write_now = int(write_items[0]['modulate'])
                # Tolerate a few transient Succeeded/Failed readings before
                # declaring the job ended.
                if ('Succeeded' in pod_status or 'Failed' in pod_status) and (write_now == 0):
                    if countt00 <= 3:
                        countt00+=1
                    else:
                        print("Job is ended")
                        yichang = True
                        break
                div_num = min_steps2 - step_now + 1
                sleep_last = interval_step * div_num
                print(sleep_last)
                print(div_num)
                print(interval_step)
                result = client.query("select * from " + measure_t + " order by desc limit 1")
                key = result.keys()
                result_inter = result[key[0]]
                result_items = list(result_inter)
                trains_step = int(result_items[0]['training_step'])
                if step_now >= math.ceil(trains_step * 0.85):
                    jieshu = True
                    break
                if step_now >= trains_step - 3:
                    print("This process is ended!!")
                    jieshu = True
                    break
                # allow path!!!
                # allow_path = "/tfdata/k8snfs/%s/%s.json" % (aim_ns, measure_t)
                allow_path = '/tfdata/k8snfs/setad2/%s/%s.json' % (aim_ns, measure_t)
                # allow_path = "/tfdata/k8snfs/%s/%s.json" % (aim_ns, measure_t)
                retry_now = int(result_items[0]['retry'])
                allow_read = load_config(allow_path)
                print("Reload success!!")
                allow_read['retry'] = retry_now
                ps_now = int(result_items[0]['ps'])
                worker_now = int(result_items[0]['worker'])
                allow_read['worker'] = worker_now
                allow_read['ps'] = ps_now
                save_config2(allow_read, allow_path)
                print("save success!!")
                result2 = client.query("select * from " + measure_up + " order by desc limit 1")
                key2 = result2.keys()
                # print(key2)
                result_inter2 = result2[key2[0]]
                result_items2 = list(result_inter2)
                # print(result_items2)
                retry_top = int(result_items2[0]['retry'])
                # On a retry with a new worker count, rescale the target step
                # count proportionally and persist it everywhere.
                if retry_top != retry_now:
                    new_ps = int(result_items2[0]['ps'])
                    new_worker = int(result_items2[0]['worker'])
                    trains_step = math.ceil(trains_step * worker_now / new_worker)
                    allow_read = load_config(allow_path)
                    allow_read['retry'] = retry_top
                    allow_read['ps'] = new_ps
                    allow_read['worker'] = new_worker
                    save_config2(allow_read, allow_path)
                    print("saved successful!!")
                    # print(trains_step)
                    step_items = [
                        {
                            'measurement': measure_t,
                            'tags': {
                                'task': int(pre_list[-1]),
                                'runtimes': int(pre_list[-1]),
                                'retry': int(retry_top)
                            },
                            'fields': {
                                'training_step': int(trains_step),
                                'ps': int(allow_read['ps']),
                                'worker': int(allow_read['worker'])
                            }
                        }
                    ]
                    print("saved in db")
                    client.write_points(step_items, time_precision="ms", database="PREDICT")
                    print("Writed in db")
                    min_steps2 = trains_step * 0.2
                time.sleep(float(interval_step))
        if yichang:
            return [], 0
        # selected_node = select_node(client, measure_s)
        result = client.query("select * from " + measure_s + " where nodes='worker0' order by desc")
    else:
        # selected_node = select_node(client, measure_s)
        result = client.query("select * from " + measure_s + " where nodes='worker0' order by desc")
        print("select * from " + measure_s + " where nodes='worker0' order by desc")
    keys = result.keys()
    print(keys)
    msg_raw = list(result[keys[0]])
    print(msg_raw)
    print(first)
    print("Catched raw data")
    # Group the raw samples by step, then average the losses per step.
    tmp_loss = {}
    for i in range(len(msg_raw)):
        # tmp_step.append(int(msg_raw[i]['step']))
        tmp = int(msg_raw[i]['step'])
        # tmp_loss.append(msg_raw[i]['loss'])
        if tmp in tmp_loss:
            tmp_loss[tmp].append(msg_raw[i]['loss'])
        else:
            tmp_loss[tmp] = [msg_raw[i]['loss']]
    steps = list(tmp_loss.keys())
    loss = []
    steps.sort()
    for i in steps:
        loss_per_step = np.mean(tmp_loss[i])
        loss.append(loss_per_step)
    step_high = steps[-1]
    step_low = steps[0]
    # Persist the observed step range (and, on first call, the max loss used
    # later for normalization) in '<job>S<id>.json'.
    if first:
        config = {}
        loss_max = max(loss)
        config['high'] = step_high
        config['low'] = step_low
        config['loss_max'] = loss_max
        save_config(config, measure_s)
    else:
        filename = '%s.json' % measure_s
        config = load_config(filename)
        config['high'] = step_high
        config['low'] = step_low
        save_config(config, measure_s)
    print("saved config")
    max_loss = config['loss_max']
    # print(loss)
    if jieshu:
        return loss,max_loss,1
    else:
        return loss,max_loss,0
def normalization(loss, max_loss):
    """Divide every loss value by max_loss and return them as an ndarray."""
    scaled = [value / max_loss for value in loss]
    return np.asarray(scaled)
def make_dataset_nnls(data, max_loss):
    """Build the (design matrix, target vector) pair for the NNLS loss fit.

    The newest-first `data` is reversed to chronological order; each row of
    the design matrix is [loss/max_loss, 1] and the targets are 1/step.

    Returns:
        (data_in, step_x): ndarray of shape (n, 2) and ndarray of shape (n,).
    """
    step_len = len(data)
    step_arrange = list(np.arange(step_len) + 1)
    step_arrange.reverse()
    step_x = np.array([1 / i for i in step_arrange])
    # BUG FIX: list.reverse() mutates in place and returns None, so the
    # original `data = data.reverse()` rebound `data` to None and raised a
    # TypeError on the next line.  reversed() also avoids mutating the
    # caller's list.
    data = list(reversed(data))
    data_in = np.array([[i / max_loss, 1] for i in data])
    return data_in, step_x
def make_dataset(data, max_loss, time_step, predict_step, intra):
    """Slice a normalized loss series into LSTM training windows.

    Returns (train_x, train_y, train_twice_x, train_twice_y): single windows
    of `time_step` inputs / `predict_step` targets, plus "twice" samples that
    pair each window with the one `time_step // intra` slices earlier.
    """
    # Normalize losses by the historical maximum (inlined helper).
    loss_array = np.asarray([v / max_loss for v in data])
    total_length = len(loss_array)
    window = time_step + predict_step
    train = np.array(
        [loss_array[start:start + window]
         for start in range(0, total_length - window, intra)]
    ).astype(float)
    train_x = train[:, 0:time_step]
    train_y = train[:, time_step:]
    gap = time_step // intra
    slice_length = len(train)
    train_twice_x = np.array(
        [[train_x[idx - gap], train_x[idx]] for idx in range(gap, slice_length)]
    ).astype(float)
    train_twice_y = np.array(
        [train_y[idx] for idx in range(gap, slice_length)]
    ).astype(float)
    return train_x, train_y, train_twice_x, train_twice_y
def build_lstm_model(time_step, predict_step, input_dim):
    """Assemble and compile the single-input stacked-LSTM loss predictor.

    Input shape is (time_step, input_dim); output is ``predict_step`` linear
    values. Compiled with MSE loss and the Adam optimizer.
    """
    layer_stack = [
        LSTM(units=16, input_shape=(time_step, input_dim), return_sequences=True),
        Dropout(0.2),
        LSTM(units=64, return_sequences=True),
        LSTM(units=128, return_sequences=False),
        Dropout(0.2),
        Dense(units=predict_step),
        Activation('linear'),
    ]
    model = Sequential()
    for layer in layer_stack:
        model.add(layer)
    model.summary()
    model.compile(loss="mse", optimizer=optimizers.Adam())
    return model
def build_twice_lstm_model(time_step, predict_step, input_dim):
    """Assemble and compile the two-branch ("twice") LSTM loss predictor.

    Takes the current window and the window one time_step earlier as two
    named inputs, concatenates their LSTM encodings along the time axis,
    and emits ``predict_step`` linear values. Compiled with MSE + Adam.
    """
    branch_shape = (time_step, input_dim)
    input_1 = Input(shape=branch_shape, dtype='float32', name='First_Time_Step')
    input_2 = Input(shape=branch_shape, dtype='float32', name='Pre_First_Time_Step')
    encoded_1 = LSTM(units=16, input_shape=branch_shape, return_sequences=True)(input_1)
    encoded_1 = Dropout(0.2)(encoded_1)
    encoded_2 = LSTM(units=16, input_shape=branch_shape, return_sequences=True)(input_2)
    encoded_2 = Dropout(0.2)(encoded_2)
    # Earlier window first, matching the chronological order of the data.
    merged = concatenate([encoded_2, encoded_1], axis=1)
    hidden = LSTM(units=64, return_sequences=True)(merged)
    hidden = LSTM(units=128, return_sequences=False)(hidden)
    hidden = Dense(units=predict_step)(hidden)
    output = Activation('linear')(hidden)
    model = Model(input=[input_1, input_2], output=output)
    model.summary()
    model.compile(loss='mse', optimizer=optimizers.Adam())
    return model
# Load a previously saved model from disk.
def load_model(filepath):
    """Deserialize a saved Keras model from *filepath* and return it."""
    print('[Model] Loading model from file %s' % filepath)
    restored = keras.models.load_model(filepath)
    return restored
def reshape_for_lstm(data):
    """Append a trailing feature axis: (samples, steps) -> (samples, steps, 1)."""
    samples, steps = data.shape[0], data.shape[1]
    return np.reshape(data, (samples, steps, 1))
def divide_train_test(data, split):
    """Split *data* along axis 0 at fraction *split* (rounded up) into (train, test)."""
    cut = math.ceil(data.shape[0] * split)
    return data[:cut], data[cut:]
def train(x, y, epochs, batch_size, save_dir, model,measure):
    """Train *model* on (x, y) and save the weights under save_dir.

    The checkpoint filename is derived from *measure* ("<job> <task>"),
    giving "<job>S<task>.h5". Uses early stopping, best-only checkpointing
    and LR reduction on plateau; 10% of the data is held out for validation.

    Returns:
        (history, model): the Keras History object and the trained model.
    """
    pre_list = measure.split(" ")
    measure_s = pre_list[0] + 'S' + pre_list[-1]
    measure_t = pre_list[0] + 'T' + pre_list[-1]
    if not os.path.exists(save_dir):
        os.makedirs(save_dir)
    timer = Timer()
    timer.start()
    print('[Model] Training Started')
    print('[Model] %s epochs, %s batch size' % (epochs, batch_size))
    def scheduler(epoch):
        # Every 100 epochs, shrink the learning rate to 1/10 of its value.
        if epoch % 100 == 0 and epoch != 0:
            lr = keras.backend.get_value(model.optimizer.lr)
            keras.backend.set_value(model.optimizer.lr, lr * 0.1)
            print("lr changed to {}".format(lr * 0.1))
        return keras.backend.get_value(model.optimizer.lr)
    #'%s-e%s.h5' % (dt.datetime.now().strftime('%d%m%Y-%H%M%S'), str(epochs))
    save_fname = os.path.join(save_dir, '%s.h5' % measure_s)
    # NOTE(review): reduce_lr is built but never added to `callbacks`;
    # ReduceLROnPlateau below plays a similar role -- confirm intent.
    reduce_lr = LearningRateScheduler(scheduler)
    callbacks = [
        EarlyStopping(monitor='val_loss', patience=10),
        ModelCheckpoint(filepath=save_fname, monitor='val_loss', save_best_only=True),
        ReduceLROnPlateau(monitor='val_loss', factor=0.1, patience=10, verbose=0, mode='auto',
                          epsilon=0.001, cooldown=0, min_lr=0)
    ]
    # ReduceLROnPlateau: reduce the learning rate when the metric plateaus.
    #
    # Cutting the LR by 2x-10x once learning stalls often helps. The callback
    # watches the metric and lowers the LR when no improvement is seen for
    # `patience` epochs.
    # Parameters:
    #
    # monitor: quantity to watch
    # factor: LR is reduced as lr = lr * factor
    # patience: epochs without improvement before the LR is reduced
    # mode: one of 'auto'/'min'/'max'; in 'min' mode the reduction triggers when
    #       the metric stops decreasing, in 'max' when it stops increasing
    # epsilon: threshold used to decide the metric has entered a "plateau"
    # cooldown: epochs to wait after a reduction before resuming normal operation
    # min_lr: lower bound on the learning rate
    # ----------------------------------------------------------------------
    history = model.fit(
        x,
        y,
        epochs=epochs,
        batch_size=batch_size,
        callbacks=callbacks,
        validation_split=0.1
    )
    model.save(save_fname)
    print('[Model] Training Completed. Model saved as %s' % save_fname)
    timer.stop()
    return history, model
def train_twice(x1,x2, y, epochs, batch_size, save_dir, model,measure):
    """Train the two-input ("twice") model on (x1, x2) -> y.

    x1/x2 feed the 'First_Time_Step' / 'Pre_First_Time_Step' named inputs.
    Checkpointing, early stopping and LR handling mirror train(); the
    checkpoint name is "<job>S<task>.h5" derived from *measure*.

    Returns:
        (history, model): the Keras History object and the trained model.
    """
    pre_list = measure.split(" ")
    measure_s = pre_list[0] + 'S' + pre_list[-1]
    measure_t = pre_list[0] + 'T' + pre_list[-1]
    if not os.path.exists(save_dir):
        os.makedirs(save_dir)
    timer = Timer()
    timer.start()
    print('[Model] Training Started')
    print('[Model] %s epochs, %s batch size' % (epochs, batch_size))
    def scheduler(epoch):
        # Every 100 epochs, shrink the learning rate to 1/10 of its value.
        if epoch % 100 == 0 and epoch != 0:
            lr = keras.backend.get_value(model.optimizer.lr)
            keras.backend.set_value(model.optimizer.lr, lr * 0.1)
            print("lr changed to {}".format(lr * 0.1))
        return keras.backend.get_value(model.optimizer.lr)
    save_fname = os.path.join(save_dir, '%s.h5' % measure_s)
    # NOTE(review): reduce_lr is built but never added to `callbacks` -- confirm intent.
    reduce_lr = LearningRateScheduler(scheduler)
    callbacks = [
        EarlyStopping(monitor='val_loss', patience=10),
        ModelCheckpoint(filepath=save_fname, monitor='val_loss', save_best_only=True),
        ReduceLROnPlateau(monitor='val_loss', factor=0.1, patience=10, verbose=0, mode='auto',
                          epsilon=0.001, cooldown=0, min_lr=0)
    ]
    # ReduceLROnPlateau: reduce the learning rate when the metric plateaus.
    #
    # Cutting the LR by 2x-10x once learning stalls often helps. The callback
    # watches the metric and lowers the LR when no improvement is seen for
    # `patience` epochs.
    # Parameters:
    #
    # monitor: quantity to watch
    # factor: LR is reduced as lr = lr * factor
    # patience: epochs without improvement before the LR is reduced
    # mode: one of 'auto'/'min'/'max'; in 'min' mode the reduction triggers when
    #       the metric stops decreasing, in 'max' when it stops increasing
    # epsilon: threshold used to decide the metric has entered a "plateau"
    # cooldown: epochs to wait after a reduction before resuming normal operation
    # min_lr: lower bound on the learning rate
    # ----------------------------------------------------------------------
    history = model.fit(
        {'First_Time_Step': x1,'Pre_First_Time_Step':x2},
        y,
        epochs=epochs,
        batch_size=batch_size,
        callbacks=callbacks,
        validation_split=0.1
    )
    model.save(save_fname)
    print('[Model] Training Completed. Model saved as %s' % save_fname)
    timer.stop()
    return history,model
def predict_once(data, model, input_dim, time_step, predict_step):
    """Run one forward pass over a single window.

    *data* is reshaped to a batch of one, (1, time_step, input_dim); the
    model's prediction is returned as a float array of shape (predict_step, 1).
    """
    batch = np.reshape(data, (1, time_step, input_dim))
    raw = model.predict(batch)
    raw = np.array(raw).astype(float)
    return np.reshape(raw, (predict_step, 1))
def predict_once_t(data1, data2, model, input_dim, time_step, predict_step):
    """Run one forward pass of the two-input ("twice") model.

    Both windows are reshaped to (1, time_step, input_dim) and fed together;
    the prediction comes back as a float array of shape (predict_step, 1).
    """
    batch_1 = np.reshape(data1, (1, time_step, input_dim))
    batch_2 = np.reshape(data2, (1, time_step, input_dim))
    raw = model.predict([batch_1, batch_2])
    raw = np.array(raw).astype(float)
    return np.reshape(raw, (predict_step, 1))
def predict_multi(data, model, input_dim, time_step, predict_step, intra):
    """Predict over consecutive windows of *data*.

    Windows are taken every ``predict_step // intra`` rows; each window's
    predictions are flattened and concatenated. Returns a column vector of
    shape (total_predictions, 1).
    """
    stride = predict_step // intra          # renamed from `iter` (shadowed builtin)
    collected = []
    for start in range(0, data.shape[0], stride):
        window_pred = predict_once(data[start], model, input_dim, time_step, predict_step)
        window_pred = np.array(window_pred).astype(float)
        window_pred = np.reshape(window_pred, (predict_step,))
        collected.extend(window_pred)
    collected = np.array(collected).astype(float)
    return np.reshape(collected, (len(collected), 1))
def predict_multi_t(data1, data2, model, input_dim, time_step, predict_step, intra):
    """Predict over consecutive window pairs with the two-input model.

    Same stepping scheme as predict_multi(), but each prediction consumes
    the paired windows data1[i] and data2[i]. Returns a column vector of
    shape (total_predictions, 1).
    """
    stride = predict_step // intra          # renamed from `iter` (shadowed builtin)
    collected = []
    for start in range(0, data1.shape[0], stride):
        window_pred = predict_once_t(data1[start], data2[start], model, input_dim, time_step, predict_step)
        window_pred = np.array(window_pred).astype(float)
        window_pred = np.reshape(window_pred, (predict_step,))
        collected.extend(window_pred)
    collected = np.array(collected).astype(float)
    return np.reshape(collected, (len(collected), 1))
def derivation(x1, x2):
    """Return the relative deviation |x1 - x2| / x1.

    Computed as sqrt((x1 - x2)^2) / x1, matching the original formulation.
    Raises ZeroDivisionError when x1 == 0.
    """
    squared_diff = (x1 - x2) ** 2
    return float(math.sqrt(squared_diff) / x1)
def step_predict(data,model,input_dim,predict_step,time_step,div,top_step,low_step,measure):
    """Roll the loss-prediction model forward to estimate the training step
    at which the loss curve flattens, then clamp the result to
    [low_step, top_step].

    The measured loss *data* is normalized by the stored 'loss_max', the model
    is applied autoregressively (each prediction is appended to the series and
    fed back in), and iteration stops when either the mean relative change of
    the last 3*predict_step points drops below *div* or the projected step
    would exceed *top_step*. The stored 'high' step offset is added at the end.

    Returns:
        int-like step count at which training should stop.
    """
    pre_list = measure.split(" ")
    measure_s = pre_list[0] + 'S' + pre_list[-1]
    measure_t = pre_list[0] + 'T' + pre_list[-1]
    filename = '%s.json' % measure_s
    config = load_config(filename)
    # config['high'] = step_high
    # config['low'] = step_low
    # save_config(config, measure)
    #
    #
    # max_loss = config['loss_max']
    step_high = config['high']
    max_loss_read = config['loss_max']
    # Normalize the observed losses by the recorded maximum.
    data_array = np.array(data).astype(float)
    data_array = data_array / max_loss_read
    data_use = list(data_array)
    # Keep only the last (time_step + predict_step) points as the seed window.
    fit_step = 0 - time_step - predict_step
    data_fit = data_use[fit_step:]
    data_list = list(data_fit[:])
    data_fit = np.array(data_fit[-time_step:]).astype(float)
    data_fit = np.reshape(data_fit,(1,time_step,input_dim))
    # data = np.reshape(data, (1, time_step, input_dim))
    predict_res = predict_once(data_fit,model,input_dim,time_step,predict_step)
    predict_res = np.squeeze(predict_res)
    step_to_train = predict_step
    tmp_base = 0 - 3*predict_step
    for i in range(predict_step):
        data_list.append(predict_res[i])
    # Autoregressive rollout: extend the curve until it flattens or hits top_step.
    while True:
        print(step_to_train)
        if step_to_train + step_high >= top_step:
            break
        # Mean relative change over the last 3*predict_step points.
        data_div_pre = data_list[tmp_base:]
        print(data_div_pre)
        data_div_base = []
        for i in range(1,3*predict_step):
            tmp_div = derivation(data_div_pre[i-1],data_div_pre[i])
            data_div_base.append(tmp_div)
        der_base = np.mean(data_div_base)
        print(der_base)
        if der_base < div:
            break
        data_fit = data_list[fit_step:]
        data_list = list(data_fit[:])
        data_fit = np.array(data_fit[-time_step:]).astype(float)
        data_fit = np.reshape(data_fit, (1, time_step, input_dim))
        # data = np.reshape(data, (1, time_step, input_dim))
        predict_res = predict_once(data_fit, model, input_dim, time_step, predict_step)
        predict_res = np.squeeze(predict_res)
        step_to_train += predict_step
        for i in range(predict_step):
            data_list.append(predict_res[i])
    # Add the step offset already consumed, and enforce the lower bound.
    step_to_train = step_to_train+step_high
    if step_to_train <= low_step:
        step_to_train = low_step
    return step_to_train
# def step_predict_nnls(data,step_in):
def step_predict_twice(data,model,input_dim,predict_step,time_step,div,top_step,low_step,measure):
    """Variant of step_predict() for the two-input ("twice") model.

    Identical rollout/stopping logic, but each prediction feeds the model two
    consecutive windows: the latest time_step points and the time_step points
    immediately before them. Result is clamped to [low_step, top_step] with
    the stored 'high' offset added.

    Returns:
        int-like step count at which training should stop.
    """
    pre_list = measure.split(" ")
    measure_s = pre_list[0] + 'S' + pre_list[-1]
    measure_t = pre_list[0] + 'T' + pre_list[-1]
    filename = '%s.json' % measure_s
    config = load_config(filename)
    # config['high'] = step_high
    # config['low'] = step_low
    # save_config(config, measure)
    #
    #
    # max_loss = config['loss_max']
    step_high = config['high']
    max_loss_read = config['loss_max']
    # Normalize the observed losses by the recorded maximum.
    data_array = np.array(data).astype(float)
    data_array = data_array / max_loss_read
    data_use = list(data_array)
    # The seed window needs 2*predict_step extra points for the paired inputs.
    fit_step = 0 - time_step - 2*predict_step
    data_fit = data_use[fit_step:]
    data_list = list(data_fit[:])
    data_fit_1 = np.array(data_fit[-time_step:]).astype(float)
    data_fit_2 = np.array(data_fit[-1*2*time_step:-time_step]).astype(float)
    data_fit_1 = np.reshape(data_fit_1,(1,time_step,input_dim))
    data_fit_2 = np.reshape(data_fit_2,(1,time_step,input_dim))
    # data = np.reshape(data, (1, time_step, input_dim))
    predict_res = predict_once_t(data_fit_1,data_fit_2,model,input_dim,time_step,predict_step)
    predict_res = np.squeeze(predict_res)
    step_to_train = predict_step
    tmp_base = 0 - 3*predict_step
    for i in range(predict_step):
        data_list.append(predict_res[i])
    # Autoregressive rollout: extend the curve until it flattens or hits top_step.
    while True:
        print(step_to_train)
        if step_to_train + step_high >= top_step:
            break
        # Mean relative change over the last 3*predict_step points.
        data_div_pre = data_list[tmp_base:]
        print(data_div_pre)
        data_div_base = []
        for i in range(1,3*predict_step):
            tmp_div = derivation(data_div_pre[i-1],data_div_pre[i])
            data_div_base.append(tmp_div)
        der_base = np.mean(data_div_base)
        print(der_base)
        if der_base < div:
            break
        data_fit = data_list[fit_step:]
        data_list = list(data_fit[:])
        data_fit_1 = np.array(data_fit[-time_step:]).astype(float)
        data_fit_2 = np.array(data_fit[-1 * 2 * time_step:-time_step]).astype(float)
        data_fit_1 = np.reshape(data_fit_1, (1, time_step, input_dim))
        data_fit_2 = np.reshape(data_fit_2, (1, time_step, input_dim))
        # data = np.reshape(data, (1, time_step, input_dim))
        predict_res = predict_once_t(data_fit_1, data_fit_2, model, input_dim, time_step, predict_step)
        predict_res = np.squeeze(predict_res)
        step_to_train += predict_step
        for i in range(predict_step):
            data_list.append(predict_res[i])
    # Add the step offset already consumed, and enforce the lower bound.
    step_to_train = step_to_train+step_high
    if step_to_train <= low_step:
        step_to_train = low_step
    return step_to_train
def get_ns(v1):
    """Return the names of all namespaces visible to the given CoreV1Api client."""
    return [item.metadata.name for item in v1.list_namespace().items]
def save_config2(config, filename):
    """Persist *config* (a dict) to *filename* as pretty-printed UTF-8 JSON.

    A shallow copy of the dict is serialized with ensure_ascii=False so that
    non-ASCII characters stay human-readable in the file.

    Args:
        config: mapping of configuration keys to JSON-serializable values.
        filename: destination path; overwritten if it exists.
    """
    config_content = {key: value for key, value in config.items()}
    # ``with`` guarantees the handle is closed even if json.dumps raises
    # (the original bare open()/close() pair leaked the handle on error).
    with open(filename, 'w', encoding='utf-8') as fw:
        dic_json = json.dumps(config_content, ensure_ascii=False, indent=4)
        fw.write(dic_json)
def check_path(name):
    """Ensure the per-job directory /tfdata/k8snfs/setad2/<name> exists.

    Args:
        name: job/namespace directory name.

    Returns:
        (train_dir, created): the directory path, and True when this call
        had to create it (False when it already existed).
    """
    train_dir = os.path.join('/tfdata/k8snfs/setad2/', name)
    print(train_dir)
    created = not os.path.exists(train_dir)
    if created:
        # exist_ok guards against a concurrent worker creating the directory
        # between the exists() check and makedirs() (the original raced here).
        os.makedirs(train_dir, exist_ok=True)
    return train_dir, created
def step_resource_predict_handle(conn,dictionary,lock,pool_size,connect_try=5,predict_fre=150):
    """Serve one client connection: track a training job and keep its target
    step count ("training_step") in the PREDICT InfluxDB database up to date.

    Protocol: the client sends "<JobName> <TaskId>" over *conn* (retried up to
    *connect_try* times). The handler then watches the job's Kubernetes
    namespace and its InfluxDB series, periodically (every *predict_fre*
    iterations) retraining/reusing an LSTM loss model to re-estimate when the
    job's loss will flatten, and writes the resulting step target back.

    Args:
        conn: accepted socket connection to the client.
        dictionary: shared manager dict; 'running_number' counts live handlers.
        lock: lock guarding *dictionary*.
        pool_size: maximum concurrent handlers (reported back to the client).
        connect_try: max attempts to receive a well-formed request.
        predict_fre: loop iterations between model refresh/re-predictions.
    """
    #measure,db="PREDICT",host='192.168.128.10'
    aToken = '<KEY>'
    aConfiguration = kubernetes.client.Configuration()
    aConfiguration.host = "https://192.168.128.10:6443"
    aConfiguration.verify_ssl = False
    aConfiguration.api_key = {"authorization": "Bearer " + aToken}
    aApiClient = kubernetes.client.ApiClient(aConfiguration)
    v1 = kubernetes.client.CoreV1Api(aApiClient)
    # Register this handler in the shared running-process counter.
    try:
        lock.acquire()
        # lock.release()
        tmp = dictionary['running_number']
        tmp = tmp + 1
        dictionary['running_number'] = tmp
        lock.release()
    except Exception as e:
        print(e)
        lock.release()
    print("now running number is: %d" % tmp)
    influx_client = influxdb.InfluxDBClient(host='192.168.128.10',port=8086,username='admin',password='<PASSWORD>',database="PREDICT")
    # --- handshake: expect "<JobName> <TaskId>" from the client ---
    try_times = 1
    legal_pattern = '\w+ \d+'
    msg_from_client = conn.recv(4096)
    matched = None
    while True:
        if try_times > connect_try:
            break
        msg_from_client_str = str(msg_from_client.decode('utf-8'))
        print(msg_from_client_str+" "+"try_time: "+str(try_times))
        # try_times = try_times + 1
        matched = re.match(legal_pattern,msg_from_client_str)
        if matched is not None:
            break
        if not msg_from_client:
            break
        response = "403 "+"Message-error!"
        conn.send(bytes(response, 'utf-8'))
        msg_from_client = conn.recv(4096)
        try_times = try_times + 1
    # msg_from_client_str = str(msg_from_client.decode('utf-8'))
    if matched is None:
        # Malformed request: unregister and bail out.
        conn.close()
        lock.acquire()
        # lock.release()
        tmp = dictionary['running_number']
        tmp = tmp - 1
        dictionary['running_number'] = tmp
        lock.release()
        return
    print("connect success!")
    # Derive the per-job InfluxDB series names from "<Job> <Task>":
    # S = step samples, T = training target, U = updates, W = write/modulate flag.
    measure = matched.group()
    pre_list = measure.split(" ")
    measure_s = pre_list[0] + 'S' + pre_list[-1]
    measure_t = pre_list[0] + 'T' + pre_list[-1]
    measure_up = pre_list[0] + 'U' + pre_list[-1]
    measure_write = pre_list[0]+'W'+pre_list[-1]
    lock.acquire()
    # lock.release()
    tmp_running = dictionary['running_number']
    lock.release()
    res_pool = pool_size - tmp_running
    print("resuming pool size: %d" % res_pool)
    response = "400 "+pre_list[0]+" "+pre_list[-1]+" "+str(res_pool)
    conn.send(bytes(response,'utf-8'))
    catched_job = pre_list[0]
    catched_job = catched_job.lower()
    if catched_job == 'xce':
        aim_ns = 'xception-' + pre_list[-1] + '-' + pre_list[-1]
    else:
        aim_ns = catched_job + "-" + pre_list[-1] + "-" + pre_list[-1]
    print("this is work for %s" % (aim_ns))
    # --- load the job config and make sure the per-job allow file exists ---
    try:
        # job_con_path = "/tfdata/k8snfs/%s/%s.json" % (aim_ns, aim_ns)
        job_con_path = "/tfdata/k8snfs/setad2/%s/%s.json" % (aim_ns, aim_ns)
        job_config = load_config(job_con_path)
        print("load job config success!!")
        # allow path!!!
        allow_path = '/tfdata/k8snfs/setad2/%s/%s.json' % (aim_ns, measure_t)
        # allow_path = "/tfdata/k8snfs/%s/%s.json" % (aim_ns, measure_t)
    except Exception as e:
        print(e)
    # allow_path2 = "/tfdata/k8snfs/%s/%s_r.json" % (measure_t,measure_t)
    allow_p, created = check_path(aim_ns)
    print(allow_p)
    if created:
        allow_read = {}
        # allow_readr = {}
        allow_read['OK'] = True
        allow_read['retry'] = job_config['retry']
        save_config2(allow_read,allow_path)
        # save_config2(allow_readr,allow_path2)
    if not os.path.exists(allow_path):
        allow_read = {}
        # allow_readr = {}
        allow_read['OK'] = True
        allow_read['retry'] = job_config['retry']
        save_config2(allow_read, allow_path)
    ns_list = get_ns(v1)
    print(ns_list)
    print(aim_ns)
    print(aim_ns in ns_list)
    # --- wait (up to ~210 polls) for the job namespace to appear, unless a
    # modulation write is already in flight ---
    ceshi_count = 0
    ceshi_in = False
    while True:
        if ceshi_count > 210:
            break
        ns_list = get_ns(v1)
        write_ss = influx_client.query("select * from " + measure_write + " order by desc limit 1")
        key_write = write_ss.keys()
        print(key_write[:])
        write_inter = write_ss[key_write[0]]
        write_items = list(write_inter)
        print(write_items[:])
        write_now = int(write_items[0]['modulate'])
        if aim_ns not in ns_list and (write_now==0):
            ceshi_count+=1
            time.sleep(2.5)
        else:
            ceshi_in = True
            break
    if not ceshi_in:
        conn.close()
        lock.acquire()
        # lock.release()
        tmp = dictionary['running_number']
        tmp = tmp - 1
        dictionary['running_number'] = tmp
        lock.release()
        print("namespace created error!")
        return
    # --- read the current training target and sync the allow file with it ---
    result = influx_client.query("select * from " + measure_t + " order by desc limit 1")
    key = result.keys()
    print(key)
    result_inter = result[key[0]]
    result_items = list(result_inter)
    print(result_items)
    trains_step = int(result_items[0]['training_step'])
    tmp_item = dict(result_items[0])
    key_tmp = list(tmp_item.keys())
    if 'retry' not in key_tmp:
        retry_now = int(job_config['retry'])
    else:
        retry_now = int(result_items[0]['retry'])
    allow_read = load_config(allow_path)
    print("Reload success!!")
    allow_read['retry'] = retry_now
    # 'ps_replicas': job.ps_replicas,
    # 'worker_replicas': job.worker_replicas
    if 'ps' not in key_tmp:
        ps_now = int(job_config['ps_replicas'])
    else:
        ps_now = int(result_items[0]['ps'])
    if 'worker' not in key_tmp:
        worker_now = int(job_config['worker_replicas'])
    else:
        worker_now = int(result_items[0]['worker'])
    allow_read['worker'] = worker_now
    allow_read['ps'] = ps_now
    save_config2(allow_read,allow_path)
    print("save success!!")
    # --- if the replica layout changed (retry bumped), rescale the target step ---
    result2 = influx_client.query("select * from " + measure_up + " order by desc limit 1")
    key2 = result2.keys()
    print(key2)
    result_inter2 = result2[key2[0]]
    result_items2 = list(result_inter2)
    print(result_items2)
    retry_top = int(result_items2[0]['retry'])
    print(retry_top)
    print(type(retry_top))
    print(retry_now)
    print(type(retry_now))
    if retry_top != retry_now:
        new_ps = int(result_items2[0]['ps'])
        new_worker = int(result_items2[0]['worker'])
        trains_step = math.ceil(trains_step*worker_now/new_worker)
        allow_read = load_config(allow_path)
        allow_read['retry'] = retry_top
        allow_read['ps'] = new_ps
        allow_read['worker'] = new_worker
        save_config2(allow_read,allow_path)
        print("saved successful!!")
    print(trains_step)
    # modekk == 1 means "no prediction needed" for the rest of this handler.
    modekk = 0
    if trains_step <= 200:
        step_items = [
            {
                'measurement': measure_t,
                'tags': {
                    'task': int(pre_list[-1]),
                    'runtimes': int(pre_list[-1]),
                    'retry': int(retry_top)
                },
                'fields': {
                    'training_step': int(trains_step),
                    'ps': int(allow_read['ps']),
                    'worker': int(allow_read['worker'])
                }
            }
        ]
        print("saved in db")
        print(trains_step)
        influx_client.write_points(step_items, time_precision="ms", database="PREDICT")
        print("Writed in db")
        # conn.close()
        # lock.acquire()
        # # lock.release()
        # tmp = dictionary['running_number']
        # tmp = tmp - 1
        # dictionary['running_number'] = tmp
        # lock.release()
        print("Do not need to predict,return")
        modekk = 1
    # --- first prediction: fit/update the model on the observed loss curve ---
    min_steps = math.ceil(trains_step*0.2)
    length = math.ceil(min_steps*0.6)
    print("Initial Config Success!"+"min_steps:"+str(min_steps))
    time_start = time.time()
    print("start to load data")
    loss,max_loss,modekk_z = load_data(min_steps=min_steps,length=length,measure=measure,first=True)
    if not loss:
        conn.close()
        lock.acquire()
        # lock.release()
        tmp = dictionary['running_number']
        tmp = tmp - 1
        dictionary['running_number'] = tmp
        lock.release()
        return
    # loss_array = normalization(loss,max_loss)
    result = influx_client.query("select * from " + measure_t + " order by desc limit 1")
    key = result.keys()
    result_inter = result[key[0]]
    result_items = list(result_inter)
    trains_step = int(result_items[0]['training_step'])
    step_to_train = trains_step
    if trains_step<=200:
        modekk_z = 1
    if modekk_z!=1:
        print("Get data first time")
        data_x, data_y, data_twice_x, data_twice_y = make_dataset(loss[:], max_loss, 20, 10, 1)
        data_x_lstm = reshape_for_lstm(data_x[:])
        # data_y_lstm = reshape_for_lstm(data_y[:])
        # data_twice_x_1 = data_twice_x[:,1,:]
        # data_twice_x_2 = data_twice_x[:,0,:]
        # # data_twice_y = reshape_for_lstm(data_twice_y[:])
        # data_twice_x_1_lstm = reshape_for_lstm(data_twice_x_1[:])
        # data_twice_x_2_lstm = reshape_for_lstm(data_twice_x_2[:])
        print("Make dataset first time")
        # model = load_model('save_model/31122019-031018-e10.h5')
        if os.path.exists("save_model/%s.h5" % measure_s):
            model = load_model('save_model/%s.h5' % measure_s)
        else:
            model = build_lstm_model(time_step=20, predict_step=10, input_dim=1)
        print("Start to train")
        history, model = train(x=data_x_lstm, y=data_y, epochs=100, batch_size=64, save_dir='save_model', model=model,
                               measure=measure)
        step_to_train = step_predict(data=loss[:], model=model, input_dim=1, predict_step=10, time_step=20, div=0.01,
                                     top_step=trains_step, low_step=math.ceil(trains_step * 0.5), measure=measure)
    else:
        step_to_train = trains_step
    res1 = influx_client.query("select * from "+measure_up+" order by desc limit 1")
    key1 = res1.keys()
    res1_inter = res1[key1[0]]
    res1_items = list(res1_inter)
    retry = int(res1_items[0]['retry'])
    allow_read = load_config(allow_path)
    retry_now = int(allow_read['retry'])
    if retry_now != retry:
        new_ps = int(res1_items[0]['ps'])
        new_worker = int(res1_items[0]['worker'])
        step_to_train = math.ceil(step_to_train*int(allow_read['worker'])/new_worker)
        allow_read['retry'] = retry
        allow_read['ps'] = new_ps
        allow_read['worker'] = new_worker
        save_config2(allow_read,allow_path)
    step_items = [
        {
            'measurement': measure_t,
            'tags': {
                'task': int(pre_list[-1]),
                'runtimes': int(pre_list[-1]),
                'retry': int(retry)
            },
            'fields': {
                'training_step': step_to_train,
                'ps': int(allow_read['ps']),
                'worker': int(allow_read['worker'])
            }
        }
    ]
    print("saved in db")
    print(step_to_train)
    influx_client.write_points(step_items, time_precision="ms", database="PREDICT")
    print("Writed in db")
    print("First prdict cost time: "+str(time.time() - time_start))
    iftrain = 0
    time_total = 0
    if modekk != 1:
        modekk = modekk_z
    countt00 = 0
    # iikk =0
    # tmp_panduan_key = -1
    # --- main watch loop: poll the job, refresh the prediction every
    # predict_fre iterations, and mirror replica/retry changes ---
    while True:
        if modekk == 1:
            break
        # selected_node = select_node(influx_client,measure_s)
        res1 = influx_client.query("select * from " + measure_s + " where nodes='worker0' order by desc limit 10")
        key1 = res1.keys()
        # print(key1[:])
        res1_inter = res1[key1[0]]
        res1_items = list(res1_inter)
        # print(res1_items[:])
        step_now = int(res1_items[0]['step'])
        time_mean_list = [float(i['time_d']) for i in res1_items]
        time_mean = np.mean(time_mean_list)
        # print(time_mean)
        # time_sleep = predict_fre * time_mean
        # print(step_now)
        ns_list = get_ns(v1)
        # print(ns_list)
        # print(aim_ns)
        # print(aim_ns in ns_list)
        write_ss = influx_client.query("select * from " + measure_write + " order by desc limit 1")
        key_write = write_ss.keys()
        # print(key_write[:])
        write_inter = write_ss[key_write[0]]
        write_items = list(write_inter)
        # print(write_items[:])
        write_now = int(write_items[0]['modulate'])
        if (aim_ns not in ns_list) and (write_now == 0):
            # Namespace gone and no modulation pending: give it ~32s to come back.
            tmp_panduan_key = -1
            for iikk in range(32):
                time.sleep(1)
                ns_list = get_ns(v1)
                write_ss = influx_client.query("select * from " + measure_write + " order by desc limit 1")
                key_write = write_ss.keys()
                # print(key_write[:])
                write_inter = write_ss[key_write[0]]
                write_items = list(write_inter)
                # print(write_items[:])
                write_now = int(write_items[0]['modulate'])
                if (aim_ns not in ns_list) and (write_now == 0):
                    print("namespace is missing")
                else:
                    tmp_panduan_key = 1
                    break
            if tmp_panduan_key < 0:
                print("namespace has been missed")
                break
        pod_status = [i.status.phase for i in v1.list_namespaced_pod(aim_ns).items]
        # print(pod_status)
        print("going on")
        # print(measure)
        print(math.ceil(step_to_train*0.85))
        print(step_now)
        write_ss = influx_client.query("select * from " + measure_write + " order by desc limit 1")
        key_write = write_ss.keys()
        write_inter = write_ss[key_write[0]]
        write_items = list(write_inter)
        write_now = int(write_items[0]['modulate'])
        if ('Succeeded' in pod_status or 'Failed' in pod_status) and (write_now == 0):
            # Pods finished: wait a few polls to be sure, then stop watching.
            if countt00 <=16:
                countt00+=1
                time.sleep(1.5)
                continue
            else:
                print("Job is ended")
                break
        else:
            time.sleep(1.2)
            print("Job is going")
            # print(math.ceil(step_to_train*0.85))
            # print(step_now)
            panduan_going = math.ceil(step_to_train*0.85)
            # print(type(step_now))
            step_now = int(step_now)
            print(type(step_now))
            print(step_now)
            if step_now >= panduan_going:
                # Job is already near the predicted target; stop re-predicting.
                print("It need not to predict")
                modekk = 1
                break
            else:
                time.sleep(1.2)
                print("Job is going to load")
                time.sleep(2.2)
                print(measure)
                print(length)
                print(type(length))
                print("load data again")
                if time_total>= predict_fre:
                    # Time to refresh the prediction with the latest loss data.
                    result = influx_client.query("select * from " + measure_t + " order by desc limit 1")
                    key = result.keys()
                    result_inter = result[key[0]]
                    result_items = list(result_inter)
                    trains_step = int(result_items[0]['training_step'])
                    if step_now >= trains_step - 3:
                        print("This process is ended!!")
                        break
                    # NOTE(review): load_data() returns three values elsewhere in this
                    # file (loss, max_loss, flag); this 2-way unpack looks like it will
                    # raise ValueError -- verify.
                    loss, max_loss = load_data(min_steps=min_steps, length=length, measure=measure, first=False)
                    print("Start to load model!")
                    try:
                        model = load_model('save_model/%s.h5' % measure_s)
                    except Exception as e:
                        print(e)
                        conn.close()
                        lock.acquire()
                        # lock.release()
                        tmp = dictionary['running_number']
                        tmp = tmp - 1
                        dictionary['running_number'] = tmp
                        lock.release()
                        return
                    print("get model successfully!")
                    if iftrain > 0 and iftrain % 20 == 19:
                        # Every 20th refresh, fine-tune the model on fresh data.
                        data_x, data_y, data_twice_x, data_twice_y = make_dataset(loss[:], max_loss, 20, 10, 1)
                        data_x_lstm = reshape_for_lstm(data_x[:])
                        # data_y_lstm = reshape_for_lstm(data_y[:])
                        data_twice_x_1 = data_twice_x[:, 1, :]
                        data_twice_x_2 = data_twice_x[:, 0, :]
                        # data_twice_y = reshape_for_lstm(data_twice_y[:])
                        data_twice_x_1_lstm = reshape_for_lstm(data_twice_x_1[:])
                        data_twice_x_2_lstm = reshape_for_lstm(data_twice_x_2[:])
                        history, model = train(x=data_x_lstm, y=data_y, epochs=10, batch_size=64, save_dir='save_model',
                                               model=model, measure=measure)
                    step_to_train = step_predict(data=loss[:], model=model, input_dim=1, predict_step=10, time_step=20,
                                                 div=0.005, top_step=trains_step, low_step=math.ceil(trains_step * 0.5),
                                                 measure=measure)
                    res2 = influx_client.query("select * from " + measure_up + " order by desc limit 1")
                    key2 = list(res2.keys())
                    res2_inter = res2[key2[0]]
                    res2_items = list(res2_inter)
                    retry = int(res2_items[0]['retry'])
                    allow_read = load_config(allow_path)
                    retry_now = int(allow_read['retry'])
                    new_ps = int(allow_read['ps'])
                    new_worker = int(allow_read['worker'])
                    if retry_now != retry:
                        new_ps = int(res2_items[0]['ps'])
                        new_worker = int(res2_items[0]['worker'])
                        step_to_train = math.ceil(step_to_train * int(allow_read['worker']) / new_worker)
                        allow_read['retry'] = retry
                        allow_read['worker'] = new_worker
                        allow_read['ps'] = new_ps
                        save_config2(allow_read, allow_path)
                    step_items = [
                        {
                            'measurement': measure_t,
                            'tags': {
                                'task': int(pre_list[-1]),
                                'runtimes': int(pre_list[-1]),
                                'retry': int(retry)
                            },
                            'fields': {
                                'training_step': step_to_train,
                                'ps': new_ps,
                                'worker': new_worker
                            }
                        }
                    ]
                    print(step_to_train)
                    influx_client.write_points(step_items, time_precision="ms", database="PREDICT")
                    print("Writed result in db")
                    iftrain = iftrain + 1
                    print("Predict " + str(iftrain) + " costs time: " + str(time.time() - time_start))
                    time_total = 0
                    time_total+=1
                    time.sleep(float(time_mean))
                else:
                    # Not yet time to re-predict: just mirror state into the allow file.
                    result = influx_client.query("select * from " + measure_t + " order by desc limit 1")
                    key = result.keys()
                    result_inter = result[key[0]]
                    result_items = list(result_inter)
                    trains_step = int(result_items[0]['training_step'])
                    if step_now >= trains_step - 3:
                        print("This process is ended!!")
                        break
                    retry_now = int(result_items[0]['retry'])
                    allow_read = load_config(allow_path)
                    print("Reload success!!")
                    allow_read['retry'] = retry_now
                    ps_now = int(result_items[0]['ps'])
                    worker_now = int(result_items[0]['worker'])
                    allow_read['worker'] = worker_now
                    allow_read['ps'] = ps_now
                    save_config2(allow_read, allow_path)
                    print("save success!!")
                    result2 = influx_client.query("select * from " + measure_up + " order by desc limit 1")
                    key2 = result2.keys()
                    result_inter2 = result2[key2[0]]
                    result_items2 = list(result_inter2)
                    retry_top = int(result_items2[0]['retry'])
                    if retry_top != retry_now:
                        new_ps = int(result_items2[0]['ps'])
                        new_worker = int(result_items2[0]['worker'])
                        trains_step = math.ceil(trains_step * worker_now / new_worker)
                        allow_read = load_config(allow_path)
                        allow_read['retry'] = retry_top
                        allow_read['ps'] = new_ps
                        allow_read['worker'] = new_worker
                        save_config2(allow_read, allow_path)
                        print("saved successful!!")
                    # print(trains_step)
                    step_items = [
                        {
                            'measurement': measure_t,
                            'tags': {
                                'task': int(pre_list[-1]),
                                'runtimes': int(pre_list[-1]),
                                'retry': int(retry_top)
                            },
                            'fields': {
                                'training_step': int(trains_step),
                                'ps': int(allow_read['ps']),
                                'worker': int(allow_read['worker'])
                            }
                        }
                    ]
                    print("saved in db")
                    influx_client.write_points(step_items, time_precision="ms", database="PREDICT")
                    print("Writed in db")
                    time_total += 1
                    step_to_train = trains_step
                    time.sleep(float(time_mean))
    if modekk == 1:
        # No prediction needed: keep mirroring the job state until it ends.
        countt00 = 0
        while True:
            write_ss = influx_client.query("select * from " + measure_write + " order by desc limit 1")
            key_write = write_ss.keys()
            write_inter = write_ss[key_write[0]]
            write_items = list(write_inter)
            # print(write_items[:])
            write_now = int(write_items[0]['modulate'])
            pod_status = [i.status.phase for i in v1.list_namespaced_pod(aim_ns).items]
            if ('Succeeded' in pod_status or 'Failed' in pod_status) and (write_now == 0):
                if countt00 <= 16:
                    countt00+=1
                    time.sleep(1.5)
                    continue
                else:
                    print("Job is ended")
                    break
            write_ss = influx_client.query("select * from " + measure_write + " order by desc limit 1")
            ns_list = get_ns(v1)
            key_write = write_ss.keys()
            write_inter = write_ss[key_write[0]]
            write_items = list(write_inter)
            write_now = int(write_items[0]['modulate'])
            if (aim_ns not in ns_list) and (write_now == 0):
                tmp_panduan_key = -1
                for iikk in range(32):
                    time.sleep(1)
                    ns_list = get_ns(v1)
                    write_ss = influx_client.query("select * from " + measure_write + " order by desc limit 1")
                    key_write = write_ss.keys()
                    # print(key_write[:])
                    write_inter = write_ss[key_write[0]]
                    write_items = list(write_inter)
                    # print(write_items[:])
                    write_now = int(write_items[0]['modulate'])
                    if (aim_ns not in ns_list) and (write_now == 0):
                        print("namespace is missing")
                    else:
                        tmp_panduan_key = 1
                        break
                if tmp_panduan_key < 0:
                    print("namespace has been missed")
                    break
            # time.sleep(9)
            # ns_list = get_ns(v1)
            # write_ss = influx_client.query("select * from " + measure_write + " order by desc limit 1")
            # key_write = write_ss.keys()
            # # print(key_write[:])
            # write_inter = write_ss[key_write[0]]
            # write_items = list(write_inter)
            # # print(write_items[:])
            # write_now = int(write_items[0]['modulate'])
            # if (aim_ns not in ns_list) and (write_now == 0):
            #     print("namespace is missing")
            #     break
            # print(pod_status)
            print("going on")
            # print(measure)
            # print(math.ceil(step_to_train * 0.75))
            # print(step_now)
            # worker%d
            # selected_node = select_node(influx_client, measure_s)
            res1 = influx_client.query("select * from " + measure_s + " where nodes='worker0' order by desc limit 3")
            key1 = res1.keys()
            res1_inter = res1[key1[0]]
            res1_items = list(res1_inter)
            step_now = int(res1_items[0]['step'])
            time_mean_list = [float(i['time_d']) for i in res1_items]
            time_mean = np.mean(time_mean_list)
            result = influx_client.query("select * from " + measure_t + " order by desc limit 1")
            key = result.keys()
            result_inter = result[key[0]]
            result_items = list(result_inter)
            trains_step = int(result_items[0]['training_step'])
            if step_now >= trains_step - 3:
                print("This process is ended!!")
                break
            retry_now = int(result_items[0]['retry'])
            allow_read = load_config(allow_path)
            print("Reload success!!")
            allow_read['retry'] = retry_now
            ps_now = int(result_items[0]['ps'])
            worker_now = int(result_items[0]['worker'])
            allow_read['worker'] = worker_now
            allow_read['ps'] = ps_now
            save_config2(allow_read, allow_path)
            print("save success!!")
            result2 = influx_client.query("select * from " + measure_up + " order by desc limit 1")
            key2 = result2.keys()
            # print(key2)
            result_inter2 = result2[key2[0]]
            result_items2 = list(result_inter2)
            # print(result_items2)
            retry_top = int(result_items2[0]['retry'])
            # print(retry_top)
            # print(type(retry_top))
            # print(retry_now)
            # print(type(retry_now))
            if retry_top != retry_now:
                new_ps = int(result_items2[0]['ps'])
                new_worker = int(result_items2[0]['worker'])
                trains_step = math.ceil(trains_step * worker_now / new_worker)
                allow_read = load_config(allow_path)
                allow_read['retry'] = retry_top
                allow_read['ps'] = new_ps
                allow_read['worker'] = new_worker
                save_config2(allow_read, allow_path)
                print("saved successful!!")
            # print(trains_step)
            step_items = [
                {
                    'measurement': measure_t,
                    'tags': {
                        'task': int(pre_list[-1]),
                        'runtimes': int(pre_list[-1]),
                        'retry': int(retry_top)
                    },
                    'fields': {
                        'training_step': int(trains_step),
                        'ps': int(allow_read['ps']),
                        'worker': int(allow_read['worker'])
                    }
                }
            ]
            print("saved in db")
            influx_client.write_points(step_items, time_precision="ms", database="PREDICT")
            print("Writed in db")
            time.sleep(float(0.3*time_mean))
    else:
        time.sleep(float(time_mean))
    # --- teardown: unregister this handler and close the connection ---
    conn.close()
    lock.acquire()
    # lock.release()
    tmp = dictionary['running_number']
    tmp = tmp - 1
    dictionary['running_number'] = tmp
    lock.release()
    time_end = time.time()
    print(time_end - time_start)
    print("This prediction end!")
def step_nnls_predict_handle(conn,dictionary,lock,pool_size,connect_try=5,predict_fre=150):
    """Handle one client connection: monitor a TF training job in Kubernetes
    and periodically predict its remaining training steps via NNLS fitting.

    The client sends a message matching ``\\w+ \\d+`` (job name + task id).
    From it the handler derives InfluxDB measurement names (S/T/U/W suffixes)
    and a Kubernetes namespace, then loops until the job finishes, writing
    predicted ``training_step`` values to the ``PREDICT`` database.

    Args:
        conn: accepted TCP socket for one client request.
        dictionary: manager-backed shared dict; ``'running_number'`` counts
            live handler processes.
        lock: manager lock guarding updates to ``dictionary``.
        pool_size: total worker-pool capacity, reported back to the client.
        connect_try: max attempts to receive a well-formed client message.
        predict_fre: number of monitor iterations between NNLS re-predictions.

    Returns:
        None. All results are side effects (InfluxDB writes, JSON config
        files, socket replies).
    """
    # --- Kubernetes client setup (hard-coded bearer token; NOTE(review):
    # credentials embedded in source should live in a secret/config) ---
    aToken = 'eyJhbGciOiJSUzI1NiIsImtpZCI6IiJ9.eyJpc3MiOiJrdWJlcm5ldGVzL3NlcnZpY2VhY2NvdW50Iiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9uYW1lc3BhY2UiOiJrdWJlLXN5c3RlbSIsImt1YmVybmV0ZXMuaW8vc2VydmljZWFjY291bnQvc2VjcmV0Lm5hbWUiOiJhZG1pbi11c2VyLXRva2VuLTJ3dGRuIiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9zZXJ2aWNlLWFjY291bnQubmFtZSI6ImFkbWluLXVzZXIiLCJrdWJlcm5ldGVzLmlvL3NlcnZpY2VhY2NvdW50L3NlcnZpY2UtYWNjb3VudC51aWQiOiI5YWE4ZTc4OS0zODM1LTExZWEtYWZlMi1mYTE2M2UzMzBlYWEiLCJzdWIiOiJzeXN0ZW06c2VydmljZWFjY291bnQ6a3ViZS1zeXN0ZW06YWRtaW4tdXNlciJ9.qzHVo1KysWhnSAMwKAcaKLWkqOxBlSBr7qR4LtldusdM0Z9dDQVH2TMmtvmkBDyfqVKQttMmTGXDHhW-dOD9uJVn8w84zitd7eAgVCrHm2nhTMbsf2ZKH0DuU6t_SGYkyBWVIedMpZis-K2mzCjmSq5TAd67cMSCqGHQVMtjEsqpPyBeY_nrqgzWWwX3X3E0hHGk7CvICndFiqUeI9xKVluA-TdR6HzPXbaCIGAcvSHeIlc4GdhmDTJ47U4rQON3IL0dhC6Adom7c65I5pwBdYpfqkDhKld1o7ErhXS8Qhcv0BHhfuj-Bdn6MMsH7PXpH-7I5dxoKDVlTC-q7KV9EQ'
    aConfiguration = kubernetes.client.Configuration()
    aConfiguration.host = "https://192.168.128.10:6443"
    aConfiguration.verify_ssl = False
    aConfiguration.api_key = {"authorization": "Bearer " + aToken}
    aApiClient = kubernetes.client.ApiClient(aConfiguration)
    v1 = kubernetes.client.CoreV1Api(aApiClient)
    # Register this handler in the shared running-process counter.
    lock.acquire()
    # lock.release()
    tmp = dictionary['running_number']
    tmp = tmp + 1
    dictionary['running_number'] = tmp
    lock.release()
    influx_client = influxdb.InfluxDBClient(host='192.168.128.10', port=8086, username='admin', password='<PASSWORD>',
                                        database="PREDICT")
    # --- Receive and validate the client request ("<job> <task-id>") ---
    try_times = 1
    # NOTE(review): non-raw string literal for a regex; works because \w and
    # \d are not recognized string escapes, but r'\w+ \d+' would be safer.
    legal_pattern = '\w+ \d+'
    msg_from_client = conn.recv(4096)
    matched = None
    while True:
        if try_times > connect_try:
            break
        msg_from_client_str = str(msg_from_client.decode('utf-8'))
        print(msg_from_client_str + " " + "try_time: " + str(try_times))
        # try_times = try_times + 1
        matched = re.match(legal_pattern, msg_from_client_str)
        if matched is not None:
            break
        if not msg_from_client:
            break
        # Malformed message: tell the client and wait for a retry.
        response = "403 " + "Message-error!"
        conn.send(bytes(response, 'utf-8'))
        msg_from_client = conn.recv(4096)
        try_times = try_times + 1
    # msg_from_client_str = str(msg_from_client.decode('utf-8'))
    if matched is None:
        # Give up: close the socket and decrement the shared counter.
        conn.close()
        lock.acquire()
        # lock.release()
        tmp = dictionary['running_number']
        tmp = tmp - 1
        dictionary['running_number'] = tmp
        lock.release()
        return
    print("connect success!")
    # --- Derive measurement names and the job's k8s namespace ---
    # measure_s: per-step timings, measure_t: predicted training step,
    # measure_u: scale updates (ps/worker/retry), measure_w: modulate flag.
    measure = matched.group()
    pre_list = measure.split(" ")
    measure_s = pre_list[0] + 'S' + pre_list[-1]
    measure_t = pre_list[0] + 'T' + pre_list[-1]
    measure_up = pre_list[0] + 'U' + pre_list[-1]
    measure_write = pre_list[0] + 'W' + pre_list[-1]
    lock.acquire()
    # lock.release()
    tmp_running = dictionary['running_number']
    lock.release()
    # Report remaining pool capacity back to the client ("400 <job> <id> <free>").
    res_pool = pool_size - tmp_running
    response = "400 " + pre_list[0] + " " + pre_list[-1] + " " + str(res_pool)
    conn.send(bytes(response, 'utf-8'))
    catched_job = pre_list[0]
    catched_job = catched_job.lower()
    if catched_job == 'xce':
        aim_ns = 'xception-' + pre_list[-1] + '-' + pre_list[-1]
    else:
        aim_ns = catched_job + "-" + pre_list[-1] + "-" + pre_list[-1]
    #/tfdata/k8snfs/setfix/
    # --- Load the job config and (re)create the allow file on shared NFS ---
    job_con_path = "/tfdata/k8snfs/setad2/%s/%s.json" % (aim_ns, aim_ns)
    # job_con_path = "/tfdata/k8snfs/%s/%s.json" % (aim_ns, aim_ns)
    job_config = load_config(job_con_path)
    print("load job config success!!")
    # allow_path = "/tfdata/k8snfs/%s/%s.json" % (aim_ns, measure_t)
    allow_path = "/tfdata/k8snfs/setad2/%s/%s.json" % (aim_ns, measure_t)
    # allow_path2 = "/tfdata/k8snfs/%s/%s_r.json" % (measure_t,measure_t)
    allow_p, created = check_path(aim_ns)
    print(allow_p)
    if created:
        allow_read = {}
        # allow_readr = {}
        allow_read['OK'] = True
        allow_read['retry'] = job_config['retry']
        save_config2(allow_read, allow_path)
        # save_config2(allow_readr,allow_path2)
    if not os.path.exists(allow_path):
        allow_read = {}
        # allow_readr = {}
        allow_read['OK'] = True
        allow_read['retry'] = job_config['retry']
        save_config2(allow_read, allow_path)
    # --- Wait (up to ~36 * 15s) for the job namespace to appear, unless
    # the modulate flag in measure_write says a rescale is in progress ---
    ns_list = get_ns(v1)
    ceshi_count = 0
    ceshi_in = False
    while True:
        if ceshi_count > 35:
            break
        ns_list = get_ns(v1)
        write_ss = influx_client.query("select * from " + measure_write + " order by desc limit 1")
        key_write = write_ss.keys()
        print(key_write[:])
        write_inter = write_ss[key_write[0]]
        write_items = list(write_inter)
        print(write_items[:])
        write_now = int(write_items[0]['modulate'])
        if aim_ns not in ns_list and (write_now == 0):
            ceshi_count += 1
            time.sleep(15)
        else:
            ceshi_in = True
            break
    if not ceshi_in:
        # Namespace never appeared: abort this handler.
        conn.close()
        lock.acquire()
        # lock.release()
        tmp = dictionary['running_number']
        tmp = tmp - 1
        dictionary['running_number'] = tmp
        lock.release()
        print("namespace created error!")
        return
    # --- Read the latest planned training step; fall back to the job config
    # when the measurement has no retry/ps/worker fields yet ---
    result = influx_client.query("select * from " + measure_t + " order by desc limit 1")
    key = result.keys()
    print(key)
    result_inter = result[key[0]]
    result_items = list(result_inter)
    print(result_items)
    trains_step = int(result_items[0]['training_step'])
    tmp_item = dict(result_items[0])
    key_tmp = tmp_item.keys()
    if 'retry' not in key_tmp:
        retry_now = int(job_config['retry'])
    else:
        retry_now = int(result_items[0]['retry'])
    allow_read = load_config(allow_path)
    print("Reload success!!")
    allow_read['retry'] = retry_now
    # 'ps_replicas': job.ps_replicas,
    # 'worker_replicas': job.worker_replicas
    if 'ps' not in key_tmp:
        ps_now = int(job_config['ps_replicas'])
    else:
        ps_now = int(result_items[0]['ps'])
    if 'worker' not in key_tmp:
        worker_now = int(job_config['worker_replicas'])
    else:
        worker_now = int(result_items[0]['worker'])
    allow_read['worker'] = worker_now
    allow_read['ps'] = ps_now
    save_config2(allow_read, allow_path)
    print("save success!!")
    # --- If a rescale happened (retry changed in measure_up), rescale the
    # step budget proportionally to the old/new worker ratio ---
    result2 = influx_client.query("select * from " + measure_up + " order by desc limit 1")
    key2 = result2.keys()
    print(key2)
    result_inter2 = result2[key2[0]]
    result_items2 = list(result_inter2)
    print(result_items2)
    retry_top = int(result_items2[0]['retry'])
    print(retry_top)
    print(type(retry_top))
    print(retry_now)
    print(type(retry_now))
    if retry_top != retry_now:
        new_ps = int(result_items2[0]['ps'])
        new_worker = int(result_items2[0]['worker'])
        trains_step = math.ceil(trains_step * worker_now / new_worker)
        allow_read = load_config(allow_path)
        allow_read['retry'] = retry_top
        allow_read['ps'] = new_ps
        allow_read['worker'] = new_worker
        save_config2(allow_read, allow_path)
        print("saved successful!!")
    print(trains_step)
    # modekk == 1 means "no prediction needed" (short jobs <= 200 steps).
    modekk = 0
    if trains_step <= 200:
        step_items = [
            {
                'measurement': measure_t,
                'tags': {
                    'task': int(pre_list[-1]),
                    'runtimes': int(pre_list[-1]),
                    'retry': int(retry_top)
                },
                'fields': {
                    'training_step': int(trains_step),
                    'ps': int(allow_read['ps']),
                    'worker': int(allow_read['worker'])
                }
            }
        ]
        print("saved in db")
        print(trains_step)
        influx_client.write_points(step_items, time_precision="ms", database="PREDICT")
        print("Writed in db")
        # conn.close()
        # lock.acquire()
        # # lock.release()
        # tmp = dictionary['running_number']
        # tmp = tmp - 1
        # dictionary['running_number'] = tmp
        # lock.release()
        print("Do not need to predict,return")
        modekk = 1
    # --- Initial NNLS prediction from the first min_steps loss samples ---
    min_steps = math.ceil(trains_step * 0.2)
    length = math.ceil(min_steps * 0.4)
    print("Initial Config Success!" + "min_steps:" + str(min_steps))
    time_start = time.time()
    print("start to load data")
    loss, max_loss, modekk_z = load_data_nnls(min_steps=min_steps, length=length, measure=measure, first=True)
    if not loss:
        # No loss data available: abort this handler.
        conn.close()
        lock.acquire()
        # lock.release()
        tmp = dictionary['running_number']
        tmp = tmp - 1
        dictionary['running_number'] = tmp
        lock.release()
        return
    # loss_array = normalization(loss,max_loss)
    result = influx_client.query("select * from " + measure_t + " order by desc limit 1")
    key = result.keys()
    result_inter = result[key[0]]
    result_items = list(result_inter)
    trains_step = int(result_items[0]['training_step'])
    step_to_train = trains_step
    if trains_step <= 200:
        modekk_z = 1
    if modekk_z != 1:
        print("Get data first time")
        # Fit loss-vs-step via NNLS and predict the needed steps, clamped
        # between half the budget and the full budget.
        data_in, step_x = make_dataset_nnls(loss, max_loss)
        step_to_train = predict_step_nnls(data_in, step_x, measure, trains_step, math.ceil(trains_step * 0.5))
    else:
        step_to_train = trains_step
    # Re-check measure_up for a rescale that happened while predicting.
    res1 = influx_client.query("select * from " + measure_up + " order by desc limit 1")
    key1 = res1.keys()
    res1_inter = res1[key1[0]]
    res1_items = list(res1_inter)
    retry = int(res1_items[0]['retry'])
    allow_read = load_config(allow_path)
    retry_now = int(allow_read['retry'])
    if retry_now != retry:
        new_ps = int(res1_items[0]['ps'])
        new_worker = int(res1_items[0]['worker'])
        step_to_train = math.ceil(step_to_train * int(allow_read['worker']) / new_worker)
        allow_read['retry'] = retry
        allow_read['ps'] = new_ps
        allow_read['worker'] = new_worker
        save_config2(allow_read, allow_path)
    # Publish the first prediction.
    step_items = [
        {
            'measurement': measure_t,
            'tags': {
                'task': int(pre_list[-1]),
                'runtimes': int(pre_list[-1]),
                'retry': int(retry)
            },
            'fields': {
                'training_step': step_to_train,
                'ps': int(allow_read['ps']),
                'worker': int(allow_read['worker'])
            }
        }
    ]
    print("saved in db")
    print(step_to_train)
    influx_client.write_points(step_items, time_precision="ms", database="PREDICT")
    print("Writed in db")
    print("First prdict cost time: " + str(time.time() - time_start))
    iftrain = 0
    time_total = 0
    if modekk != 1:
        modekk = modekk_z
    # --- Main monitor loop: poll job state; every predict_fre iterations
    # re-run the NNLS prediction, otherwise just refresh the config ---
    while True:
        if modekk == 1:
            break
        # selected_node = select_node(influx_client, measure_s)
        res1 = influx_client.query(
            "select * from " + measure_s + " where nodes='worker0' order by desc limit 10")
        key1 = res1.keys()
        print(key1[:])
        res1_inter = res1[key1[0]]
        res1_items = list(res1_inter)
        print(res1_items[:])
        step_now = int(res1_items[0]['step'])
        # Average per-step wall time over the last 10 samples drives sleeps.
        time_mean_list = [float(i['time_d']) for i in res1_items]
        time_mean = np.mean(time_mean_list)
        print(time_mean)
        # time_sleep = predict_fre * time_mean
        print(step_now)
        ns_list = get_ns(v1)
        print(ns_list)
        print(aim_ns)
        print(aim_ns in ns_list)
        write_ss = influx_client.query("select * from " + measure_write + " order by desc limit 1")
        key_write = write_ss.keys()
        print(key_write[:])
        write_inter = write_ss[key_write[0]]
        write_items = list(write_inter)
        print(write_items[:])
        write_now = int(write_items[0]['modulate'])
        if (aim_ns not in ns_list) and (write_now == 0):
            # Namespace gone and no rescale in flight: double-check after 15s.
            time.sleep(15)
            ns_list = get_ns(v1)
            write_ss = influx_client.query("select * from " + measure_write + " order by desc limit 1")
            key_write = write_ss.keys()
            # print(key_write[:])
            write_inter = write_ss[key_write[0]]
            write_items = list(write_inter)
            # print(write_items[:])
            write_now = int(write_items[0]['modulate'])
            if (aim_ns not in ns_list) and (write_now == 0):
                print("namespace is missing")
                break
        pod_status = [i.status.phase for i in v1.list_namespaced_pod(aim_ns).items]
        print(pod_status)
        print("going on")
        print(measure)
        print(math.ceil(step_to_train * 0.85))
        print(step_now)
        write_ss = influx_client.query("select * from " + measure_write + " order by desc limit 1")
        key_write = write_ss.keys()
        write_inter = write_ss[key_write[0]]
        write_items = list(write_inter)
        write_now = int(write_items[0]['modulate'])
        if ('Succeeded' in pod_status or 'Failed' in pod_status) and (write_now == 0):
            print("Job is ended")
            break
        else:
            time.sleep(3)
            print("Job is going")
            print(math.ceil(step_to_train * 0.85))
            print(step_now)
            # Past 85% of the predicted steps: stop re-predicting.
            panduan_going = math.ceil(step_to_train * 0.85)
            print(type(step_now))
            step_now = int(step_now)
            print(type(step_now))
            print(step_now)
            if step_now >= panduan_going:
                print("It need not to predict")
                modekk = 1
                break
            else:
                time.sleep(2)
                print("Job is going to load")
        time.sleep(2.5)
        print(measure)
        print(length)
        print(type(length))
        print("load data again")
        if time_total >= predict_fre:
            # Time for a fresh NNLS prediction from the newest loss data.
            result = influx_client.query("select * from " + measure_t + " order by desc limit 1")
            key = result.keys()
            result_inter = result[key[0]]
            result_items = list(result_inter)
            trains_step = int(result_items[0]['training_step'])
            if step_now >= trains_step - 3:
                print("This process is ended!!")
                break
            # loss, max_loss = load_data_nnls(min_steps=min_steps, length=length, measure=measure, first=False)
            loss,max_loss = load_data_nnls(min_steps=min_steps,length=length,measure=measure,first=False)
            print("start to nnls process!!")
            data_in,step_x = make_dataset_nnls(loss,max_loss)
            step_to_train = predict_step_nnls(data_in,step_x,measure,trains_step,math.ceil(trains_step*0.5))
            # step_to_train = step_predict(data=loss[:], model=model, input_dim=1, predict_step=10, time_step=20,
            #                              div=0.01, top_step=trains_step, low_step=math.ceil(trains_step * 0.5),
            #                              measure=measure)
            res2 = influx_client.query("select * from " + measure_up + " order by desc limit 1")
            key2 = list(res2.keys())
            res2_inter = res2[key2[0]]
            res2_items = list(res2_inter)
            retry = int(res2_items[0]['retry'])
            allow_read = load_config(allow_path)
            retry_now = int(allow_read['retry'])
            new_ps = int(allow_read['ps'])
            new_worker = int(allow_read['worker'])
            if retry_now != retry:
                # A rescale happened: rescale the prediction to new workers.
                new_ps = int(res2_items[0]['ps'])
                new_worker = int(res2_items[0]['worker'])
                step_to_train = math.ceil(step_to_train * int(allow_read['worker']) / new_worker)
                allow_read['retry'] = retry
                allow_read['worker'] = new_worker
                allow_read['ps'] = new_ps
                save_config2(allow_read, allow_path)
            step_items = [
                {
                    'measurement': measure_t,
                    'tags': {
                        'task': int(pre_list[-1]),
                        'runtimes': int(pre_list[-1]),
                        'retry': int(retry)
                    },
                    'fields': {
                        'training_step': step_to_train,
                        'ps': new_ps,
                        'worker': new_worker
                    }
                }
            ]
            print(step_to_train)
            influx_client.write_points(step_items, time_precision="ms", database="PREDICT")
            print("Writed result in db")
            iftrain = iftrain + 1
            print("Predict " + str(iftrain) + " costs time: " + str(time.time() - time_start))
            time_total = 0
            time_total += 1
            time.sleep(float(time_mean))
        else:
            # No re-prediction this round: just refresh config/state and
            # re-publish the current plan.
            result = influx_client.query("select * from " + measure_t + " order by desc limit 1")
            key = result.keys()
            result_inter = result[key[0]]
            result_items = list(result_inter)
            trains_step = int(result_items[0]['training_step'])
            if step_now >= trains_step - 3:
                print("This process is ended!!")
                break
            retry_now = int(result_items[0]['retry'])
            allow_read = load_config(allow_path)
            print("Reload success!!")
            allow_read['retry'] = retry_now
            ps_now = int(result_items[0]['ps'])
            worker_now = int(result_items[0]['worker'])
            allow_read['worker'] = worker_now
            allow_read['ps'] = ps_now
            save_config2(allow_read, allow_path)
            print("save success!!")
            result2 = influx_client.query("select * from " + measure_up + " order by desc limit 1")
            key2 = result2.keys()
            result_inter2 = result2[key2[0]]
            result_items2 = list(result_inter2)
            retry_top = int(result_items2[0]['retry'])
            if retry_top != retry_now:
                new_ps = int(result_items2[0]['ps'])
                new_worker = int(result_items2[0]['worker'])
                trains_step = math.ceil(trains_step * worker_now / new_worker)
                allow_read = load_config(allow_path)
                allow_read['retry'] = retry_top
                allow_read['ps'] = new_ps
                allow_read['worker'] = new_worker
                save_config2(allow_read, allow_path)
                print("saved successful!!")
            # print(trains_step)
            step_items = [
                {
                    'measurement': measure_t,
                    'tags': {
                        'task': int(pre_list[-1]),
                        'runtimes': int(pre_list[-1]),
                        'retry': int(retry_top)
                    },
                    'fields': {
                        'training_step': int(trains_step),
                        'ps': int(allow_read['ps']),
                        'worker': int(allow_read['worker'])
                    }
                }
            ]
            print("saved in db")
            influx_client.write_points(step_items, time_precision="ms", database="PREDICT")
            print("Writed in db")
            time_total += 1
            step_to_train = trains_step
            time.sleep(float(time_mean) * 0.8)
    if modekk == 1:
        # --- No-prediction mode: just track the job until it completes,
        # keeping the config files and PREDICT measurement in sync ---
        while True:
            write_ss = influx_client.query("select * from " + measure_write + " order by desc limit 1")
            key_write = write_ss.keys()
            write_inter = write_ss[key_write[0]]
            write_items = list(write_inter)
            # print(write_items[:])
            write_now = int(write_items[0]['modulate'])
            pod_status = [i.status.phase for i in v1.list_namespaced_pod(aim_ns).items]
            if ('Succeeded' in pod_status or 'Failed' in pod_status) and (write_now == 0):
                print("Job is ended")
                break
            write_ss = influx_client.query("select * from " + measure_write + " order by desc limit 1")
            ns_list = get_ns(v1)
            key_write = write_ss.keys()
            write_inter = write_ss[key_write[0]]
            write_items = list(write_inter)
            write_now = int(write_items[0]['modulate'])
            if (aim_ns not in ns_list) and (write_now == 0):
                # Namespace gone and no rescale in flight: double-check.
                time.sleep(9)
                ns_list = get_ns(v1)
                write_ss = influx_client.query("select * from " + measure_write + " order by desc limit 1")
                key_write = write_ss.keys()
                # print(key_write[:])
                write_inter = write_ss[key_write[0]]
                write_items = list(write_inter)
                # print(write_items[:])
                write_now = int(write_items[0]['modulate'])
                if (aim_ns not in ns_list) and (write_now == 0):
                    print("namespace is missing")
                    break
            # print(pod_status)
            print("going on")
            res1 = influx_client.query("select * from " + measure_s + " where nodes='worker0' order by desc limit 3")
            key1 = res1.keys()
            res1_inter = res1[key1[0]]
            res1_items = list(res1_inter)
            step_now = int(res1_items[0]['step'])
            time_mean_list = [float(i['time_d']) for i in res1_items]
            time_mean = np.mean(time_mean_list)
            result = influx_client.query("select * from " + measure_t + " order by desc limit 1")
            key = result.keys()
            result_inter = result[key[0]]
            result_items = list(result_inter)
            trains_step = int(result_items[0]['training_step'])
            if step_now >= trains_step - 3:
                print("This process is ended!!")
                break
            retry_now = int(result_items[0]['retry'])
            allow_read = load_config(allow_path)
            print("Reload success!!")
            allow_read['retry'] = retry_now
            ps_now = int(result_items[0]['ps'])
            worker_now = int(result_items[0]['worker'])
            allow_read['worker'] = worker_now
            allow_read['ps'] = ps_now
            save_config2(allow_read, allow_path)
            print("save success!!")
            result2 = influx_client.query("select * from " + measure_up + " order by desc limit 1")
            key2 = result2.keys()
            # print(key2)
            result_inter2 = result2[key2[0]]
            result_items2 = list(result_inter2)
            # print(result_items2)
            retry_top = int(result_items2[0]['retry'])
            if retry_top != retry_now:
                new_ps = int(result_items2[0]['ps'])
                new_worker = int(result_items2[0]['worker'])
                trains_step = math.ceil(trains_step * worker_now / new_worker)
                allow_read = load_config(allow_path)
                allow_read['retry'] = retry_top
                allow_read['ps'] = new_ps
                allow_read['worker'] = new_worker
                save_config2(allow_read, allow_path)
                print("saved successful!!")
                # print(trains_step)
            step_items = [
                {
                    'measurement': measure_t,
                    'tags': {
                        'task': int(pre_list[-1]),
                        'runtimes': int(pre_list[-1]),
                        'retry': int(retry_top)
                    },
                    'fields': {
                        'training_step': int(trains_step),
                        'ps': int(allow_read['ps']),
                        'worker': int(allow_read['worker'])
                    }
                }
            ]
            print("saved in db")
            influx_client.write_points(step_items, time_precision="ms", database="PREDICT")
            print("Writed in db")
            time.sleep(float(0.3 * time_mean))
    else:
        time.sleep(float(time_mean))
    # --- Teardown: close the socket and decrement the shared counter ---
    conn.close()
    lock.acquire()
    # lock.release()
    tmp = dictionary['running_number']
    tmp = tmp - 1
    dictionary['running_number'] = tmp
    lock.release()
    # print(data_x.shape)
    # print(data_y.shape)
    # print(data_twice_x.shape)
    # print(data_twice_y.shape)
    # print(normalization(loss,max_loss))
    # print(data_x)
    # print(data_twice_x)
    time_end = time.time()
    print(time_end - time_start)
    print("This prediction end!")
if __name__ == '__main__':
    # Listening address for incoming prediction requests.
    HOST = '192.168.128.5'
    PORT = 12527
    ADDR = (HOST, PORT)
    # Shared state: a manager-backed dict counts how many handler
    # processes are currently running; the manager lock guards updates.
    mgr = multiprocessing.Manager()
    dictionary = mgr.dict()
    dictionary['running_number'] = 0
    lock = mgr.Lock()
    # One pool process per accepted connection, up to pool_size.
    pool = multiprocessing.Pool(processes=45)
    pool_size = 45
    connect_try = 5
    predict_fre = 100
    # new_mem = joblib.load('est_mem.pkl')
    # new_cpu = joblib.load('est_cpu.pkl')
    server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    server.bind(ADDR)
    server.listen(5)
    print(dictionary['running_number'])
    print("Waiting for connection...")
    # BUGFIX: the cleanup calls below were unreachable because the accept
    # loop never exits; run the loop under try/finally so Ctrl-C (or any
    # fatal error) still drains the pool and closes the listening socket.
    try:
        while True:
            conn, addr = server.accept()
            print("Get an request!")
            # step_predict_handle(conn,dictionary,lock,pool_size=5,connect_try=5,predict_fre=150)
            # pool.apply_async(step_predict_handle, (conn, dictionary, lock,pool_size,connect_try,predict_fre))
            pool.apply_async(step_resource_predict_handle,
                             (conn, dictionary, lock, pool_size, connect_try, predict_fre))
            print("Allocate Pool Process Success")
    except KeyboardInterrupt:
        print("Interrupted, shutting down...")
    finally:
        pool.close()  # the pool accepts no new tasks
        pool.join()   # wait for in-flight handler processes to finish
        server.close()
# time_start = time.time()
# measure = "VGG 1"
#
# loss,max_loss = load_data(min_steps=200,length=800,measure="VGG 1")
# # loss_array = normalization(loss,max_loss)
# data_x,data_y,data_twice_x,data_twice_y = make_dataset(loss,max_loss,20,10,1)
# data_x_lstm = reshape_for_lstm(data_x[:])
# data_y_lstm = reshape_for_lstm(data_y[:])
# data_twice_x_1 = data_twice_x[:,1,:]
# data_twice_x_2 = data_twice_x[:,0,:]
# data_twice_y = reshape_for_lstm(data_twice_y[:])
# data_twice_x_1_lstm = reshape_for_lstm(data_twice_x_1[:])
# data_twice_x_2_lstm = reshape_for_lstm(data_twice_x_2[:])
#
#
#
# # model = load_model('save_model/31122019-031018-e10.h5')
# if os.path.exists("save_model/%s.h5" % measure):
# model = load_model('save_model/%s.h5' % measure)
# else:
# model = build_lstm_model(time_step=20,predict_step=10,input_dim=1)
#
# history, model = train(x=data_x_lstm,y=data_y,epochs=100,batch_size=32,save_dir='save_model',model=model,measure=measure)
#
# step_to_train = step_predict(data=loss[:],model=model,input_dim=1,predict_step=10,time_step=20,div=0.01,top_step=2000,measure=measure)
# print(step_to_train)
# # print(data_x.shape)
# # print(data_y.shape)
# # print(data_twice_x.shape)
# # print(data_twice_y.shape)
# # print(normalization(loss,max_loss))
# # print(data_x)
# # print(data_twice_x)
# time_end = time.time()
# print(time_end - time_start)
| [
"keras.backend.get_value",
"kubernetes.client.CoreV1Api",
"math.sqrt",
"scipy.optimize.nnls",
"time.sleep",
"numpy.array",
"keras.layers.Activation",
"keras.layers.Dense",
"numpy.arange",
"numpy.mean",
"os.path.exists",
"numpy.reshape",
"influxdb.InfluxDBClient",
"utils.Timer",
"json.dum... | [((2005, 2061), 'json.dumps', 'json.dumps', (['config_content'], {'ensure_ascii': '(False)', 'indent': '(4)'}), '(config_content, ensure_ascii=False, indent=4)\n', (2015, 2061), False, 'import json\n'), ((2577, 2592), 'json.loads', 'json.loads', (['res'], {}), '(res)\n', (2587, 2592), False, 'import json\n'), ((3157, 3190), 'kubernetes.client.Configuration', 'kubernetes.client.Configuration', ([], {}), '()\n', (3188, 3190), False, 'import kubernetes\n'), ((3369, 3412), 'kubernetes.client.ApiClient', 'kubernetes.client.ApiClient', (['aConfiguration'], {}), '(aConfiguration)\n', (3396, 3412), False, 'import kubernetes\n'), ((3422, 3461), 'kubernetes.client.CoreV1Api', 'kubernetes.client.CoreV1Api', (['aApiClient'], {}), '(aApiClient)\n', (3449, 3461), False, 'import kubernetes\n'), ((3511, 3615), 'influxdb.InfluxDBClient', 'influxdb.InfluxDBClient', ([], {'host': 'host', 'port': '(8086)', 'username': '"""admin"""', 'password': '"""<PASSWORD>"""', 'database': 'db'}), "(host=host, port=8086, username='admin', password=\n '<PASSWORD>', database=db)\n", (3534, 3615), False, 'import influxdb\n'), ((12157, 12182), 'scipy.optimize.nnls', 'opt.nnls', (['data_in', 'step_x'], {}), '(data_in, step_x)\n', (12165, 12182), True, 'import scipy.optimize as opt\n'), ((13196, 13229), 'kubernetes.client.Configuration', 'kubernetes.client.Configuration', ([], {}), '()\n', (13227, 13229), False, 'import kubernetes\n'), ((13408, 13451), 'kubernetes.client.ApiClient', 'kubernetes.client.ApiClient', (['aConfiguration'], {}), '(aConfiguration)\n', (13435, 13451), False, 'import kubernetes\n'), ((13461, 13500), 'kubernetes.client.CoreV1Api', 'kubernetes.client.CoreV1Api', (['aApiClient'], {}), '(aApiClient)\n', (13488, 13500), False, 'import kubernetes\n'), ((13550, 13654), 'influxdb.InfluxDBClient', 'influxdb.InfluxDBClient', ([], {'host': 'host', 'port': '(8086)', 'username': '"""admin"""', 'password': '"""<PASSWORD>"""', 'database': 'db'}), "(host=host, port=8086, 
username='admin', password=\n '<PASSWORD>', database=db)\n", (13573, 13654), False, 'import influxdb\n'), ((22229, 22251), 'numpy.asarray', 'np.asarray', (['loss_array'], {}), '(loss_array)\n', (22239, 22251), True, 'import numpy as np\n'), ((22426, 22467), 'numpy.array', 'np.array', (['[(1 / i) for i in step_arrange]'], {}), '([(1 / i) for i in step_arrange])\n', (22434, 22467), True, 'import numpy as np\n'), ((22504, 22547), 'numpy.array', 'np.array', (['[[i / max_loss, 1] for i in data]'], {}), '([[i / max_loss, 1] for i in data])\n', (22512, 22547), True, 'import numpy as np\n'), ((23597, 23609), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (23607, 23609), False, 'from keras.models import Sequential, Model\n'), ((23971, 23988), 'keras.optimizers.Adam', 'optimizers.Adam', ([], {}), '()\n', (23986, 23988), False, 'from keras import optimizers\n'), ((24135, 24211), 'keras.layers.Input', 'Input', ([], {'shape': '(time_step, input_dim)', 'dtype': '"""float32"""', 'name': '"""First_Time_Step"""'}), "(shape=(time_step, input_dim), dtype='float32', name='First_Time_Step')\n", (24140, 24211), False, 'from keras.layers import LSTM, Dense, Activation, Dropout, Input, concatenate\n'), ((24223, 24308), 'keras.layers.Input', 'Input', ([], {'shape': '(time_step, input_dim)', 'dtype': '"""float32"""', 'name': '"""Pre_First_Time_Step"""'}), "(shape=(time_step, input_dim), dtype='float32', name='Pre_First_Time_Step'\n )\n", (24228, 24308), False, 'from keras.layers import LSTM, Dense, Activation, Dropout, Input, concatenate\n'), ((24560, 24595), 'keras.layers.concatenate', 'concatenate', (['[lstm2, lstm1]'], {'axis': '(1)'}), '([lstm2, lstm1], axis=1)\n', (24571, 24595), False, 'from keras.layers import LSTM, Dense, Activation, Dropout, Input, concatenate\n'), ((24787, 24833), 'keras.models.Model', 'Model', ([], {'input': '[input_1, input_2]', 'output': 'output'}), '(input=[input_1, input_2], output=output)\n', (24792, 24833), False, 'from keras.models import 
Sequential, Model\n'), ((24868, 24885), 'keras.optimizers.Adam', 'optimizers.Adam', ([], {}), '()\n', (24883, 24885), False, 'from keras import optimizers\n'), ((25060, 25093), 'keras.models.load_model', 'keras.models.load_model', (['filepath'], {}), '(filepath)\n', (25083, 25093), False, 'import keras\n'), ((25152, 25203), 'numpy.reshape', 'np.reshape', (['data', '[data.shape[0], data.shape[1], 1]'], {}), '(data, [data.shape[0], data.shape[1], 1])\n', (25162, 25203), True, 'import numpy as np\n'), ((25267, 25299), 'math.ceil', 'math.ceil', (['(data.shape[0] * split)'], {}), '(data.shape[0] * split)\n', (25276, 25299), False, 'import math\n'), ((25666, 25673), 'utils.Timer', 'Timer', ([], {}), '()\n', (25671, 25673), False, 'from utils import Timer\n'), ((26243, 26286), 'os.path.join', 'os.path.join', (['save_dir', "('%s.h5' % measure_s)"], {}), "(save_dir, '%s.h5' % measure_s)\n", (26255, 26286), False, 'import os\n'), ((26303, 26335), 'keras.callbacks.LearningRateScheduler', 'LearningRateScheduler', (['scheduler'], {}), '(scheduler)\n', (26324, 26335), False, 'from keras.callbacks import EarlyStopping, ModelCheckpoint, LearningRateScheduler, ReduceLROnPlateau\n'), ((27741, 27748), 'utils.Timer', 'Timer', ([], {}), '()\n', (27746, 27748), False, 'from utils import Timer\n'), ((28240, 28283), 'os.path.join', 'os.path.join', (['save_dir', "('%s.h5' % measure_s)"], {}), "(save_dir, '%s.h5' % measure_s)\n", (28252, 28283), False, 'import os\n'), ((28300, 28332), 'keras.callbacks.LearningRateScheduler', 'LearningRateScheduler', (['scheduler'], {}), '(scheduler)\n', (28321, 28332), False, 'from keras.callbacks import EarlyStopping, ModelCheckpoint, LearningRateScheduler, ReduceLROnPlateau\n'), ((29575, 29618), 'numpy.reshape', 'np.reshape', (['data', '(1, time_step, input_dim)'], {}), '(data, (1, time_step, input_dim))\n', (29585, 29618), True, 'import numpy as np\n'), ((29718, 29758), 'numpy.reshape', 'np.reshape', (['predict_y', '(predict_step, 1)'], {}), '(predict_y, 
(predict_step, 1))\n', (29728, 29758), True, 'import numpy as np\n'), ((29863, 29907), 'numpy.reshape', 'np.reshape', (['data1', '(1, time_step, input_dim)'], {}), '(data1, (1, time_step, input_dim))\n', (29873, 29907), True, 'import numpy as np\n'), ((29917, 29961), 'numpy.reshape', 'np.reshape', (['data2', '(1, time_step, input_dim)'], {}), '(data2, (1, time_step, input_dim))\n', (29927, 29961), True, 'import numpy as np\n'), ((30070, 30110), 'numpy.reshape', 'np.reshape', (['predict_y', '(predict_step, 1)'], {}), '(predict_y, (predict_step, 1))\n', (30080, 30110), True, 'import numpy as np\n'), ((32087, 32134), 'numpy.reshape', 'np.reshape', (['data_fit', '(1, time_step, input_dim)'], {}), '(data_fit, (1, time_step, input_dim))\n', (32097, 32134), True, 'import numpy as np\n'), ((32288, 32311), 'numpy.squeeze', 'np.squeeze', (['predict_res'], {}), '(predict_res)\n', (32298, 32311), True, 'import numpy as np\n'), ((34550, 34599), 'numpy.reshape', 'np.reshape', (['data_fit_1', '(1, time_step, input_dim)'], {}), '(data_fit_1, (1, time_step, input_dim))\n', (34560, 34599), True, 'import numpy as np\n'), ((34614, 34663), 'numpy.reshape', 'np.reshape', (['data_fit_2', '(1, time_step, input_dim)'], {}), '(data_fit_2, (1, time_step, input_dim))\n', (34624, 34663), True, 'import numpy as np\n'), ((34832, 34855), 'numpy.squeeze', 'np.squeeze', (['predict_res'], {}), '(predict_res)\n', (34842, 34855), True, 'import numpy as np\n'), ((36821, 36877), 'json.dumps', 'json.dumps', (['config_content'], {'ensure_ascii': '(False)', 'indent': '(4)'}), '(config_content, ensure_ascii=False, indent=4)\n', (36831, 36877), False, 'import json\n'), ((37050, 37094), 'os.path.join', 'os.path.join', (['"""/tfdata/k8snfs/setad2/"""', 'name'], {}), "('/tfdata/k8snfs/setad2/', name)\n", (37062, 37094), False, 'import os\n'), ((37444, 37477), 'kubernetes.client.Configuration', 'kubernetes.client.Configuration', ([], {}), '()\n', (37475, 37477), False, 'import kubernetes\n'), ((37656, 37699), 
'kubernetes.client.ApiClient', 'kubernetes.client.ApiClient', (['aConfiguration'], {}), '(aConfiguration)\n', (37683, 37699), False, 'import kubernetes\n'), ((37709, 37748), 'kubernetes.client.CoreV1Api', 'kubernetes.client.CoreV1Api', (['aApiClient'], {}), '(aApiClient)\n', (37736, 37748), False, 'import kubernetes\n'), ((38070, 38192), 'influxdb.InfluxDBClient', 'influxdb.InfluxDBClient', ([], {'host': '"""192.168.128.10"""', 'port': '(8086)', 'username': '"""admin"""', 'password': '"""<PASSWORD>"""', 'database': '"""PREDICT"""'}), "(host='192.168.128.10', port=8086, username='admin',\n password='<PASSWORD>', database='PREDICT')\n", (38093, 38192), False, 'import influxdb\n'), ((45055, 45083), 'math.ceil', 'math.ceil', (['(trains_step * 0.2)'], {}), '(trains_step * 0.2)\n', (45064, 45083), False, 'import math\n'), ((45095, 45121), 'math.ceil', 'math.ceil', (['(min_steps * 0.6)'], {}), '(min_steps * 0.6)\n', (45104, 45121), False, 'import math\n'), ((45202, 45213), 'time.time', 'time.time', ([], {}), '()\n', (45211, 45213), False, 'import time\n'), ((65056, 65067), 'time.time', 'time.time', ([], {}), '()\n', (65065, 65067), False, 'import time\n'), ((66129, 66162), 'kubernetes.client.Configuration', 'kubernetes.client.Configuration', ([], {}), '()\n', (66160, 66162), False, 'import kubernetes\n'), ((66341, 66384), 'kubernetes.client.ApiClient', 'kubernetes.client.ApiClient', (['aConfiguration'], {}), '(aConfiguration)\n', (66368, 66384), False, 'import kubernetes\n'), ((66394, 66433), 'kubernetes.client.CoreV1Api', 'kubernetes.client.CoreV1Api', (['aApiClient'], {}), '(aApiClient)\n', (66421, 66433), False, 'import kubernetes\n'), ((66609, 66731), 'influxdb.InfluxDBClient', 'influxdb.InfluxDBClient', ([], {'host': '"""192.168.128.10"""', 'port': '(8086)', 'username': '"""admin"""', 'password': '"""<PASSWORD>"""', 'database': '"""PREDICT"""'}), "(host='192.168.128.10', port=8086, username='admin',\n password='<PASSWORD>', database='PREDICT')\n", (66632, 66731), 
False, 'import influxdb\n'), ((73440, 73468), 'math.ceil', 'math.ceil', (['(trains_step * 0.2)'], {}), '(trains_step * 0.2)\n', (73449, 73468), False, 'import math\n'), ((73482, 73508), 'math.ceil', 'math.ceil', (['(min_steps * 0.4)'], {}), '(min_steps * 0.4)\n', (73491, 73508), False, 'import math\n'), ((73595, 73606), 'time.time', 'time.time', ([], {}), '()\n', (73604, 73606), False, 'import time\n'), ((89699, 89710), 'time.time', 'time.time', ([], {}), '()\n', (89708, 89710), False, 'import time\n'), ((89886, 89911), 'multiprocessing.Manager', 'multiprocessing.Manager', ([], {}), '()\n', (89909, 89911), False, 'import multiprocessing\n'), ((90010, 90044), 'multiprocessing.Pool', 'multiprocessing.Pool', ([], {'processes': '(45)'}), '(processes=45)\n', (90030, 90044), False, 'import multiprocessing\n'), ((90205, 90254), 'socket.socket', 'socket.socket', (['socket.AF_INET', 'socket.SOCK_STREAM'], {}), '(socket.AF_INET, socket.SOCK_STREAM)\n', (90218, 90254), False, 'import socket\n'), ((11287, 11307), 'numpy.mean', 'np.mean', (['tmp_loss[i]'], {}), '(tmp_loss[i])\n', (11294, 11307), True, 'import numpy as np\n'), ((21400, 21420), 'numpy.mean', 'np.mean', (['tmp_loss[i]'], {}), '(tmp_loss[i])\n', (21407, 21420), True, 'import numpy as np\n'), ((23624, 23697), 'keras.layers.LSTM', 'LSTM', ([], {'units': '(16)', 'input_shape': '(time_step, input_dim)', 'return_sequences': '(True)'}), '(units=16, input_shape=(time_step, input_dim), return_sequences=True)\n', (23628, 23697), False, 'from keras.layers import LSTM, Dense, Activation, Dropout, Input, concatenate\n'), ((23710, 23722), 'keras.layers.Dropout', 'Dropout', (['(0.2)'], {}), '(0.2)\n', (23717, 23722), False, 'from keras.layers import LSTM, Dense, Activation, Dropout, Input, concatenate\n'), ((23738, 23775), 'keras.layers.LSTM', 'LSTM', ([], {'units': '(64)', 'return_sequences': '(True)'}), '(units=64, return_sequences=True)\n', (23742, 23775), False, 'from keras.layers import LSTM, Dense, Activation, Dropout, 
Input, concatenate\n'), ((23790, 23829), 'keras.layers.LSTM', 'LSTM', ([], {'units': '(128)', 'return_sequences': '(False)'}), '(units=128, return_sequences=False)\n', (23794, 23829), False, 'from keras.layers import LSTM, Dense, Activation, Dropout, Input, concatenate\n'), ((23844, 23856), 'keras.layers.Dropout', 'Dropout', (['(0.2)'], {}), '(0.2)\n', (23851, 23856), False, 'from keras.layers import LSTM, Dense, Activation, Dropout, Input, concatenate\n'), ((23872, 23897), 'keras.layers.Dense', 'Dense', ([], {'units': 'predict_step'}), '(units=predict_step)\n', (23877, 23897), False, 'from keras.layers import LSTM, Dense, Activation, Dropout, Input, concatenate\n'), ((23913, 23933), 'keras.layers.Activation', 'Activation', (['"""linear"""'], {}), "('linear')\n", (23923, 23933), False, 'from keras.layers import LSTM, Dense, Activation, Dropout, Input, concatenate\n'), ((24313, 24386), 'keras.layers.LSTM', 'LSTM', ([], {'units': '(16)', 'input_shape': '(time_step, input_dim)', 'return_sequences': '(True)'}), '(units=16, input_shape=(time_step, input_dim), return_sequences=True)\n', (24317, 24386), False, 'from keras.layers import LSTM, Dense, Activation, Dropout, Input, concatenate\n'), ((24405, 24417), 'keras.layers.Dropout', 'Dropout', (['(0.2)'], {}), '(0.2)\n', (24412, 24417), False, 'from keras.layers import LSTM, Dense, Activation, Dropout, Input, concatenate\n'), ((24437, 24510), 'keras.layers.LSTM', 'LSTM', ([], {'units': '(16)', 'input_shape': '(time_step, input_dim)', 'return_sequences': '(True)'}), '(units=16, input_shape=(time_step, input_dim), return_sequences=True)\n', (24441, 24510), False, 'from keras.layers import LSTM, Dense, Activation, Dropout, Input, concatenate\n'), ((24529, 24541), 'keras.layers.Dropout', 'Dropout', (['(0.2)'], {}), '(0.2)\n', (24536, 24541), False, 'from keras.layers import LSTM, Dense, Activation, Dropout, Input, concatenate\n'), ((24603, 24640), 'keras.layers.LSTM', 'LSTM', ([], {'units': '(64)', 'return_sequences': 
'(True)'}), '(units=64, return_sequences=True)\n', (24607, 24640), False, 'from keras.layers import LSTM, Dense, Activation, Dropout, Input, concatenate\n'), ((24655, 24694), 'keras.layers.LSTM', 'LSTM', ([], {'units': '(128)', 'return_sequences': '(False)'}), '(units=128, return_sequences=False)\n', (24659, 24694), False, 'from keras.layers import LSTM, Dense, Activation, Dropout, Input, concatenate\n'), ((24707, 24732), 'keras.layers.Dense', 'Dense', ([], {'units': 'predict_step'}), '(units=predict_step)\n', (24712, 24732), False, 'from keras.layers import LSTM, Dense, Activation, Dropout, Input, concatenate\n'), ((24750, 24770), 'keras.layers.Activation', 'Activation', (['"""linear"""'], {}), "('linear')\n", (24760, 24770), False, 'from keras.layers import LSTM, Dense, Activation, Dropout, Input, concatenate\n'), ((25598, 25622), 'os.path.exists', 'os.path.exists', (['save_dir'], {}), '(save_dir)\n', (25612, 25622), False, 'import os\n'), ((25632, 25653), 'os.makedirs', 'os.makedirs', (['save_dir'], {}), '(save_dir)\n', (25643, 25653), False, 'import os\n'), ((26103, 26146), 'keras.backend.get_value', 'keras.backend.get_value', (['model.optimizer.lr'], {}), '(model.optimizer.lr)\n', (26126, 26146), False, 'import keras\n'), ((26362, 26408), 'keras.callbacks.EarlyStopping', 'EarlyStopping', ([], {'monitor': '"""val_loss"""', 'patience': '(10)'}), "(monitor='val_loss', patience=10)\n", (26375, 26408), False, 'from keras.callbacks import EarlyStopping, ModelCheckpoint, LearningRateScheduler, ReduceLROnPlateau\n'), ((26418, 26495), 'keras.callbacks.ModelCheckpoint', 'ModelCheckpoint', ([], {'filepath': 'save_fname', 'monitor': '"""val_loss"""', 'save_best_only': '(True)'}), "(filepath=save_fname, monitor='val_loss', save_best_only=True)\n", (26433, 26495), False, 'from keras.callbacks import EarlyStopping, ModelCheckpoint, LearningRateScheduler, ReduceLROnPlateau\n'), ((26505, 26632), 'keras.callbacks.ReduceLROnPlateau', 'ReduceLROnPlateau', ([], {'monitor': 
'"""val_loss"""', 'factor': '(0.1)', 'patience': '(10)', 'verbose': '(0)', 'mode': '"""auto"""', 'epsilon': '(0.001)', 'cooldown': '(0)', 'min_lr': '(0)'}), "(monitor='val_loss', factor=0.1, patience=10, verbose=0,\n mode='auto', epsilon=0.001, cooldown=0, min_lr=0)\n", (26522, 26632), False, 'from keras.callbacks import EarlyStopping, ModelCheckpoint, LearningRateScheduler, ReduceLROnPlateau\n'), ((27673, 27697), 'os.path.exists', 'os.path.exists', (['save_dir'], {}), '(save_dir)\n', (27687, 27697), False, 'import os\n'), ((27707, 27728), 'os.makedirs', 'os.makedirs', (['save_dir'], {}), '(save_dir)\n', (27718, 27728), False, 'import os\n'), ((28178, 28221), 'keras.backend.get_value', 'keras.backend.get_value', (['model.optimizer.lr'], {}), '(model.optimizer.lr)\n', (28201, 28221), False, 'import keras\n'), ((28359, 28405), 'keras.callbacks.EarlyStopping', 'EarlyStopping', ([], {'monitor': '"""val_loss"""', 'patience': '(10)'}), "(monitor='val_loss', patience=10)\n", (28372, 28405), False, 'from keras.callbacks import EarlyStopping, ModelCheckpoint, LearningRateScheduler, ReduceLROnPlateau\n'), ((28415, 28492), 'keras.callbacks.ModelCheckpoint', 'ModelCheckpoint', ([], {'filepath': 'save_fname', 'monitor': '"""val_loss"""', 'save_best_only': '(True)'}), "(filepath=save_fname, monitor='val_loss', save_best_only=True)\n", (28430, 28492), False, 'from keras.callbacks import EarlyStopping, ModelCheckpoint, LearningRateScheduler, ReduceLROnPlateau\n'), ((28502, 28629), 'keras.callbacks.ReduceLROnPlateau', 'ReduceLROnPlateau', ([], {'monitor': '"""val_loss"""', 'factor': '(0.1)', 'patience': '(10)', 'verbose': '(0)', 'mode': '"""auto"""', 'epsilon': '(0.001)', 'cooldown': '(0)', 'min_lr': '(0)'}), "(monitor='val_loss', factor=0.1, patience=10, verbose=0,\n mode='auto', epsilon=0.001, cooldown=0, min_lr=0)\n", (28519, 28629), False, 'from keras.callbacks import EarlyStopping, ModelCheckpoint, LearningRateScheduler, ReduceLROnPlateau\n'), ((30428, 30461), 'numpy.reshape', 
'np.reshape', (['pone', '(predict_step,)'], {}), '(pone, (predict_step,))\n', (30438, 30461), True, 'import numpy as np\n'), ((30950, 30983), 'numpy.reshape', 'np.reshape', (['pone', '(predict_step,)'], {}), '(pone, (predict_step,))\n', (30960, 30983), True, 'import numpy as np\n'), ((32837, 32859), 'numpy.mean', 'np.mean', (['data_div_base'], {}), '(data_div_base)\n', (32844, 32859), True, 'import numpy as np\n'), ((33091, 33138), 'numpy.reshape', 'np.reshape', (['data_fit', '(1, time_step, input_dim)'], {}), '(data_fit, (1, time_step, input_dim))\n', (33101, 33138), True, 'import numpy as np\n'), ((33311, 33334), 'numpy.squeeze', 'np.squeeze', (['predict_res'], {}), '(predict_res)\n', (33321, 33334), True, 'import numpy as np\n'), ((35381, 35403), 'numpy.mean', 'np.mean', (['data_div_base'], {}), '(data_div_base)\n', (35388, 35403), True, 'import numpy as np\n'), ((35724, 35773), 'numpy.reshape', 'np.reshape', (['data_fit_1', '(1, time_step, input_dim)'], {}), '(data_fit_1, (1, time_step, input_dim))\n', (35734, 35773), True, 'import numpy as np\n'), ((35795, 35844), 'numpy.reshape', 'np.reshape', (['data_fit_2', '(1, time_step, input_dim)'], {}), '(data_fit_2, (1, time_step, input_dim))\n', (35805, 35844), True, 'import numpy as np\n'), ((36033, 36056), 'numpy.squeeze', 'np.squeeze', (['predict_res'], {}), '(predict_res)\n', (36043, 36056), True, 'import numpy as np\n'), ((37147, 37172), 'os.path.exists', 'os.path.exists', (['train_dir'], {}), '(train_dir)\n', (37161, 37172), False, 'import os\n'), ((37182, 37204), 'os.makedirs', 'os.makedirs', (['train_dir'], {}), '(train_dir)\n', (37193, 37204), False, 'import os\n'), ((38549, 38593), 're.match', 're.match', (['legal_pattern', 'msg_from_client_str'], {}), '(legal_pattern, msg_from_client_str)\n', (38557, 38593), False, 'import re\n'), ((40901, 40927), 'os.path.exists', 'os.path.exists', (['allow_path'], {}), '(allow_path)\n', (40915, 40927), False, 'import os\n'), ((43738, 43786), 'math.ceil', 'math.ceil', 
(['(trains_step * worker_now / new_worker)'], {}), '(trains_step * worker_now / new_worker)\n', (43747, 43786), False, 'import math\n'), ((46611, 46657), 'os.path.exists', 'os.path.exists', (["('save_model/%s.h5' % measure_s)"], {}), "('save_model/%s.h5' % measure_s)\n", (46625, 46657), False, 'import os\n'), ((49272, 49295), 'numpy.mean', 'np.mean', (['time_mean_list'], {}), '(time_mean_list)\n', (49279, 49295), True, 'import numpy as np\n'), ((51697, 51728), 'math.ceil', 'math.ceil', (['(step_to_train * 0.85)'], {}), '(step_to_train * 0.85)\n', (51706, 51728), False, 'import math\n'), ((52061, 52076), 'time.sleep', 'time.sleep', (['(2.2)'], {}), '(2.2)\n', (52071, 52076), False, 'import time\n'), ((67141, 67185), 're.match', 're.match', (['legal_pattern', 'msg_from_client_str'], {}), '(legal_pattern, msg_from_client_str)\n', (67149, 67185), False, 'import re\n'), ((69351, 69377), 'os.path.exists', 'os.path.exists', (['allow_path'], {}), '(allow_path)\n', (69365, 69377), False, 'import os\n'), ((72118, 72166), 'math.ceil', 'math.ceil', (['(trains_step * worker_now / new_worker)'], {}), '(trains_step * worker_now / new_worker)\n', (72127, 72166), False, 'import math\n'), ((76567, 76590), 'numpy.mean', 'np.mean', (['time_mean_list'], {}), '(time_mean_list)\n', (76574, 76590), True, 'import numpy as np\n'), ((78545, 78576), 'math.ceil', 'math.ceil', (['(step_to_train * 0.85)'], {}), '(step_to_train * 0.85)\n', (78554, 78576), False, 'import math\n'), ((78907, 78922), 'time.sleep', 'time.sleep', (['(2.5)'], {}), '(2.5)\n', (78917, 78922), False, 'import time\n'), ((22363, 22382), 'numpy.arange', 'np.arange', (['step_len'], {}), '(step_len)\n', (22372, 22382), True, 'import numpy as np\n'), ((22907, 22922), 'numpy.array', 'np.array', (['train'], {}), '(train)\n', (22915, 22922), True, 'import numpy as np\n'), ((23376, 23399), 'numpy.array', 'np.array', (['train_twice_x'], {}), '(train_twice_x)\n', (23384, 23399), True, 'import numpy as np\n'), ((23434, 23457), 
'numpy.array', 'np.array', (['train_twice_y'], {}), '(train_twice_y)\n', (23442, 23457), True, 'import numpy as np\n'), ((25923, 25966), 'keras.backend.get_value', 'keras.backend.get_value', (['model.optimizer.lr'], {}), '(model.optimizer.lr)\n', (25946, 25966), False, 'import keras\n'), ((25979, 26032), 'keras.backend.set_value', 'keras.backend.set_value', (['model.optimizer.lr', '(lr * 0.1)'], {}), '(model.optimizer.lr, lr * 0.1)\n', (26002, 26032), False, 'import keras\n'), ((27998, 28041), 'keras.backend.get_value', 'keras.backend.get_value', (['model.optimizer.lr'], {}), '(model.optimizer.lr)\n', (28021, 28041), False, 'import keras\n'), ((28054, 28107), 'keras.backend.set_value', 'keras.backend.set_value', (['model.optimizer.lr', '(lr * 0.1)'], {}), '(model.optimizer.lr, lr * 0.1)\n', (28077, 28107), False, 'import keras\n'), ((29668, 29687), 'numpy.array', 'np.array', (['predict_y'], {}), '(predict_y)\n', (29676, 29687), True, 'import numpy as np\n'), ((30020, 30039), 'numpy.array', 'np.array', (['predict_y'], {}), '(predict_y)\n', (30028, 30039), True, 'import numpy as np\n'), ((30528, 30545), 'numpy.array', 'np.array', (['predict'], {}), '(predict)\n', (30536, 30545), True, 'import numpy as np\n'), ((31050, 31067), 'numpy.array', 'np.array', (['predict'], {}), '(predict)\n', (31058, 31067), True, 'import numpy as np\n'), ((31218, 31231), 'math.sqrt', 'math.sqrt', (['xx'], {}), '(xx)\n', (31227, 31231), False, 'import math\n'), ((31791, 31805), 'numpy.array', 'np.array', (['data'], {}), '(data)\n', (31799, 31805), True, 'import numpy as np\n'), ((32026, 32057), 'numpy.array', 'np.array', (['data_fit[-time_step:]'], {}), '(data_fit[-time_step:])\n', (32034, 32057), True, 'import numpy as np\n'), ((34173, 34187), 'numpy.array', 'np.array', (['data'], {}), '(data)\n', (34181, 34187), True, 'import numpy as np\n'), ((34410, 34441), 'numpy.array', 'np.array', (['data_fit[-time_step:]'], {}), '(data_fit[-time_step:])\n', (34418, 34441), True, 'import numpy as 
np\n'), ((34473, 34522), 'numpy.array', 'np.array', (['data_fit[-1 * 2 * time_step:-time_step]'], {}), '(data_fit[-1 * 2 * time_step:-time_step])\n', (34481, 34522), True, 'import numpy as np\n'), ((41756, 41771), 'time.sleep', 'time.sleep', (['(2.5)'], {}), '(2.5)\n', (41766, 41771), False, 'import time\n'), ((50916, 50947), 'math.ceil', 'math.ceil', (['(step_to_train * 0.85)'], {}), '(step_to_train * 0.85)\n', (50925, 50947), False, 'import math\n'), ((51550, 51565), 'time.sleep', 'time.sleep', (['(1.2)'], {}), '(1.2)\n', (51560, 51565), False, 'import time\n'), ((51995, 52010), 'time.sleep', 'time.sleep', (['(1.2)'], {}), '(1.2)\n', (52005, 52010), False, 'import time\n'), ((62051, 62074), 'numpy.mean', 'np.mean', (['time_mean_list'], {}), '(time_mean_list)\n', (62058, 62074), True, 'import numpy as np\n'), ((70143, 70157), 'time.sleep', 'time.sleep', (['(15)'], {}), '(15)\n', (70153, 70157), False, 'import time\n'), ((74552, 74580), 'math.ceil', 'math.ceil', (['(trains_step * 0.5)'], {}), '(trains_step * 0.5)\n', (74561, 74580), False, 'import math\n'), ((77194, 77208), 'time.sleep', 'time.sleep', (['(15)'], {}), '(15)\n', (77204, 77208), False, 'import time\n'), ((77907, 77938), 'math.ceil', 'math.ceil', (['(step_to_train * 0.85)'], {}), '(step_to_train * 0.85)\n', (77916, 77938), False, 'import math\n'), ((78402, 78415), 'time.sleep', 'time.sleep', (['(3)'], {}), '(3)\n', (78412, 78415), False, 'import time\n'), ((78464, 78495), 'math.ceil', 'math.ceil', (['(step_to_train * 0.85)'], {}), '(step_to_train * 0.85)\n', (78473, 78495), False, 'import math\n'), ((78843, 78856), 'time.sleep', 'time.sleep', (['(2)'], {}), '(2)\n', (78853, 78856), False, 'import time\n'), ((86625, 86648), 'numpy.mean', 'np.mean', (['time_mean_list'], {}), '(time_mean_list)\n', (86632, 86648), True, 'import numpy as np\n'), ((30384, 30398), 'numpy.array', 'np.array', (['pone'], {}), '(pone)\n', (30392, 30398), True, 'import numpy as np\n'), ((30906, 30920), 'numpy.array', 'np.array', 
(['pone'], {}), '(pone)\n', (30914, 30920), True, 'import numpy as np\n'), ((33026, 33057), 'numpy.array', 'np.array', (['data_fit[-time_step:]'], {}), '(data_fit[-time_step:])\n', (33034, 33057), True, 'import numpy as np\n'), ((35572, 35603), 'numpy.array', 'np.array', (['data_fit[-time_step:]'], {}), '(data_fit[-time_step:])\n', (35580, 35603), True, 'import numpy as np\n'), ((35639, 35688), 'numpy.array', 'np.array', (['data_fit[-1 * 2 * time_step:-time_step]'], {}), '(data_fit[-1 * 2 * time_step:-time_step])\n', (35647, 35688), True, 'import numpy as np\n'), ((47202, 47230), 'math.ceil', 'math.ceil', (['(trains_step * 0.5)'], {}), '(trains_step * 0.5)\n', (47211, 47230), False, 'import math\n'), ((49985, 49998), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (49995, 49998), False, 'import time\n'), ((51405, 51420), 'time.sleep', 'time.sleep', (['(1.5)'], {}), '(1.5)\n', (51415, 51420), False, 'import time\n'), ((57423, 57471), 'math.ceil', 'math.ceil', (['(trains_step * worker_now / new_worker)'], {}), '(trains_step * worker_now / new_worker)\n', (57432, 57471), False, 'import math\n'), ((63592, 63640), 'math.ceil', 'math.ceil', (['(trains_step * worker_now / new_worker)'], {}), '(trains_step * worker_now / new_worker)\n', (63601, 63640), False, 'import math\n'), ((79870, 79898), 'math.ceil', 'math.ceil', (['(trains_step * 0.5)'], {}), '(trains_step * 0.5)\n', (79879, 79898), False, 'import math\n'), ((83316, 83364), 'math.ceil', 'math.ceil', (['(trains_step * worker_now / new_worker)'], {}), '(trains_step * worker_now / new_worker)\n', (83325, 83364), False, 'import math\n'), ((85606, 85619), 'time.sleep', 'time.sleep', (['(9)'], {}), '(9)\n', (85616, 85619), False, 'import time\n'), ((88030, 88078), 'math.ceil', 'math.ceil', (['(trains_step * worker_now / new_worker)'], {}), '(trains_step * worker_now / new_worker)\n', (88039, 88078), False, 'import math\n'), ((4666, 4680), 'time.sleep', 'time.sleep', (['(10)'], {}), '(10)\n', (4676, 4680), False, 
'import time\n'), ((7314, 7343), 'math.ceil', 'math.ceil', (['(trains_step * 0.85)'], {}), '(trains_step * 0.85)\n', (7323, 7343), False, 'import math\n'), ((8824, 8872), 'math.ceil', 'math.ceil', (['(trains_step * worker_now / new_worker)'], {}), '(trains_step * worker_now / new_worker)\n', (8833, 8872), False, 'import math\n'), ((12695, 12711), 'numpy.array', 'np.array', (['fed_in'], {}), '(fed_in)\n', (12703, 12711), True, 'import numpy as np\n'), ((14739, 14753), 'time.sleep', 'time.sleep', (['(10)'], {}), '(10)\n', (14749, 14753), False, 'import time\n'), ((17412, 17441), 'math.ceil', 'math.ceil', (['(trains_step * 0.85)'], {}), '(trains_step * 0.85)\n', (17421, 17441), False, 'import math\n'), ((19005, 19053), 'math.ceil', 'math.ceil', (['(trains_step * worker_now / new_worker)'], {}), '(trains_step * worker_now / new_worker)\n', (19014, 19053), False, 'import math\n'), ((48582, 48593), 'time.time', 'time.time', ([], {}), '()\n', (48591, 48593), False, 'import time\n'), ((54207, 54235), 'math.ceil', 'math.ceil', (['(trains_step * 0.5)'], {}), '(trains_step * 0.5)\n', (54216, 54235), False, 'import math\n'), ((59352, 59367), 'time.sleep', 'time.sleep', (['(1.5)'], {}), '(1.5)\n', (59362, 59367), False, 'import time\n'), ((59971, 59984), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (59981, 59984), False, 'import time\n'), ((75925, 75936), 'time.time', 'time.time', ([], {}), '()\n', (75934, 75936), False, 'import time\n'), ((55968, 55979), 'time.time', 'time.time', ([], {}), '()\n', (55977, 55979), False, 'import time\n'), ((81859, 81870), 'time.time', 'time.time', ([], {}), '()\n', (81868, 81870), False, 'import time\n')] |
#PyTrx (c) by <NAME>, <NAME>, <NAME>
#
#PyTrx is licensed under a MIT License.
#
#You should have received a copy of the license along with this
#work. If not, see <https://choosealicense.com/licenses/mit/>.
"""
The Area module handles the functionality for obtaining areal measurements from
oblique time-lapse imagery. Specifically, this module contains functions for:
(1) Performing automated and manual detection of areal extents in oblique
imagery; and (2) Determining real-world surface areas from oblique imagery.
"""
#Import packages
import matplotlib.pyplot as plt
from matplotlib.lines import Line2D
import numpy as np
import cv2
from PIL import Image
import ogr
import sys
#Import PyTrx functions and classes
from FileHandler import readMask
from Images import ImageSequence, enhanceImage
import Velocity
from CamEnv import projectUV, setProjection
#------------------------------------------------------------------------------
class Area(ImageSequence):
    """A class for processing change in area (i.e. a lake or plume) through an
    image sequence, with methods to calculate extent change in an image plane
    (px) and real areal change via georectification.

    :param imageList: List of images, for the :class:`PyTrx.Images.ImageSequence` object
    :type imageList: str/list
    :param cameraenv: Camera environment parameters which can be read into the :class:`PyTrx.CamEnv.CamEnv` object as a text file
    :type cameraenv: str
    :param hmatrix: Homography matrix
    :type hmatrix: arr
    :param calibFlag: An indicator of whether images are calibrated, for the :class:`PyTrx.Images.ImageSequence` object
    :type calibFlag: bool
    :param band: String denoting the desired image band, default to 'L' (grayscale)
    :type band: str, optional
    :param equal: Flag denoting whether histogram equalisation is applied to images (histogram equalisation is applied if True). Default to True.
    :type equal: bool, optional
    """

    #Initialisation of Area class object
    def __init__(self, imageList, cameraenv, hmatrix, calibFlag=True,
                 band='L', equal=True):
        #Initialise and inherit from the ImageSequence object
        ImageSequence.__init__(self, imageList, band, equal)

        #Set up class properties
        self._camEnv = cameraenv
        self._calibFlag = calibFlag
        self._pxplot = None
        self._maximg = 0
        self._mask = None
        self._enhance = None

        #The first image has no preceding image to register against, so a
        #null homography is prepended to align the matrix list with the
        #image sequence
        if hmatrix is not None:
            self._hmatrix = hmatrix
            self._hmatrix.insert(0, None)
        else:
            self._hmatrix = None

    def calcAutoAreas(self, colour=False, verify=False):
        """Detects areas of interest from a sequence of images, and returns
        pixel and xyz areas.

        :param colour: Flag to denote whether colour range for detection should be defined for each image or only once, default to False
        :type colour: bool, optional
        :param verify: Flag to denote whether detected polygons should be manually verified by user, default to False
        :type verify: bool, optional
        :returns: XYZ and UV area information
        :rtype: list
        """
        print('\n\nCOMMENCING AUTOMATED AREA DETECTION')

        #Get DEM from camera environment
        dem = self._camEnv.getDEM()

        #Get inverse projection variables through camera info
        invprojvars = setProjection(dem, self._camEnv._camloc,
                                    self._camEnv._camDirection,
                                    self._camEnv._radCorr,
                                    self._camEnv._tanCorr,
                                    self._camEnv._focLen,
                                    self._camEnv._camCen,
                                    self._camEnv._refImage)

        #If user is only defining the color range once, do so now on the
        #image with maximum extent
        if colour is False:

            #Define colour range if none is given
            if self._colourrange is None:

                #Get image (either corrected or distorted)
                if self._calibFlag is True:
                    cameraMatrix = self._camEnv.getCamMatrixCV2()
                    distortP = self._camEnv.getDistortCoeffsCV2()
                    setting = self._imageSet[self._maximg].getImageCorr(cameraMatrix,
                                                                        distortP)
                else:
                    setting = self._imageSet[self._maximg].getImageArray()

                #Get image name
                setimn = self._imageSet[self._maximg].getImageName()

                #Mask image if a mask is present
                setting = self._applyMask(setting)

                #Enhance image if enhancement parameters given
                if self._enhance is not None:
                    setting = enhanceImage(setting, self._enhance[0],
                                           self._enhance[1], self._enhance[2])

                #Define colour range interactively
                defineColourrange(setting, setimn, pxplot=self._pxplot)

        #Set up output datasets
        area = []

        #Cycle through image sequence (numbered from 0)
        for i in range(self.getLength()):

            #Get corrected/distorted image
            if self._calibFlag is True:
                cameraMatrix = self._camEnv.getCamMatrixCV2()
                distortP = self._camEnv.getDistortCoeffsCV2()
                img1 = self._imageSet[i].getImageCorr(cameraMatrix,
                                                      distortP)
            else:
                img1 = self._imageSet[i].getImageArray()

            #Get image name
            imn = self._imageSet[i].getImageName()

            #Make a copy of the image array and mask it if a mask is present
            img2 = self._applyMask(np.copy(img1))

            #Enhance image if enhancement parameters are present
            if self._enhance is not None:
                img2 = enhanceImage(img2, self._enhance[0], self._enhance[1],
                                    self._enhance[2])

            #Define colour range per-image if required
            if colour is True:
                defineColourrange(img2, imn, pxplot=self._pxplot)

            #Calculate extent, correcting for camera motion where a
            #homography matrix is available
            if self._hmatrix is not None:
                out = calcAutoArea(img2, imn, self._colourrange,
                                   self._hmatrix[i], self._threshold,
                                   invprojvars)
            else:
                out = calcAutoArea(img2, imn, self._colourrange, None,
                                   self._threshold, invprojvars)

            area.append(out)

            #Clear memory
            self._imageSet[i].clearImage()
            self._imageSet[i].clearImageArray()

        #Verify areas if flag is true
        if verify is True:
            area = self.verifyAreas(area, invprojvars)

        #Return all xy coordinates and pixel extents
        return area

    def _applyMask(self, img):
        """Mask an image array in-place with the stored boolean mask, if one
        has been set with :func:`setMax`. Returns the (possibly masked) image.

        :param img: Image array to mask
        :type img: arr
        :returns: Masked image array
        :rtype: arr
        """
        if self._mask is not None:
            booleanMask = np.array(self._mask, dtype=bool)
            booleanMask = np.invert(booleanMask)

            #Mask extent image with boolean array
            np.where(booleanMask, 0, img)    #Fit arrays to each other
            img[booleanMask] = 0             #Mask image with boolean mask object
        return img

    def calcManualAreas(self):
        """Manually define areas of interest in a sequence of images. User
        input is facilitated through an interactive plot to click around the
        area of interest

        :returns: XYZ and UV area information
        :rtype: list
        """
        #Fix: the original left this message as a bare string statement,
        #which is a no-op; it is now actually printed
        print('\n\nCOMMENCING MANUAL AREA DETECTION')

        #Set up output dataset
        area = []

        #Get DEM from camera environment
        dem = self._camEnv.getDEM()

        #Get inverse projection variables through camera info
        invprojvars = setProjection(dem, self._camEnv._camloc,
                                    self._camEnv._camDirection,
                                    self._camEnv._radCorr,
                                    self._camEnv._tanCorr,
                                    self._camEnv._focLen,
                                    self._camEnv._camCen,
                                    self._camEnv._refImage)

        #Cycle through images
        for i in range(self.getLength()):

            #Call corrected/uncorrected image
            if self._calibFlag is True:
                img = self._imageSet[i].getImageCorr(self._camEnv.getCamMatrixCV2(),
                                                     self._camEnv.getDistortCoeffsCV2())
            else:
                img = self._imageSet[i].getImageArray()

            #Get image name
            imn = self._imageSet[i].getImageName()

            #Manually define extent and append
            if self._hmatrix is not None:
                polys = calcManualArea(img, imn, self._hmatrix[i],
                                       self._pxplot, invprojvars)
            else:
                polys = calcManualArea(img, imn, None, self._pxplot,
                                       invprojvars)
            area.append(polys)

            #Clear memory
            self._imageSet[i].clearImage()
            self._imageSet[i].clearImageArray()

        #Return all extents, all cropped images and corresponding image names
        return area

    def verifyAreas(self, areas, invprojvars):
        """Method to manually verify all polygons in images. Plots sequential
        images with detected polygons and the user manually verifies them by
        clicking them.

        :param areas: XYZ and UV area information
        :type areas: list
        :param invprojvars: Inverse projection variables [X,Y,Z,uv0]
        :type invprojvars: list
        :returns: Verified XYZ and UV area information, in the same
            [[xyzarea, xyzpts], [pxextent, pxpts]] layout as the input
        :rtype: list
        """
        #Create output
        verified = []

        #Get UV point coordinates (pixel polygons are held at item[1][1])
        uvpts = [item[1][1] for item in areas]

        #Verify pixel polygons in each image
        for i in range(len(uvpts)):

            #Call corrected/uncorrected image
            if self._calibFlag is True:
                img1 = self._imageSet[i].getImageCorr(self._camEnv.getCamMatrixCV2(),
                                                      self._camEnv.getDistortCoeffsCV2())
            else:
                img1 = self._imageSet[i].getImageArray()

            #Get image name
            imn = self._imageSet[i].getImageName()

            #Verify polygons
            img2 = np.copy(img1)

            print('\nVerifying detected areas from ' + str(imn))

            #Set up empty output list
            verf = []

            #Function for click verification within a plot
            def onpick(event):

                #Get XY coordinates for clicked point in a plot
                v = []
                thisline = event.artist
                xdata = thisline.get_xdata()
                ydata = thisline.get_ydata()

                #Append XY coordinates
                for x, y in zip(xdata, ydata):
                    v.append([x, y])
                v2 = np.array(v, dtype=np.int32).reshape((len(xdata)), 2)
                verf.append(v2)

                #Verify extent if XY coordinates coincide with a
                #detected area
                ind = event.ind
                print('Verified extent at ' +
                      str(np.take(xdata, ind)[0]) + ', ' +
                      str(np.take(ydata, ind)[0]))

            #Plot image
            fig, ax1 = plt.subplots()
            fig.canvas.set_window_title(imn + ': Click on valid areas.')
            ax1.imshow(img2, cmap='gray')

            #Change plot extent if pxplot variable is present
            if self._pxplot is not None:
                ax1.axis([self._pxplot[0], self._pxplot[1],
                          self._pxplot[2], self._pxplot[3]])

            #Plot all detected areas
            for a in uvpts[i]:
                x = []
                y = []
                for b in a:
                    for c in b:
                        x.append(c[0])
                        y.append(c[1])
                line = Line2D(x, y, linestyle='-', color='y', picker=True)
                ax1.add_line(line)

            #Verify extents using onpick function
            fig.canvas.mpl_connect('pick_event', onpick)

            #Show plot
            plt.show()
            plt.close()

            #Retain all verified extents
            vpx = verf

            #Rasterise verified polygons to count the verified pixel extent
            h = img2.shape[0]
            w = img2.shape[1]
            px_im = Image.new('L', (w, h), 'black')
            px_im = np.array(px_im)
            cv2.drawContours(px_im, vpx, -1, (255, 255, 255), 4)
            for p in vpx:
                cv2.fillConvexPoly(px_im, p, color=(255, 255, 255))
            output = Image.fromarray(px_im)
            pixels = output.getdata()
            values = []
            for px in pixels:
                if px > 0:
                    values.append(px)
            pxext = len(values)

            print('Total verified extent: ' + str(pxext))

            #Get xyz coordinates with inverse projection
            if invprojvars is not None:
                vxyzpts = []
                vxyzarea = []

                #Fix: the inner loop variable previously shadowed the image
                #index i, so the memory-clearing calls below indexed the
                #image set with a coordinate array
                for vpoly in vpx:
                    #Inverse project points
                    proj = projectUV(vpoly, invprojvars)
                    vxyzpts.append(proj)
                    ogrpol = getOGRArea(proj)
                    vxyzarea.append(ogrpol.GetArea())

                print('Total verified area: ' + str(sum(vxyzarea)) + ' m')

                #Fix: append in [[xyz],[px]] order to match the layout
                #produced by calcAutoArea (the original flipped it)
                verified.append([[vxyzarea, vxyzpts], [pxext, vpx]])
            else:
                #No projection variables: record pixel information only,
                #rather than silently dropping the image
                verified.append([[None, None], [pxext, vpx]])

            #Clear memory
            self._imageSet[i].clearImage()
            self._imageSet[i].clearImageArray()

        #Rewrite verified area data
        return verified

    def setMax(self, maxMaskPath, maxim):
        """Set image in sequence which pictures the maximum extent of the area
        of interest.

        :param maxMaskPath: File path to mask with maximum extent
        :type maxMaskPath: str
        :param maxim: Image with maximum extent
        :type maxim: arr
        """
        #Calibrate image if calibration flag is true
        if self._calibFlag is True:
            cameraMatrix = self._camEnv.getCamMatrixCV2()
            distortP = self._camEnv.getDistortCoeffsCV2()
            maxi = self._imageSet[maxim].getImageCorr(cameraMatrix,
                                                      distortP)
        else:
            maxi = self._imageSet[maxim].getImageArray()

        #Define mask on image with maximum areal extent
        self._mask = readMask(maxi, maxMaskPath)

        #Retain image sequence number for image with maximum extent
        self._maximg = maxim

    def setPXExt(self, xmin, xmax, ymin, ymax):
        """Set plotting extent. Setting the plot extent will make it easier to
        define colour ranges and verify areas.

        :param xmin: X-axis minimum value.
        :type xmin: int
        :param xmax: X-axis maximum value.
        :type xmax: int
        :param ymin: Y-axis minimum value.
        :type ymin: int
        :param ymax: Y-axis maximum value.
        :type ymax: int
        """
        self._pxplot = [xmin, xmax, ymin, ymax]

    def setThreshold(self, number):
        """Set threshold for number of polgons kept from an image.

        :param number: Number denoting the number of detected polygons that will be retained
        :type number: int
        """
        self._threshold = number

    def setColourrange(self, upper, lower):
        """Manually define the RBG colour range that will be used to filter
        the image/images.

        :param upper: Upper value of colour range
        :type upper: int
        :param lower: Lower value of colour range
        :type lower: int
        """
        print('\nColour range defined from given values:')
        print('Upper RBG boundary: ', upper)
        print('Lower RBG boundary: ', lower)

        #Assign colour range
        self._colourrange = [upper, lower]

    def setEnhance(self, diff, phi, theta):
        """Set image enhancement parameters. Change brightness and contrast of
        image using phi and theta variables. Change phi and theta values
        accordingly. See enhanceImg function for detailed explanation of the
        parameters.

        :param diff: Inputted as either 'light or 'dark', signifying the intensity of the image pixels. 'light' increases the intensity such that dark pixels become much brighter and bright pixels become slightly brighter. 'dark' decreases the intensity such that dark pixels become much darker and bright pixels become slightly darker.
        :type diff: str
        :param phi: Defines the intensity of all pixel values
        :type phi: int
        :param theta: Defines the number of "colours" in the image, e.g. 3 signifies that all the pixels will be grouped into one of three pixel values
        :type theta: int .
        """
        self._enhance = diff, phi, theta
#------------------------------------------------------------------------------
def calcAutoArea(img, imn, colourrange, hmatrix=None, threshold=None,
                 invprojvars=None):
    """Detects areas of interest from a given image, and returns pixel and xyz
    areas along with polygon coordinates. Detection is performed from the image
    using a predefined RBG colour range. The colour range is then used to
    extract pixels within that range using the OpenCV function inRange. If a
    threshold has been set (using the setThreshold function) then only nth
    polygons will be retained. XYZ areas and polygon coordinates are only
    calculated when a set of inverse projection variables are provided.

    :param img: Image array
    :type img: arr
    :param imn: Image name
    :type imn: str
    :param colourrange: RBG colour range for areas to be detected from
    :type colourrange: list
    :param hmatrix: Homography matrix, default to None
    :type hmatrix: arr
    :param threshold: Threshold number of detected areas to retain, default to None
    :type threshold: int, optional
    :param invprojvars: Inverse projection variables [X,Y,Z,uv0], default to None
    :type invprojvars: list, optional
    :returns: Four list items containing 1) the sum of total detected areas (xyz), 2) XYZ coordinates of detected areas, 3) Sum of total detected areas (px), and 4) UV coordinates of detected areas
    :rtype: list
    """
    #Get upper and lower RBG boundaries from colour range and transform
    #to uint8 arrays for OpenCV
    upper_boundary = np.array(colourrange[0], dtype='uint8')
    lower_boundary = np.array(colourrange[1], dtype='uint8')

    #Extract extent based on RBG range
    mask = cv2.inRange(img, lower_boundary, upper_boundary)

#    #Speckle filter to remove noise - needs fixing
#    mask = cv2.filterSpeckles(mask, 1, 30, 2)

    #Polygonize extents using OpenCV findContours function. OpenCV 2.x/4.x
    #return (contours, hierarchy) while 3.x returns (image, contours,
    #hierarchy); indexing from the end is version-agnostic
    contours = cv2.findContours(mask, cv2.RETR_EXTERNAL,
                                cv2.CHAIN_APPROX_NONE)
    line = contours[-2]

    print('\nDetected ' + str(len(line)) + ' regions in ' + str(imn))

    #Retain only polygons with at least a given number of points, which
    #discards small noise detections
    rawpx = [c for c in line if len(c) >= 40]

    #If threshold has been set, only keep the nth longest polygons
    if threshold is not None:
        if len(rawpx) >= threshold:
            rawpx.sort(key=len)
            rawpx = rawpx[-(threshold):]

    print('Kept ' + str(len(rawpx)) + ' regions')

    #Calculate homography-corrected pts if desired
    if hmatrix is not None:
        print('Correcting for camera motion')
        pxpts = []
        for i in rawpx:
            corr = Velocity.apply_persp_homographyPts(i, hmatrix, inverse=True)
            pxpts.append(corr)
    else:
        pxpts = rawpx

    #Calculate pixel areas
    pxextent = []
    for p in pxpts:
        try:
            #Create geometry
            pxpoly = getOGRArea(p.squeeze())

            #Calculate area of polygon area
            pxextent.append(pxpoly.Area())

        #Record a zero area if no polygon could be built. The original
        #rebound the whole list to the int 0 here, which made the
        #sum(pxextent) calls below raise TypeError
        except Exception:
            pxextent.append(0)

    print('Total extent: ' + str(sum(pxextent)) + ' px (out of '
          + str(img.shape[0] * img.shape[1]) + ' px)')

    #Get xyz coordinates with inverse projection
    if invprojvars is not None:
        xyzpts = []
        xyzarea = []

        for i in pxpts:
            #Inverse project points
            proj = projectUV(i, invprojvars)
            xyzpts.append(proj)

            #Get areas for xyz polygons
            ogrpol = getOGRArea(proj)
            xyzarea.append(ogrpol.GetArea())

        print('Total area: ' + str(sum(xyzarea)) + ' m')

        #Return XYZ and pixel areas
        return [[xyzarea, xyzpts], [pxextent, pxpts]]

    else:
        #Return pixel areas only
        return [[None, None], [pxextent, pxpts]]
def calcManualArea(img, imn, hmatrix=None, pxplot=None, invprojvars=None):
    """Manually digitise an area of interest on an image. The image is shown
    in an interactive window and the user clicks around the target region;
    the polygon is closed automatically by repeating the first click. XYZ
    areas are additionally computed when inverse projection variables are
    supplied.
    :param img: Image array
    :type img: arr
    :param imn: Image name
    :type imn: str
    :param hmatrix: Homography matrix, default to None
    :type hmatrix: arr
    :param pxplot: Plotting extent for manual area definition, default to None
    :type pxplot: list, optional
    :param invprojvars: Inverse projection variables [X,Y,Z,uv0], default to None
    :type invprojvars: list, optional
    :returns: Four list items containing 1) the sum of total detected areas (xyz), 2) XYZ coordinates of detected areas, 3) Sum of total detected areas (px), and 4) UV coordinates of detected areas
    :rtype: list
    """
    #Show the image and collect clicks interactively
    fig = plt.gcf()
    fig.canvas.set_window_title(imn + ': Click around region. Press enter '
                                'to record points.')
    plt.imshow(img, origin='upper', cmap='gray')
    #Restrict the plotting window if an extent was given
    if pxplot is not None:
        plt.axis([pxplot[0], pxplot[1], pxplot[2], pxplot[3]])
    #Left click adds a point, right click removes one, middle click stops
    clicked = plt.ginput(n=0, timeout=0, show_clicks=True, mouse_add=1,
                          mouse_pop=3, mouse_stop=2)
    print('\n' + str(imn) + ': you clicked ' + str(len(clicked)) + ' points')
    plt.show()
    plt.close()
    #Close the polygon by appending the first point, then cast to array
    pxpts = [[[x, y]] for x, y in clicked]
    pxpts.append([[clicked[0][0], clicked[0][1]]])
    pxpts = np.asarray(pxpts)
    #Apply homography correction if a matrix was supplied
    if hmatrix is not None:
        print('Correcting for camera motion')
        pxpts = Velocity.apply_persp_homographyPts(pxpts, hmatrix,
                                                    inverse=True)
    #Compute the pixel area; fall back to zero if no polygon was recorded
    try:
        pxpoly = getOGRArea(pxpts.squeeze())
        pxextent = pxpoly.Area()
    except:
        pxextent = 0
    print('Total extent: ' + str(pxextent) + ' px (out of ' +
          str(img.shape[0]*img.shape[1]) + ' px)')
    #Collapse the points to a plain (N, 2) array
    pxpts = np.array(pxpts)
    pxpts = np.squeeze(pxpts)
    if invprojvars is not None:
        #Inverse-project to xyz and compute the real-world polygon area
        xyzpts = projectUV(pxpts, invprojvars)
        xyzarea = getOGRArea(xyzpts).GetArea()
        print('Total area: ' + str(xyzarea) + ' m')
        #Return XYZ and pixel areas
        return [[[xyzarea], [xyzpts]], [[pxextent], [pxpts]]]
    else:
        #Return pixel areas only
        return [[None, None], [pxextent, pxpts]]
def defineColourrange(img, imn, pxplot=None):
    """Interactively pick the RBG colour range for area detection. The user
    clicks the lightest and darkest pixels of the target region; the two
    sampled intensities are ordered and returned as the detection bounds.
    Left click selects, right click undoes, and the window times out after
    two clicks.
    :param img: Image array
    :type img: arr
    :param imn: Image name
    :type imn: str
    :param pxplot: Plotting extent for manual area definition, default to None
    :type pxplot: list, optional
    :returns: List containing the upper and lower boundary for pixel detection
    :rtype: list
    """
    #Show the image for interactive picking
    fig = plt.gcf()
    fig.canvas.set_window_title(imn + ': Click lightest colour and darkest'
                                ' colour')
    plt.imshow(img, origin='upper')
    #Restrict the plotting window if an extent was given
    if pxplot is not None:
        plt.axis([pxplot[0], pxplot[1], pxplot[2], pxplot[3]])
    #Collect exactly two clicks
    colours = plt.ginput(n=2, timeout=0, show_clicks=True, mouse_add=1,
                          mouse_pop=3, mouse_stop=2)
    print('\n' + str(imn) + ': you clicked ' + str(colours))
    plt.show()
    plt.close()
    #Sample the two clicked pixels (note row/col indexing swaps x and y),
    #replacing zero intensities with one
    picks = []
    for pt in colours[:2]:
        value = img[int(pt[1]), int(pt[0])]
        if value == 0:
            value = 1
        picks.append(value)
    #Order the two samples into upper and lower detection bounds
    if picks[0] > picks[1]:
        upper_boundary, lower_boundary = picks[0], picks[1]
    else:
        upper_boundary, lower_boundary = picks[1], picks[0]
    print('\nColour range found from manual selection')
    print('Upper RBG boundary: ' + str(upper_boundary))
    print('Lower RBG boundary: ' + str(lower_boundary))
    #Return RBG range
    return [upper_boundary, lower_boundary]
def getOGRArea(pts):
    """Build an OGR polygon geometry from uv/xyz polygon points so the shape
    is compatible with mapping software (e.g. ArcGIS).
    :param pts: UV/XYZ coordinates of a given area shape
    :type pts: arr
    :returns: OGR polygon geometry
    :rtype: ogr.Geometry
    """
    #Assemble the outer ring, skipping points with a NaN first coordinate
    ring = ogr.Geometry(ogr.wkbLinearRing)
    for pt in pts:
        if np.isnan(pt[0]):
            continue
        #2D points are cast to int; 3D points are added as-is
        if len(pt) == 2:
            ring.AddPoint(int(pt[0]), int(pt[1]))
        else:
            ring.AddPoint(pt[0], pt[1], pt[2])
    #Wrap the ring in a polygon geometry
    poly = ogr.Geometry(ogr.wkbPolygon)
    poly.AddGeometry(ring)
    return poly
| [
"PIL.Image.new",
"ogr.Geometry",
"numpy.invert",
"numpy.array",
"Images.enhanceImage",
"Images.ImageSequence.__init__",
"matplotlib.lines.Line2D",
"matplotlib.pyplot.imshow",
"numpy.where",
"FileHandler.readMask",
"numpy.asarray",
"matplotlib.pyplot.close",
"numpy.take",
"matplotlib.pyplot... | [((20748, 20787), 'numpy.array', 'np.array', (['upper_boundary'], {'dtype': '"""uint8"""'}), "(upper_boundary, dtype='uint8')\n", (20756, 20787), True, 'import numpy as np\n'), ((20809, 20848), 'numpy.array', 'np.array', (['lower_boundary'], {'dtype': '"""uint8"""'}), "(lower_boundary, dtype='uint8')\n", (20817, 20848), True, 'import numpy as np\n'), ((20900, 20948), 'cv2.inRange', 'cv2.inRange', (['img', 'lower_boundary', 'upper_boundary'], {}), '(img, lower_boundary, upper_boundary)\n', (20911, 20948), False, 'import cv2\n'), ((21142, 21206), 'cv2.findContours', 'cv2.findContours', (['mask', 'cv2.RETR_EXTERNAL', 'cv2.CHAIN_APPROX_NONE'], {}), '(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)\n', (21158, 21206), False, 'import cv2\n'), ((24354, 24363), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (24361, 24363), True, 'import matplotlib.pyplot as plt\n'), ((24497, 24541), 'matplotlib.pyplot.imshow', 'plt.imshow', (['img'], {'origin': '"""upper"""', 'cmap': '"""gray"""'}), "(img, origin='upper', cmap='gray')\n", (24507, 24541), True, 'import matplotlib.pyplot as plt\n'), ((24778, 24866), 'matplotlib.pyplot.ginput', 'plt.ginput', ([], {'n': '(0)', 'timeout': '(0)', 'show_clicks': '(True)', 'mouse_add': '(1)', 'mouse_pop': '(3)', 'mouse_stop': '(2)'}), '(n=0, timeout=0, show_clicks=True, mouse_add=1, mouse_pop=3,\n mouse_stop=2)\n', (24788, 24866), True, 'import matplotlib.pyplot as plt\n'), ((24987, 24997), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (24995, 24997), True, 'import matplotlib.pyplot as plt\n'), ((25002, 25013), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (25011, 25013), True, 'import matplotlib.pyplot as plt\n'), ((25174, 25191), 'numpy.asarray', 'np.asarray', (['pxpts'], {}), '(pxpts)\n', (25184, 25191), True, 'import numpy as np\n'), ((25942, 25957), 'numpy.array', 'np.array', (['pxpts'], {}), '(pxpts)\n', (25950, 25957), True, 'import numpy as np\n'), ((25981, 25998), 'numpy.squeeze', 
'np.squeeze', (['pxpts'], {}), '(pxpts)\n', (25991, 25998), True, 'import numpy as np\n'), ((27259, 27268), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (27266, 27268), True, 'import matplotlib.pyplot as plt\n'), ((27414, 27445), 'matplotlib.pyplot.imshow', 'plt.imshow', (['img'], {'origin': '"""upper"""'}), "(img, origin='upper')\n", (27424, 27445), True, 'import matplotlib.pyplot as plt\n'), ((27678, 27766), 'matplotlib.pyplot.ginput', 'plt.ginput', ([], {'n': '(2)', 'timeout': '(0)', 'show_clicks': '(True)', 'mouse_add': '(1)', 'mouse_pop': '(3)', 'mouse_stop': '(2)'}), '(n=2, timeout=0, show_clicks=True, mouse_add=1, mouse_pop=3,\n mouse_stop=2)\n', (27688, 27766), True, 'import matplotlib.pyplot as plt\n'), ((27878, 27888), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (27886, 27888), True, 'import matplotlib.pyplot as plt\n'), ((27893, 27904), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (27902, 27904), True, 'import matplotlib.pyplot as plt\n'), ((29148, 29179), 'ogr.Geometry', 'ogr.Geometry', (['ogr.wkbLinearRing'], {}), '(ogr.wkbLinearRing)\n', (29160, 29179), False, 'import ogr\n'), ((29404, 29432), 'ogr.Geometry', 'ogr.Geometry', (['ogr.wkbPolygon'], {}), '(ogr.wkbPolygon)\n', (29416, 29432), False, 'import ogr\n'), ((2403, 2455), 'Images.ImageSequence.__init__', 'ImageSequence.__init__', (['self', 'imageList', 'band', 'equal'], {}), '(self, imageList, band, equal)\n', (2425, 2455), False, 'from Images import ImageSequence, enhanceImage\n'), ((3732, 3924), 'CamEnv.setProjection', 'setProjection', (['dem', 'self._camEnv._camloc', 'self._camEnv._camDirection', 'self._camEnv._radCorr', 'self._camEnv._tanCorr', 'self._camEnv._focLen', 'self._camEnv._camCen', 'self._camEnv._refImage'], {}), '(dem, self._camEnv._camloc, self._camEnv._camDirection, self.\n _camEnv._radCorr, self._camEnv._tanCorr, self._camEnv._focLen, self.\n _camEnv._camCen, self._camEnv._refImage)\n', (3745, 3924), False, 'from CamEnv import projectUV, 
setProjection\n'), ((8875, 9067), 'CamEnv.setProjection', 'setProjection', (['dem', 'self._camEnv._camloc', 'self._camEnv._camDirection', 'self._camEnv._radCorr', 'self._camEnv._tanCorr', 'self._camEnv._focLen', 'self._camEnv._camCen', 'self._camEnv._refImage'], {}), '(dem, self._camEnv._camloc, self._camEnv._camDirection, self.\n _camEnv._radCorr, self._camEnv._tanCorr, self._camEnv._focLen, self.\n _camEnv._camCen, self._camEnv._refImage)\n', (8888, 9067), False, 'from CamEnv import projectUV, setProjection\n'), ((16555, 16582), 'FileHandler.readMask', 'readMask', (['maxi', 'maxMaskPath'], {}), '(maxi, maxMaskPath)\n', (16563, 16582), False, 'from FileHandler import readMask\n'), ((24619, 24673), 'matplotlib.pyplot.axis', 'plt.axis', (['[pxplot[0], pxplot[1], pxplot[2], pxplot[3]]'], {}), '([pxplot[0], pxplot[1], pxplot[2], pxplot[3]])\n', (24627, 24673), True, 'import matplotlib.pyplot as plt\n'), ((25338, 25402), 'Velocity.apply_persp_homographyPts', 'Velocity.apply_persp_homographyPts', (['pxpts', 'hmatrix'], {'inverse': '(True)'}), '(pxpts, hmatrix, inverse=True)\n', (25372, 25402), False, 'import Velocity\n'), ((26105, 26134), 'CamEnv.projectUV', 'projectUV', (['pxpts', 'invprojvars'], {}), '(pxpts, invprojvars)\n', (26114, 26134), False, 'from CamEnv import projectUV, setProjection\n'), ((27526, 27580), 'matplotlib.pyplot.axis', 'plt.axis', (['[pxplot[0], pxplot[1], pxplot[2], pxplot[3]]'], {}), '([pxplot[0], pxplot[1], pxplot[2], pxplot[3]])\n', (27534, 27580), True, 'import matplotlib.pyplot as plt\n'), ((6615, 6628), 'numpy.copy', 'np.copy', (['img1'], {}), '(img1)\n', (6622, 6628), True, 'import numpy as np\n'), ((11762, 11775), 'numpy.copy', 'np.copy', (['img1'], {}), '(img1)\n', (11769, 11775), True, 'import numpy as np\n'), ((14033, 14043), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (14041, 14043), True, 'import matplotlib.pyplot as plt\n'), ((14057, 14068), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (14066, 14068), True, 
'import matplotlib.pyplot as plt\n'), ((14314, 14345), 'PIL.Image.new', 'Image.new', (['"""L"""', '(w, h)', '"""black"""'], {}), "('L', (w, h), 'black')\n", (14323, 14345), False, 'from PIL import Image\n'), ((14365, 14380), 'numpy.array', 'np.array', (['px_im'], {}), '(px_im)\n', (14373, 14380), True, 'import numpy as np\n'), ((14394, 14446), 'cv2.drawContours', 'cv2.drawContours', (['px_im', 'vpx', '(-1)', '(255, 255, 255)', '(4)'], {}), '(px_im, vpx, -1, (255, 255, 255), 4)\n', (14410, 14446), False, 'import cv2\n'), ((14569, 14591), 'PIL.Image.fromarray', 'Image.fromarray', (['px_im'], {}), '(px_im)\n', (14584, 14591), False, 'from PIL import Image\n'), ((21970, 22030), 'Velocity.apply_persp_homographyPts', 'Velocity.apply_persp_homographyPts', (['i', 'hmatrix'], {'inverse': '(True)'}), '(i, hmatrix, inverse=True)\n', (22004, 22030), False, 'import Velocity\n'), ((22854, 22879), 'CamEnv.projectUV', 'projectUV', (['i', 'invprojvars'], {}), '(i, invprojvars)\n', (22863, 22879), False, 'from CamEnv import projectUV, setProjection\n'), ((29209, 29223), 'numpy.isnan', 'np.isnan', (['p[0]'], {}), '(p[0])\n', (29217, 29223), True, 'import numpy as np\n'), ((6754, 6786), 'numpy.array', 'np.array', (['self._mask'], {'dtype': 'bool'}), '(self._mask, dtype=bool)\n', (6762, 6786), True, 'import numpy as np\n'), ((6817, 6839), 'numpy.invert', 'np.invert', (['booleanMask'], {}), '(booleanMask)\n', (6826, 6839), True, 'import numpy as np\n'), ((6927, 6957), 'numpy.where', 'np.where', (['booleanMask', '(0)', 'img2'], {}), '(booleanMask, 0, img2)\n', (6935, 6957), True, 'import numpy as np\n'), ((7202, 7274), 'Images.enhanceImage', 'enhanceImage', (['img2', 'self._enhance[0]', 'self._enhance[1]', 'self._enhance[2]'], {}), '(img2, self._enhance[0], self._enhance[1], self._enhance[2])\n', (7214, 7274), False, 'from Images import ImageSequence, enhanceImage\n'), ((13046, 13060), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (13058, 13060), True, 'import 
matplotlib.pyplot as plt\n'), ((14487, 14538), 'cv2.fillConvexPoly', 'cv2.fillConvexPoly', (['px_im', 'p'], {'color': '(255, 255, 255)'}), '(px_im, p, color=(255, 255, 255))\n', (14505, 14538), False, 'import cv2\n'), ((5102, 5134), 'numpy.array', 'np.array', (['self._mask'], {'dtype': 'bool'}), '(self._mask, dtype=bool)\n', (5110, 5134), True, 'import numpy as np\n'), ((5169, 5191), 'numpy.invert', 'np.invert', (['booleanMask'], {}), '(booleanMask)\n', (5178, 5191), True, 'import numpy as np\n'), ((5291, 5324), 'numpy.where', 'np.where', (['booleanMask', '(0)', 'setting'], {}), '(booleanMask, 0, setting)\n', (5299, 5324), True, 'import numpy as np\n'), ((5603, 5678), 'Images.enhanceImage', 'enhanceImage', (['setting', 'self._enhance[0]', 'self._enhance[1]', 'self._enhance[2]'], {}), '(setting, self._enhance[0], self._enhance[1], self._enhance[2])\n', (5615, 5678), False, 'from Images import ImageSequence, enhanceImage\n'), ((13762, 13813), 'matplotlib.lines.Line2D', 'Line2D', (['x', 'y'], {'linestyle': '"""-"""', 'color': '"""y"""', 'picker': '(True)'}), "(x, y, linestyle='-', color='y', picker=True)\n", (13768, 13813), False, 'from matplotlib.lines import Line2D\n'), ((15107, 15132), 'CamEnv.projectUV', 'projectUV', (['i', 'invprojvars'], {}), '(i, invprojvars)\n', (15116, 15132), False, 'from CamEnv import projectUV, setProjection\n'), ((12554, 12581), 'numpy.array', 'np.array', (['v'], {'dtype': 'np.int32'}), '(v, dtype=np.int32)\n', (12562, 12581), True, 'import numpy as np\n'), ((12949, 12968), 'numpy.take', 'np.take', (['ydata', 'ind'], {}), '(ydata, ind)\n', (12956, 12968), True, 'import numpy as np\n'), ((12884, 12903), 'numpy.take', 'np.take', (['xdata', 'ind'], {}), '(xdata, ind)\n', (12891, 12903), True, 'import numpy as np\n')] |
# Chapter 5 analysis: aggregate m5C methylation levels per condition, filter
# sites, cluster them, and plot heatmaps. Notebook-style script executed top
# to bottom ("# %%" marks cells).
import os
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
from matplotlib import rcParams
import pandas as pd
from scipy.spatial import distance
from scipy.cluster import hierarchy
from sklearn.cluster import AgglomerativeClustering
# All input/output paths below are relative to this working directory
os.chdir('Chapter_5')
# %%
# import all sites
conditions = ["G", "_MF", "pMF"] # ,"SRR"]
total_file = "allm5C_libraries_filteredDepthAnno.csv"
total_df = pd.read_csv(total_file, low_memory=False) # file
total_df = total_df.sort_values(by=['position']) # sort
# %%
names = ['G1', 'G2', 'G3', 'G4', 'MF_rep1', 'MF_rep2', 'pMF_rep1', 'pMF_rep2', 'rep1',
         'rep2', 'rep3', 'rep4']
# Aggregate methylation level for each condition
total_df.index = total_df['group']
cov_df = total_df.filter(regex='cov')
count_df = total_df.filter(regex='count')
cov_dict = {}
count_dict = {}
# Sum coverage and C counts across the replicates of each condition
for name in conditions:
    cov_dict[name] = cov_df.filter(regex=name).sum(axis=1)
    count_dict[name] = count_df.filter(regex=name).sum(axis=1)
# Methylation level = C count / coverage (0 where coverage is 0)
ML_dict = {}
for i, j in cov_dict.items():
    ML_dict[i] = count_dict[i].divide(j, fill_value=0)
result_df = pd.DataFrame(ML_dict)
# result_df.dropna(axis=0, inplace=True, subset=['SRR','_MF','pMF'])
# result_df.replace(np.nan, 0, inplace=True)
# result_df.replace(0, np.nan, inplace=True)
# Keep sites with methylation level > 0.1 in at least one condition
result_df = result_df[(result_df['G'] > 0.1) | (result_df['_MF'] > 0.1) |
                      (result_df['pMF'] > 0.1)]  # | (result_df['SRR'] > 0.1)]
result_df.dropna(axis=0, inplace=True)
test = total_df[total_df['group'].isin(result_df.index)]
# test.to_csv("AllConditionOverlap_methylationLevel.csv")
# %%
result_df_ML = total_df.filter(regex="methRate")
result_df_ML.replace(np.nan, 0, inplace=True)
cov_df.columns = names
count_df.columns = names
# %%
from matplotlib.colors import LinearSegmentedColormap
# Build a custom colour map with fixed breakpoints for methylation levels
boundaries = [0.0, 0.05, 0.1, 0.2, 0.4, 0.6, 1.0]
hex_colors = sns.color_palette("RdYlBu_r", n_colors=len(boundaries) * 2).as_hex()
hex_colors = [hex_colors[i] for i in range(0, len(hex_colors), 2)]
colors = list(zip(boundaries, hex_colors))
custom_color_map = LinearSegmentedColormap.from_list(
    name="cus",
    colors=colors,
)
# %%
# Define clusters
correlations_array = np.asarray(result_df)
# Hierarchical (Ward) linkages over sites (rows) and conditions (columns)
row_linkage = hierarchy.linkage(
    distance.pdist(correlations_array), method='ward')
col_linkage = hierarchy.linkage(
    distance.pdist(correlations_array.T), method='ward')
model = AgglomerativeClustering(n_clusters=8, affinity='euclidean', linkage='ward')
model = model.fit_predict(correlations_array)
# %%
# Map each of the 8 cluster labels to a colour for the heatmap row strip
lut = dict(zip(set(model), ['red', 'blue', 'green', 'orange', 'purple', 'pink', 'black', 'grey']))
row_colors = pd.DataFrame(model)[0].map(lut)
cg = sns.clustermap(result_df.reset_index(drop=True), row_linkage=row_linkage, col_linkage=col_linkage,
               cmap=custom_color_map,
               row_colors=row_colors, figsize=(5, 5), yticklabels=False, col_cluster=False,
               robust=True, method='ward') # , row_cluster=False) # z_score=0,
cg.ax_row_dendrogram.set_visible(False)
# plt.savefig("ML_conditions_clusteringHeatmapDepth.png", bbox_inches='tight', dpi=400, transparent=True)
plt.show()
plt.close()
# %%
# Attach cluster labels back to the sites and export genes of cluster 5
merge_df = result_df
merge_df['cluster'] = model
merge_df['group'] = result_df.index
# NOTE(review): reset_index(drop=True) is not in-place and its result is
# discarded on the next line -- confirm whether this was intended
merge_df.reset_index(drop=True)
cluster_df = pd.merge(merge_df.rename_axis(None), total_df.rename_axis(None), on='group')
cluster_gene_list = (cluster_df['gene_name'][cluster_df['cluster'] == 5]).unique()
cluster_file = open("Total_cluster_genes.txt", "w")
for i in cluster_gene_list:
    cluster_file.write(i + '\n')
cluster_file.close()
# %%
from scipy.stats import zscore
# write correlation matrix (z-score)
zscore_vals = result_df.apply(zscore, axis=1)
# %%
from scipy import stats
# BH t-test
def BH_test(set1, set2):
    """Per-site Fisher's exact tests between two samples with
    Benjamini-Hochberg FDR correction.

    Reads the dotplot table for the sample pair (produced by
    04a_OverlapDotplot.R), runs a Fisher's exact test on each site's
    count/coverage contingency table, adjusts the p-values with the BH
    procedure, and flags sites with adjusted p <= 0.01 as 'sig'.

    :param set1: Name of the first sample (e.g. 'G3')
    :param set2: Name of the second sample (e.g. 'G4')
    :returns: Table sorted by raw p-value with added columns 'pval'
        (raw Fisher p), 'BH' (BH-adjusted p) and 'shape' ('sig'/'non-sig')
    :rtype: pd.DataFrame
    """
    # Subset tests by relevant sites identified by 04a_OverlapDotplot.R
    master_set = pd.read_csv('Dotplot_' + set1 + set2 + '_table.csv')
    master_set = master_set.dropna(subset=['ML_1', 'ML_2']).reset_index()
    count_set = {set1: master_set['C_count_' + set1], set2: master_set['C_count_' + set2]}
    cov_set = {set1: master_set['cov_' + set1], set2: master_set['cov_' + set2]}
    # BUGFIX: the original wrapped a bare comparison in try/except, which
    # can never raise; check the lengths explicitly instead
    if len(count_set[set1]) != len(cov_set[set1]):
        print('data is not same size')
    # One Fisher's exact test per site on the 2x2 count/coverage table
    pvals = []
    for i in range(len(count_set[set1])):
        cont_table = pd.DataFrame({set1: [count_set[set1][i], cov_set[set1][i]],
                                   set2: [count_set[set2][i], cov_set[set2][i]]})
        odds, pvalue = stats.fisher_exact(cont_table)
        pvals.append(pvalue)
    master_set['pval'] = pvals
    # Sort ascending by p so rows align with the BH ranks below
    master_set = master_set.sort_values('pval', ascending=True)
    # BH adjustment: p * m / rank on the ascending p-values, then enforce
    # monotonicity (cumulative minimum from the largest rank) and cap at 1
    # (BUGFIX: the original skipped both steps, so adjusted values could
    # decrease with rank and exceed 1)
    m = len(pvals)
    p_adj = [p * m / rank for rank, p in
             enumerate(sorted(pvals, key=float), start=1)]
    for idx in range(m - 2, -1, -1):
        p_adj[idx] = min(p_adj[idx], p_adj[idx + 1])
    p_adj = [min(p, 1.0) for p in p_adj]
    master_set['BH'] = p_adj
    master_set['shape'] = np.where(master_set['BH'] <= 0.01, 'sig', 'non-sig')
    return master_set
# Run the differential methylation test for the G3 vs G4 pair and plot the
# per-site methylation levels, marking BH-significant sites with an 'X'
test_BH = pd.DataFrame(BH_test('G3', 'G4'))
# %%
rcParams['figure.figsize'] = 3, 3
markers = {"sig": "X", "non-sig": "o"}
palette = ['blue']
# ax = sns.scatterplot(data=test_BH[test_BH['BH'] > 0.01], x='ML_1', y='ML_2', style = 'shape',
#                 markers=markers, s=25)
sns.scatterplot(data=test_BH, x='ML_1', y='ML_2', style='shape', hue='shape', palette = palette,
                markers=markers, s=25)
plt.xlim(0, 1)
plt.ylim(0, 1)
plt.legend([], frameon=False)
plt.savefig("G3G4_DMS.png",bbox_inches='tight', dpi=400, transparent=True)
plt.show()
# %%
# Correlation matix of samples
from scipy.spatial import distance
from scipy.cluster import hierarchy
correlations = result_df.corr()
correlations_array = np.asarray(result_df.corr())
# NOTE(review): row_linkage/col_linkage appear swapped in the clustermap call
# below (row_linkage=col_linkage and vice versa); harmless here since the
# correlation matrix is square and symmetric, but confirm the intent
row_linkage = hierarchy.linkage(
    distance.pdist(correlations_array), method='average')
col_linkage = hierarchy.linkage(
    distance.pdist(correlations_array.T), method='average')
sns.clustermap(correlations, row_linkage=col_linkage, col_linkage=row_linkage, method="average",
               figsize=(5, 10))
plt.show()
# %%
from matplotlib.colors import LinearSegmentedColormap
# Rebuild the fixed-breakpoint colour map (RdBu variant) for the final heatmap
boundaries = [0.0, 0.05, 0.1, 0.2, 0.4, 0.6, 1.0]
hex_colors = sns.color_palette("RdBu_r", n_colors=len(boundaries) * 2 + 2).as_hex()
hex_colors = [hex_colors[i] for i in range(0, len(hex_colors), 2)]
colors = list(zip(boundaries, hex_colors))
custom_color_map = LinearSegmentedColormap.from_list(
    name="cus",
    colors=colors,
)
cg = sns.clustermap(result_df, annot=False, cmap=custom_color_map, dendrogram_ratio=(.1, .2),
               figsize=(5, 5), yticklabels=False) # z_score=0,
cg.ax_row_dendrogram.set_visible(False)
plt.savefig("ML_conditions_clusteringHeatmapCcutoffDepth_noSRR.png", bbox_inches='tight', dpi=400, transparent=True)
plt.show()
plt.close()
| [
"sklearn.cluster.AgglomerativeClustering",
"matplotlib.pyplot.savefig",
"pandas.read_csv",
"numpy.where",
"seaborn.clustermap",
"scipy.spatial.distance.pdist",
"scipy.stats.fisher_exact",
"numpy.asarray",
"os.chdir",
"matplotlib.pyplot.close",
"seaborn.scatterplot",
"pandas.DataFrame",
"matp... | [((259, 280), 'os.chdir', 'os.chdir', (['"""Chapter_5"""'], {}), "('Chapter_5')\n", (267, 280), False, 'import os\n'), ((416, 457), 'pandas.read_csv', 'pd.read_csv', (['total_file'], {'low_memory': '(False)'}), '(total_file, low_memory=False)\n', (427, 457), True, 'import pandas as pd\n'), ((1102, 1123), 'pandas.DataFrame', 'pd.DataFrame', (['ML_dict'], {}), '(ML_dict)\n', (1114, 1123), True, 'import pandas as pd\n'), ((2066, 2126), 'matplotlib.colors.LinearSegmentedColormap.from_list', 'LinearSegmentedColormap.from_list', ([], {'name': '"""cus"""', 'colors': 'colors'}), "(name='cus', colors=colors)\n", (2099, 2126), False, 'from matplotlib.colors import LinearSegmentedColormap\n'), ((2182, 2203), 'numpy.asarray', 'np.asarray', (['result_df'], {}), '(result_df)\n', (2192, 2203), True, 'import numpy as np\n'), ((2393, 2468), 'sklearn.cluster.AgglomerativeClustering', 'AgglomerativeClustering', ([], {'n_clusters': '(8)', 'affinity': '"""euclidean"""', 'linkage': '"""ward"""'}), "(n_clusters=8, affinity='euclidean', linkage='ward')\n", (2416, 2468), False, 'from sklearn.cluster import AgglomerativeClustering\n'), ((3142, 3152), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3150, 3152), True, 'import matplotlib.pyplot as plt\n'), ((3154, 3165), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (3163, 3165), True, 'import matplotlib.pyplot as plt\n'), ((5319, 5441), 'seaborn.scatterplot', 'sns.scatterplot', ([], {'data': 'test_BH', 'x': '"""ML_1"""', 'y': '"""ML_2"""', 'style': '"""shape"""', 'hue': '"""shape"""', 'palette': 'palette', 'markers': 'markers', 's': '(25)'}), "(data=test_BH, x='ML_1', y='ML_2', style='shape', hue=\n 'shape', palette=palette, markers=markers, s=25)\n", (5334, 5441), True, 'import seaborn as sns\n'), ((5455, 5469), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(0)', '(1)'], {}), '(0, 1)\n', (5463, 5469), True, 'import matplotlib.pyplot as plt\n'), ((5470, 5484), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(0)', '(1)'], 
{}), '(0, 1)\n', (5478, 5484), True, 'import matplotlib.pyplot as plt\n'), ((5485, 5514), 'matplotlib.pyplot.legend', 'plt.legend', (['[]'], {'frameon': '(False)'}), '([], frameon=False)\n', (5495, 5514), True, 'import matplotlib.pyplot as plt\n'), ((5515, 5590), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""G3G4_DMS.png"""'], {'bbox_inches': '"""tight"""', 'dpi': '(400)', 'transparent': '(True)'}), "('G3G4_DMS.png', bbox_inches='tight', dpi=400, transparent=True)\n", (5526, 5590), True, 'import matplotlib.pyplot as plt\n'), ((5590, 5600), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5598, 5600), True, 'import matplotlib.pyplot as plt\n'), ((5980, 6098), 'seaborn.clustermap', 'sns.clustermap', (['correlations'], {'row_linkage': 'col_linkage', 'col_linkage': 'row_linkage', 'method': '"""average"""', 'figsize': '(5, 10)'}), "(correlations, row_linkage=col_linkage, col_linkage=\n row_linkage, method='average', figsize=(5, 10))\n", (5994, 6098), True, 'import seaborn as sns\n'), ((6109, 6119), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (6117, 6119), True, 'import matplotlib.pyplot as plt\n'), ((6446, 6506), 'matplotlib.colors.LinearSegmentedColormap.from_list', 'LinearSegmentedColormap.from_list', ([], {'name': '"""cus"""', 'colors': 'colors'}), "(name='cus', colors=colors)\n", (6479, 6506), False, 'from matplotlib.colors import LinearSegmentedColormap\n'), ((6524, 6653), 'seaborn.clustermap', 'sns.clustermap', (['result_df'], {'annot': '(False)', 'cmap': 'custom_color_map', 'dendrogram_ratio': '(0.1, 0.2)', 'figsize': '(5, 5)', 'yticklabels': '(False)'}), '(result_df, annot=False, cmap=custom_color_map,\n dendrogram_ratio=(0.1, 0.2), figsize=(5, 5), yticklabels=False)\n', (6538, 6653), True, 'import seaborn as sns\n'), ((6722, 6842), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""ML_conditions_clusteringHeatmapCcutoffDepth_noSRR.png"""'], {'bbox_inches': '"""tight"""', 'dpi': '(400)', 'transparent': '(True)'}), 
"('ML_conditions_clusteringHeatmapCcutoffDepth_noSRR.png',\n bbox_inches='tight', dpi=400, transparent=True)\n", (6733, 6842), True, 'import matplotlib.pyplot as plt\n'), ((6839, 6849), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (6847, 6849), True, 'import matplotlib.pyplot as plt\n'), ((6851, 6862), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (6860, 6862), True, 'import matplotlib.pyplot as plt\n'), ((2242, 2276), 'scipy.spatial.distance.pdist', 'distance.pdist', (['correlations_array'], {}), '(correlations_array)\n', (2256, 2276), False, 'from scipy.spatial import distance\n'), ((2331, 2367), 'scipy.spatial.distance.pdist', 'distance.pdist', (['correlations_array.T'], {}), '(correlations_array.T)\n', (2345, 2367), False, 'from scipy.spatial import distance\n'), ((3878, 3930), 'pandas.read_csv', 'pd.read_csv', (["('Dotplot_' + set1 + set2 + '_table.csv')"], {}), "('Dotplot_' + set1 + set2 + '_table.csv')\n", (3889, 3930), True, 'import pandas as pd\n'), ((4959, 5011), 'numpy.where', 'np.where', (["(master_set['BH'] <= 0.01)", '"""sig"""', '"""non-sig"""'], {}), "(master_set['BH'] <= 0.01, 'sig', 'non-sig')\n", (4967, 5011), True, 'import numpy as np\n'), ((5831, 5865), 'scipy.spatial.distance.pdist', 'distance.pdist', (['correlations_array'], {}), '(correlations_array)\n', (5845, 5865), False, 'from scipy.spatial import distance\n'), ((5923, 5959), 'scipy.spatial.distance.pdist', 'distance.pdist', (['correlations_array.T'], {}), '(correlations_array.T)\n', (5937, 5959), False, 'from scipy.spatial import distance\n'), ((4382, 4493), 'pandas.DataFrame', 'pd.DataFrame', (['{set1: [count_set[set1][i], cov_set[set1][i]], set2: [count_set[set2][i],\n cov_set[set2][i]]}'], {}), '({set1: [count_set[set1][i], cov_set[set1][i]], set2: [\n count_set[set2][i], cov_set[set2][i]]})\n', (4394, 4493), True, 'import pandas as pd\n'), ((4548, 4578), 'scipy.stats.fisher_exact', 'stats.fisher_exact', (['cont_table'], {}), '(cont_table)\n', (4566, 4578), 
False, 'from scipy import stats\n'), ((2633, 2652), 'pandas.DataFrame', 'pd.DataFrame', (['model'], {}), '(model)\n', (2645, 2652), True, 'import pandas as pd\n')] |
"""
Code to cut SMPL into near symmetric parts.
Author: Bharat
Cite: Combining Implicit Function Learning and Parametric Models for 3D Human Reconstruction, ECCV 2020.
"""
import numpy as np
from psbody.mesh import Mesh
import sys
sys.path.append('..')
import pickle as pkl
from lib.smplx.body_models import SMPLX
def get_tpose_smplx():
    """Instantiate a gender-neutral SMPL-X model with default (zero) pose
    parameters and return its forward-pass output (T-pose body).
    NOTE(review): the model path is hard-coded to a local directory --
    confirm it is valid on the target machine.
    """
    model = SMPLX(model_path="/home/chen/SMPLX/models/smplx", batch_size=1,
                  gender='neutral')
    return model()
def _cut_weights(selector, label, display=False):
    """Shared helper for the cut_* functions below.

    Builds a per-vertex 0/1 weight array over the T-posed SMPL-X template,
    selecting the vertices that belong to one body part.

    :param selector: callable mapping the (V, 3) vertex array to a boolean
        index of the vertices belonging to the part
    :param label: part name printed in the summary line
    :param display: if True, colour the selection on the mesh and show it
    :return: (V,) array with 1 for selected vertices, 0 elsewhere
    """
    smplx = get_tpose_smplx()
    verts = smplx.vertices.detach().cpu().numpy().squeeze()
    faces = smplx.faces
    col = np.zeros(verts.shape[0])
    col[selector(verts)] = 1
    if display:
        # Only build the viewer mesh when it is actually shown
        # (the original constructed it unconditionally)
        ms = Mesh(v=verts, f=faces)
        ms.set_vertex_colors_from_weights(col)
        ms.show()
    print(label, np.where(col)[0].shape)
    return col


def cut_right_forearm(display=False):
    """Select right forearm/hand vertices (x < -0.6 in T-pose)."""
    return _cut_weights(lambda v: v[:, 0] < -0.6, 'right_forearm ', display)


def cut_left_forearm(display=False):
    """Select left forearm/hand vertices (x > 0.6)."""
    return _cut_weights(lambda v: v[:, 0] > 0.6, 'left_forearm ', display)


def cut_right_midarm(display=False):
    """Select right mid-arm vertices (-0.6 <= x < -0.4)."""
    return _cut_weights(lambda v: (v[:, 0] >= -0.6) & (v[:, 0] < -0.4),
                        'right_midarm ', display)


def cut_right_upperarm(display=False):
    """Select right upper-arm vertices (-0.4 <= x < -0.2)."""
    return _cut_weights(lambda v: (v[:, 0] >= -0.4) & (v[:, 0] < -0.2),
                        'right_upperarm ', display)


def cut_left_midarm(display=False):
    """Select left mid-arm vertices (0.4 < x <= 0.6)."""
    return _cut_weights(lambda v: (v[:, 0] <= 0.6) & (v[:, 0] > 0.4),
                        'left_midarm ', display)


def cut_left_upperarm(display=False):
    """Select left upper-arm vertices (0.2 < x <= 0.4)."""
    return _cut_weights(lambda v: (v[:, 0] <= 0.4) & (v[:, 0] > 0.2),
                        'left_upperarm ', display)


def cut_head(display=False):
    """Select head vertices (y > 0.16)."""
    return _cut_weights(lambda v: v[:, 1] > 0.16, 'head ', display)


def cut_upper_right_leg(display=False):
    """Select right thigh vertices (-0.84 <= y < -0.44, x < 0)."""
    return _cut_weights(
        lambda v: (v[:, 1] < -0.44) & (v[:, 0] < 0) & (v[:, 1] >= -0.84),
        'upper_right_leg ', display)


def cut_right_leg(display=False):
    """Select right lower-leg vertices (-1.14 < y < -0.84, x < 0)."""
    return _cut_weights(
        lambda v: (v[:, 1] < -0.84) & (v[:, 0] < 0) & (v[:, 1] > -1.14),
        'right_leg ', display)


def cut_right_foot(display=False):
    """Select right foot vertices (y < -1.14, x < 0)."""
    # BUGFIX: the original printed the label 'left_foot ' for this part
    return _cut_weights(lambda v: (v[:, 1] < -1.14) & (v[:, 0] < 0),
                        'right_foot ', display)
def cut_upper_left_leg(display=False):
smplx = get_tpose_smplx()
verts = smplx.vertices.detach().cpu().numpy().squeeze()
faces = smplx.faces
ms = Mesh(v=verts, f=faces)
col = np.zeros(verts.shape[0])
col[(verts[:, 1] < -0.44) & (verts[:, 0] >= 0) & (verts[:, 1] >= -0.84)] = 1
if display:
ms.set_vertex_colors_from_weights(col)
ms.show()
print('upper_left_leg ', np.where(col)[0].shape)
return col
def cut_left_leg(display=False):
smplx = get_tpose_smplx()
verts = smplx.vertices.detach().cpu().numpy().squeeze()
faces = smplx.faces
ms = Mesh(v=verts, f=faces)
col = np.zeros(verts.shape[0])
col[(verts[:, 1] < -0.84) & (verts[:, 0] >= 0) & (verts[:, 1] > -1.14)] = 1
if display:
ms.set_vertex_colors_from_weights(col)
ms.show()
print('left_leg ', np.where(col)[0].shape)
return col
def cut_left_foot(display=False):
smplx = get_tpose_smplx()
verts = smplx.vertices.detach().cpu().numpy().squeeze()
faces = smplx.faces
ms = Mesh(v=verts, f=faces)
col = np.zeros(verts.shape[0])
col[(verts[:, 1] < -1.14) & (verts[:, 0] >= 0)] = 1
if display:
ms.set_vertex_colors_from_weights(col)
ms.show()
print('left_foot ', np.where(col)[0].shape)
return col
if __name__ == "__main__":
smplx = get_tpose_smplx()
verts = smplx.vertices.detach().cpu().numpy().squeeze()
faces = smplx.faces
ms = Mesh(v=verts, f=faces)
col = np.zeros((10475,))
display = False
rfa = cut_right_forearm(display)
col += (rfa * 0.1)
rma = cut_right_midarm(display)
col += (rma * 0.2)
lfa = cut_left_forearm(display)
col += (lfa * 0.3)
lma = cut_left_midarm(display)
col += (lma * 0.4)
rua = cut_right_upperarm(display)
col += (rua * 0.5)
lua = cut_left_upperarm(display)
col += (lua * 0.6)
h = cut_head(display)
col += (h * 0.7)
url = cut_upper_right_leg(display)
col += (url * 0.8)
rl = cut_right_leg(display)
col += (rl * 0.9)
ull = cut_upper_left_leg(display)
col += (ull * 1)
ll = cut_left_leg(display)
col += (ll * 1.1)
lf = cut_left_foot(display)
col += (lf * 1.2)
rf = cut_right_foot(display)
col += (rf * 1.3)
print('torso ', len(ms.v) - np.where(col)[0].shape[0])
parts = {'right_forearm': np.where(rfa)[0], 'left_forearm': np.where(lfa)[0],
'right_upperarm': np.where(rua)[0], 'left_upperarm': np.where(lua)[0],
'head': np.where(h)[0], 'right_leg': np.where(rl)[0], 'left_leg': np.where(ll)[0],
'torso': np.where(col == 0)[0],
'right_midarm': np.where(rma)[0], 'left_midarm': np.where(lma)[0],
'upper_left_leg': np.where(ull)[0], 'upper_right_leg': np.where(url)[0],
'right_foot': np.where(rf)[0], 'left_foot': np.where(lf)[0]}
import collections
parts = collections.OrderedDict(sorted(parts.items()))
col = np.zeros((10475,))
for n, k in enumerate(parts):
col[parts[k]] = n
col[:8129] = 0
ms.set_vertex_colors_from_weights(col)
ms.show()
# import ipdb; ipdb.set_trace()
pkl.dump(parts, open('/home/chen/IPNet_SMPLX/assets/smplx_parts_dense.pkl', 'wb'))
print('Done')
| [
"lib.smplx.body_models.SMPLX",
"numpy.where",
"numpy.zeros",
"psbody.mesh.Mesh",
"sys.path.append"
] | [((232, 253), 'sys.path.append', 'sys.path.append', (['""".."""'], {}), "('..')\n", (247, 253), False, 'import sys\n'), ((747, 769), 'psbody.mesh.Mesh', 'Mesh', ([], {'v': 'verts', 'f': 'faces'}), '(v=verts, f=faces)\n', (751, 769), False, 'from psbody.mesh import Mesh\n'), ((780, 804), 'numpy.zeros', 'np.zeros', (['verts.shape[0]'], {}), '(verts.shape[0])\n', (788, 804), True, 'import numpy as np\n'), ((1164, 1186), 'psbody.mesh.Mesh', 'Mesh', ([], {'v': 'verts', 'f': 'faces'}), '(v=verts, f=faces)\n', (1168, 1186), False, 'from psbody.mesh import Mesh\n'), ((1197, 1221), 'numpy.zeros', 'np.zeros', (['verts.shape[0]'], {}), '(verts.shape[0])\n', (1205, 1221), True, 'import numpy as np\n'), ((1578, 1600), 'psbody.mesh.Mesh', 'Mesh', ([], {'v': 'verts', 'f': 'faces'}), '(v=verts, f=faces)\n', (1582, 1600), False, 'from psbody.mesh import Mesh\n'), ((1611, 1635), 'numpy.zeros', 'np.zeros', (['verts.shape[0]'], {}), '(verts.shape[0])\n', (1619, 1635), True, 'import numpy as np\n'), ((2008, 2030), 'psbody.mesh.Mesh', 'Mesh', ([], {'v': 'verts', 'f': 'faces'}), '(v=verts, f=faces)\n', (2012, 2030), False, 'from psbody.mesh import Mesh\n'), ((2041, 2065), 'numpy.zeros', 'np.zeros', (['verts.shape[0]'], {}), '(verts.shape[0])\n', (2049, 2065), True, 'import numpy as np\n'), ((2437, 2459), 'psbody.mesh.Mesh', 'Mesh', ([], {'v': 'verts', 'f': 'faces'}), '(v=verts, f=faces)\n', (2441, 2459), False, 'from psbody.mesh import Mesh\n'), ((2470, 2494), 'numpy.zeros', 'np.zeros', (['verts.shape[0]'], {}), '(verts.shape[0])\n', (2478, 2494), True, 'import numpy as np\n'), ((2863, 2885), 'psbody.mesh.Mesh', 'Mesh', ([], {'v': 'verts', 'f': 'faces'}), '(v=verts, f=faces)\n', (2867, 2885), False, 'from psbody.mesh import Mesh\n'), ((2896, 2920), 'numpy.zeros', 'np.zeros', (['verts.shape[0]'], {}), '(verts.shape[0])\n', (2904, 2920), True, 'import numpy as np\n'), ((3282, 3304), 'psbody.mesh.Mesh', 'Mesh', ([], {'v': 'verts', 'f': 'faces'}), '(v=verts, f=faces)\n', (3286, 3304), 
False, 'from psbody.mesh import Mesh\n'), ((3315, 3339), 'numpy.zeros', 'np.zeros', (['verts.shape[0]'], {}), '(verts.shape[0])\n', (3323, 3339), True, 'import numpy as np\n'), ((3679, 3701), 'psbody.mesh.Mesh', 'Mesh', ([], {'v': 'verts', 'f': 'faces'}), '(v=verts, f=faces)\n', (3683, 3701), False, 'from psbody.mesh import Mesh\n'), ((3712, 3736), 'numpy.zeros', 'np.zeros', (['verts.shape[0]'], {}), '(verts.shape[0])\n', (3720, 3736), True, 'import numpy as np\n'), ((4129, 4151), 'psbody.mesh.Mesh', 'Mesh', ([], {'v': 'verts', 'f': 'faces'}), '(v=verts, f=faces)\n', (4133, 4151), False, 'from psbody.mesh import Mesh\n'), ((4162, 4186), 'numpy.zeros', 'np.zeros', (['verts.shape[0]'], {}), '(verts.shape[0])\n', (4170, 4186), True, 'import numpy as np\n'), ((4573, 4595), 'psbody.mesh.Mesh', 'Mesh', ([], {'v': 'verts', 'f': 'faces'}), '(v=verts, f=faces)\n', (4577, 4595), False, 'from psbody.mesh import Mesh\n'), ((4606, 4630), 'numpy.zeros', 'np.zeros', (['verts.shape[0]'], {}), '(verts.shape[0])\n', (4614, 4630), True, 'import numpy as np\n'), ((4997, 5019), 'psbody.mesh.Mesh', 'Mesh', ([], {'v': 'verts', 'f': 'faces'}), '(v=verts, f=faces)\n', (5001, 5019), False, 'from psbody.mesh import Mesh\n'), ((5030, 5054), 'numpy.zeros', 'np.zeros', (['verts.shape[0]'], {}), '(verts.shape[0])\n', (5038, 5054), True, 'import numpy as np\n'), ((5446, 5468), 'psbody.mesh.Mesh', 'Mesh', ([], {'v': 'verts', 'f': 'faces'}), '(v=verts, f=faces)\n', (5450, 5468), False, 'from psbody.mesh import Mesh\n'), ((5479, 5503), 'numpy.zeros', 'np.zeros', (['verts.shape[0]'], {}), '(verts.shape[0])\n', (5487, 5503), True, 'import numpy as np\n'), ((5889, 5911), 'psbody.mesh.Mesh', 'Mesh', ([], {'v': 'verts', 'f': 'faces'}), '(v=verts, f=faces)\n', (5893, 5911), False, 'from psbody.mesh import Mesh\n'), ((5922, 5946), 'numpy.zeros', 'np.zeros', (['verts.shape[0]'], {}), '(verts.shape[0])\n', (5930, 5946), True, 'import numpy as np\n'), ((6302, 6324), 'psbody.mesh.Mesh', 'Mesh', ([], {'v': 
'verts', 'f': 'faces'}), '(v=verts, f=faces)\n', (6306, 6324), False, 'from psbody.mesh import Mesh\n'), ((6336, 6354), 'numpy.zeros', 'np.zeros', (['(10475,)'], {}), '((10475,))\n', (6344, 6354), True, 'import numpy as np\n'), ((7831, 7849), 'numpy.zeros', 'np.zeros', (['(10475,)'], {}), '((10475,))\n', (7839, 7849), True, 'import numpy as np\n'), ((475, 561), 'lib.smplx.body_models.SMPLX', 'SMPLX', ([], {'model_path': '"""/home/chen/SMPLX/models/smplx"""', 'batch_size': '(1)', 'gender': '"""neutral"""'}), "(model_path='/home/chen/SMPLX/models/smplx', batch_size=1, gender=\n 'neutral')\n", (480, 561), False, 'from lib.smplx.body_models import SMPLX\n'), ((7219, 7232), 'numpy.where', 'np.where', (['rfa'], {}), '(rfa)\n', (7227, 7232), True, 'import numpy as np\n'), ((7253, 7266), 'numpy.where', 'np.where', (['lfa'], {}), '(lfa)\n', (7261, 7266), True, 'import numpy as np\n'), ((7302, 7315), 'numpy.where', 'np.where', (['rua'], {}), '(rua)\n', (7310, 7315), True, 'import numpy as np\n'), ((7337, 7350), 'numpy.where', 'np.where', (['lua'], {}), '(lua)\n', (7345, 7350), True, 'import numpy as np\n'), ((7376, 7387), 'numpy.where', 'np.where', (['h'], {}), '(h)\n', (7384, 7387), True, 'import numpy as np\n'), ((7405, 7417), 'numpy.where', 'np.where', (['rl'], {}), '(rl)\n', (7413, 7417), True, 'import numpy as np\n'), ((7434, 7446), 'numpy.where', 'np.where', (['ll'], {}), '(ll)\n', (7442, 7446), True, 'import numpy as np\n'), ((7473, 7491), 'numpy.where', 'np.where', (['(col == 0)'], {}), '(col == 0)\n', (7481, 7491), True, 'import numpy as np\n'), ((7525, 7538), 'numpy.where', 'np.where', (['rma'], {}), '(rma)\n', (7533, 7538), True, 'import numpy as np\n'), ((7558, 7571), 'numpy.where', 'np.where', (['lma'], {}), '(lma)\n', (7566, 7571), True, 'import numpy as np\n'), ((7607, 7620), 'numpy.where', 'np.where', (['ull'], {}), '(ull)\n', (7615, 7620), True, 'import numpy as np\n'), ((7644, 7657), 'numpy.where', 'np.where', (['url'], {}), '(url)\n', (7652, 7657), True, 
'import numpy as np\n'), ((7689, 7701), 'numpy.where', 'np.where', (['rf'], {}), '(rf)\n', (7697, 7701), True, 'import numpy as np\n'), ((7719, 7731), 'numpy.where', 'np.where', (['lf'], {}), '(lf)\n', (7727, 7731), True, 'import numpy as np\n'), ((962, 975), 'numpy.where', 'np.where', (['col'], {}), '(col)\n', (970, 975), True, 'import numpy as np\n'), ((1376, 1389), 'numpy.where', 'np.where', (['col'], {}), '(col)\n', (1384, 1389), True, 'import numpy as np\n'), ((1804, 1817), 'numpy.where', 'np.where', (['col'], {}), '(col)\n', (1812, 1817), True, 'import numpy as np\n'), ((2236, 2249), 'numpy.where', 'np.where', (['col'], {}), '(col)\n', (2244, 2249), True, 'import numpy as np\n'), ((2660, 2673), 'numpy.where', 'np.where', (['col'], {}), '(col)\n', (2668, 2673), True, 'import numpy as np\n'), ((3088, 3101), 'numpy.where', 'np.where', (['col'], {}), '(col)\n', (3096, 3101), True, 'import numpy as np\n'), ((3474, 3487), 'numpy.where', 'np.where', (['col'], {}), '(col)\n', (3482, 3487), True, 'import numpy as np\n'), ((3930, 3943), 'numpy.where', 'np.where', (['col'], {}), '(col)\n', (3938, 3943), True, 'import numpy as np\n'), ((4373, 4386), 'numpy.where', 'np.where', (['col'], {}), '(col)\n', (4381, 4386), True, 'import numpy as np\n'), ((4793, 4806), 'numpy.where', 'np.where', (['col'], {}), '(col)\n', (4801, 4806), True, 'import numpy as np\n'), ((5248, 5261), 'numpy.where', 'np.where', (['col'], {}), '(col)\n', (5256, 5261), True, 'import numpy as np\n'), ((5690, 5703), 'numpy.where', 'np.where', (['col'], {}), '(col)\n', (5698, 5703), True, 'import numpy as np\n'), ((6110, 6123), 'numpy.where', 'np.where', (['col'], {}), '(col)\n', (6118, 6123), True, 'import numpy as np\n'), ((7161, 7174), 'numpy.where', 'np.where', (['col'], {}), '(col)\n', (7169, 7174), True, 'import numpy as np\n')] |
import tensorflow as tf
import numpy as np
import cv2
from utils import *
#Define U-net
class UNet:
def __init__(self,shape,classes):
#classes
self.classes=classes
def dconv_block(self, X_tensor, filters):
"""
Function to downsample (emcoder) input images.
:param X_tensor: placeholder for inputs
:param filters: number of filters to be used
:return:
downsampled image
"""
#layer1
s1 = downconv_(X_tensor,filters,filter_size= 3,strides=1,padding='SAME')
b1 = tf.layers.batch_normalization(s1)
a1 = tf.nn.relu(b1)
#layer2
s2 = downconv_(a1,filters,filter_size=3,strides=1,padding='SAME')
b2 = tf.layers.batch_normalization(s2)
a2 = tf.nn.relu(b2)
return a2
def upconv_block(self,X_tensor,filters,filter_size,skip_connection):
"""
Function to upsample (transposed-convolution) image and
use for building decoder part of the network
:param X_tensor: placeholder for inputs
:param filters: number of filters to be used
:param filter_size: size of the filter(kernel) to be used
:param skip_connection: part of decoder network to stich
:return:
upsampled and stiched image
"""
#layer1
e1 = upconv_(X_tensor,filters,filter_size=filter_size,strides =2,padding="SAME")
concat = tf.concat([e1,skip_connection],axis=-1)
#layer2
conv1 = downconv_(concat,filters,filter_size=3,strides=1,padding='SAME')
relu1 = tf.nn.relu(conv1)
#layer3
conv2 = downconv_(relu1,filters,filter_size=3,strides=1,padding='SAME')
relu2 = tf.nn.relu(conv2)
return relu2
def UNet(self,X_tensor):
"""
Encoder-Decoder components of UNet. Loss funtion used is Binary-crossentropy
filters: [32,64,128,256]
:param:
X_tesnor : placeholder for train images (X)
:return:
probability masks for each class in the image
"""
#encoder
d1 = self.dconv_block(X_tensor, 32)
m1 = max_pool(d1, ksize=2, stride=2, padding="SAME")
d2 = self.dconv_block(m1, 64)
m2 = max_pool(d2, ksize=2, stride=2, padding="SAME")
d3 = self.dconv_block(m2, 128)
m3 = max_pool(d3, ksize=2, stride=2, padding="SAME")
d4 = self.dconv_block(m3,256)
m4 = max_pool(d4, ksize=2, stride=2, padding="SAME")
#bottleneck
bridge = downconv_(m4, 1024, 3, 1, 'SAME')
bridge = downconv_(bridge, 1024, 3, 1, 'SAME')
#decoder
u1 = self.upconv_block(bridge, 256, 2, d4)
u2 = self.upconv_block(u1, 128, 2, d3)
u3 = self.upconv_block(u2, 64, 2, d2)
u4 = self.upconv_block(u3, 32, 2, d1)
#1x1 output conv
logits = downconv_(u4,1,self.classes,strides=1,padding="SAME")
return logits
def mini_batches_(self, X, Y, batch_size=64):
"""
function to produce minibatches for training
:param X: input placeholder
:param Y: mask placeholder
:param batch_size: size of each batch
:return:
minibatches for training
"""
train_length = len(X)
num_batches = int(np.floor(train_length / batch_size))
batches = []
for i in range(num_batches):
batch_x = X[i * batch_size: i * batch_size + batch_size, :, :, :]
batch_y = Y[i * batch_size:i * batch_size + batch_size, :, :]
batches.append([batch_x, batch_y])
return batches
| [
"tensorflow.concat",
"numpy.floor",
"tensorflow.nn.relu",
"tensorflow.layers.batch_normalization"
] | [((588, 621), 'tensorflow.layers.batch_normalization', 'tf.layers.batch_normalization', (['s1'], {}), '(s1)\n', (617, 621), True, 'import tensorflow as tf\n'), ((636, 650), 'tensorflow.nn.relu', 'tf.nn.relu', (['b1'], {}), '(b1)\n', (646, 650), True, 'import tensorflow as tf\n'), ((759, 792), 'tensorflow.layers.batch_normalization', 'tf.layers.batch_normalization', (['s2'], {}), '(s2)\n', (788, 792), True, 'import tensorflow as tf\n'), ((807, 821), 'tensorflow.nn.relu', 'tf.nn.relu', (['b2'], {}), '(b2)\n', (817, 821), True, 'import tensorflow as tf\n'), ((1480, 1521), 'tensorflow.concat', 'tf.concat', (['[e1, skip_connection]'], {'axis': '(-1)'}), '([e1, skip_connection], axis=-1)\n', (1489, 1521), True, 'import tensorflow as tf\n'), ((1638, 1655), 'tensorflow.nn.relu', 'tf.nn.relu', (['conv1'], {}), '(conv1)\n', (1648, 1655), True, 'import tensorflow as tf\n'), ((1771, 1788), 'tensorflow.nn.relu', 'tf.nn.relu', (['conv2'], {}), '(conv2)\n', (1781, 1788), True, 'import tensorflow as tf\n'), ((3398, 3433), 'numpy.floor', 'np.floor', (['(train_length / batch_size)'], {}), '(train_length / batch_size)\n', (3406, 3433), True, 'import numpy as np\n')] |
"""Prepare data for training, validation and testing."""
import os
import fnmatch
import extract_data
from numpy import array, concatenate, mean, split
from keras.utils import to_categorical
def create_samples(time_series, n_steps):
"""
Split a time series into samples of size n_steps.
Example :
time_series = [1, 2, 3, 4]
n_steps = 2
create_samples(time_series, n_steps) = [ [1, 2], [2, 3], [3, 4] ]
"""
# Split a univariable sequence into samples
X = list()
n = len(time_series)
for i in range(n):
# Find the end of this pattern
end_ix = i + n_steps
# Check if we are beyond the sequence
if end_ix > n - 1:
break
# Gather input and output parts of the pattern
X.append(time_series[i:end_ix])
return array(X, dtype="uint16")
def split_samples(time_series, n_steps):
ret = split(time_series, n_steps)
if ret[-1].shape[0] < n_steps:
return array(ret[:-1], dtype="uint16")
return array(ret, dtype="uint16")
def get_data_sets(cnn_n_input):
"""Prepare training, validation and testing sets."""
# Get list of labels
data_directory = "data_set/"
list_labels = extract_data.get_labels(data_directory + "labels.txt")
n_labels = len(list_labels)
# Dictionary that gives labels ID
label_to_int = dict()
for i in range(n_labels):
label_to_int[list_labels[i]] = i
# Dictionary that will count how many times each label appears
count_labels, count_labels2 = dict(), dict()
# Train/Validation/Test
trainX, trainy = list(), list()
validX, validy = list(), list()
testX, testy = list(), list()
# Loop over data_set directory
files = [f for f in os.listdir(data_directory) if fnmatch.fnmatch(f, "*_label.txt")]
for file in files:
# Get chorus code
chorus = file.split('_')[0]
# Get time series (data)
input_data = extract_data.extract_data_from_txt(data_directory + "MIN " + chorus + ".txt").Value.values\
.astype(dtype="uint16", copy=False)
# input_data = mean(input_data.reshape(-1, 3), 1)
# Get respective label
label = extract_data.extract_label_from_txt(data_directory + file)
# Increment label count
if label[0] in count_labels:
count_labels[label[0]] += 1
else:
count_labels[label[0]] = 1
if label[1] in count_labels2:
count_labels2[label[1]] += 1
else:
count_labels2[label[1]] = 1
# Decide whether these data should be used for training/validation/testing
label_id = label_to_int[label[0]]
# Split data into samples
X = split_samples(input_data, cnn_n_input)
X = X.reshape(X.shape[1], X.shape[0], 1)
# Create respective Y values
Y = to_categorical([[label_id] for _ in X], dtype="uint8", num_classes=n_labels)
if count_labels[label[0]] % 5 == 7: # 20% of data is for testing
testX.append(X)
testy.append(Y)
elif count_labels[label[0]] % 5 == 3: # 20% of data is for validation
# Append validation samples
validX.append(X)
validy.append(Y)
else: # 60% of data is for training
# Append training samples
trainX.append(X)
trainy.append(Y)
print("--\nInventaire des données globales :")
print(count_labels)
print(count_labels2)
# Concatenate all training and validation samples to get the final sets
TrainX = concatenate([x for x in trainX])
Trainy = concatenate([y for y in trainy])
ValidX = concatenate([x for x in validX])
Validy = concatenate([y for y in validy])
# TestX = concatenate([x for x in testX])
# Testy = concatenate([y for y in testy])
print("Training set:\n\t", TrainX.shape)
print("Validation set:\n\t", ValidX.shape)
# print("Test set:\n\t", TestX.shape)
return TrainX, Trainy, ValidX, Validy, None, None
| [
"os.listdir",
"extract_data.extract_data_from_txt",
"extract_data.extract_label_from_txt",
"keras.utils.to_categorical",
"numpy.array",
"numpy.split",
"extract_data.get_labels",
"fnmatch.fnmatch",
"numpy.concatenate"
] | [((826, 850), 'numpy.array', 'array', (['X'], {'dtype': '"""uint16"""'}), "(X, dtype='uint16')\n", (831, 850), False, 'from numpy import array, concatenate, mean, split\n'), ((904, 931), 'numpy.split', 'split', (['time_series', 'n_steps'], {}), '(time_series, n_steps)\n', (909, 931), False, 'from numpy import array, concatenate, mean, split\n'), ((1025, 1051), 'numpy.array', 'array', (['ret'], {'dtype': '"""uint16"""'}), "(ret, dtype='uint16')\n", (1030, 1051), False, 'from numpy import array, concatenate, mean, split\n'), ((1220, 1274), 'extract_data.get_labels', 'extract_data.get_labels', (["(data_directory + 'labels.txt')"], {}), "(data_directory + 'labels.txt')\n", (1243, 1274), False, 'import extract_data\n'), ((3649, 3681), 'numpy.concatenate', 'concatenate', (['[x for x in trainX]'], {}), '([x for x in trainX])\n', (3660, 3681), False, 'from numpy import array, concatenate, mean, split\n'), ((3695, 3727), 'numpy.concatenate', 'concatenate', (['[y for y in trainy]'], {}), '([y for y in trainy])\n', (3706, 3727), False, 'from numpy import array, concatenate, mean, split\n'), ((3741, 3773), 'numpy.concatenate', 'concatenate', (['[x for x in validX]'], {}), '([x for x in validX])\n', (3752, 3773), False, 'from numpy import array, concatenate, mean, split\n'), ((3787, 3819), 'numpy.concatenate', 'concatenate', (['[y for y in validy]'], {}), '([y for y in validy])\n', (3798, 3819), False, 'from numpy import array, concatenate, mean, split\n'), ((982, 1013), 'numpy.array', 'array', (['ret[:-1]'], {'dtype': '"""uint16"""'}), "(ret[:-1], dtype='uint16')\n", (987, 1013), False, 'from numpy import array, concatenate, mean, split\n'), ((2207, 2265), 'extract_data.extract_label_from_txt', 'extract_data.extract_label_from_txt', (['(data_directory + file)'], {}), '(data_directory + file)\n', (2242, 2265), False, 'import extract_data\n'), ((2871, 2947), 'keras.utils.to_categorical', 'to_categorical', (['[[label_id] for _ in X]'], {'dtype': '"""uint8"""', 'num_classes': 
'n_labels'}), "([[label_id] for _ in X], dtype='uint8', num_classes=n_labels)\n", (2885, 2947), False, 'from keras.utils import to_categorical\n'), ((1755, 1781), 'os.listdir', 'os.listdir', (['data_directory'], {}), '(data_directory)\n', (1765, 1781), False, 'import os\n'), ((1785, 1818), 'fnmatch.fnmatch', 'fnmatch.fnmatch', (['f', '"""*_label.txt"""'], {}), "(f, '*_label.txt')\n", (1800, 1818), False, 'import fnmatch\n'), ((1961, 2038), 'extract_data.extract_data_from_txt', 'extract_data.extract_data_from_txt', (["(data_directory + 'MIN ' + chorus + '.txt')"], {}), "(data_directory + 'MIN ' + chorus + '.txt')\n", (1995, 2038), False, 'import extract_data\n')] |
import sklearn
import scipy
import numpy as np
from sklearn.linear_model import LogisticRegression
def vectorize_docs(vector_source, docs):
"""
vector_source should be a key contained in the doc, with a Numpy ndarray value.
"""
X = np.vstack([td[vector_source] for td in docs])
return X
def downsample_majority_class(majority_class, target_proportion, X, y):
minority_class = np.abs(majority_class - 1)
minority_inds = np.nonzero(y == minority_class)[0]
minority_count = len(minority_inds)
n_majority_class_to_keep = int(
np.ceil((minority_count / target_proportion) - minority_count)
)
majority_class_inds = np.nonzero(y == majority_class)[0]
if len(majority_class_inds) == n_majority_class_to_keep:
# no need to do any downsampling
# this should be a rare edge case caused by the use of np.ceil
# print(f"mistaken call to downsample {minority_class} / {minority_class} + {majority_class} = {minority_class / (minority_class + majority_class):.2f} with to_keep = {n_majority_class_to_keep}")
return X, y
assert len(majority_class_inds) > n_majority_class_to_keep
majority_inds_to_keep = np.random.choice(
majority_class_inds, size=n_majority_class_to_keep, replace=False
)
# print(f"Downsampled to {n_majority_class_to_keep} / {len(majority_class_inds)} majority class (label={majority_class}) documents (minority={minority_count}, downsampled minority pct ={n_majority_class_to_keep / (n_majority_class_to_keep + minority_count) *100:.2f}%, original = {len(majority_class_inds) / (len(majority_class_inds) + minority_count) *100:.2f}%).")
inds_to_keep = np.concatenate((majority_inds_to_keep, minority_inds))
assert len(inds_to_keep) > 0
X = X[inds_to_keep]
y = y[inds_to_keep]
assert X.shape[0] == len(inds_to_keep)
assert y.shape[0] == len(inds_to_keep)
return X, y
def train_model(X, y, X_valid, config):
"""
train_model uses the following config keys:
- learner
- use_bbsc
- undersample_to_proportion
"""
if config.undersample_to_proportion:
undersample_to_proportion = config.undersample_to_proportion
pos_count = np.sum(y)
pos_pct = pos_count / len(y) # what pct of the labels are 1?
if pos_pct < undersample_to_proportion:
# undersample until at least the target proportion is reached
X, y = downsample_majority_class(0, undersample_to_proportion, X, y)
elif pos_pct > (1 - undersample_to_proportion):
# need to undersample the positive class
X, y = downsample_majority_class(1, undersample_to_proportion, X, y)
if config.learner == "logreg":
clf = LogisticRegression(
C=1.0,
solver="liblinear",
)
clf.fit(X, y)
else:
raise ValueError(f"Unknown learner '{config.learner}'.")
y_unlabeled_valid_pred = clf.predict(X_valid)
unlabeled_valid_pct_pos = np.sum(y_unlabeled_valid_pred) / len(
y_unlabeled_valid_pred
)
if config.use_bbsc:
BBSC_MIN_LABELED_DATA_COUNT = 20
BBSC_MAX_TRAIN_FOLDS = 10 # should be in the config, but this is the number of folds used for predicting positive class proportion and thus the K-S test
if len(y) < BBSC_MIN_LABELED_DATA_COUNT:
# BBSC is highly unstable for small confusion matrices
# so for now we just prevent the use of BBSC when the
# available labeled sample is very small
return clf, unlabeled_valid_pct_pos
# use bbsc
y_valid_pred = np.zeros_like(y)
y_valid_pred_proba = np.zeros_like(y, dtype=float)
n_splits = min(BBSC_MAX_TRAIN_FOLDS, len(y))
kf = sklearn.model_selection.KFold(n_splits=n_splits, shuffle=False)
for train_index, test_index in kf.split(X):
X_train, X_test = X[train_index], X[test_index]
y_train, y_test = y[train_index], y[test_index]
pos_count = np.sum(y_train)
if pos_count == 0:
# the fold contains no positive training examples
# so predict negative class
y_pred = np.zeros_like(y_test)
y_valid_pred_proba[test_index] = 0.0
elif pos_count == len(y_train):
# the fold contains only positive training examples
# so predict positive class
y_pred = np.ones_like(y_test)
y_valid_pred_proba[test_index] = 1.0
else: # 1+ pos and neg training examples
cv_clf = LogisticRegression(C=1.0, solver="liblinear")
cv_clf.fit(X_train, y_train)
y_pred = cv_clf.predict(X_test)
y_valid_pred_proba[test_index] = cv_clf.predict_proba(X_test)[:, 1]
y_valid_pred[test_index] = y_pred
# use black-box shift correction
y_unlabeled_pred = clf.predict(X_valid)
y_unlabeled_pred_proba = clf.predict_proba(X_valid)[:, 1]
ks_result = scipy.stats.ks_2samp(y_valid_pred_proba, y_unlabeled_pred_proba)
p = ks_result.pvalue
source_predicted_y0 = np.sum(y_valid_pred == 0) / len(y_valid_pred)
source_predicted_y1 = np.sum(y_valid_pred == 1) / len(y_valid_pred)
labeled_y0 = np.sum(y == 0) / len(y)
labeled_y1 = np.sum(y == 1) / len(y)
v_est = np.array([labeled_y0, labeled_y1])
# C_est is the normalized confusion matrix on the validation data
C_est = np.zeros((2, 2))
C_est[0, 0] = np.sum((y == 0) & (y_valid_pred == 0))
C_est[0, 1] = np.sum((y == 1) & (y_valid_pred == 0))
C_est[1, 0] = np.sum((y == 0) & (y_valid_pred == 1))
C_est[1, 1] = np.sum((y == 1) & (y_valid_pred == 1))
C_est = C_est / len(y)
v_est = np.array([labeled_y0, labeled_y1])
target_predicted_y0 = np.sum(y_unlabeled_pred == 0) / len(y_unlabeled_pred)
target_predicted_y1 = np.sum(y_unlabeled_pred == 1) / len(y_unlabeled_pred)
mu_pred_est = np.array([target_predicted_y0, target_predicted_y1])
try:
w_est = np.matmul(np.linalg.inv(C_est), mu_pred_est)
except np.linalg.LinAlgError as ex:
# confusion matrix not invertible
# so we bail out without completing bbsc
# print(C_est)
return clf, unlabeled_valid_pct_pos
assert w_est.shape == (2,), w_est.shape
mu_est = np.matmul(np.diag(v_est), w_est)
assert mu_est.shape == (2,), mu_est.shape
w_est_nn = w_est.clip(
0
) # w_est_nn is the non-negative version of w_est, clipping class weights to 0
class_weights = {0: w_est_nn[0], 1: w_est_nn[1]}
sigma_min = np.min(np.linalg.eigvals(C_est))
# print(f"KS-test p={p:.3f}, predicted pos% = {mu_est[1]*100:.2f}% (raw pred pos% = {target_predicted_y1*100:.2f}%), class weights = {class_weights}, σ_min = {sigma_min:.3f}")
if p > 0.01 or sigma_min <= 0.05:
# don't use BBSC if no skew detected between labeled validation and unlabeled validation sets
return clf, unlabeled_valid_pct_pos
bbsc_clf = sklearn.linear_model.LogisticRegression(
solver="liblinear", penalty="l2", class_weight=class_weights
)
bbsc_clf.fit(X, y)
return bbsc_clf, target_predicted_y1
return clf, unlabeled_valid_pct_pos
| [
"numpy.abs",
"numpy.ceil",
"numpy.ones_like",
"numpy.random.choice",
"scipy.stats.ks_2samp",
"sklearn.linear_model.LogisticRegression",
"numpy.diag",
"numpy.sum",
"numpy.array",
"numpy.zeros",
"numpy.linalg.eigvals",
"numpy.vstack",
"numpy.nonzero",
"numpy.concatenate",
"numpy.linalg.inv... | [((250, 295), 'numpy.vstack', 'np.vstack', (['[td[vector_source] for td in docs]'], {}), '([td[vector_source] for td in docs])\n', (259, 295), True, 'import numpy as np\n'), ((404, 430), 'numpy.abs', 'np.abs', (['(majority_class - 1)'], {}), '(majority_class - 1)\n', (410, 430), True, 'import numpy as np\n'), ((1190, 1277), 'numpy.random.choice', 'np.random.choice', (['majority_class_inds'], {'size': 'n_majority_class_to_keep', 'replace': '(False)'}), '(majority_class_inds, size=n_majority_class_to_keep,\n replace=False)\n', (1206, 1277), True, 'import numpy as np\n'), ((1678, 1732), 'numpy.concatenate', 'np.concatenate', (['(majority_inds_to_keep, minority_inds)'], {}), '((majority_inds_to_keep, minority_inds))\n', (1692, 1732), True, 'import numpy as np\n'), ((451, 482), 'numpy.nonzero', 'np.nonzero', (['(y == minority_class)'], {}), '(y == minority_class)\n', (461, 482), True, 'import numpy as np\n'), ((571, 631), 'numpy.ceil', 'np.ceil', (['(minority_count / target_proportion - minority_count)'], {}), '(minority_count / target_proportion - minority_count)\n', (578, 631), True, 'import numpy as np\n'), ((666, 697), 'numpy.nonzero', 'np.nonzero', (['(y == majority_class)'], {}), '(y == majority_class)\n', (676, 697), True, 'import numpy as np\n'), ((2216, 2225), 'numpy.sum', 'np.sum', (['y'], {}), '(y)\n', (2222, 2225), True, 'import numpy as np\n'), ((2739, 2784), 'sklearn.linear_model.LogisticRegression', 'LogisticRegression', ([], {'C': '(1.0)', 'solver': '"""liblinear"""'}), "(C=1.0, solver='liblinear')\n", (2757, 2784), False, 'from sklearn.linear_model import LogisticRegression\n'), ((2997, 3027), 'numpy.sum', 'np.sum', (['y_unlabeled_valid_pred'], {}), '(y_unlabeled_valid_pred)\n', (3003, 3027), True, 'import numpy as np\n'), ((3626, 3642), 'numpy.zeros_like', 'np.zeros_like', (['y'], {}), '(y)\n', (3639, 3642), True, 'import numpy as np\n'), ((3672, 3701), 'numpy.zeros_like', 'np.zeros_like', (['y'], {'dtype': 'float'}), '(y, 
dtype=float)\n', (3685, 3701), True, 'import numpy as np\n'), ((3768, 3831), 'sklearn.model_selection.KFold', 'sklearn.model_selection.KFold', ([], {'n_splits': 'n_splits', 'shuffle': '(False)'}), '(n_splits=n_splits, shuffle=False)\n', (3797, 3831), False, 'import sklearn\n'), ((5064, 5128), 'scipy.stats.ks_2samp', 'scipy.stats.ks_2samp', (['y_valid_pred_proba', 'y_unlabeled_pred_proba'], {}), '(y_valid_pred_proba, y_unlabeled_pred_proba)\n', (5084, 5128), False, 'import scipy\n'), ((5418, 5452), 'numpy.array', 'np.array', (['[labeled_y0, labeled_y1]'], {}), '([labeled_y0, labeled_y1])\n', (5426, 5452), True, 'import numpy as np\n'), ((5543, 5559), 'numpy.zeros', 'np.zeros', (['(2, 2)'], {}), '((2, 2))\n', (5551, 5559), True, 'import numpy as np\n'), ((5582, 5620), 'numpy.sum', 'np.sum', (['((y == 0) & (y_valid_pred == 0))'], {}), '((y == 0) & (y_valid_pred == 0))\n', (5588, 5620), True, 'import numpy as np\n'), ((5643, 5681), 'numpy.sum', 'np.sum', (['((y == 1) & (y_valid_pred == 0))'], {}), '((y == 1) & (y_valid_pred == 0))\n', (5649, 5681), True, 'import numpy as np\n'), ((5704, 5742), 'numpy.sum', 'np.sum', (['((y == 0) & (y_valid_pred == 1))'], {}), '((y == 0) & (y_valid_pred == 1))\n', (5710, 5742), True, 'import numpy as np\n'), ((5765, 5803), 'numpy.sum', 'np.sum', (['((y == 1) & (y_valid_pred == 1))'], {}), '((y == 1) & (y_valid_pred == 1))\n', (5771, 5803), True, 'import numpy as np\n'), ((5851, 5885), 'numpy.array', 'np.array', (['[labeled_y0, labeled_y1]'], {}), '([labeled_y0, labeled_y1])\n', (5859, 5885), True, 'import numpy as np\n'), ((6078, 6130), 'numpy.array', 'np.array', (['[target_predicted_y0, target_predicted_y1]'], {}), '([target_predicted_y0, target_predicted_y1])\n', (6086, 6130), True, 'import numpy as np\n'), ((7221, 7326), 'sklearn.linear_model.LogisticRegression', 'sklearn.linear_model.LogisticRegression', ([], {'solver': '"""liblinear"""', 'penalty': '"""l2"""', 'class_weight': 'class_weights'}), "(solver='liblinear', penalty='l2',\n 
class_weight=class_weights)\n", (7260, 7326), False, 'import sklearn\n'), ((4028, 4043), 'numpy.sum', 'np.sum', (['y_train'], {}), '(y_train)\n', (4034, 4043), True, 'import numpy as np\n'), ((5189, 5214), 'numpy.sum', 'np.sum', (['(y_valid_pred == 0)'], {}), '(y_valid_pred == 0)\n', (5195, 5214), True, 'import numpy as np\n'), ((5265, 5290), 'numpy.sum', 'np.sum', (['(y_valid_pred == 1)'], {}), '(y_valid_pred == 1)\n', (5271, 5290), True, 'import numpy as np\n'), ((5333, 5347), 'numpy.sum', 'np.sum', (['(y == 0)'], {}), '(y == 0)\n', (5339, 5347), True, 'import numpy as np\n'), ((5378, 5392), 'numpy.sum', 'np.sum', (['(y == 1)'], {}), '(y == 1)\n', (5384, 5392), True, 'import numpy as np\n'), ((5917, 5946), 'numpy.sum', 'np.sum', (['(y_unlabeled_pred == 0)'], {}), '(y_unlabeled_pred == 0)\n', (5923, 5946), True, 'import numpy as np\n'), ((6001, 6030), 'numpy.sum', 'np.sum', (['(y_unlabeled_pred == 1)'], {}), '(y_unlabeled_pred == 1)\n', (6007, 6030), True, 'import numpy as np\n'), ((6502, 6516), 'numpy.diag', 'np.diag', (['v_est'], {}), '(v_est)\n', (6509, 6516), True, 'import numpy as np\n'), ((6794, 6818), 'numpy.linalg.eigvals', 'np.linalg.eigvals', (['C_est'], {}), '(C_est)\n', (6811, 6818), True, 'import numpy as np\n'), ((4210, 4231), 'numpy.zeros_like', 'np.zeros_like', (['y_test'], {}), '(y_test)\n', (4223, 4231), True, 'import numpy as np\n'), ((6174, 6194), 'numpy.linalg.inv', 'np.linalg.inv', (['C_est'], {}), '(C_est)\n', (6187, 6194), True, 'import numpy as np\n'), ((4466, 4486), 'numpy.ones_like', 'np.ones_like', (['y_test'], {}), '(y_test)\n', (4478, 4486), True, 'import numpy as np\n'), ((4619, 4664), 'sklearn.linear_model.LogisticRegression', 'LogisticRegression', ([], {'C': '(1.0)', 'solver': '"""liblinear"""'}), "(C=1.0, solver='liblinear')\n", (4637, 4664), False, 'from sklearn.linear_model import LogisticRegression\n')] |
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
import matplotlib.path
import numexpr as ne
import scipy as sp
import scipy.sparse
plt.ion()
import pybie2d
"""
Demonstrate how to use the pybie2d package to solve an interior/exterior
Laplace problem on a complicated domain using a global quadrature
And boundary collections
"""
# Grid resolution and number of boundary nodes per curve.
N = 1000
NB1 = 500
NB2 = 600
NB3 = 600
# extract some functions for easy calling (short aliases into pybie2d)
squish = pybie2d.misc.curve_descriptions.squished_circle
star = pybie2d.misc.curve_descriptions.star
GSB = pybie2d.boundaries.global_smooth_boundary.global_smooth_boundary.Global_Smooth_Boundary
Grid = pybie2d.grid.Grid
PointSet = pybie2d.point_set.PointSet
Laplace_Layer_Apply = pybie2d.kernels.high_level.laplace.Laplace_Layer_Apply
Laplace_Layer_Singular_Apply = pybie2d.kernels.high_level.laplace.Laplace_Layer_Singular_Apply
Cauchy_Layer_Apply = pybie2d.kernels.high_level.cauchy.Cauchy_Layer_Apply
Find_Near_Points = pybie2d.misc.near_points.find_near_points
Pairing = pybie2d.pairing.Pairing
Boundary_Collection = pybie2d.boundaries.collection.BoundaryCollection
Evaluate_Tau = pybie2d.solvers.laplace_dirichlet.Evaluate_Tau
LaplaceDirichletSolver = pybie2d.solvers.laplace_dirichlet.LaplaceDirichletSolver
# Outer squished circle plus two star-shaped holes.
boundary1 = GSB(c=squish(NB1,r=2,b=0.3,rot=np.pi/4.0))
boundary2 = GSB(c=star(NB2,x=0.75,y=0.75,r=0.3,a=0.4,f=7,rot=np.pi/3.0))
boundary3 = GSB(c=star(NB3,x=-0.75,y=-0.75,r=0.4,a=0.05,f=11,rot=np.pi/3.0))
# Collect them: 'i' = interior side is physical, 'e' = exterior side is physical.
boundary = Boundary_Collection()
boundary.add([boundary1, boundary2, boundary3], ['i', 'e', 'e'])
boundary.amass_information()
def solution_func(x, y):
    """Exact reference solution: the sum of log-distances to the two points
    (0.75, 0.75) and (-0.75, -0.75) plus the linear field 2x + y."""
    # Squared distances to the two source points.
    d2a = (x-0.75)**2 + (y-0.75)**2
    d2b = (x+0.75)**2 + (y+0.75)**2
    # NOTE: the numexpr string references the local names d2a/d2b/x/y,
    # so those variables cannot be renamed without editing the expression.
    return ne.evaluate('log(sqrt(d2a)) + log(sqrt(d2b)) + 2*x + y')
# Dirichlet boundary data: sample the exact solution on each boundary, then
# stack in the same order the curves were added to the collection.
bc1 = solution_func(boundary1.x, boundary1.y)
bc2 = solution_func(boundary2.x, boundary2.y)
bc3 = solution_func(boundary3.x, boundary3.y)
bc = np.concatenate([bc1, bc2, bc3])
def err_plot(up):
    """Print the max error of ``up`` on the physical grid points and show an
    image of the number of accurate digits (masked outside the domain)."""
    exact = solution_func(full_grid.xg[phys], full_grid.yg[phys])
    err = up - exact
    print('Error: {:0.2e}'.format(np.abs(err).max()))
    # accurate digits per point; the 1e-16 floor avoids log10(0)
    digits_phys = -np.log10(np.abs(err)+1e-16)
    digit_map = np.zeros_like(full_grid.xg)
    digit_map[phys] = digits_phys
    masked = np.ma.array(digit_map, mask=ext)
    # image of accurate digits (only meaningful in the interior)
    fig, ax = plt.subplots(1,1)
    im = ax.imshow(masked[:,::-1].T, extent=[-2,2,-2,2],
                cmap=mpl.cm.viridis_r)
    ax.set_aspect('equal')
    fig.colorbar(im)
################################################################################
# find physical region: inside boundary1 but outside the two star holes
full_grid = Grid([-2,2], N, [-2,2], N)
# this is hiding a lot of stuff! (each call classifies every grid point)
phys1, ext1 = boundary1.find_interior_points(full_grid)
phys2, ext2 = boundary2.find_interior_points(full_grid)
phys3, ext3 = boundary3.find_interior_points(full_grid)
phys = full_grid.reshape(np.logical_and.reduce([phys1, ext2, ext3]))
ext = np.logical_not(phys)
################################################################################
# iteratively solve for the density (GMRES restart 100, tight tolerance)
solver = LaplaceDirichletSolver(boundary, solve_type='iterative', check_close=False)
tau = solver.solve(bc, disp=True, restart=100, tol=1e-14)
################################################################################
# evaluate solution (no close corrections) on the masked physical grid
gridp = Grid([-2,2], N, [-2,2], N, mask=phys)
u = np.zeros_like(gridp.xg)
up = Evaluate_Tau(boundary, gridp, tau)
u[phys] = up
err_plot(up)
################################################################################
# make on-the-fly close corrections
| [
"numpy.abs",
"numpy.ma.array",
"numpy.logical_not",
"numpy.logical_and.reduce",
"matplotlib.pyplot.subplots",
"numpy.concatenate",
"matplotlib.pyplot.ion",
"numpy.zeros_like",
"numexpr.evaluate"
] | [((159, 168), 'matplotlib.pyplot.ion', 'plt.ion', ([], {}), '()\n', (166, 168), True, 'import matplotlib.pyplot as plt\n'), ((1889, 1920), 'numpy.concatenate', 'np.concatenate', (['[bc1, bc2, bc3]'], {}), '([bc1, bc2, bc3])\n', (1903, 1920), True, 'import numpy as np\n'), ((2881, 2901), 'numpy.logical_not', 'np.logical_not', (['phys'], {}), '(phys)\n', (2895, 2901), True, 'import numpy as np\n'), ((3341, 3364), 'numpy.zeros_like', 'np.zeros_like', (['gridp.xg'], {}), '(gridp.xg)\n', (3354, 3364), True, 'import numpy as np\n'), ((1688, 1744), 'numexpr.evaluate', 'ne.evaluate', (['"""log(sqrt(d2a)) + log(sqrt(d2b)) + 2*x + y"""'], {}), "('log(sqrt(d2a)) + log(sqrt(d2b)) + 2*x + y')\n", (1699, 1744), True, 'import numexpr as ne\n'), ((2083, 2110), 'numpy.zeros_like', 'np.zeros_like', (['full_grid.xg'], {}), '(full_grid.xg)\n', (2096, 2110), True, 'import numpy as np\n'), ((2146, 2175), 'numpy.ma.array', 'np.ma.array', (['digits'], {'mask': 'ext'}), '(digits, mask=ext)\n', (2157, 2175), True, 'import numpy as np\n'), ((2253, 2271), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(1)'], {}), '(1, 1)\n', (2265, 2271), True, 'import matplotlib.pyplot as plt\n'), ((2831, 2873), 'numpy.logical_and.reduce', 'np.logical_and.reduce', (['[phys1, ext2, ext3]'], {}), '([phys1, ext2, ext3])\n', (2852, 2873), True, 'import numpy as np\n'), ((2051, 2065), 'numpy.abs', 'np.abs', (['errorp'], {}), '(errorp)\n', (2057, 2065), True, 'import numpy as np\n'), ((2437, 2451), 'numpy.abs', 'np.abs', (['errorp'], {}), '(errorp)\n', (2443, 2451), True, 'import numpy as np\n')] |
from typing import List
import models.conv_lstm as conv_lstm
from pytorch_lightning import (
Callback,
LightningModule,
LightningDataModule,
Trainer,
seed_everything,
)
from core import utils
from pytorch_lightning.callbacks import LearningRateMonitor
import torch
import gc
import xarray as xr
import numpy as np
from numbers import Number
import pandas as pd
import torch
from itertools import chain
from torch.utils.data import random_split, DataLoader, Dataset
import pytorch_lightning as pl
import os
import pathlib
import gc
import dask
import pickle
import sys
from pytorch_lightning.callbacks import ModelCheckpoint
class SimpleDataset(Dataset):
    """In-memory map-style dataset of paired (image, future_image) samples.

    Parameters
    ----------
    images : sequence
        Input frames.
    future_images : sequence
        Target frames; must have the same length as ``images``.

    Raises
    ------
    ValueError
        If the two sequences differ in length (fail fast instead of raising
        IndexError mid-epoch when ``__getitem__`` indexes the shorter one).
    """

    def __init__(self, images, future_images):
        if len(images) != len(future_images):
            raise ValueError(
                "images and future_images must have the same length "
                f"({len(images)} != {len(future_images)})"
            )
        self.future_images = future_images
        self.images = images

    def __len__(self):
        return len(self.future_images)

    def __getitem__(self, idx):
        # (input, target) pair at position ``idx``.
        return self.images[idx], self.future_images[idx]
def to_tensor(dataset):
    """Copy ``dataset.variable`` into a new float32 torch tensor."""
    values = np.array(dataset.variable)
    return torch.from_numpy(values).float()
def get_spatial_region_of_interest(data_array, x_index_at_center: Number, y_index_at_center: Number, image_size_pixels: int = 256) -> "xr.DataArray":
    """Crop a square window centred on the given pixel indices.

    Generalized from a hard-coded 256-pixel window: ``image_size_pixels``
    (default 256, preserving the original behaviour) sets the side length of
    the square in pixels.

    Args:
        data_array: array with ``x``/``y`` dimensions supporting ``.isel``.
        x_index_at_center: integer x pixel index of the window centre.
        y_index_at_center: integer y pixel index of the window centre.
        image_size_pixels: side length of the square window in pixels.

    Returns:
        The cropped window.  NOTE(review): if the window extends past the
        array edge the slice is presumably clipped numpy-style — confirm
        against the caller if exact sizes matter.
    """
    x_and_y_index_at_center = pd.Series({"x_osgb": x_index_at_center, "y_osgb": y_index_at_center})
    half_image_size_pixels = image_size_pixels // 2
    min_x_and_y_index = x_and_y_index_at_center - half_image_size_pixels
    max_x_and_y_index = x_and_y_index_at_center + half_image_size_pixels
    return data_array.isel(
        x=slice(min_x_and_y_index.x_osgb, max_x_and_y_index.x_osgb),
        y=slice(min_x_and_y_index.y_osgb, max_x_and_y_index.y_osgb),
    )
if __name__ == '__main__':
    # "file_system" sharing avoids too-many-open-FDs when DataLoader workers
    # pass tensors between processes.
    torch.multiprocessing.set_sharing_strategy("file_system")
    model = conv_lstm.EncoderDecoderConvLSTM(input_channels = 1, out_channels = 1, forecast_steps = 5)
    new_epochs = 1000
    checkpoint_callback = ModelCheckpoint(
        dirpath='./new_model/lightning_logs/version_0/checkpoints/',
        filename='checky', save_last=True)
    trainer = pl.Trainer(strategy="ddp_spawn", gpus=[0], max_epochs=new_epochs, enable_checkpointing=True,
                         callbacks=[checkpoint_callback])
    i = 0
    # Public EUMETSAT HRV satellite archive in zarr format on GCS.
    SATELLITE_ZARR_PATH = "gs://public-datasets-eumetsat-solar-forecasting/satellite/EUMETSAT/SEVIRI_RSS/v3/eumetsat_seviri_hrv_uk.zarr"
    data = xr.open_dataset(
        SATELLITE_ZARR_PATH,
        engine="zarr",
        chunks="auto",
    )
    dask.config.set(**{"array.slicing.split_large_chunks": False})
    data_array = data["data"]
    data_array = data_array.sortby('time')
    #data_array = data_array[:1000]
    # Small slice of timesteps used for this run.
    data_array = data_array[119:206]
    gc.collect()
    # One 256x256 crop per centre point (currently a single centre).
    regions = []
    centers = [(512, 512)]
    for (x_osgb, y_osgb) in centers:
        regions.append(get_spatial_region_of_interest(data_array, x_osgb, y_osgb))
    # Inputs are frames [0..T-2], targets are frames [1..T-1] (one-step shift).
    X_tensors = [to_tensor(timestep[:-1]) for timestep in regions]
    X_tensors = list(chain.from_iterable(X_tensors))
    y_tensors = [to_tensor(timestep[1:]) for timestep in regions]
    y_tensors = list(chain.from_iterable(y_tensors))
    X_tensors = [torch.reshape(t, [1, 256, 256]) for t in X_tensors]
    y_tensors = [torch.reshape(t, [1, 256, 256]) for t in y_tensors]
    # Group frames into non-overlapping 5-step sequences; the trailing
    # (possibly partial) group is dropped by the [:-1].
    X_t = list(zip(*[iter(X_tensors)] * 5))
    X_t = [torch.stack(x) for x in X_t][:-1]
    y_t = list(zip(*[iter(y_tensors)] * 5))
    y_t = [torch.stack(y) for y in y_t][:-1]
    dataset = SimpleDataset(X_t, y_t)
    # 90/10 random train/validation split.
    train_size = int(0.9 * dataset.__len__())
    val_size = int(dataset.__len__() - train_size)
    print(f"""Train size = {train_size}""")
    print(f"""Val size = {val_size}""")
    train, val = torch.utils.data.random_split(dataset, [train_size, val_size])
    log = utils.get_logger(__name__)
    log.info("Starting training!")
    trainer.fit(model, DataLoader(train, num_workers=0, batch_size=3), DataLoader(val, num_workers=0, batch_size=3))
    torch.save(model.state_dict(), "./new_model/model.pth")
| [
"pandas.Series",
"pytorch_lightning.callbacks.ModelCheckpoint",
"dask.config.set",
"core.utils.get_logger",
"torch.utils.data.random_split",
"torch.utils.data.DataLoader",
"torch.stack",
"itertools.chain.from_iterable",
"numpy.array",
"pytorch_lightning.Trainer",
"models.conv_lstm.EncoderDecoder... | [((1243, 1312), 'pandas.Series', 'pd.Series', (["{'x_osgb': x_index_at_center, 'y_osgb': y_index_at_center}"], {}), "({'x_osgb': x_index_at_center, 'y_osgb': y_index_at_center})\n", (1252, 1312), True, 'import pandas as pd\n'), ((1742, 1799), 'torch.multiprocessing.set_sharing_strategy', 'torch.multiprocessing.set_sharing_strategy', (['"""file_system"""'], {}), "('file_system')\n", (1784, 1799), False, 'import torch\n'), ((1812, 1900), 'models.conv_lstm.EncoderDecoderConvLSTM', 'conv_lstm.EncoderDecoderConvLSTM', ([], {'input_channels': '(1)', 'out_channels': '(1)', 'forecast_steps': '(5)'}), '(input_channels=1, out_channels=1,\n forecast_steps=5)\n', (1844, 1900), True, 'import models.conv_lstm as conv_lstm\n'), ((1951, 2066), 'pytorch_lightning.callbacks.ModelCheckpoint', 'ModelCheckpoint', ([], {'dirpath': '"""./new_model/lightning_logs/version_0/checkpoints/"""', 'filename': '"""checky"""', 'save_last': '(True)'}), "(dirpath='./new_model/lightning_logs/version_0/checkpoints/',\n filename='checky', save_last=True)\n", (1966, 2066), False, 'from pytorch_lightning.callbacks import ModelCheckpoint\n'), ((2094, 2223), 'pytorch_lightning.Trainer', 'pl.Trainer', ([], {'strategy': '"""ddp_spawn"""', 'gpus': '[0]', 'max_epochs': 'new_epochs', 'enable_checkpointing': '(True)', 'callbacks': '[checkpoint_callback]'}), "(strategy='ddp_spawn', gpus=[0], max_epochs=new_epochs,\n enable_checkpointing=True, callbacks=[checkpoint_callback])\n", (2104, 2223), True, 'import pytorch_lightning as pl\n'), ((2403, 2469), 'xarray.open_dataset', 'xr.open_dataset', (['SATELLITE_ZARR_PATH'], {'engine': '"""zarr"""', 'chunks': '"""auto"""'}), "(SATELLITE_ZARR_PATH, engine='zarr', chunks='auto')\n", (2418, 2469), True, 'import xarray as xr\n'), ((2505, 2567), 'dask.config.set', 'dask.config.set', ([], {}), "(**{'array.slicing.split_large_chunks': False})\n", (2520, 2567), False, 'import dask\n'), ((2718, 2730), 'gc.collect', 'gc.collect', ([], {}), 
'()\n', (2728, 2730), False, 'import gc\n'), ((3686, 3748), 'torch.utils.data.random_split', 'torch.utils.data.random_split', (['dataset', '[train_size, val_size]'], {}), '(dataset, [train_size, val_size])\n', (3715, 3748), False, 'import torch\n'), ((3759, 3785), 'core.utils.get_logger', 'utils.get_logger', (['__name__'], {}), '(__name__)\n', (3775, 3785), False, 'from core import utils\n'), ((2983, 3013), 'itertools.chain.from_iterable', 'chain.from_iterable', (['X_tensors'], {}), '(X_tensors)\n', (3002, 3013), False, 'from itertools import chain\n'), ((3102, 3132), 'itertools.chain.from_iterable', 'chain.from_iterable', (['y_tensors'], {}), '(y_tensors)\n', (3121, 3132), False, 'from itertools import chain\n'), ((3151, 3182), 'torch.reshape', 'torch.reshape', (['t', '[1, 256, 256]'], {}), '(t, [1, 256, 256])\n', (3164, 3182), False, 'import torch\n'), ((3220, 3251), 'torch.reshape', 'torch.reshape', (['t', '[1, 256, 256]'], {}), '(t, [1, 256, 256])\n', (3233, 3251), False, 'import torch\n'), ((3844, 3890), 'torch.utils.data.DataLoader', 'DataLoader', (['train'], {'num_workers': '(0)', 'batch_size': '(3)'}), '(train, num_workers=0, batch_size=3)\n', (3854, 3890), False, 'from torch.utils.data import random_split, DataLoader, Dataset\n'), ((3892, 3936), 'torch.utils.data.DataLoader', 'DataLoader', (['val'], {'num_workers': '(0)', 'batch_size': '(3)'}), '(val, num_workers=0, batch_size=3)\n', (3902, 3936), False, 'from torch.utils.data import random_split, DataLoader, Dataset\n'), ((3327, 3341), 'torch.stack', 'torch.stack', (['x'], {}), '(x)\n', (3338, 3341), False, 'import torch\n'), ((3416, 3430), 'torch.stack', 'torch.stack', (['y'], {}), '(y)\n', (3427, 3430), False, 'import torch\n'), ((1058, 1084), 'numpy.array', 'np.array', (['dataset.variable'], {}), '(dataset.variable)\n', (1066, 1084), True, 'import numpy as np\n')] |
import numpy as np
from tgym.utils import calc_spread
def test_calc_spread():
    """calc_spread with coefficients [1, -0.1] over two price pairs should
    yield the hand-computed spread (-1, 1)."""
    coefficients = [1, -0.1]
    quotes = np.array([1, 2, 10, 20])
    expected = (-1, 1)
    assert calc_spread(quotes, coefficients) == expected
| [
"numpy.array",
"tgym.utils.calc_spread"
] | [((129, 153), 'numpy.array', 'np.array', (['[1, 2, 10, 20]'], {}), '([1, 2, 10, 20])\n', (137, 153), True, 'import numpy as np\n'), ((192, 232), 'tgym.utils.calc_spread', 'calc_spread', (['prices', 'spread_coefficients'], {}), '(prices, spread_coefficients)\n', (203, 232), False, 'from tgym.utils import calc_spread\n')] |
#!/usr/bin/env python
##
#
# Reinforcement-Learning Based Controller
#
##
import rospy
import random
from geometry_msgs.msg import Twist
from geometry_msgs.msg import Pose
from sensor_msgs.msg import LaserScan
from nav_msgs.msg import Odometry
from std_msgs.msg import Int8
from std_srvs.srv import Empty
from tf.transformations import euler_from_quaternion
import tensorflow as tf
import numpy as np
import copy
import matplotlib.pyplot as plt
import subprocess
import time
# Filesystem locations for saving the trained model.
home_dir = "/home/vjkurtz/"
base_dir = "/home/vjkurtz/catkin_ws/src/collision_avoidance"
# Sensor data stored in a global variable so it can be accessed asynchronously
# by the ROS subscriber callbacks below.
sensor_data = LaserScan().ranges
odom_data = Odometry().twist.twist
is_crashed = False
# Collision frequencies for plotting are also global variables so we can
# access them even after the main program is shut down
iterations = []
collision_frequencies = []
cumulative_reward = []
######### Initialize Q-Network ################
# parameters: three hidden layers, sized 100/300/100
learning_rate = 0.001
n_hidden_1 = 100
n_hidden_2 = 300
n_hidden_3 = 100
n_input = 181 # lidar data (one distance per degree) plus angle to the goal (radians)
n_classes = 3 # commands: left, right, straight
# tf graph input: X is the state, Y is the target Q-values
X = tf.placeholder("float", [None, n_input])
Y = tf.placeholder("float", [None, n_classes])
# Layer weights and biases, randomly initialized
weights = {
    'h1': tf.Variable(tf.random_normal([n_input, n_hidden_1])),
    'h2': tf.Variable(tf.random_normal([n_hidden_1, n_hidden_2])),
    'h3': tf.Variable(tf.random_normal([n_hidden_2, n_hidden_3])),
    'out': tf.Variable(tf.random_normal([n_hidden_3, n_classes]))
}
biases = {
    'b1': tf.Variable(tf.random_normal([n_hidden_1])),
    'b2': tf.Variable(tf.random_normal([n_hidden_2])),
    'b3': tf.Variable(tf.random_normal([n_hidden_3])),
    'out': tf.Variable(tf.random_normal([n_classes]))
}
# Dropout parameter (keep probability), fed at run time
keep_prob = tf.placeholder(tf.float32)
# Create model
def multilayer_perceptron(x):
    """Forward pass of the Q-network: three sigmoid hidden layers, each
    followed by dropout, and a linear output layer."""
    h = x
    for w_key, b_key in (('h1', 'b1'), ('h2', 'b2'), ('h3', 'b3')):
        # Fully connected layer with sigmoid activation ...
        h = tf.sigmoid(tf.add(tf.matmul(h, weights[w_key]), biases[b_key]))
        # ... followed by dropout on the hidden activations.
        h = tf.nn.dropout(h, keep_prob)
    # Linear output layer (one value per action).
    return tf.matmul(h, weights['out']) + biases['out']
# Construct the Q-network graph
pred = multilayer_perceptron(X)
# Mean-squared-error loss against the target Q-values, trained with Adam
loss_op = tf.reduce_mean(tf.square(pred-Y))
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
train_op = optimizer.minimize(loss_op)
# Op that initializes all variables
init = tf.global_variables_initializer()
# Set up so we can save the session later
saver = tf.train.Saver()
# And start the tf session
sess = tf.Session()
sess.run(init)
###################################################
def update_twist(twist, q_vals):
    """Pick an epsilon-greedy action from ``q_vals`` and write the matching
    velocity command into ``twist``.

    Actions: 0 = left, 1 = straight, 2 = right.  Returns the chosen action.
    """
    lin_step = 0.5
    ang_step = 0.5
    eps = 0.2
    choices = [0, 1, 2]
    # Explore with probability eps, otherwise exploit the Q estimate.
    if random.random() < eps:
        action = random.choice(choices)
    else:
        action = choices[np.argmax(q_vals[0])]
    if action == 0:
        # turn left while creeping forward
        twist.linear.x = lin_step / 2
        twist.angular.z = ang_step
    elif action == 1:
        # drive straight at the full step
        twist.linear.x = lin_step
        twist.angular.z = 0
    elif action == 2:
        # turn right while creeping forward
        twist.linear.x = lin_step / 2
        twist.angular.z = -ang_step
    return action
def teleport_random():
    """Jump the robot to a uniformly random pose within the map bounds."""
    bound = 8  # the map spans [-8, 8] in both x and y
    pose = Pose()
    pose.position.x = random.uniform(-bound, bound)
    pose.position.y = random.uniform(-bound, bound)
    # Crude orientation sampling: the quaternion z term covers most headings.
    pose.orientation.z = random.uniform(-7, 7)
    pose.orientation.w = 1
    # Short real-time pauses before and after the jump avoid segfaults.
    time.sleep(0.3)
    teleporter.publish(pose)
    time.sleep(0.3)
def calc_reward(angle_to_goal):
    """Scalar reward: -1 on a crash, +1 at the goal, otherwise a small
    penalty proportional to the heading error."""
    if is_crashed:
        # Crash: respawn at a random pose and penalize.
        teleport_random()
        return -1
    if reached_goal(odom_data):
        print("Reached goal!!! HURRAY!")
        time.sleep(1)
        reset_positions()
        return 1
    # Mild shaping term that rewards pointing toward the goal.
    return -abs(angle_to_goal / 20)
def moved_forward(action):
    """Whether ``action`` is the straight-ahead command (action 1)."""
    return action == 1
def close_to_obstacle(state):
    """True when the nearest reading in ``state[0]`` is under the cutoff
    distance of 0.5."""
    threshold = 0.5
    return min(state[0]) < threshold
def sensor_callback(data):
    """
    ROS subscriber callback: stash the latest laser scan ranges in the
    module-level ``sensor_data`` so the control loop can read it.
    """
    global sensor_data
    sensor_data = data.ranges # raw numbers for each angle increment
def odom_callback(data):
    """ROS subscriber callback: stash the latest odometry message in the
    module-level ``odom_data``."""
    global odom_data
    odom_data = data
def crash_callback(data):
    """ROS subscriber callback: mirror the /is_crashed topic into the
    module-level ``is_crashed`` flag."""
    global is_crashed
    is_crashed = bool(data.data)
def correct_Q(action, state, reward, old_Q, next_Q):
    """
    One-step TD target: copy ``old_Q`` and replace the entry for the action
    actually taken with R + gamma * Q(s', a).
    """
    discount = 0.5  # how heavily future value counts
    target = copy.copy(old_Q)
    # Only the taken action's estimate is corrected.
    target[action] = reward + discount * next_Q[action]
    return target
def display_plot(iters, coll_freq, cu_reward):
    """Show collisions (red, left axis) and cumulative reward (blue, right
    axis) against the training iteration."""
    fig, left_ax = plt.subplots()
    left_ax.plot(iters, coll_freq, 'r-')
    left_ax.set_xlabel("Iteration")
    left_ax.set_ylabel("Number of Collisions", color='r')
    right_ax = left_ax.twinx()  # second y-axis sharing the same x-axis
    right_ax.plot(iters, cu_reward, 'b-')
    right_ax.set_ylabel("Cumulative Reward", color='b')
    fig.tight_layout()
    plt.show()
def reached_goal(odometry_data):
    """True when the robot is within 1 unit (per axis) of the fixed goal at
    (x, y) = (7, 7)."""
    goal_x = 7
    goal_y = 7
    tol = 1
    pos = odometry_data.pose.pose.position
    return abs(pos.x - goal_x) < tol and abs(pos.y - goal_y) < tol
def get_angle_to_goal(odometry_data):
    """
    Return the signed heading error (radians, wrapped to [-pi, pi]) between
    the robot's current yaw and the bearing toward the goal at (x, y) = (7, 7).

    Fixes two defects of the original ``np.arctan`` formulation:
      * arctan of the slope loses the quadrant, giving the wrong bearing
        whenever the robot is at x > 7, and
      * it divides by zero when robot_x == 7.
    ``np.arctan2`` handles both correctly.
    """
    # The target is at (x,y) = (7, 7)
    target_x = 7
    target_y = 7
    # Robot position
    robot_x = odometry_data.pose.pose.position.x
    robot_y = odometry_data.pose.pose.position.y
    # Bearing from the robot to the goal, correct in all four quadrants.
    theta = np.arctan2(target_y - robot_y, target_x - robot_x)
    # Yaw we are actually facing, from the odometry quaternion.
    quaternion = (
        odometry_data.pose.pose.orientation.x,
        odometry_data.pose.pose.orientation.y,
        odometry_data.pose.pose.orientation.z,
        odometry_data.pose.pose.orientation.w)
    phi = euler_from_quaternion(quaternion)[2]
    # Wrap the difference into [-pi, pi] so downstream reward shaping sees
    # the smallest-magnitude turn.
    diff = phi - theta
    return np.arctan2(np.sin(diff), np.cos(diff))
def reset_positions():
    """
    Wrapper for the service call to /reset_positions. Sleeps briefly before
    and after the call to avoid random segfaults in the simulator.
    """
    time.sleep(0.3)
    reset_simulation()
    time.sleep(0.3)
def estimate_uncertainty(input_data, n_passes=10, k_prob=0.8):
    """
    Estimate model uncertainty via Monte-Carlo dropout: run the same input
    through the network ``n_passes`` times with fresh Bernoulli dropout masks
    (keep probability ``k_prob``) and return the per-output variance.  High
    variance implies high uncertainty.

    Improvement over the original: predictions are collected in a list and
    stacked once, instead of calling ``np.vstack`` inside the loop, which
    re-copies the whole accumulated array on every pass.
    """
    rows = [sess.run(pred, feed_dict={X: input_data, keep_prob: k_prob})
            for _ in range(n_passes)]
    predictions = np.vstack(rows)
    # One variance per network output (left, forward, right).
    return np.var(predictions, axis=0)
def partial_fit(x_data, y_data):
    """
    Fit the network weights to the given (x, y) training arrays with
    mini-batch SGD: 100 epochs, batch size 200, dropout keep prob 0.9.

    Fixes over the original:
      * the stray debug print of the input shapes is removed,
      * the length check raises ``ValueError`` instead of using ``assert``
        (which is stripped under ``python -O``),
      * when there are fewer samples than one batch, a single batch holding
        all samples is used instead of silently doing no training at all.
    """
    if len(x_data) != len(y_data):
        raise ValueError("x_data and y_data must have the same length")
    N = len(x_data)  # number of training samples
    training_epochs = 100
    display_step = 10
    batch_size = 200
    # Training cycle
    for epoch in range(training_epochs):
        avg_cost = 0.
        # At least one batch, so tiny datasets still train.
        total_batch = max(int(N/batch_size), 1)
        for i in range(total_batch):
            # Get the next batch
            batch_x = x_data[batch_size*i:batch_size*(i+1)]
            batch_y = y_data[batch_size*i:batch_size*(i+1)]
            # Run optimization op (backprop) and cost op (to get loss value)
            _, c = sess.run([train_op, loss_op], feed_dict={X: batch_x, Y: batch_y, keep_prob: 0.9})
            # Compute average loss
            avg_cost += c / total_batch
        # Display logs per epoch step
        if (epoch + 1) % display_step == 0:
            print("Epoch:", '%04d' % (epoch+1), "cost={:.9f}".format(avg_cost))
    print("Optimization Finished!")
def main():
    """Main Q-learning loop: act epsilon-greedily, collect (state, corrected
    Q) pairs in a replay buffer, and retrain the network every
    ``update_interval`` steps until ROS shuts down."""
    # set initial command velocities to 0
    cmd_vel = Twist()
    cmd_pose = Pose() # also initialize a pose for teleportation purposes
    update_interval = 1000 # how many actions to take before retraining
    X_rand = np.vstack([np.array([8*random.random() for i in range(181)]).reshape(1,-1) for i in range(update_interval)]) # random sensor input
    y_rand = np.vstack([np.array([1 for i in range(3)]).reshape(1,-1) for i in range(update_interval)]) # start with equal value on all actions
    # Train on random data initially, just so we can fit to something
    #partial_fit(X_rand,y_rand)
    last_action = 1
    last_state = X_rand[-1] # take the last randomly generated entry to be the "previous state" for initialization
    old_Q = sess.run(pred, feed_dict={X: X_rand, keep_prob: 0.8})
    # initialize replay buffer
    x = X_rand # stores states
    y = y_rand # stores corrected Q-values values
    # variables to plot results at the end
    global iterations
    global collision_frequencies
    global cumulative_reward
    it = 1 # iteration counter
    rospy.sleep(1) # wait a second to be sure we have good state infos
    while not rospy.is_shutdown():
        cf = 0 # reset collision frequency counter
        cr = 0 # reset cumulative reward counter
        print("==> Running")
        for i in range(update_interval):
            # Sensor data updated asynchronously and stored in global var sensor_data
            # Get state (sensor info + goal direction)
            a2g = get_angle_to_goal(odom_data)
            state = np.array( (a2g,) + sensor_data ).reshape(1,-1)
            # Note that sensor data range is [0,4] and angle to goal range is [-pi, pi],
            # so feature scaling won't be necessary
            # calculate Q(s,a) with NN
            Q_values = sess.run(pred, feed_dict={X: state, keep_prob: 0.8})
            # estimate uncertainty using dropout
            #q_variances = estimate_uncertainty(state) # TODO: figure out how to use this
            #print(q_variances)
            # Control accordingly (epsilon-greedy choice, published as a Twist)
            action = update_twist(cmd_vel, Q_values)
            controller.publish(cmd_vel)
            # Get reward from last action
            R = calc_reward(a2g)
            # update things that keep track of results
            if (R == -1):
                cf += 1 # we collided, iterate the counter
            cr += R # add the reward to our running total
            # Calculate correct Q(s,a) from last action
            # Q(s,a) = R + gamma*Q(s+1,a+1)
            corrected_Q = correct_Q(last_action, last_state, R, old_Q[0], Q_values[0])
            # Update replay buffer with correct Q(s,a)
            x = np.vstack((x, last_state))
            y = np.vstack((y, corrected_Q))
            # remember what we did this turn so we can see its result in the next step
            last_state = state
            last_action = action
            old_Q = Q_values
            rate.sleep()
        # Drop old data from the replay buffer (keep only the newest window)
        x = x[update_interval:]
        y = y[update_interval:]
        # Update network from replay buffer
        print("==> Retraining")
        partial_fit(x,y)
        # Reset the positions
        #reset_positions()
        teleport_random()
        # add collision frequency data for the end-of-run plot
        iterations.append(it)
        collision_frequencies.append(cf)
        cumulative_reward.append(cr)
        print("")
        print("Iteration: %s" % iterations)
        print("Coll Freq: %s" % collision_frequencies)
        print("Reward: %s" % cumulative_reward)
        print("")
        it +=1
if __name__=='__main__':
    try:
        # Initialize ros node and publishers/subscribers
        rospy.init_node('rl_controller', anonymous=False)
        controller = rospy.Publisher('/robot_0/cmd_vel', Twist, queue_size=10)
        teleporter = rospy.Publisher('/robot_0/cmd_pose', Pose, queue_size=10)
        odometer = rospy.Subscriber('/robot_0/base_pose_ground_truth', Odometry, odom_callback) # so we know angle to the goal
        sensor = rospy.Subscriber('/robot_0/base_scan', LaserScan, sensor_callback)
        crash_tracker = rospy.Subscriber('/robot_0/is_crashed', Int8, crash_callback)
        reset_simulation = rospy.ServiceProxy('reset_positions', Empty)
        rate = rospy.Rate(10) # in hz
        main()
    except rospy.ROSInterruptException:
        pass
    finally: # Always do these things before quitting
        # save the model parameters (overwrites any previous checkpoint)
        save_name = "%s/tmp/RLCA_saved_model" % base_dir
        #save_name += time.strftime("%Y%m%d%H%M") # add a unique timestamp
        saver.save(sess, save_name)
        print("\n\nSaved Parameters as %s\n\n" % save_name)
        # display plots of collision frequency and cumulative reward
        display_plot(iterations, collision_frequencies, cumulative_reward)
        # close the tf session
        sess.close()
| [
"sensor_msgs.msg.LaserScan",
"rospy.init_node",
"time.sleep",
"numpy.array",
"rospy.Rate",
"tensorflow.nn.dropout",
"geometry_msgs.msg.Pose",
"copy.copy",
"nav_msgs.msg.Odometry",
"tensorflow.random_normal",
"tensorflow.placeholder",
"tensorflow.Session",
"rospy.ServiceProxy",
"tensorflow.... | [((1228, 1268), 'tensorflow.placeholder', 'tf.placeholder', (['"""float"""', '[None, n_input]'], {}), "('float', [None, n_input])\n", (1242, 1268), True, 'import tensorflow as tf\n'), ((1273, 1315), 'tensorflow.placeholder', 'tf.placeholder', (['"""float"""', '[None, n_classes]'], {}), "('float', [None, n_classes])\n", (1287, 1315), True, 'import tensorflow as tf\n'), ((1919, 1945), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {}), '(tf.float32)\n', (1933, 1945), True, 'import tensorflow as tf\n'), ((2945, 2996), 'tensorflow.train.AdamOptimizer', 'tf.train.AdamOptimizer', ([], {'learning_rate': 'learning_rate'}), '(learning_rate=learning_rate)\n', (2967, 2996), True, 'import tensorflow as tf\n'), ((3073, 3106), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (3104, 3106), True, 'import tensorflow as tf\n'), ((3158, 3174), 'tensorflow.train.Saver', 'tf.train.Saver', ([], {}), '()\n', (3172, 3174), True, 'import tensorflow as tf\n'), ((3210, 3222), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (3220, 3222), True, 'import tensorflow as tf\n'), ((663, 674), 'sensor_msgs.msg.LaserScan', 'LaserScan', ([], {}), '()\n', (672, 674), False, 'from sensor_msgs.msg import LaserScan\n'), ((2129, 2148), 'tensorflow.sigmoid', 'tf.sigmoid', (['layer_1'], {}), '(layer_1)\n', (2139, 2148), True, 'import tensorflow as tf\n'), ((2163, 2196), 'tensorflow.nn.dropout', 'tf.nn.dropout', (['layer_1', 'keep_prob'], {}), '(layer_1, keep_prob)\n', (2176, 2196), True, 'import tensorflow as tf\n'), ((2378, 2397), 'tensorflow.sigmoid', 'tf.sigmoid', (['layer_2'], {}), '(layer_2)\n', (2388, 2397), True, 'import tensorflow as tf\n'), ((2412, 2445), 'tensorflow.nn.dropout', 'tf.nn.dropout', (['layer_2', 'keep_prob'], {}), '(layer_2, keep_prob)\n', (2425, 2445), True, 'import tensorflow as tf\n'), ((2594, 2613), 'tensorflow.sigmoid', 'tf.sigmoid', (['layer_3'], {}), '(layer_3)\n', (2604, 2613), True, 'import 
tensorflow as tf\n'), ((2628, 2661), 'tensorflow.nn.dropout', 'tf.nn.dropout', (['layer_3', 'keep_prob'], {}), '(layer_3, keep_prob)\n', (2641, 2661), True, 'import tensorflow as tf\n'), ((2914, 2933), 'tensorflow.square', 'tf.square', (['(pred - Y)'], {}), '(pred - Y)\n', (2923, 2933), True, 'import tensorflow as tf\n'), ((3536, 3551), 'random.random', 'random.random', ([], {}), '()\n', (3549, 3551), False, 'import random\n'), ((4479, 4485), 'geometry_msgs.msg.Pose', 'Pose', ([], {}), '()\n', (4483, 4485), False, 'from geometry_msgs.msg import Pose\n'), ((4512, 4540), 'random.uniform', 'random.uniform', (['x_min', 'x_max'], {}), '(x_min, x_max)\n', (4526, 4540), False, 'import random\n'), ((4567, 4595), 'random.uniform', 'random.uniform', (['y_min', 'y_max'], {}), '(y_min, y_max)\n', (4581, 4595), False, 'import random\n'), ((4626, 4647), 'random.uniform', 'random.uniform', (['(-7)', '(7)'], {}), '(-7, 7)\n', (4640, 4647), False, 'import random\n'), ((4801, 4816), 'time.sleep', 'time.sleep', (['(0.3)'], {}), '(0.3)\n', (4811, 4816), False, 'import time\n'), ((4854, 4869), 'time.sleep', 'time.sleep', (['(0.3)'], {}), '(0.3)\n', (4864, 4869), False, 'import time\n'), ((6410, 6426), 'copy.copy', 'copy.copy', (['old_Q'], {}), '(old_Q)\n', (6419, 6426), False, 'import copy\n'), ((6710, 6724), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (6722, 6724), True, 'import matplotlib.pyplot as plt\n'), ((7034, 7044), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (7042, 7044), True, 'import matplotlib.pyplot as plt\n'), ((7849, 7903), 'numpy.arctan', 'np.arctan', (['((target_y - robot_y) / (target_x - robot_x))'], {}), '((target_y - robot_y) / (target_x - robot_x))\n', (7858, 7903), True, 'import numpy as np\n'), ((8174, 8207), 'tf.transformations.euler_from_quaternion', 'euler_from_quaternion', (['quaternion'], {}), '(quaternion)\n', (8195, 8207), False, 'from tf.transformations import euler_from_quaternion\n'), ((8387, 8402), 'time.sleep', 
'time.sleep', (['(0.3)'], {}), '(0.3)\n', (8397, 8402), False, 'import time\n'), ((8430, 8445), 'time.sleep', 'time.sleep', (['(0.3)'], {}), '(0.3)\n', (8440, 8445), False, 'import time\n'), ((9204, 9231), 'numpy.var', 'np.var', (['predictions'], {'axis': '(0)'}), '(predictions, axis=0)\n', (9210, 9231), True, 'import numpy as np\n'), ((10432, 10439), 'geometry_msgs.msg.Twist', 'Twist', ([], {}), '()\n', (10437, 10439), False, 'from geometry_msgs.msg import Twist\n'), ((10455, 10461), 'geometry_msgs.msg.Pose', 'Pose', ([], {}), '()\n', (10459, 10461), False, 'from geometry_msgs.msg import Pose\n'), ((11479, 11493), 'rospy.sleep', 'rospy.sleep', (['(1)'], {}), '(1)\n', (11490, 11493), False, 'import rospy\n'), ((694, 704), 'nav_msgs.msg.Odometry', 'Odometry', ([], {}), '()\n', (702, 704), False, 'from nav_msgs.msg import Odometry\n'), ((1382, 1421), 'tensorflow.random_normal', 'tf.random_normal', (['[n_input, n_hidden_1]'], {}), '([n_input, n_hidden_1])\n', (1398, 1421), True, 'import tensorflow as tf\n'), ((1450, 1492), 'tensorflow.random_normal', 'tf.random_normal', (['[n_hidden_1, n_hidden_2]'], {}), '([n_hidden_1, n_hidden_2])\n', (1466, 1492), True, 'import tensorflow as tf\n'), ((1521, 1563), 'tensorflow.random_normal', 'tf.random_normal', (['[n_hidden_2, n_hidden_3]'], {}), '([n_hidden_2, n_hidden_3])\n', (1537, 1563), True, 'import tensorflow as tf\n'), ((1593, 1634), 'tensorflow.random_normal', 'tf.random_normal', (['[n_hidden_3, n_classes]'], {}), '([n_hidden_3, n_classes])\n', (1609, 1634), True, 'import tensorflow as tf\n'), ((1675, 1705), 'tensorflow.random_normal', 'tf.random_normal', (['[n_hidden_1]'], {}), '([n_hidden_1])\n', (1691, 1705), True, 'import tensorflow as tf\n'), ((1734, 1764), 'tensorflow.random_normal', 'tf.random_normal', (['[n_hidden_2]'], {}), '([n_hidden_2])\n', (1750, 1764), True, 'import tensorflow as tf\n'), ((1793, 1823), 'tensorflow.random_normal', 'tf.random_normal', (['[n_hidden_3]'], {}), '([n_hidden_3])\n', (1809, 1823), 
True, 'import tensorflow as tf\n'), ((1853, 1882), 'tensorflow.random_normal', 'tf.random_normal', (['[n_classes]'], {}), '([n_classes])\n', (1869, 1882), True, 'import tensorflow as tf\n'), ((2072, 2099), 'tensorflow.matmul', 'tf.matmul', (['x', "weights['h1']"], {}), "(x, weights['h1'])\n", (2081, 2099), True, 'import tensorflow as tf\n'), ((2315, 2348), 'tensorflow.matmul', 'tf.matmul', (['layer_1', "weights['h2']"], {}), "(layer_1, weights['h2'])\n", (2324, 2348), True, 'import tensorflow as tf\n'), ((2531, 2564), 'tensorflow.matmul', 'tf.matmul', (['layer_2', "weights['h3']"], {}), "(layer_2, weights['h3'])\n", (2540, 2564), True, 'import tensorflow as tf\n'), ((2737, 2771), 'tensorflow.matmul', 'tf.matmul', (['layer_3', "weights['out']"], {}), "(layer_3, weights['out'])\n", (2746, 2771), True, 'import tensorflow as tf\n'), ((3726, 3757), 'random.choice', 'random.choice', (['possible_actions'], {}), '(possible_actions)\n', (3739, 3757), False, 'import random\n'), ((3820, 3840), 'numpy.argmax', 'np.argmax', (['q_vals[0]'], {}), '(q_vals[0])\n', (3829, 3840), True, 'import numpy as np\n'), ((9063, 9100), 'numpy.vstack', 'np.vstack', (['(predictions, Q_predicted)'], {}), '((predictions, Q_predicted))\n', (9072, 9100), True, 'import numpy as np\n'), ((11562, 11581), 'rospy.is_shutdown', 'rospy.is_shutdown', ([], {}), '()\n', (11579, 11581), False, 'import rospy\n'), ((14141, 14190), 'rospy.init_node', 'rospy.init_node', (['"""rl_controller"""'], {'anonymous': '(False)'}), "('rl_controller', anonymous=False)\n", (14156, 14190), False, 'import rospy\n'), ((14212, 14269), 'rospy.Publisher', 'rospy.Publisher', (['"""/robot_0/cmd_vel"""', 'Twist'], {'queue_size': '(10)'}), "('/robot_0/cmd_vel', Twist, queue_size=10)\n", (14227, 14269), False, 'import rospy\n'), ((14291, 14348), 'rospy.Publisher', 'rospy.Publisher', (['"""/robot_0/cmd_pose"""', 'Pose'], {'queue_size': '(10)'}), "('/robot_0/cmd_pose', Pose, queue_size=10)\n", (14306, 14348), False, 'import rospy\n'), 
((14368, 14444), 'rospy.Subscriber', 'rospy.Subscriber', (['"""/robot_0/base_pose_ground_truth"""', 'Odometry', 'odom_callback'], {}), "('/robot_0/base_pose_ground_truth', Odometry, odom_callback)\n", (14384, 14444), False, 'import rospy\n'), ((14494, 14560), 'rospy.Subscriber', 'rospy.Subscriber', (['"""/robot_0/base_scan"""', 'LaserScan', 'sensor_callback'], {}), "('/robot_0/base_scan', LaserScan, sensor_callback)\n", (14510, 14560), False, 'import rospy\n'), ((14585, 14646), 'rospy.Subscriber', 'rospy.Subscriber', (['"""/robot_0/is_crashed"""', 'Int8', 'crash_callback'], {}), "('/robot_0/is_crashed', Int8, crash_callback)\n", (14601, 14646), False, 'import rospy\n'), ((14674, 14718), 'rospy.ServiceProxy', 'rospy.ServiceProxy', (['"""reset_positions"""', 'Empty'], {}), "('reset_positions', Empty)\n", (14692, 14718), False, 'import rospy\n'), ((14734, 14748), 'rospy.Rate', 'rospy.Rate', (['(10)'], {}), '(10)\n', (14744, 14748), False, 'import rospy\n'), ((5185, 5198), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (5195, 5198), False, 'import time\n'), ((13105, 13131), 'numpy.vstack', 'np.vstack', (['(x, last_state)'], {}), '((x, last_state))\n', (13114, 13131), True, 'import numpy as np\n'), ((13148, 13175), 'numpy.vstack', 'np.vstack', (['(y, corrected_Q)'], {}), '((y, corrected_Q))\n', (13157, 13175), True, 'import numpy as np\n'), ((11963, 11993), 'numpy.array', 'np.array', (['((a2g,) + sensor_data)'], {}), '((a2g,) + sensor_data)\n', (11971, 11993), True, 'import numpy as np\n'), ((10631, 10646), 'random.random', 'random.random', ([], {}), '()\n', (10644, 10646), False, 'import random\n')] |
import numpy as np
class Predefined_interp():
def __init__(self,x0,x):
self.indexes = self.Find_interp_indexes(x0,x)
self.n = len(self.indexes)
if self.n == 1:
self.x_diff_ratios = (x0-x[self.indexes[0]])/(x[self.indexes[0]+1] - x[self.indexes[0]])
else:
self.x_diff_ratios = [(x0[i]-x[self.indexes[i]])/(x[self.indexes[i]+1] - x[self.indexes[i]]) for i in range(self.n)]
def Find_interp_indexes(self,x0, x):
indexes = []
try:
len(x0)
except:
x0 = [x0]
for item_x0 in x0:
idx = 0
while x[idx] < item_x0:
idx += 1
indexes.append(idx-1)
return indexes
class Predefined_interp_for_float(Predefined_interp):
def __call__(self,y):
return y[self.indexes[0]]+((y[self.indexes[0]+1] - y[self.indexes[0]])*self.x_diff_ratios)
class Predefined_interp_for_list(Predefined_interp):
def __call__(self,y):
return [y[self.indexes[i]]+((y[self.indexes[i]+1] - y[self.indexes[i]])*self.x_diff_ratios[i]) for i in range(self.n)]
class Interpolate_1D():
def __init__(self, x, y, start_ahead_idx=0):
"""Interpolation looking for values in vicinity of where it has found the answer before."""
self.x = np.array(x) # placeholder for x coordinates
self.y = np.array(y) # placeholder for y coordinates
self.x_min = self.x[0]
self.x_max = self.x[-1]
self.ahead_idx = start_ahead_idx # where was the last found x coordinate
self.previous_call_x = x[0] # last lookup value of x for case we actually need to go back
def __call__(self,x):
try:
length=len(x)
return self.calculate_list(x,length)
except:
return self.calculate_item(x)
def calculate_item(self,x):
if x > self.previous_call_x: # check if x increased between calls
while x > self.x[self.ahead_idx+1] and x < self.x_max:
self.ahead_idx += 1
elif x < self.previous_call_x:
while x < self.x[self.ahead_idx+1] and x > self.x_min:
self.ahead_idx -= 1
self.previous_call_x = x
return self.y[self.ahead_idx]+((self.y[self.ahead_idx+1] - self.y[self.ahead_idx])/(self.x[self.ahead_idx+1] - self.x[self.ahead_idx]))*(x-self.x[self.ahead_idx])
def calculate_list(self,x,length):
y = np.zeros(length)
for i in range(length):
y[i] = self.calculate_item(x[i])
return y
| [
"numpy.array",
"numpy.zeros"
] | [((1310, 1321), 'numpy.array', 'np.array', (['x'], {}), '(x)\n', (1318, 1321), True, 'import numpy as np\n'), ((1372, 1383), 'numpy.array', 'np.array', (['y'], {}), '(y)\n', (1380, 1383), True, 'import numpy as np\n'), ((2446, 2462), 'numpy.zeros', 'np.zeros', (['length'], {}), '(length)\n', (2454, 2462), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
"""
Defines the unit tests for the :mod:`colour.models.rgb.transfer_functions.\
nikon_nlog` module.
"""
import numpy as np
import unittest
from colour.models.rgb.transfer_functions import (
log_encoding_NLog,
log_decoding_NLog,
)
from colour.utilities import domain_range_scale, ignore_numpy_errors
__author__ = 'Colour Developers'
__copyright__ = 'Copyright (C) 2013-2020 - Colour Developers'
__license__ = 'New BSD License - https://opensource.org/licenses/BSD-3-Clause'
__maintainer__ = 'Colour Developers'
__email__ = '<EMAIL>'
__status__ = 'Production'
__all__ = [
'TestLogEncoding_VLog',
'TestLogDecoding_VLog',
]
class TestLogEncoding_VLog(unittest.TestCase):
"""
Defines :func:`colour.models.rgb.transfer_functions.nikon_nlog.\
log_encoding_NLog` definition unit tests methods.
"""
def test_log_encoding_NLog(self):
"""
Tests :func:`colour.models.rgb.transfer_functions.nikon_nlog.\
log_encoding_NLog` definition.
"""
self.assertAlmostEqual(
log_encoding_NLog(0.0), 0.124372627896372, places=7)
self.assertAlmostEqual(
log_encoding_NLog(0.18), 0.363667770117139, places=7)
self.assertAlmostEqual(
log_encoding_NLog(0.18, 12), 0.363667770117139, places=7)
self.assertAlmostEqual(
log_encoding_NLog(0.18, 10, False), 0.351634850262366, places=7)
self.assertAlmostEqual(
log_encoding_NLog(0.18, 10, False, False),
0.337584957293328,
places=7)
self.assertAlmostEqual(
log_encoding_NLog(1.0), 0.605083088954056, places=7)
def test_n_dimensional_log_encoding_NLog(self):
"""
Tests :func:`colour.models.rgb.transfer_functions.nikon_nlog.\
log_encoding_NLog` definition n-dimensional arrays support.
"""
L_in = 0.18
V_out = log_encoding_NLog(L_in)
L_in = np.tile(L_in, 6)
V_out = np.tile(V_out, 6)
np.testing.assert_almost_equal(
log_encoding_NLog(L_in), V_out, decimal=7)
L_in = np.reshape(L_in, (2, 3))
V_out = np.reshape(V_out, (2, 3))
np.testing.assert_almost_equal(
log_encoding_NLog(L_in), V_out, decimal=7)
L_in = np.reshape(L_in, (2, 3, 1))
V_out = np.reshape(V_out, (2, 3, 1))
np.testing.assert_almost_equal(
log_encoding_NLog(L_in), V_out, decimal=7)
def test_domain_range_scale_log_encoding_NLog(self):
"""
Tests :func:`colour.models.rgb.transfer_functions.nikon_nlog.\
log_encoding_NLog` definition domain and range scale support.
"""
L_in = 0.18
V_out = log_encoding_NLog(L_in)
d_r = (('reference', 1), (1, 1), (100, 100))
for scale, factor in d_r:
with domain_range_scale(scale):
np.testing.assert_almost_equal(
log_encoding_NLog(L_in * factor),
V_out * factor,
decimal=7)
@ignore_numpy_errors
def test_nan_log_encoding_NLog(self):
"""
Tests :func:`colour.models.rgb.transfer_functions.nikon_nlog.\
log_encoding_NLog` definition nan support.
"""
log_encoding_NLog(np.array([-1.0, 0.0, 1.0, -np.inf, np.inf, np.nan]))
class TestLogDecoding_VLog(unittest.TestCase):
"""
Defines :func:`colour.models.rgb.transfer_functions.nikon_nlog.\
log_decoding_NLog` definition unit tests methods.
"""
def test_log_decoding_NLog(self):
"""
Tests :func:`colour.models.rgb.transfer_functions.nikon_nlog.\
log_decoding_NLog` definition.
"""
self.assertAlmostEqual(
log_decoding_NLog(0.124372627896372), 0.0, places=7)
self.assertAlmostEqual(
log_decoding_NLog(0.363667770117139), 0.18, places=7)
self.assertAlmostEqual(
log_decoding_NLog(0.363667770117139, 12), 0.18, places=7)
self.assertAlmostEqual(
log_decoding_NLog(0.351634850262366, 10, False), 0.18, places=7)
self.assertAlmostEqual(
log_decoding_NLog(0.337584957293328, 10, False, False),
0.18,
places=7)
self.assertAlmostEqual(
log_decoding_NLog(0.605083088954056), 1.0, places=7)
def test_n_dimensional_log_decoding_NLog(self):
"""
Tests :func:`colour.models.rgb.transfer_functions.nikon_nlog.\
log_decoding_NLog` definition n-dimensional arrays support.
"""
V_out = 0.363667770117139
L_in = log_decoding_NLog(V_out)
V_out = np.tile(V_out, 6)
L_in = np.tile(L_in, 6)
np.testing.assert_almost_equal(
log_decoding_NLog(V_out), L_in, decimal=7)
V_out = np.reshape(V_out, (2, 3))
L_in = np.reshape(L_in, (2, 3))
np.testing.assert_almost_equal(
log_decoding_NLog(V_out), L_in, decimal=7)
V_out = np.reshape(V_out, (2, 3, 1))
L_in = np.reshape(L_in, (2, 3, 1))
np.testing.assert_almost_equal(
log_decoding_NLog(V_out), L_in, decimal=7)
def test_domain_range_scale_log_decoding_NLog(self):
"""
Tests :func:`colour.models.rgb.transfer_functions.nikon_nlog.\
log_decoding_NLog` definition domain and range scale support.
"""
V_out = 0.363667770117139
L_in = log_decoding_NLog(V_out)
d_r = (('reference', 1), (1, 1), (100, 100))
for scale, factor in d_r:
with domain_range_scale(scale):
np.testing.assert_almost_equal(
log_decoding_NLog(V_out * factor),
L_in * factor,
decimal=7)
@ignore_numpy_errors
def test_nan_log_decoding_NLog(self):
"""
Tests :func:`colour.models.rgb.transfer_functions.nikon_nlog.\
log_decoding_NLog` definition nan support.
"""
log_decoding_NLog(np.array([-1.0, 0.0, 1.0, -np.inf, np.inf, np.nan]))
if __name__ == '__main__':
unittest.main()
| [
"numpy.tile",
"numpy.reshape",
"colour.models.rgb.transfer_functions.log_decoding_NLog",
"colour.models.rgb.transfer_functions.log_encoding_NLog",
"colour.utilities.domain_range_scale",
"numpy.array",
"unittest.main"
] | [((6033, 6048), 'unittest.main', 'unittest.main', ([], {}), '()\n', (6046, 6048), False, 'import unittest\n'), ((1907, 1930), 'colour.models.rgb.transfer_functions.log_encoding_NLog', 'log_encoding_NLog', (['L_in'], {}), '(L_in)\n', (1924, 1930), False, 'from colour.models.rgb.transfer_functions import log_encoding_NLog, log_decoding_NLog\n'), ((1947, 1963), 'numpy.tile', 'np.tile', (['L_in', '(6)'], {}), '(L_in, 6)\n', (1954, 1963), True, 'import numpy as np\n'), ((1980, 1997), 'numpy.tile', 'np.tile', (['V_out', '(6)'], {}), '(V_out, 6)\n', (1987, 1997), True, 'import numpy as np\n'), ((2109, 2133), 'numpy.reshape', 'np.reshape', (['L_in', '(2, 3)'], {}), '(L_in, (2, 3))\n', (2119, 2133), True, 'import numpy as np\n'), ((2150, 2175), 'numpy.reshape', 'np.reshape', (['V_out', '(2, 3)'], {}), '(V_out, (2, 3))\n', (2160, 2175), True, 'import numpy as np\n'), ((2287, 2314), 'numpy.reshape', 'np.reshape', (['L_in', '(2, 3, 1)'], {}), '(L_in, (2, 3, 1))\n', (2297, 2314), True, 'import numpy as np\n'), ((2331, 2359), 'numpy.reshape', 'np.reshape', (['V_out', '(2, 3, 1)'], {}), '(V_out, (2, 3, 1))\n', (2341, 2359), True, 'import numpy as np\n'), ((2707, 2730), 'colour.models.rgb.transfer_functions.log_encoding_NLog', 'log_encoding_NLog', (['L_in'], {}), '(L_in)\n', (2724, 2730), False, 'from colour.models.rgb.transfer_functions import log_encoding_NLog, log_decoding_NLog\n'), ((4574, 4598), 'colour.models.rgb.transfer_functions.log_decoding_NLog', 'log_decoding_NLog', (['V_out'], {}), '(V_out)\n', (4591, 4598), False, 'from colour.models.rgb.transfer_functions import log_encoding_NLog, log_decoding_NLog\n'), ((4616, 4633), 'numpy.tile', 'np.tile', (['V_out', '(6)'], {}), '(V_out, 6)\n', (4623, 4633), True, 'import numpy as np\n'), ((4649, 4665), 'numpy.tile', 'np.tile', (['L_in', '(6)'], {}), '(L_in, 6)\n', (4656, 4665), True, 'import numpy as np\n'), ((4778, 4803), 'numpy.reshape', 'np.reshape', (['V_out', '(2, 3)'], {}), '(V_out, (2, 3))\n', (4788, 4803), True, 
'import numpy as np\n'), ((4819, 4843), 'numpy.reshape', 'np.reshape', (['L_in', '(2, 3)'], {}), '(L_in, (2, 3))\n', (4829, 4843), True, 'import numpy as np\n'), ((4956, 4984), 'numpy.reshape', 'np.reshape', (['V_out', '(2, 3, 1)'], {}), '(V_out, (2, 3, 1))\n', (4966, 4984), True, 'import numpy as np\n'), ((5000, 5027), 'numpy.reshape', 'np.reshape', (['L_in', '(2, 3, 1)'], {}), '(L_in, (2, 3, 1))\n', (5010, 5027), True, 'import numpy as np\n'), ((5388, 5412), 'colour.models.rgb.transfer_functions.log_decoding_NLog', 'log_decoding_NLog', (['V_out'], {}), '(V_out)\n', (5405, 5412), False, 'from colour.models.rgb.transfer_functions import log_encoding_NLog, log_decoding_NLog\n'), ((1058, 1080), 'colour.models.rgb.transfer_functions.log_encoding_NLog', 'log_encoding_NLog', (['(0.0)'], {}), '(0.0)\n', (1075, 1080), False, 'from colour.models.rgb.transfer_functions import log_encoding_NLog, log_decoding_NLog\n'), ((1156, 1179), 'colour.models.rgb.transfer_functions.log_encoding_NLog', 'log_encoding_NLog', (['(0.18)'], {}), '(0.18)\n', (1173, 1179), False, 'from colour.models.rgb.transfer_functions import log_encoding_NLog, log_decoding_NLog\n'), ((1255, 1282), 'colour.models.rgb.transfer_functions.log_encoding_NLog', 'log_encoding_NLog', (['(0.18)', '(12)'], {}), '(0.18, 12)\n', (1272, 1282), False, 'from colour.models.rgb.transfer_functions import log_encoding_NLog, log_decoding_NLog\n'), ((1358, 1392), 'colour.models.rgb.transfer_functions.log_encoding_NLog', 'log_encoding_NLog', (['(0.18)', '(10)', '(False)'], {}), '(0.18, 10, False)\n', (1375, 1392), False, 'from colour.models.rgb.transfer_functions import log_encoding_NLog, log_decoding_NLog\n'), ((1468, 1509), 'colour.models.rgb.transfer_functions.log_encoding_NLog', 'log_encoding_NLog', (['(0.18)', '(10)', '(False)', '(False)'], {}), '(0.18, 10, False, False)\n', (1485, 1509), False, 'from colour.models.rgb.transfer_functions import log_encoding_NLog, log_decoding_NLog\n'), ((1609, 1631), 
'colour.models.rgb.transfer_functions.log_encoding_NLog', 'log_encoding_NLog', (['(1.0)'], {}), '(1.0)\n', (1626, 1631), False, 'from colour.models.rgb.transfer_functions import log_encoding_NLog, log_decoding_NLog\n'), ((2050, 2073), 'colour.models.rgb.transfer_functions.log_encoding_NLog', 'log_encoding_NLog', (['L_in'], {}), '(L_in)\n', (2067, 2073), False, 'from colour.models.rgb.transfer_functions import log_encoding_NLog, log_decoding_NLog\n'), ((2228, 2251), 'colour.models.rgb.transfer_functions.log_encoding_NLog', 'log_encoding_NLog', (['L_in'], {}), '(L_in)\n', (2245, 2251), False, 'from colour.models.rgb.transfer_functions import log_encoding_NLog, log_decoding_NLog\n'), ((2412, 2435), 'colour.models.rgb.transfer_functions.log_encoding_NLog', 'log_encoding_NLog', (['L_in'], {}), '(L_in)\n', (2429, 2435), False, 'from colour.models.rgb.transfer_functions import log_encoding_NLog, log_decoding_NLog\n'), ((3265, 3316), 'numpy.array', 'np.array', (['[-1.0, 0.0, 1.0, -np.inf, np.inf, np.nan]'], {}), '([-1.0, 0.0, 1.0, -np.inf, np.inf, np.nan])\n', (3273, 3316), True, 'import numpy as np\n'), ((3712, 3748), 'colour.models.rgb.transfer_functions.log_decoding_NLog', 'log_decoding_NLog', (['(0.124372627896372)'], {}), '(0.124372627896372)\n', (3729, 3748), False, 'from colour.models.rgb.transfer_functions import log_encoding_NLog, log_decoding_NLog\n'), ((3810, 3846), 'colour.models.rgb.transfer_functions.log_decoding_NLog', 'log_decoding_NLog', (['(0.363667770117139)'], {}), '(0.363667770117139)\n', (3827, 3846), False, 'from colour.models.rgb.transfer_functions import log_encoding_NLog, log_decoding_NLog\n'), ((3909, 3949), 'colour.models.rgb.transfer_functions.log_decoding_NLog', 'log_decoding_NLog', (['(0.363667770117139)', '(12)'], {}), '(0.363667770117139, 12)\n', (3926, 3949), False, 'from colour.models.rgb.transfer_functions import log_encoding_NLog, log_decoding_NLog\n'), ((4012, 4059), 'colour.models.rgb.transfer_functions.log_decoding_NLog', 
'log_decoding_NLog', (['(0.351634850262366)', '(10)', '(False)'], {}), '(0.351634850262366, 10, False)\n', (4029, 4059), False, 'from colour.models.rgb.transfer_functions import log_encoding_NLog, log_decoding_NLog\n'), ((4122, 4176), 'colour.models.rgb.transfer_functions.log_decoding_NLog', 'log_decoding_NLog', (['(0.337584957293328)', '(10)', '(False)', '(False)'], {}), '(0.337584957293328, 10, False, False)\n', (4139, 4176), False, 'from colour.models.rgb.transfer_functions import log_encoding_NLog, log_decoding_NLog\n'), ((4263, 4299), 'colour.models.rgb.transfer_functions.log_decoding_NLog', 'log_decoding_NLog', (['(0.605083088954056)'], {}), '(0.605083088954056)\n', (4280, 4299), False, 'from colour.models.rgb.transfer_functions import log_encoding_NLog, log_decoding_NLog\n'), ((4718, 4742), 'colour.models.rgb.transfer_functions.log_decoding_NLog', 'log_decoding_NLog', (['V_out'], {}), '(V_out)\n', (4735, 4742), False, 'from colour.models.rgb.transfer_functions import log_encoding_NLog, log_decoding_NLog\n'), ((4896, 4920), 'colour.models.rgb.transfer_functions.log_decoding_NLog', 'log_decoding_NLog', (['V_out'], {}), '(V_out)\n', (4913, 4920), False, 'from colour.models.rgb.transfer_functions import log_encoding_NLog, log_decoding_NLog\n'), ((5080, 5104), 'colour.models.rgb.transfer_functions.log_decoding_NLog', 'log_decoding_NLog', (['V_out'], {}), '(V_out)\n', (5097, 5104), False, 'from colour.models.rgb.transfer_functions import log_encoding_NLog, log_decoding_NLog\n'), ((5947, 5998), 'numpy.array', 'np.array', (['[-1.0, 0.0, 1.0, -np.inf, np.inf, np.nan]'], {}), '([-1.0, 0.0, 1.0, -np.inf, np.inf, np.nan])\n', (5955, 5998), True, 'import numpy as np\n'), ((2836, 2861), 'colour.utilities.domain_range_scale', 'domain_range_scale', (['scale'], {}), '(scale)\n', (2854, 2861), False, 'from colour.utilities import domain_range_scale, ignore_numpy_errors\n'), ((5518, 5543), 'colour.utilities.domain_range_scale', 'domain_range_scale', (['scale'], {}), 
'(scale)\n', (5536, 5543), False, 'from colour.utilities import domain_range_scale, ignore_numpy_errors\n'), ((2931, 2963), 'colour.models.rgb.transfer_functions.log_encoding_NLog', 'log_encoding_NLog', (['(L_in * factor)'], {}), '(L_in * factor)\n', (2948, 2963), False, 'from colour.models.rgb.transfer_functions import log_encoding_NLog, log_decoding_NLog\n'), ((5613, 5646), 'colour.models.rgb.transfer_functions.log_decoding_NLog', 'log_decoding_NLog', (['(V_out * factor)'], {}), '(V_out * factor)\n', (5630, 5646), False, 'from colour.models.rgb.transfer_functions import log_encoding_NLog, log_decoding_NLog\n')] |
""" Majority of this code was copied directly from <NAME>'s gist:
https://gist.github.com/karpathy/a4166c7fe253700972fcbc77e4ea32c5 """
""" Trains an agent with (stochastic) Policy Gradients on Pong. Uses OpenAI Gym. """
import numpy as np
import pickle
import gym
from gym import wrappers
# hyperparameters to tune
H = 200 # number of hidden layer neurons
batch_size = 10 # used to perform a RMS prop param update every batch_size steps
learning_rate = 1e-3 # learning rate used in RMS prop
gamma = 0.99 # discount factor for reward
decay_rate = 0.99 # decay factor for RMSProp leaky sum of grad^2
# Config flags - video output and res
resume = False # resume training from previous checkpoint (from save.p file)?
render = False # render video output?
# model initialization
D = 75 * 80 # input dimensionality: 75x80 grid
if resume:
model = pickle.load(open('save.p', 'rb'))
else:
model = {}
model['W1'] = np.random.randn(H,D) / np.sqrt(D) # "Xavier" initialization - Shape will be H x D
model['W2'] = np.random.randn(H) / np.sqrt(H) # Shape will be H
grad_buffer = { k : np.zeros_like(v) for k,v in model.items() } # update buffers that add up gradients over a batch
rmsprop_cache = { k : np.zeros_like(v) for k,v in model.items() } # rmsprop memory
def sigmoid(x):
return 1.0 / (1.0 + np.exp(-x)) # sigmoid "squashing" function to interval [0,1]
def prepro(I):
""" prepro 210x160x3 uint8 frame into 6000 (75x80) 1D float vector """
I = I[35:185] # crop - remove 35px from start & 25px from end of image in x, to reduce redundant parts of image (i.e. after ball passes paddle)
I = I[::2,::2,0] # downsample by factor of 2.
I[I == 144] = 0 # erase background (background type 1)
I[I == 109] = 0 # erase background (background type 2)
I[I != 0] = 1 # everything else (paddles, ball) just set to 1. this makes the image grayscale effectively
return I.astype(np.float).ravel() # ravel flattens an array and collapses it into a column vector
def discount_rewards(r):
""" take 1D float array of rewards and compute discounted reward """
""" this function discounts from the action closest to the end of the completed game backwards
so that the most recent action has a greater weight """
discounted_r = np.zeros_like(r)
running_add = 0
for t in reversed(range(0, r.size)): # xrange is no longer supported in Python 3
if r[t] != 0: running_add = 0 # reset the sum, since this was a game boundary (pong specific!)
running_add = running_add * gamma + r[t]
discounted_r[t] = running_add
return discounted_r
def policy_forward(x):
"""This is a manual implementation of a forward prop"""
h = np.dot(model['W1'], x) # (H x D) . (D x 1) = (H x 1) (200 x 1)
h[h<0] = 0 # ReLU introduces non-linearity
logp = np.dot(model['W2'], h) # This is a logits function and outputs a decimal. (1 x H) . (H x 1) = 1 (scalar)
p = sigmoid(logp) # squashes output to between 0 & 1 range
return p, h # return probability of taking action 2 (UP), and hidden state
def policy_backward(eph, epx, epdlogp):
""" backward pass. (eph is array of intermediate hidden states) """
""" Manual implementation of a backward prop"""
""" It takes an array of the hidden states that corresponds to all the images that were
fed to the NN (for the entire episode, so a bunch of games) and their corresponding logp"""
dW2 = np.dot(eph.T, epdlogp).ravel()
dh = np.outer(epdlogp, model['W2'])
dh[eph <= 0] = 0 # backpro prelu
dW1 = np.dot(dh.T, epx)
return {'W1':dW1, 'W2':dW2}
env = gym.make("Pong-v0")
env = wrappers.Monitor(env, 'tmp/pong-base', force=True) # record the game as as an mp4 file
observation = env.reset()
prev_x = None # used in computing the difference frame
xs,hs,dlogps,drs = [],[],[],[]
running_reward = None
reward_sum = 0
episode_number = 0
while True:
if render: env.render()
# preprocess the observation, set input to network to be difference image
cur_x = prepro(observation)
# we take the difference in the pixel input, since this is more likely to account for interesting information
# e.g. motion
x = cur_x - prev_x if prev_x is not None else np.zeros(D)
prev_x = cur_x
# forward the policy network and sample an action from the returned probability
aprob, h = policy_forward(x)
# The following step is randomly choosing a number which is the basis of making an action decision
# If the random number is less than the probability of UP output from our neural network given the image
# then go down. The randomness introduces 'exploration' of the Agent
action = 2 if np.random.uniform() < aprob else 3 # roll the dice! 2 is UP, 3 is DOWN, 0 is stay the same
# record various intermediates (needed later for backprop).
# This code would have otherwise been handled by a NN library
xs.append(x) # observation
hs.append(h) # hidden state
y = 1 if action == 2 else 0 # a "fake label" - this is the label that we're passing to the neural network
# to fake labels for supervised learning. It's fake because it is generated algorithmically, and not based
# on a ground truth, as is typically the case for Supervised learning
dlogps.append(y - aprob) # grad that encourages the action that was taken to be taken (see http://cs231n.github.io/neural-networks-2/#losses if confused)
# step the environment and get new measurements
observation, reward, done, info = env.step(action)
reward_sum += reward
drs.append(reward) # record reward (has to be done after we call step() to get reward for previous action)
if done: # an episode finished
episode_number += 1
# stack together all inputs, hidden states, action gradients, and rewards for this episode
epx = np.vstack(xs)
eph = np.vstack(hs)
epdlogp = np.vstack(dlogps)
epr = np.vstack(drs)
xs,hs,dlogps,drs = [],[],[],[] # reset array memory
# compute the discounted reward backwards through time
discounted_epr = discount_rewards(epr)
# standardize the rewards to be unit normal (helps control the gradient estimator variance)
discounted_epr -= np.mean(discounted_epr)
discounted_epr /= np.std(discounted_epr)
epdlogp *= discounted_epr # modulate the gradient with advantage (Policy Grad magic happens right here.)
grad = policy_backward(eph, epx, epdlogp)
for k in model: grad_buffer[k] += grad[k] # accumulate grad over batch
# perform rmsprop parameter update every batch_size episodes
if episode_number % batch_size == 0:
for k,v in model.items():
g = grad_buffer[k] # gradient
rmsprop_cache[k] = decay_rate * rmsprop_cache[k] + (1 - decay_rate) * g**2
model[k] += learning_rate * g / (np.sqrt(rmsprop_cache[k]) + 1e-5)
grad_buffer[k] = np.zeros_like(v) # reset batch gradient buffer
# boring book-keeping
running_reward = reward_sum if running_reward is None else running_reward * 0.99 + reward_sum * 0.01
print ('resetting env. episode reward total was %f. running mean: %f' % (reward_sum, running_reward))
if episode_number % 100 == 0: pickle.dump(model, open('save.p', 'wb'))
reward_sum = 0
observation = env.reset() # reset env
prev_x = None
if reward != 0: # Pong has either +1 or -1 reward exactly when game ends.
print ('ep %d: game finished, reward: %f' % (episode_number, reward) + '' if reward == -1 else ' !!!!!!!!')
| [
"numpy.mean",
"numpy.sqrt",
"numpy.exp",
"numpy.dot",
"numpy.outer",
"numpy.random.randn",
"numpy.zeros",
"numpy.vstack",
"numpy.std",
"gym.wrappers.Monitor",
"numpy.random.uniform",
"numpy.zeros_like",
"gym.make"
] | [((3531, 3550), 'gym.make', 'gym.make', (['"""Pong-v0"""'], {}), "('Pong-v0')\n", (3539, 3550), False, 'import gym\n'), ((3557, 3607), 'gym.wrappers.Monitor', 'wrappers.Monitor', (['env', '"""tmp/pong-base"""'], {'force': '(True)'}), "(env, 'tmp/pong-base', force=True)\n", (3573, 3607), False, 'from gym import wrappers\n'), ((1088, 1104), 'numpy.zeros_like', 'np.zeros_like', (['v'], {}), '(v)\n', (1101, 1104), True, 'import numpy as np\n'), ((1206, 1222), 'numpy.zeros_like', 'np.zeros_like', (['v'], {}), '(v)\n', (1219, 1222), True, 'import numpy as np\n'), ((2241, 2257), 'numpy.zeros_like', 'np.zeros_like', (['r'], {}), '(r)\n', (2254, 2257), True, 'import numpy as np\n'), ((2647, 2669), 'numpy.dot', 'np.dot', (["model['W1']", 'x'], {}), "(model['W1'], x)\n", (2653, 2669), True, 'import numpy as np\n'), ((2764, 2786), 'numpy.dot', 'np.dot', (["model['W2']", 'h'], {}), "(model['W2'], h)\n", (2770, 2786), True, 'import numpy as np\n'), ((3402, 3432), 'numpy.outer', 'np.outer', (['epdlogp', "model['W2']"], {}), "(epdlogp, model['W2'])\n", (3410, 3432), True, 'import numpy as np\n'), ((3476, 3493), 'numpy.dot', 'np.dot', (['dh.T', 'epx'], {}), '(dh.T, epx)\n', (3482, 3493), True, 'import numpy as np\n'), ((919, 940), 'numpy.random.randn', 'np.random.randn', (['H', 'D'], {}), '(H, D)\n', (934, 940), True, 'import numpy as np\n'), ((942, 952), 'numpy.sqrt', 'np.sqrt', (['D'], {}), '(D)\n', (949, 952), True, 'import numpy as np\n'), ((1017, 1035), 'numpy.random.randn', 'np.random.randn', (['H'], {}), '(H)\n', (1032, 1035), True, 'import numpy as np\n'), ((1038, 1048), 'numpy.sqrt', 'np.sqrt', (['H'], {}), '(H)\n', (1045, 1048), True, 'import numpy as np\n'), ((4133, 4144), 'numpy.zeros', 'np.zeros', (['D'], {}), '(D)\n', (4141, 4144), True, 'import numpy as np\n'), ((5695, 5708), 'numpy.vstack', 'np.vstack', (['xs'], {}), '(xs)\n', (5704, 5708), True, 'import numpy as np\n'), ((5719, 5732), 'numpy.vstack', 'np.vstack', (['hs'], {}), '(hs)\n', (5728, 5732), True, 
'import numpy as np\n'), ((5747, 5764), 'numpy.vstack', 'np.vstack', (['dlogps'], {}), '(dlogps)\n', (5756, 5764), True, 'import numpy as np\n'), ((5775, 5789), 'numpy.vstack', 'np.vstack', (['drs'], {}), '(drs)\n', (5784, 5789), True, 'import numpy as np\n'), ((6067, 6090), 'numpy.mean', 'np.mean', (['discounted_epr'], {}), '(discounted_epr)\n', (6074, 6090), True, 'import numpy as np\n'), ((6113, 6135), 'numpy.std', 'np.std', (['discounted_epr'], {}), '(discounted_epr)\n', (6119, 6135), True, 'import numpy as np\n'), ((1306, 1316), 'numpy.exp', 'np.exp', (['(-x)'], {}), '(-x)\n', (1312, 1316), True, 'import numpy as np\n'), ((3364, 3386), 'numpy.dot', 'np.dot', (['eph.T', 'epdlogp'], {}), '(eph.T, epdlogp)\n', (3370, 3386), True, 'import numpy as np\n'), ((4572, 4591), 'numpy.random.uniform', 'np.random.uniform', ([], {}), '()\n', (4589, 4591), True, 'import numpy as np\n'), ((6727, 6743), 'numpy.zeros_like', 'np.zeros_like', (['v'], {}), '(v)\n', (6740, 6743), True, 'import numpy as np\n'), ((6668, 6693), 'numpy.sqrt', 'np.sqrt', (['rmsprop_cache[k]'], {}), '(rmsprop_cache[k])\n', (6675, 6693), True, 'import numpy as np\n')] |
import numpy as np
from mmcv.parallel import DataContainer as DC
from mmdet.datasets.builder import PIPELINES
from mmdet.datasets.pipelines import to_tensor
@PIPELINES.register_module()
class ConcatVideoReferences(object):
    """Concat video references.

    If the input list contains at least two dicts, concat the input list of
    dict to one dict from 2-nd dict of the input list.

    Args:
        results (list[dict]): List of dict that contain keys such as 'img',
            'img_metas', 'gt_masks','proposals', 'gt_bboxes',
            'gt_bboxes_ignore', 'gt_labels','gt_semantic_seg',
            'gt_instance_ids'.

    Returns:
        list[dict]: The first dict of outputs is the same as the first
            dict of `results`. The second dict of outputs concats the
            dicts in `results[1:]`.
    """

    def __call__(self, results):
        """Merge all reference dicts (``results[1:]``) into a single dict.

        NOTE: the merge works by in-place aliasing. ``outs`` starts as a
        shallow copy of ``results[:1]``; at ``i == 1`` the second dict is
        mutated in place and appended to ``outs``, so ``outs[1]`` *is*
        ``results[1]``. Every later iteration concatenates/appends its
        values into that same dict.
        """
        assert (isinstance(results, list)), 'results must be list'
        # outs[0] stays untouched; outs[1] (added below) accumulates refs.
        outs = results[:1]
        for i, result in enumerate(results[1:], 1):
            if 'img' in result:
                img = result['img']
                # Ensure a channel axis exists: (H, W) -> (H, W, 1).
                if len(img.shape) < 3:
                    img = np.expand_dims(img, -1)
                if i == 1:
                    # First reference: add a trailing reference axis,
                    # giving shape (H, W, C, 1).
                    result['img'] = np.expand_dims(img, -1)
                else:
                    # Later references: stack along that trailing axis,
                    # giving shape (H, W, C, num_refs).
                    outs[1]['img'] = np.concatenate(
                        (outs[1]['img'], np.expand_dims(img, -1)), axis=-1)
            # These keys are collected as plain Python lists.
            for key in ['img_metas', 'gt_masks']:
                if key in result:
                    if i == 1:
                        result[key] = [result[key]]
                    else:
                        outs[1][key].append(result[key])
            # Array-valued keys are stacked row-wise with a leading column
            # holding the 0-based reference-frame index (i - 1).
            for key in [
                    'proposals', 'gt_bboxes', 'gt_bboxes_ignore', 'gt_labels',
                    'gt_instance_ids'
            ]:
                if key not in result:
                    continue
                value = result[key]
                # Promote 1-D arrays (e.g. labels) to a column vector so the
                # frame-index column can be prepended uniformly.
                if value.ndim == 1:
                    value = value[:, None]
                N = value.shape[0]
                value = np.concatenate((np.full(
                    (N, 1), i - 1, dtype=np.float32), value),
                               axis=1)
                if i == 1:
                    result[key] = value
                else:
                    outs[1][key] = np.concatenate((outs[1][key], value),
                                                  axis=0)
            if 'gt_semantic_seg' in result:
                if i == 1:
                    # Add two trailing singleton axes; the last one is the
                    # reference axis used for concatenation below.
                    result['gt_semantic_seg'] = result['gt_semantic_seg'][...,
                                                                          None,
                                                                          None]
                else:
                    outs[1]['gt_semantic_seg'] = np.concatenate(
                        (outs[1]['gt_semantic_seg'],
                         result['gt_semantic_seg'][..., None, None]),
                        axis=-1)
            if i == 1:
                # Register the (mutated) second dict as the accumulator.
                outs.append(result)
        return outs
@PIPELINES.register_module()
class MultiImagesToTensor(object):
    """Multi images to tensor.

    1. Transpose and convert image/multi-images to Tensor.
    2. Add prefix to every key in the second dict of the inputs. Then, add
       these keys and corresponding values into the outputs.

    Args:
        ref_prefix (str): The prefix of key added to the second dict of
            inputs. Defaults to 'ref'.
    """

    def __init__(self, ref_prefix='ref'):
        self.ref_prefix = ref_prefix

    def __call__(self, results):
        """Convert images to tensors and merge the (up to two) dicts.

        Args:
            results (list[dict]): List of one or two result dicts.

        Returns:
            dict: Keys of the first dict remain unchanged. Each key of the
            second dict (if present) is added with ``self.ref_prefix`` as
            prefix.
        """
        converted = [self.images_to_tensor(_results) for _results in results]
        data = dict(converted[0])
        if len(converted) == 2:
            for key, value in converted[1].items():
                data[f'{self.ref_prefix}_{key}'] = value
        return data

    def images_to_tensor(self, results):
        """Transpose image(s) to channel-first layout and convert to Tensor."""
        if 'img' in results:
            img = results['img']
            # Single image (H, W, C) -> (C, H, W); stacked images
            # (H, W, C, N) -> (N, C, H, W).
            axes = (2, 0, 1) if len(img.shape) == 3 else (3, 2, 0, 1)
            results['img'] = to_tensor(np.ascontiguousarray(img.transpose(axes)))
        if 'proposals' in results:
            results['proposals'] = to_tensor(results['proposals'])
        if 'img_metas' in results:
            results['img_metas'] = DC(results['img_metas'], cpu_only=True)
        return results
@PIPELINES.register_module()
class SeqDefaultFormatBundle(object):
    """Sequence Default formatting bundle.

    It simplifies the pipeline of formatting common fields, including "img",
    "img_metas", "proposals", "gt_bboxes", "gt_instance_ids",
    "gt_match_indices", "gt_bboxes_ignore", "gt_labels", "gt_masks" and
    "gt_semantic_seg". These fields are formatted as follows.

    - img: (1)transpose, (2)to tensor, (3)to DataContainer (stack=True)
    - img_metas: (1)to DataContainer (cpu_only=True)
    - proposals: (1)to tensor, (2)to DataContainer
    - gt_bboxes: (1)to tensor, (2)to DataContainer
    - gt_instance_ids: (1)to tensor, (2)to DataContainer
    - gt_match_indices: (1)to tensor, (2)to DataContainer
    - gt_bboxes_ignore: (1)to tensor, (2)to DataContainer
    - gt_labels: (1)to tensor, (2)to DataContainer
    - gt_masks: (1)to DataContainer (cpu_only=True)
    - gt_semantic_seg: (1)unsqueeze dim-0 (2)to tensor, \
        (3)to DataContainer (stack=True)

    Args:
        ref_prefix (str): The prefix of key added to the second dict of input
            list. Defaults to 'ref'.
    """

    def __init__(self, ref_prefix='ref'):
        self.ref_prefix = ref_prefix

    def __call__(self, results):
        """Sequence Default formatting bundle call function.

        Args:
            results (list[dict]): List of two dicts (key frame and
                reference frame results).

        Returns:
            dict: The result dict contains the data that is formatted with
                default bundle. Each key in the second dict of the input list
                adds `self.ref_prefix` as prefix.
        """
        outs = []
        for _results in results:
            _results = self.default_format_bundle(_results)
            outs.append(_results)
        data = {}
        data.update(outs[0])
        # NOTE: exactly two dicts are expected here; a single-element input
        # raises IndexError on outs[1].
        for k, v in outs[1].items():
            data[f'{self.ref_prefix}_{k}'] = v
        return data

    def default_format_bundle(self, results):
        """Transform and format common fields in results.

        Args:
            results (dict): Result dict contains the data to convert.

        Returns:
            dict: The result dict contains the data that is formatted with
                default bundle.
        """
        if 'img' in results:
            img = results['img']
            if len(img.shape) == 3:
                # (H, W, C) -> (C, H, W)
                img = np.ascontiguousarray(img.transpose(2, 0, 1))
            else:
                # (H, W, C, N) -> (N, C, H, W)
                img = np.ascontiguousarray(img.transpose(3, 2, 0, 1))
            results['img'] = DC(to_tensor(img), stack=True)
        for key in [
                'proposals', 'gt_bboxes', 'gt_bboxes_ignore', 'gt_labels',
                'gt_instance_ids', 'gt_match_indices'
        ]:
            if key not in results:
                continue
            results[key] = DC(to_tensor(results[key]))
        for key in ['img_metas', 'gt_masks']:
            if key in results:
                results[key] = DC(results[key], cpu_only=True)
        if 'gt_semantic_seg' in results:
            semantic_seg = results['gt_semantic_seg']
            if len(semantic_seg.shape) == 2:
                # Add a leading channel axis: (H, W) -> (1, H, W).
                semantic_seg = semantic_seg[None, ...]
            else:
                # (H, W, C, N) -> (N, C, H, W)
                semantic_seg = np.ascontiguousarray(
                    semantic_seg.transpose(3, 2, 0, 1))
            # BUG FIX: the processed ``semantic_seg`` was previously computed
            # and then discarded — the raw ``results['gt_semantic_seg']`` was
            # converted instead, so the unsqueeze/transpose had no effect.
            # Convert the processed array.
            results['gt_semantic_seg'] = DC(
                to_tensor(semantic_seg), stack=True)
        return results

    def __repr__(self):
        return self.__class__.__name__
@PIPELINES.register_module()
class VideoCollect(object):
    """Collect data from the loader relevant to the specific task.

    Args:
        keys (Sequence[str]): Keys of results to be collected in ``data``.
        meta_keys (Sequence[str]): Meta keys to be converted to
            ``mmcv.DataContainer`` and collected in ``data[img_metas]``.
            Defaults to None.
        default_meta_keys (tuple): Default meta keys. Defaults to ('filename',
            'ori_filename', 'ori_shape', 'img_shape', 'pad_shape',
            'scale_factor', 'flip', 'flip_direction', 'img_norm_cfg',
            'frame_id', 'is_video_data').
    """

    def __init__(self,
                 keys,
                 meta_keys=None,
                 default_meta_keys=('filename', 'ori_filename', 'ori_shape',
                                    'img_shape', 'pad_shape', 'scale_factor',
                                    'flip', 'flip_direction', 'img_norm_cfg',
                                    'frame_id', 'is_video_data')):
        self.keys = keys
        extra_meta_keys = ()
        if meta_keys is not None:
            if isinstance(meta_keys, str):
                extra_meta_keys = (meta_keys, )
            else:
                assert isinstance(meta_keys, tuple), \
                    'meta_keys must be str or tuple'
                extra_meta_keys = meta_keys
        self.meta_keys = default_meta_keys + extra_meta_keys

    def __call__(self, results):
        """Call function to collect keys in results.

        The keys in ``meta_keys`` and ``default_meta_keys`` will be converted
        to :obj:mmcv.DataContainer.

        Args:
            results (list[dict] | dict): List of dict or dict which contains
                the data to collect.

        Returns:
            list[dict] | dict: List of dict or dict that contains the
            following keys:

            - keys in ``self.keys``
            - ``img_metas``
        """
        single_input = isinstance(results, dict)
        batch = [results] if single_input else results
        outs = [
            self._collect_meta_keys(self._add_default_meta_keys(_results))
            for _results in batch
        ]
        if not single_input:
            return outs
        # A single-dict input additionally gets its metas wrapped in a
        # DataContainer before being returned unwrapped.
        outs[0]['img_metas'] = DC(outs[0]['img_metas'], cpu_only=True)
        return outs[0]

    def _collect_meta_keys(self, results):
        """Collect `self.keys` and `self.meta_keys` from `results` (dict)."""
        img_meta = {}
        for key in self.meta_keys:
            if key in results:
                img_meta[key] = results[key]
            elif key in results['img_info']:
                img_meta[key] = results['img_info'][key]
        data = {'img_metas': img_meta}
        data.update({key: results[key] for key in self.keys})
        return data

    def _add_default_meta_keys(self, results):
        """Add default meta keys.

        We set default meta keys including `pad_shape`, `scale_factor` and
        `img_norm_cfg` to avoid the case where no `Resize`, `Normalize` and
        `Pad` are implemented during the whole pipeline.

        Args:
            results (dict): Result dict contains the data to convert.

        Returns:
            results (dict): Updated result dict contains the data to convert.
        """
        img = results['img']
        results.setdefault('pad_shape', img.shape)
        results.setdefault('scale_factor', 1.0)
        if len(img.shape) < 3:
            num_channels = 1
        else:
            num_channels = img.shape[2]
        results.setdefault(
            'img_norm_cfg',
            dict(
                mean=np.zeros(num_channels, dtype=np.float32),
                std=np.ones(num_channels, dtype=np.float32),
                to_rgb=False))
        return results
@PIPELINES.register_module()
class ToList(object):
    """Use list to warp each value of the input dict.

    Args:
        results (dict): Result dict contains the data to convert.

    Returns:
        dict: Updated result dict contains the data to convert.
    """

    def __call__(self, results):
        # Wrap every value in a single-element list.
        return {key: [value] for key, value in results.items()}
| [
"numpy.ones",
"mmdet.datasets.pipelines.to_tensor",
"mmcv.parallel.DataContainer",
"numpy.zeros",
"numpy.expand_dims",
"numpy.concatenate",
"numpy.full",
"mmdet.datasets.builder.PIPELINES.register_module"
] | [((160, 187), 'mmdet.datasets.builder.PIPELINES.register_module', 'PIPELINES.register_module', ([], {}), '()\n', (185, 187), False, 'from mmdet.datasets.builder import PIPELINES\n'), ((3035, 3062), 'mmdet.datasets.builder.PIPELINES.register_module', 'PIPELINES.register_module', ([], {}), '()\n', (3060, 3062), False, 'from mmdet.datasets.builder import PIPELINES\n'), ((5144, 5171), 'mmdet.datasets.builder.PIPELINES.register_module', 'PIPELINES.register_module', ([], {}), '()\n', (5169, 5171), False, 'from mmdet.datasets.builder import PIPELINES\n'), ((8621, 8648), 'mmdet.datasets.builder.PIPELINES.register_module', 'PIPELINES.register_module', ([], {}), '()\n', (8646, 8648), False, 'from mmdet.datasets.builder import PIPELINES\n'), ((12423, 12450), 'mmdet.datasets.builder.PIPELINES.register_module', 'PIPELINES.register_module', ([], {}), '()\n', (12448, 12450), False, 'from mmdet.datasets.builder import PIPELINES\n'), ((4891, 4905), 'mmdet.datasets.pipelines.to_tensor', 'to_tensor', (['img'], {}), '(img)\n', (4900, 4905), False, 'from mmdet.datasets.pipelines import to_tensor\n'), ((4976, 5007), 'mmdet.datasets.pipelines.to_tensor', 'to_tensor', (["results['proposals']"], {}), "(results['proposals'])\n", (4985, 5007), False, 'from mmdet.datasets.pipelines import to_tensor\n'), ((5078, 5117), 'mmcv.parallel.DataContainer', 'DC', (["results['img_metas']"], {'cpu_only': '(True)'}), "(results['img_metas'], cpu_only=True)\n", (5080, 5117), True, 'from mmcv.parallel import DataContainer as DC\n'), ((10899, 10938), 'mmcv.parallel.DataContainer', 'DC', (["outs[0]['img_metas']"], {'cpu_only': '(True)'}), "(outs[0]['img_metas'], cpu_only=True)\n", (10901, 10938), True, 'from mmcv.parallel import DataContainer as DC\n'), ((7653, 7667), 'mmdet.datasets.pipelines.to_tensor', 'to_tensor', (['img'], {}), '(img)\n', (7662, 7667), False, 'from mmdet.datasets.pipelines import to_tensor\n'), ((7932, 7955), 'mmdet.datasets.pipelines.to_tensor', 'to_tensor', (['results[key]'], {}), 
'(results[key])\n', (7941, 7955), False, 'from mmdet.datasets.pipelines import to_tensor\n'), ((8065, 8096), 'mmcv.parallel.DataContainer', 'DC', (['results[key]'], {'cpu_only': '(True)'}), '(results[key], cpu_only=True)\n', (8067, 8096), True, 'from mmcv.parallel import DataContainer as DC\n'), ((8480, 8517), 'mmdet.datasets.pipelines.to_tensor', 'to_tensor', (["results['gt_semantic_seg']"], {}), "(results['gt_semantic_seg'])\n", (8489, 8517), False, 'from mmdet.datasets.pipelines import to_tensor\n'), ((1136, 1159), 'numpy.expand_dims', 'np.expand_dims', (['img', '(-1)'], {}), '(img, -1)\n', (1150, 1159), True, 'import numpy as np\n'), ((1223, 1246), 'numpy.expand_dims', 'np.expand_dims', (['img', '(-1)'], {}), '(img, -1)\n', (1237, 1246), True, 'import numpy as np\n'), ((2304, 2349), 'numpy.concatenate', 'np.concatenate', (['(outs[1][key], value)'], {'axis': '(0)'}), '((outs[1][key], value), axis=0)\n', (2318, 2349), True, 'import numpy as np\n'), ((2781, 2882), 'numpy.concatenate', 'np.concatenate', (["(outs[1]['gt_semantic_seg'], result['gt_semantic_seg'][..., None, None])"], {'axis': '(-1)'}), "((outs[1]['gt_semantic_seg'], result['gt_semantic_seg'][...,\n None, None]), axis=-1)\n", (2795, 2882), True, 'import numpy as np\n'), ((12263, 12303), 'numpy.zeros', 'np.zeros', (['num_channels'], {'dtype': 'np.float32'}), '(num_channels, dtype=np.float32)\n', (12271, 12303), True, 'import numpy as np\n'), ((12325, 12364), 'numpy.ones', 'np.ones', (['num_channels'], {'dtype': 'np.float32'}), '(num_channels, dtype=np.float32)\n', (12332, 12364), True, 'import numpy as np\n'), ((2062, 2102), 'numpy.full', 'np.full', (['(N, 1)', '(i - 1)'], {'dtype': 'np.float32'}), '((N, 1), i - 1, dtype=np.float32)\n', (2069, 2102), True, 'import numpy as np\n'), ((1363, 1386), 'numpy.expand_dims', 'np.expand_dims', (['img', '(-1)'], {}), '(img, -1)\n', (1377, 1386), True, 'import numpy as np\n')] |
import os, sys
sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(os.path.realpath(__file__)))))
import argparse
import datetime
import json
import contextlib
from func_timeout import func_timeout, FunctionTimedOut
import multiprocessing
import numpy as np
import os
import sys
from job_id_pair import JobIdPair
from job_table import JobTable
import scheduler
import utils
def simulate_with_timeout(experiment_id, policy_name,
                          throughputs_file, cluster_spec, lam, seed, interval,
                          jobs_to_complete, fixed_job_duration, solver,
                          generate_multi_gpu_jobs,
                          generate_multi_priority_jobs, simulate_steady_state,
                          log_dir, timeout, verbose, checkpoint_threshold,
                          profiling_percentage, num_reference_models,
                          num_gpus_per_server, ideal):
    """Run one simulated scheduling experiment, optionally under a timeout.

    All scheduler stdout/stderr is redirected into a per-lambda log file in
    ``log_dir``. If ``timeout`` (seconds) is given and the simulation does
    not finish in time, the average JCT is reported as infinity with a
    utilization of 1.0.

    Returns:
        tuple: (average job completion time, cluster utilization).
    """
    lam_str = 'lambda=%f.log' % (lam)
    checkpoint_file = None
    if checkpoint_threshold is not None:
        checkpoint_file = os.path.join(log_dir, 'lambda=%f.pickle' % lam)
    cluster_spec_str = 'v100:%d|p100:%d|k80:%d' % (cluster_spec['v100'],
                                                   cluster_spec['p100'],
                                                   cluster_spec['k80'])
    policy = utils.get_policy(policy_name, solver=solver, seed=seed)
    if verbose:
        current_time = datetime.datetime.now()
        print('[%s] [Experiment ID: %2d] '
              'Configuration: cluster_spec=%s, policy=%s, '
              'seed=%d, lam=%f, '
              'profiling_percentage=%f, '
              'num_reference_models=%d' % (current_time,
                                           experiment_id,
                                           cluster_spec_str,
                                           policy.name,
                                           seed, lam,
                                           profiling_percentage,
                                           num_reference_models))
    # Redirect all scheduler output into the per-lambda log file.
    with open(os.path.join(log_dir, lam_str), 'w') as f:
        with contextlib.redirect_stderr(f), contextlib.redirect_stdout(f):
            sched = scheduler.Scheduler(
                policy,
                throughputs_file=throughputs_file,
                seed=seed,
                time_per_iteration=interval,
                simulate=True,
                profiling_percentage=profiling_percentage,
                num_reference_models=num_reference_models)
            if timeout is None:
                # No timeout: run the simulation to completion directly.
                sched.simulate(cluster_spec, lam=lam,
                               jobs_to_complete=jobs_to_complete,
                               fixed_job_duration=fixed_job_duration,
                               generate_multi_gpu_jobs=generate_multi_gpu_jobs,
                               generate_multi_priority_jobs=generate_multi_priority_jobs,
                               simulate_steady_state=simulate_steady_state,
                               checkpoint_file=checkpoint_file,
                               checkpoint_threshold=checkpoint_threshold,
                               num_gpus_per_server=num_gpus_per_server,
                               ideal=ideal)
                average_jct = sched.get_average_jct(jobs_to_complete)
                utilization = 1.0
                if not ideal:
                    utilization = sched.get_cluster_utilization()
            else:
                try:
                    # Same simulation, but aborted via FunctionTimedOut if it
                    # exceeds `timeout` seconds.
                    func_timeout(timeout, sched.simulate,
                                 args=(cluster_spec,),
                                 kwargs={
                                    'lam': lam,
                                    'jobs_to_complete': jobs_to_complete,
                                    'fixed_job_duration': fixed_job_duration,
                                    'generate_multi_gpu_jobs': generate_multi_gpu_jobs,
                                    'generate_multi_priority_jobs': generate_multi_priority_jobs,
                                    'simulate_steady_state': simulate_steady_state,
                                    'checkpoint_file': checkpoint_file,
                                    'checkpoint_threshold': checkpoint_threshold,
                                    'num_gpus_per_server': num_gpus_per_server,
                                    'ideal': ideal
                                 })
                    average_jct = sched.get_average_jct(jobs_to_complete)
                    utilization = sched.get_cluster_utilization()
                except FunctionTimedOut:
                    # Treat a timed-out run as "infinitely slow".
                    average_jct = float('inf')
                    utilization = 1.0
    if verbose:
        current_time = datetime.datetime.now()
        print('[%s] [Experiment ID: %2d] '
              'Results: average JCT=%f, utilization=%f' % (current_time,
                                                           experiment_id,
                                                           average_jct,
                                                           utilization))
    sched.shutdown()
    return average_jct, utilization
def main(args):
    """Enumerate the experiment grid and run every simulation in a pool.

    One experiment is created per (cluster spec, policy, profiling
    percentage, number of reference models, input throughput, seed)
    combination. A per-experiment log directory tree is created under
    ``args.log_dir/raw_logs`` and all experiments are dispatched to a
    multiprocessing pool running :func:`simulate_with_timeout`.
    """
    if args.window_start >= args.window_end:
        raise ValueError('Window start must be < than window end.')
    if (args.throughput_lower_bound is None or
        args.throughput_upper_bound is None):
        raise ValueError('Throughput range must be specified.')
    cutoff_throughputs = {}
    if args.cutoff_throughputs_file is not None:
        cutoff_throughputs = json.load(open(args.cutoff_throughputs_file, 'r'))
    throughputs_file = args.throughputs_file
    policy_names = args.policies
    profiling_percentages = args.profiling_percentages
    all_num_reference_models = args.num_reference_models
    # Throughput estimation is active whenever profiling is partial or fewer
    # reference models than job types are used.
    estimate_throughputs = (min(profiling_percentages) < 1.0 or
                            min(all_num_reference_models) < len(JobTable))
    job_range = (args.window_start, args.window_end)
    experiment_id = 0
    # NOTE(review): this `throughputs` value is never used — it is shadowed
    # by the np.linspace assignment inside the loop below.
    with open(throughputs_file, 'r') as f:
        throughputs = json.load(f)
    raw_logs_dir = os.path.join(args.log_dir, 'raw_logs')
    if not os.path.isdir(raw_logs_dir):
        os.mkdir(raw_logs_dir)
    # Jobs inside the measurement window, used for JCT computation.
    jobs_to_complete = set()
    for i in range(job_range[0], job_range[1]):
        jobs_to_complete.add(JobIdPair(i, None))
    all_args_list = []
    for cluster_spec_str in args.cluster_spec:
        cluster_spec_str_split = cluster_spec_str.split(':')
        if len(cluster_spec_str_split) != 3:
            raise ValueError('Invalid cluster spec %s' % (cluster_spec_str))
        cluster_spec = {
            'v100': int(cluster_spec_str_split[0]),
            'p100': int(cluster_spec_str_split[1]),
            'k80': int(cluster_spec_str_split[2]),
        }
        num_gpus_per_server_split = args.num_gpus_per_server.split(':')
        num_gpus_per_server = {
            'v100': int(num_gpus_per_server_split[0]),
            'p100': int(num_gpus_per_server_split[1]),
            'k80': int(num_gpus_per_server_split[2]),
        }
        # Log directory layout:
        # raw_logs/<cluster>/<policy>[/<profiling>][/<num_ref_models>]/<seed>
        raw_logs_cluster_spec_subdir = \
            os.path.join(raw_logs_dir,
                         'v100=%d.p100=%d.k80=%d' % (cluster_spec['v100'],
                                                     cluster_spec['p100'],
                                                     cluster_spec['k80']))
        if not os.path.isdir(raw_logs_cluster_spec_subdir):
            os.mkdir(raw_logs_cluster_spec_subdir)
        for policy_name in policy_names:
            raw_logs_policy_subdir = os.path.join(raw_logs_cluster_spec_subdir,
                                                  policy_name)
            if not os.path.isdir(raw_logs_policy_subdir):
                os.mkdir(raw_logs_policy_subdir)
            for profiling_percentage in profiling_percentages:
                if estimate_throughputs:
                    profiling_percentage_str = \
                        'profiling_percentage=%f' % (profiling_percentage)
                    raw_logs_profiling_subdir = \
                        os.path.join(raw_logs_policy_subdir,
                                     profiling_percentage_str)
                    if not os.path.isdir(raw_logs_profiling_subdir):
                        os.mkdir(raw_logs_profiling_subdir)
                else:
                    raw_logs_profiling_subdir = raw_logs_policy_subdir
                for i, num_reference_models in enumerate(args.num_reference_models):
                    if estimate_throughputs:
                        num_reference_models_str = \
                            'num_reference_models=%d' % (num_reference_models)
                        raw_logs_num_reference_models_subdir = \
                            os.path.join(raw_logs_profiling_subdir,
                                         num_reference_models_str)
                        if not os.path.isdir(raw_logs_num_reference_models_subdir):
                            os.mkdir(raw_logs_num_reference_models_subdir)
                    else:
                        raw_logs_num_reference_models_subdir = \
                            raw_logs_policy_subdir
                    # Sweep input throughputs (jobs/hour) over the requested
                    # range; skip a leading zero to avoid division by zero.
                    throughputs = \
                        list(np.linspace(args.throughput_lower_bound,
                                         args.throughput_upper_bound,
                                         num=args.num_data_points))
                    if throughputs[0] == 0.0:
                        throughputs = throughputs[1:]
                    for throughput in throughputs:
                        # Skip throughputs known (from the cutoff file) to be
                        # unsustainable for this policy / cluster spec.
                        if (cluster_spec_str in cutoff_throughputs and
                            policy_name in cutoff_throughputs[cluster_spec_str]):
                            cutoff_throughput = \
                                cutoff_throughputs[cluster_spec_str][policy_name]
                            if throughput >= cutoff_throughput:
                                print('Throughput of %f is too high '
                                      'for policy %s with cluster '
                                      'spec %s.' % (throughput,
                                                    policy_name,
                                                    cluster_spec_str))
                                continue
                        # Poisson inter-arrival parameter (seconds per job).
                        lam = 3600.0 / throughput
                        for seed in args.seeds:
                            seed_str = 'seed=%d' % (seed)
                            raw_logs_seed_subdir = os.path.join(
                                raw_logs_num_reference_models_subdir,
                                seed_str)
                            if not os.path.isdir(raw_logs_seed_subdir):
                                os.mkdir(raw_logs_seed_subdir)
                            # Positional args must match the signature of
                            # simulate_with_timeout.
                            all_args_list.append((experiment_id, policy_name,
                                                  throughputs_file,
                                                  cluster_spec,
                                                  lam, seed, args.interval,
                                                  jobs_to_complete,
                                                  args.fixed_job_duration,
                                                  args.solver,
                                                  args.generate_multi_gpu_jobs,
                                                  args.generate_multi_priority_jobs,
                                                  args.simulate_steady_state,
                                                  raw_logs_seed_subdir,
                                                  args.timeout,
                                                  args.verbose,
                                                  args.checkpoint_threshold,
                                                  profiling_percentage,
                                                  num_reference_models,
                                                  num_gpus_per_server,
                                                  args.ideal))
                            experiment_id += 1
    if len(all_args_list) > 0:
        current_time = datetime.datetime.now()
        print('[%s] Running %d total experiment(s)...' % (current_time,
                                                          len(all_args_list)))
        with multiprocessing.Pool(args.processes) as p:
            # Sort args in order of decreasing lambda to prioritize
            # short-running jobs.
            all_args_list.sort(key=lambda x: x[4], reverse=True)
            results = [p.apply_async(simulate_with_timeout, args_list)
                       for args_list in all_args_list]
            results = [result.get() for result in results]
    else:
        raise ValueError('No work to be done!')
if __name__=='__main__':
    # Command-line entry point: build the argument parser and kick off the
    # throughput sweep.
    parser = argparse.ArgumentParser(
            description='Sweep through lambda values')
    fixed_range = parser.add_argument_group('Sweep over fixed range')
    parser.add_argument('-l', '--log-dir', type=str, default='logs',
                        help='Log directory')
    parser.add_argument('-s', '--window-start', type=int, default=0,
                        help='Measurement window start (job ID)')
    parser.add_argument('-e', '--window-end', type=int, default=5000,
                        help='Measurement window end (job ID)')
    parser.add_argument('-t', '--timeout', type=int, default=None,
                        help='Timeout (in seconds) for each run')
    parser.add_argument('-j', '--processes', type=int, default=None,
                        help=('Number of processes to use in pool '
                              '(use as many as available if not specified)'))
    parser.add_argument('-p', '--policies', type=str, nargs='+',
                        default=utils.get_available_policies(),
                        help='List of policies to sweep')
    parser.add_argument('-c', '--cluster-spec', type=str, nargs='+',
                        default=['25:0:0', '12:12:0', '16:8:0', '8:8:8'],
                        help=('Cluster specification in the form of '
                              '#v100s:#p100s:#k80s'))
    parser.add_argument('--num_gpus_per_server', type=str, default='1:1:1',
                        help=('Cluster specification in the form of '
                              '#v100s:#p100s:#k80s'))
    parser.add_argument('--seeds', type=int, nargs='+',
                        default=[0, 1, 2, 3, 4],
                        help='List of random seeds')
    parser.add_argument('-i', '--interval', type=int, default=360,
                        help='Interval length (in seconds)')
    parser.add_argument('-f', '--fixed-job-duration', type=int, default=None,
                        help=('If set, fixes the duration of all jobs to the '
                              'specified value (in seconds)'))
    parser.add_argument('--cutoff-throughputs-file', type=str, default=None,
                        help=('If set, uses the attached cutoff_throughputs '
                              'JSON file in sweep to limit args run'))
    parser.add_argument('--throughputs-file', type=str,
                        default='simulation_throughputs.json',
                        help='Oracle throughputs file')
    parser.add_argument('-m', '--generate-multi-gpu-jobs', action='store_true',
                        default=False,
                        help=('If set, generates multi-GPU jobs according to '
                              'a pre-defined distribution'))
    parser.add_argument('--generate-multi-priority-jobs', action='store_true',
                        default=False,
                        help=('If set, generates some jobs with higher priority'))
    parser.add_argument('--simulate-steady-state', action='store_true',
                        default=False,
                        help=('If set, adds as many jobs as there are workers '
                              'before beginning the simulation.'))
    parser.add_argument('--solver', type=str, choices=['ECOS', 'GUROBI', 'SCS'],
                        default='ECOS', help='CVXPY solver')
    # NOTE(review): store_true with default=True means --verbose can never be
    # turned off from the command line — likely unintended; confirm before
    # changing the default.
    parser.add_argument('-v', '--verbose', action='store_true', default=True,
                        help='Verbose')
    parser.add_argument('--checkpoint-threshold', type=int, default=None,
                        help=('Checkpoint threshold, None if checkpointing is '
                              'disabled. Checkpoint is created after this '
                              'job ID is added.'))
    parser.add_argument('--profiling_percentages', type=float, nargs='+',
                        default=[1.0],
                        help=('Percentages of machines dedicated to profiling '
                              'co-located job pairs'))
    parser.add_argument('--num_reference_models', type=int, nargs='+',
                        default=[len(JobTable)],
                        help=('Number of reference models to use when '
                              'estimating throughputs'))
    parser.add_argument('--ideal', action='store_true', default=False,
                        help='Run allocations 100%% ideally')
    fixed_range.add_argument('-a', '--throughput-lower-bound', type=float,
                             default=None,
                             help=('Lower bound for throughput interval to '
                                   'sweep'))
    fixed_range.add_argument('-b', '--throughput-upper-bound', type=float,
                             default=None,
                             help=('Upper bound for throughput interval to '
                                   'sweep'))
    fixed_range.add_argument('-n', '--num-data-points', type=int, default=20,
                             help='Number of data points to sweep through')
    args = parser.parse_args()
    main(args)
| [
"contextlib.redirect_stdout",
"utils.get_policy",
"argparse.ArgumentParser",
"os.path.join",
"os.path.realpath",
"datetime.datetime.now",
"contextlib.redirect_stderr",
"os.path.isdir",
"job_id_pair.JobIdPair",
"os.mkdir",
"multiprocessing.Pool",
"utils.get_available_policies",
"json.load",
... | [((1353, 1408), 'utils.get_policy', 'utils.get_policy', (['policy_name'], {'solver': 'solver', 'seed': 'seed'}), '(policy_name, solver=solver, seed=seed)\n', (1369, 1408), False, 'import utils\n'), ((6202, 6240), 'os.path.join', 'os.path.join', (['args.log_dir', '"""raw_logs"""'], {}), "(args.log_dir, 'raw_logs')\n", (6214, 6240), False, 'import os\n'), ((12922, 12988), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Sweep through lambda values"""'}), "(description='Sweep through lambda values')\n", (12945, 12988), False, 'import argparse\n'), ((1057, 1104), 'os.path.join', 'os.path.join', (['log_dir', "('lambda=%f.pickle' % lam)"], {}), "(log_dir, 'lambda=%f.pickle' % lam)\n", (1069, 1104), False, 'import os\n'), ((1448, 1471), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (1469, 1471), False, 'import datetime\n'), ((4836, 4859), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (4857, 4859), False, 'import datetime\n'), ((6169, 6181), 'json.load', 'json.load', (['f'], {}), '(f)\n', (6178, 6181), False, 'import json\n'), ((6252, 6279), 'os.path.isdir', 'os.path.isdir', (['raw_logs_dir'], {}), '(raw_logs_dir)\n', (6265, 6279), False, 'import os\n'), ((6289, 6311), 'os.mkdir', 'os.mkdir', (['raw_logs_dir'], {}), '(raw_logs_dir)\n', (6297, 6311), False, 'import os\n'), ((7215, 7339), 'os.path.join', 'os.path.join', (['raw_logs_dir', "('v100=%d.p100=%d.k80=%d' % (cluster_spec['v100'], cluster_spec['p100'],\n cluster_spec['k80']))"], {}), "(raw_logs_dir, 'v100=%d.p100=%d.k80=%d' % (cluster_spec['v100'],\n cluster_spec['p100'], cluster_spec['k80']))\n", (7227, 7339), False, 'import os\n'), ((12242, 12265), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (12263, 12265), False, 'import datetime\n'), ((2083, 2113), 'os.path.join', 'os.path.join', (['log_dir', 'lam_str'], {}), '(log_dir, lam_str)\n', (2095, 2113), False, 'import os\n'), ((2139, 2168), 
'contextlib.redirect_stderr', 'contextlib.redirect_stderr', (['f'], {}), '(f)\n', (2165, 2168), False, 'import contextlib\n'), ((2170, 2199), 'contextlib.redirect_stdout', 'contextlib.redirect_stdout', (['f'], {}), '(f)\n', (2196, 2199), False, 'import contextlib\n'), ((2221, 2433), 'scheduler.Scheduler', 'scheduler.Scheduler', (['policy'], {'throughputs_file': 'throughputs_file', 'seed': 'seed', 'time_per_iteration': 'interval', 'simulate': '(True)', 'profiling_percentage': 'profiling_percentage', 'num_reference_models': 'num_reference_models'}), '(policy, throughputs_file=throughputs_file, seed=seed,\n time_per_iteration=interval, simulate=True, profiling_percentage=\n profiling_percentage, num_reference_models=num_reference_models)\n', (2240, 2433), False, 'import scheduler\n'), ((6419, 6437), 'job_id_pair.JobIdPair', 'JobIdPair', (['i', 'None'], {}), '(i, None)\n', (6428, 6437), False, 'from job_id_pair import JobIdPair\n'), ((7482, 7525), 'os.path.isdir', 'os.path.isdir', (['raw_logs_cluster_spec_subdir'], {}), '(raw_logs_cluster_spec_subdir)\n', (7495, 7525), False, 'import os\n'), ((7539, 7577), 'os.mkdir', 'os.mkdir', (['raw_logs_cluster_spec_subdir'], {}), '(raw_logs_cluster_spec_subdir)\n', (7547, 7577), False, 'import os\n'), ((7657, 7712), 'os.path.join', 'os.path.join', (['raw_logs_cluster_spec_subdir', 'policy_name'], {}), '(raw_logs_cluster_spec_subdir, policy_name)\n', (7669, 7712), False, 'import os\n'), ((12430, 12466), 'multiprocessing.Pool', 'multiprocessing.Pool', (['args.processes'], {}), '(args.processes)\n', (12450, 12466), False, 'import multiprocessing\n'), ((13902, 13932), 'utils.get_available_policies', 'utils.get_available_policies', ([], {}), '()\n', (13930, 13932), False, 'import utils\n'), ((79, 105), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (95, 105), False, 'import os\n'), ((7775, 7812), 'os.path.isdir', 'os.path.isdir', (['raw_logs_policy_subdir'], {}), '(raw_logs_policy_subdir)\n', (7788, 7812), 
False, 'import os\n'), ((7830, 7862), 'os.mkdir', 'os.mkdir', (['raw_logs_policy_subdir'], {}), '(raw_logs_policy_subdir)\n', (7838, 7862), False, 'import os\n'), ((3604, 4092), 'func_timeout.func_timeout', 'func_timeout', (['timeout', 'sched.simulate'], {'args': '(cluster_spec,)', 'kwargs': "{'lam': lam, 'jobs_to_complete': jobs_to_complete, 'fixed_job_duration':\n fixed_job_duration, 'generate_multi_gpu_jobs': generate_multi_gpu_jobs,\n 'generate_multi_priority_jobs': generate_multi_priority_jobs,\n 'simulate_steady_state': simulate_steady_state, 'checkpoint_file':\n checkpoint_file, 'checkpoint_threshold': checkpoint_threshold,\n 'num_gpus_per_server': num_gpus_per_server, 'ideal': ideal}"}), "(timeout, sched.simulate, args=(cluster_spec,), kwargs={'lam':\n lam, 'jobs_to_complete': jobs_to_complete, 'fixed_job_duration':\n fixed_job_duration, 'generate_multi_gpu_jobs': generate_multi_gpu_jobs,\n 'generate_multi_priority_jobs': generate_multi_priority_jobs,\n 'simulate_steady_state': simulate_steady_state, 'checkpoint_file':\n checkpoint_file, 'checkpoint_threshold': checkpoint_threshold,\n 'num_gpus_per_server': num_gpus_per_server, 'ideal': ideal})\n", (3616, 4092), False, 'from func_timeout import func_timeout, FunctionTimedOut\n'), ((8166, 8228), 'os.path.join', 'os.path.join', (['raw_logs_policy_subdir', 'profiling_percentage_str'], {}), '(raw_logs_policy_subdir, profiling_percentage_str)\n', (8178, 8228), False, 'import os\n'), ((8293, 8333), 'os.path.isdir', 'os.path.isdir', (['raw_logs_profiling_subdir'], {}), '(raw_logs_profiling_subdir)\n', (8306, 8333), False, 'import os\n'), ((8359, 8394), 'os.mkdir', 'os.mkdir', (['raw_logs_profiling_subdir'], {}), '(raw_logs_profiling_subdir)\n', (8367, 8394), False, 'import os\n'), ((8843, 8908), 'os.path.join', 'os.path.join', (['raw_logs_profiling_subdir', 'num_reference_models_str'], {}), '(raw_logs_profiling_subdir, num_reference_models_str)\n', (8855, 8908), False, 'import os\n'), ((9316, 9416), 
'numpy.linspace', 'np.linspace', (['args.throughput_lower_bound', 'args.throughput_upper_bound'], {'num': 'args.num_data_points'}), '(args.throughput_lower_bound, args.throughput_upper_bound, num=\n args.num_data_points)\n', (9327, 9416), True, 'import numpy as np\n'), ((8981, 9032), 'os.path.isdir', 'os.path.isdir', (['raw_logs_num_reference_models_subdir'], {}), '(raw_logs_num_reference_models_subdir)\n', (8994, 9032), False, 'import os\n'), ((9062, 9108), 'os.mkdir', 'os.mkdir', (['raw_logs_num_reference_models_subdir'], {}), '(raw_logs_num_reference_models_subdir)\n', (9070, 9108), False, 'import os\n'), ((10582, 10642), 'os.path.join', 'os.path.join', (['raw_logs_num_reference_models_subdir', 'seed_str'], {}), '(raw_logs_num_reference_models_subdir, seed_str)\n', (10594, 10642), False, 'import os\n'), ((10751, 10786), 'os.path.isdir', 'os.path.isdir', (['raw_logs_seed_subdir'], {}), '(raw_logs_seed_subdir)\n', (10764, 10786), False, 'import os\n'), ((10820, 10850), 'os.mkdir', 'os.mkdir', (['raw_logs_seed_subdir'], {}), '(raw_logs_seed_subdir)\n', (10828, 10850), False, 'import os\n')] |
import gym
import numpy as np
from gym import spaces
class GridDrawBwEnv(gym.Env):
    """Grid drawing environment with a raster-scanning cursor.

    The observation is a float array of shape ``(2, grid_size, grid_size)``:
    channel 0 is the canvas, channel 1 is a one-hot map of the cursor
    position. Action 0 darkens the current cell by 25/255 (clamped to
    [0, 1]); action 1 advances the cursor (next row, wrapping to the next
    column). The episode ends when the cursor walks off the last column.
    """
    metadata = {'render.modes': ['human']}

    def __init__(self):
        self.grid_size = 14
        self.action_space = spaces.Discrete(2)
        self.observation_space = spaces.Box(low=0, high=255,
            shape=(2, self.grid_size, self.grid_size), dtype=np.float32)
        self.current_state = None  # (2, grid_size, grid_size) observation
        self.done = None           # True once the episode has terminated
        self.position = None       # cursor [row, col]

    def step(self, action):
        """Apply one action; returns ``(obs, reward, done, info)``.

        Raises:
            RuntimeError: If called after the episode has terminated.
        """
        if self.done:
            raise RuntimeError("Episode has finished. Call env.reset() to start a new episode.")
        if action == 0:
            cell = tuple(self.position)
            # BUG FIX: np.clip returns a new value and the original code
            # discarded its result, so cell values could grow past 1.0.
            # Assign the clipped value back.
            self.current_state[0][cell] = np.clip(
                self.current_state[0][cell] + 25 / 255., 0., 1.)
            return self.current_state, 0, False, None
        # Action 1: advance the cursor one step (row-wise, then next column).
        self.current_state[1][tuple(self.position)] = 0
        self.position[0] += 1
        self.position[0] %= self.grid_size
        self.position[1] += int(self.position[0] == 0)
        if self.position[1] == self.grid_size:
            self.current_state[1][self.grid_size - 1, self.grid_size - 1] = 1
            # BUG FIX: ``self.done`` was never set to True, so the
            # RuntimeError guard above could never fire after termination.
            self.done = True
            return self.current_state, 0, True, None
        self.current_state[1][tuple(self.position)] = 1
        return self.current_state, 0, False, None

    def reset(self):
        """Start a new episode: blank canvas, cursor at (0, 0)."""
        canvas = np.zeros((self.grid_size, self.grid_size))
        position_matrix = np.zeros((self.grid_size, self.grid_size))
        self.position = np.array([0, 0])
        position_matrix[tuple(self.position)] = 1
        self.current_state = np.stack([canvas, position_matrix])
        self.done = False
        return self.current_state

    def render(self, mode='human', close=False):
        # Rendering is intentionally a no-op.
        return
| [
"gym.spaces.Discrete",
"gym.spaces.Box",
"numpy.stack",
"numpy.array",
"numpy.zeros"
] | [((209, 227), 'gym.spaces.Discrete', 'spaces.Discrete', (['(2)'], {}), '(2)\n', (224, 227), False, 'from gym import spaces\n'), ((261, 353), 'gym.spaces.Box', 'spaces.Box', ([], {'low': '(0)', 'high': '(255)', 'shape': '(2, self.grid_size, self.grid_size)', 'dtype': 'np.float32'}), '(low=0, high=255, shape=(2, self.grid_size, self.grid_size),\n dtype=np.float32)\n', (271, 353), False, 'from gym import spaces\n'), ((1363, 1405), 'numpy.zeros', 'np.zeros', (['(self.grid_size, self.grid_size)'], {}), '((self.grid_size, self.grid_size))\n', (1371, 1405), True, 'import numpy as np\n'), ((1432, 1474), 'numpy.zeros', 'np.zeros', (['(self.grid_size, self.grid_size)'], {}), '((self.grid_size, self.grid_size))\n', (1440, 1474), True, 'import numpy as np\n'), ((1500, 1516), 'numpy.array', 'np.array', (['[0, 0]'], {}), '([0, 0])\n', (1508, 1516), True, 'import numpy as np\n'), ((1598, 1633), 'numpy.stack', 'np.stack', (['[canvas, position_matrix]'], {}), '([canvas, position_matrix])\n', (1606, 1633), True, 'import numpy as np\n')] |
import warnings
# Perform the heavy imports with RuntimeWarning/FutureWarning suppressed so
# that import-time warnings from TensorFlow and its dependencies are muted.
with warnings.catch_warnings():
    warnings.filterwarnings('ignore', category=RuntimeWarning)
    warnings.filterwarnings('ignore', category=FutureWarning)
    import tensorflow as tf
    from tensorflow.core.protobuf import config_pb2
    import os
    import numpy as np
    from PIL import Image
    from tqdm import trange
    import skimage.transform
    import networks
    import ops
    import utils
# Silence TensorFlow's Python-side logging and its C++ info/warning output.
tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.ERROR)
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
def stylize(content_img,
            style_img,
            # Brushstroke optimizer params
            resolution=512,
            num_strokes=5000,
            num_steps=100,
            S=10,
            K=20,
            canvas_color='gray',
            width_scale=0.1,
            length_scale=1.1,
            content_weight=1.0,
            style_weight=3.0,
            tv_weight=0.008,
            curviture_weight=4.0,
            # Pixel optimizer params
            pixel_resolution=1024,
            num_steps_pixel=2000
            ):
    """Two-stage stylization: brushstroke optimization followed by per-pixel
    refinement.

    Args:
        content_img: Content image (PIL.Image).
        style_img: Style image (PIL.Image).
        resolution .. curviture_weight: forwarded to BrushstrokeOptimizer.
        pixel_resolution, num_steps_pixel: forwarded to PixelOptimizer.

    Returns:
        The final canvas as produced by PixelOptimizer.optimize() (PIL.Image).
    """
    stroke_optim = BrushstrokeOptimizer(content_img,
                                         style_img,
                                         resolution=resolution,
                                         num_strokes=num_strokes,
                                         num_steps=num_steps,
                                         S=S,
                                         K=K,
                                         canvas_color=canvas_color,
                                         width_scale=width_scale,
                                         length_scale=length_scale,
                                         content_weight=content_weight,
                                         style_weight=style_weight,
                                         tv_weight=tv_weight,
                                         curviture_weight=curviture_weight)
    print('Stroke optimization:')
    # BUG FIX: BrushstrokeOptimizer.optimize() returns a tuple
    # (canvas, s, e, c, location, colors, lossmap, width). The original code
    # bound the whole tuple to `canvas`, and PixelOptimizer then failed on
    # `canvas.size`. Only the rendered canvas (element 0) is needed here.
    canvas = stroke_optim.optimize()[0]
    pixel_optim = PixelOptimizer(canvas,
                                 style_img,
                                 resolution=pixel_resolution,
                                 num_steps=num_steps_pixel,
                                 content_weight=1.0,
                                 style_weight=10000.0)
    print('Pixel optimization:')
    canvas = pixel_optim.optimize()
    return canvas
class BrushstrokeOptimizer:
    """Optimizes brushstroke parameters (locations, quadratic-Bezier control
    points, widths and colors) so that the differentiably rendered canvas
    matches the content image.

    The TF1 graph is assembled by _initialize/_render/_losses/_optimizer and
    executed inside optimize(). NOTE(review): the style loss is commented out
    in _losses, so style_img / style_weight currently have no effect on the
    optimization.
    """

    def __init__(self,
                 content_img,                     # Content image (PIL.Image).
                 style_img,                       # Style image (PIL.Image).
                 draw_curve_position_path = None, # Set of points that represent the drawn curves, denoted as P_i in Sec. B of the paper (str).
                 draw_curve_vector_path = None,   # Set of tangent vectors for the points of the drawn curves, denoted as v_i in Sec. B of the paper (str).
                 draw_strength = 100,             # Strength of the influence of the drawn curves, denoted L in Sec. B of the paper (int).
                 resolution = 512,                # Resolution of the canvas (int).
                 num_strokes = 5000,              # Number of brushstrokes (int).
                 num_steps = 100,                 # Number of optimization steps (int).
                 S = 10,                          # Number of points to sample on each curve, see Sec. 4.2.1 of the paper (int).
                 K = 20,                          # Number of brushstrokes to consider for each pixel, see Sec. C.2 of the paper (int).
                 canvas_color = 'gray',           # Color of the canvas (str), or an image array used as background.
                 width_scale = 0.1,               # Scale parameter for the brushstroke width (float).
                 length_scale = 1.1,              # Scale parameter for the brushstroke length (float).
                 content_weight = 1.0,            # Weight for the content loss (float).
                 style_weight = 3.0,              # Weight for the style loss (float).
                 tv_weight = 0.008,               # Weight for the total variation loss (float).
                 draw_weight = 100.0,             # Weight for the drawing projection loss (float)
                 curviture_weight = 4.0,          # Weight for the curviture loss (float).
                 streamlit_pbar = None,           # Progressbar for streamlit app (obj).
                 dtype = 'float32',               # Data type (str).
                 init = "sp",                     # Initialization mode forwarded to utils.initialize_brushstrokes (str).
                 init_prob = None,                # Optional probability map guiding initialization; resized to canvas size (array or None).
                 offset=0.5,                      # Offset forwarded to utils.initialize_brushstrokes (float).
                 init_width=None,                 # Initial stroke width; also bounds width updates when width_fixed (float or None).
                 width_fixed = False,             # If True, widths are excluded from the optimizer and only clipped around init_width (bool).
                 optim_rate=0.1                   # Adam learning rate for the stroke parameters (float).
                 ):
        self.draw_strength = draw_strength
        self.draw_weight = draw_weight
        self.resolution = resolution
        self.num_strokes = num_strokes
        self.num_steps = num_steps
        self.S = S
        self.K = K
        self.canvas_color = canvas_color
        self.width_scale = width_scale
        self.length_scale = length_scale
        self.content_weight = content_weight
        self.style_weight = style_weight
        self.tv_weight = tv_weight
        self.curviture_weight = curviture_weight
        self.streamlit_pbar = streamlit_pbar
        self.dtype = dtype
        self.init = init
        self.init_prob = init_prob
        self.offset = offset
        self.init_width = init_width
        self.width_fixed = width_fixed
        self.optim_rate = optim_rate
        # Set canvas size (set smaller side of content image to 'resolution' and scale other side accordingly)
        W, H = content_img.size
        if H < W:
            new_H = resolution
            new_W = int((W / H) * new_H)
            # NOTE(review): next_W/next_H (2x canvas size) are computed but
            # never used anywhere in this class — apparent dead code.
            next_W = int(2*(W / H) * new_H)
            next_H = 2*resolution
        else:
            new_W = resolution
            new_H = int((H / W) * new_W)
            next_H = int(2*(H / W) * new_W)
            next_W = 2*resolution
        self.canvas_height = new_H
        self.canvas_width = new_W
        content_img = content_img.resize((self.canvas_width, self.canvas_height))
        style_img = style_img.resize((self.canvas_width, self.canvas_height))
        # Resize optional array-valued inputs (probability map, background
        # canvas) to the working canvas resolution with bicubic interpolation.
        if self.init_prob is not None:
            self.init_prob = skimage.transform.resize(self.init_prob,(new_H,new_W,),order=3)
        if isinstance(self.canvas_color,str) is False:
            self.canvas_color = skimage.transform.resize(self.canvas_color,(new_H,new_W,),order=3)
        # Normalize both images to float arrays in [0, 1].
        content_img = np.array(content_img).astype(self.dtype)
        style_img = np.array(style_img).astype(self.dtype)
        content_img /= 255.0
        style_img /= 255.0
        self.content_img_np = content_img
        self.style_img_np = style_img
        if draw_curve_position_path is not None and draw_curve_vector_path is not None:
            self.draw_curve_position_np = np.load(draw_curve_position_path)
            self.draw_curve_vector_np = np.load(draw_curve_vector_path)
            # Curve positions are scaled by canvas size — presumably stored as
            # normalized [0, 1] coordinates; verify against the data files.
            self.draw_curve_position_np[..., 0] *= self.canvas_width
            self.draw_curve_position_np[..., 1] *= self.canvas_height
        # Fetch pretrained VGG weights used for the perceptual losses.
        ckpt_path = utils.download_weights(url='https://www.dropbox.com/s/hv7b4eajrj7isyq/vgg_weights.pickle?dl=1',
                                           name='vgg_weights.pickle')
        self.vgg = networks.VGG(ckpt_path=ckpt_path)

    def optimize(self):
        """Build the graph and run the optimization loop.

        Returns:
            A tuple (canvas PIL.Image, curve start points, curve end points,
            curve control points, stroke locations, stroke colors, last
            content-loss map, stroke widths) — the last seven are the final
            numpy values of the corresponding graph tensors.
        """
        self._initialize()
        self._render()
        self._losses()
        self._optimizer()
        with tf.Session() as sess:
            sess.run(tf.global_variables_initializer())
            steps = trange(self.num_steps, desc='', leave=True)
            for step in steps:
                # Fetch order: rendered image, losses, params, the grouped
                # train op, then s/e/c points, locations, colors, the last
                # loss map and widths (returned to the caller after the loop).
                I_, loss_dict_, params_dict_, _,s,e,c,l,colours,lm,w = \
                sess.run(fetches=[self.I,
                                  self.loss_dict,
                                  self.params_dict,
                                  self.optim_step_with_constraints,
                                  self.curve_s,
                                  self.curve_e,
                                  self.curve_c,
                                  self.location,
                                  self.color,
                                  self.lossmaps[-1],
                                  self.width],
                         options=config_pb2.RunOptions(report_tensor_allocations_upon_oom=True)
                        )
                steps.set_description(f'content_loss: {loss_dict_["content"]:.6f}')
                #s = ''
                #for key in loss_dict_:
                #    loss = loss_dict_[key]
                #    s += key + f': {loss_dict_[key]:.4f}, '
                #steps.set_description(s[:-2])
                #print(s)
                steps.refresh()
                if self.streamlit_pbar is not None: self.streamlit_pbar.update(1)
        return Image.fromarray(np.array(np.clip(I_, 0, 1) * 255, dtype=np.uint8)),s,e,c,l,colours,lm,w

    def _initialize(self):
        """Create TF variables for the stroke parameters (from the utils
        initializer) and constants for the content/style images and the
        optional drawn guide curves."""
        location, s, e, c, width, color = utils.initialize_brushstrokes(self.content_img_np,
                                                                      self.num_strokes,
                                                                      self.canvas_height,
                                                                      self.canvas_width,
                                                                      self.length_scale,
                                                                      self.width_scale,
                                                                      init=self.init,
                                                                      init_prob = self.init_prob,
                                                                      offset=self.offset,
                                                                      init_width=self.init_width)
        self.curve_s = tf.Variable(name='curve_s', initial_value=s, dtype=self.dtype)
        self.curve_e = tf.Variable(name='curve_e', initial_value=e, dtype=self.dtype)
        self.curve_c = tf.Variable(name='curve_c', initial_value=c, dtype=self.dtype)
        self.color = tf.Variable(name='color', initial_value=color, dtype=self.dtype)
        self.location = tf.Variable(name='location', initial_value=location, dtype=self.dtype)
        self.width = tf.Variable(name='width', initial_value=width, dtype=self.dtype)
        self.content_img = tf.constant(name='content_img', value=self.content_img_np, dtype=self.dtype)
        self.style_img = tf.constant(name='style_img', value=self.style_img_np, dtype=self.dtype)
        if hasattr(self, 'draw_curve_position_np') and hasattr(self, 'draw_curve_vector_np'):
            self.draw_curve_position = tf.constant(name='draw_curve_position', value=self.draw_curve_position_np, dtype=self.dtype)
            self.draw_curve_vector = tf.constant(name='draw_curve_vector', value=self.draw_curve_vector_np, dtype=self.dtype)
        # Bundle of all trainable stroke parameters, fetched per step in
        # optimize() for inspection.
        self.params_dict = {'location': self.location,
                            'curve_s': self.curve_s,
                            'curve_e': self.curve_e,
                            'curve_c': self.curve_c,
                            'width': self.width,
                            'color': self.color}

    def _render(self):
        """Sample S points on each (location-offset) Bezier curve and render
        the strokes into self.I via the differentiable renderer."""
        curve_points,locs,colors,widths = ops.sample_quadratic_bezier_curve2(s=self.curve_s + self.location,
                                                                           e=self.curve_e + self.location,
                                                                           c=self.curve_c + self.location,
                                                                           colors = self.color,
                                                                           widths = self.width,
                                                                           num_points=self.S,
                                                                           dtype=self.dtype)
        self.I = ops.renderer(curve_points,
                              locs,
                              colors,
                              widths,
                              self.canvas_height,
                              self.canvas_width,
                              self.K,
                              canvas_color=self.canvas_color,
                              dtype=self.dtype)

    def _losses(self):
        """Build self.loss_dict (content, curviture, tv, optional drawing) and
        self.lossmaps (per-layer content-loss maps at 2x resolution)."""
        # resize images to save memory
        rendered_canvas_resized = \
            tf.image.resize_nearest_neighbor(images=ops.preprocess_img(self.I),
                                             size=(int(self.canvas_height), int(self.canvas_width)))
        rendered_canvas_resized2 = \
            tf.image.resize_nearest_neighbor(images=ops.preprocess_img(self.I),
                                             size=(int(2*self.canvas_height), int(2*self.canvas_width)))
        content_img_resized = \
            tf.image.resize_nearest_neighbor(images=ops.preprocess_img(self.content_img),
                                             size=(int(self.canvas_height), int(self.canvas_width)))
        content_img_resized2 = \
            tf.image.resize_nearest_neighbor(images=ops.preprocess_img(self.content_img),
                                             size=(int(2*self.canvas_height), int(2*self.canvas_width)))
        # NOTE(review): style_img_resized is unused while the style loss below
        # stays commented out.
        style_img_resized = \
            tf.image.resize_nearest_neighbor(images=ops.preprocess_img(self.style_img),
                                             size=(int(self.canvas_height), int(self.canvas_width)))
        self.loss_dict = {}
        canvas_feats = self.vgg.extract_features(rendered_canvas_resized)
        content_feats = self.vgg.extract_features(content_img_resized)
        layers=['conv4_2', 'conv5_2']
        self.loss_dict['content'] = ops.content_loss(canvas_feats,
                                                     content_feats,
                                                     #layers=['conv1_2', 'conv2_2', 'conv3_2', 'conv4_2', 'conv5_2'],
                                                     layers=layers,
                                                     weights=[1, 1],
                                                     scale_by_y=True)
        canvas_feats2 = self.vgg.extract_features(rendered_canvas_resized2)
        content_feats2 = self.vgg.extract_features(content_img_resized2)
        # Per-layer spatial maps of the (weighted) squared feature difference,
        # averaged over the channel axis.
        self.lossmaps = []
        for layer in layers:
            self.lossmaps.append(tf.reduce_mean(tf.square(canvas_feats2[layer]-content_feats2[layer]) * tf.minimum(content_feats2[layer], tf.sigmoid(content_feats2[layer])),-1))
        self.loss_dict['content'] *= self.content_weight
        #self.loss_dict['style'] = ops.style_loss(self.vgg.extract_features(rendered_canvas_resized),
        #                                          self.vgg.extract_features(style_img_resized),
        #                                          layers=['conv1_1', 'conv2_1', 'conv3_1', 'conv4_1', 'conv5_1'],
        #                                          weights=[1, 1, 1, 1, 1])
        #self.loss_dict['style'] *= self.style_weight
        self.loss_dict['curviture'] = ops.curviture_loss(self.curve_s, self.curve_e, self.curve_c)
        self.loss_dict['curviture'] *= self.curviture_weight
        self.loss_dict['tv'] = ops.total_variation_loss(x_loc=self.location, s=self.curve_s, e=self.curve_e, K=10)
        self.loss_dict['tv'] *= self.tv_weight
        if hasattr(self, 'draw_curve_position') and hasattr(self, 'draw_curve_vector'):
            self.loss_dict['drawing'] = ops.draw_projection_loss(self.location,
                                                              self.curve_s,
                                                              self.curve_e,
                                                              self.draw_curve_position,
                                                              self.draw_curve_vector,
                                                              self.draw_strength)
            self.loss_dict['drawing'] *= self.draw_weight

    def _optimizer(self):
        """Create the Adam train op over the stroke parameters, followed by
        clipping ops that keep colors/locations/widths in valid ranges; the
        whole sequence is grouped into self.optim_step_with_constraints."""
        loss = tf.constant(0.0)
        for key in self.loss_dict:
            loss += self.loss_dict[key]
        step_ops = []
        var_list = [self.location, self.curve_s, self.curve_e, self.curve_c, self.width]
        if self.width_fixed:
            var_list = [self.location, self.curve_s, self.curve_e, self.curve_c]
        optim_step = tf.train.AdamOptimizer(self.optim_rate).minimize(
            loss=loss,
            var_list=var_list)
        step_ops.append(optim_step)
        #optim_step_color = tf.train.AdamOptimizer(0.01).minimize(
        #    loss=self.loss_dict['style'],
        #    var_list=self.color)
        #step_ops.append(optim_step_color)
        # constraint parameters to certain range (applied after the Adam step
        # via the control dependency)
        with tf.control_dependencies(step_ops.copy()):
            step_ops.append(tf.assign(self.color, tf.clip_by_value(self.color, 0, 1)))
            coord_x, coord_y = tf.gather(self.location, axis=-1, indices=[0]), tf.gather(self.location, axis=-1, indices=[1])
            coord_clip = tf.concat([tf.clip_by_value(coord_x, 0, self.canvas_height), tf.clip_by_value(coord_y, 0, self.canvas_width)], axis=-1)
            step_ops.append(tf.assign(self.location, coord_clip))
            if self.width_fixed == False:
                # Free widths: only force non-negativity.
                step_ops.append(tf.assign(self.width, tf.nn.relu(self.width)))
            else:
                # Fixed widths: keep them within +/- 0.1 of init_width.
                step_ops.append(tf.assign(self.width, tf.clip_by_value(self.width,self.init_width-0.1,self.init_width+0.1)))
        self.optim_step_with_constraints = tf.group(*step_ops)
class PixelOptimizer:
    """Refines a canvas image at the pixel level by directly optimizing the
    pixel values under VGG content + style losses (plus an optional total
    variation term). Builds a TF1 graph and runs it in optimize()."""

    def __init__(self,
                 canvas,                  # Canvas (PIL.Image).
                 style_img,               # Style image (PIL.Image).
                 resolution = 1024,       # Resolution of the canvas.
                 num_steps = 2000,        # Number of optimization steps.
                 content_weight = 1.0,    # Weight for the content loss.
                 style_weight = 10000.0,  # Weight for the style loss.
                 tv_weight = 0.0,         # Weight for the total variation loss.
                 streamlit_pbar = None,   # Progressbar for streamlit app (obj).
                 dtype = 'float32'        # Data type.
                 ):
        self.resolution = resolution
        self.num_steps = num_steps
        self.content_weight = content_weight
        self.style_weight = style_weight
        self.tv_weight = tv_weight
        self.streamlit_pbar = streamlit_pbar
        self.dtype = dtype
        # Set canvas size (set smaller side of content image to 'resolution' and scale other side accordingly)
        W, H = canvas.size
        if H < W:
            new_H = resolution
            new_W = int((W / H) * new_H)
        else:
            new_W = resolution
            new_H = int((H / W) * new_W)
        self.canvas_height = new_H
        self.canvas_width = new_W
        canvas = canvas.resize((self.canvas_width, self.canvas_height))
        style_img = style_img.resize((self.canvas_width, self.canvas_height))
        # Normalize both images to float arrays in [0, 1]. The input canvas
        # doubles as the content target.
        canvas = np.array(canvas).astype(self.dtype)
        style_img = np.array(style_img).astype(self.dtype)
        canvas /= 255.0
        style_img /= 255.0
        self.canvas_np = canvas
        self.content_img_np = canvas
        self.style_img_np = style_img
        # Fetch pretrained VGG weights used for the perceptual losses.
        ckpt_path = utils.download_weights(url='https://www.dropbox.com/s/hv7b4eajrj7isyq/vgg_weights.pickle?dl=1',
                                           name='vgg_weights.pickle')
        self.vgg = networks.VGG(ckpt_path=ckpt_path)

    def optimize(self):
        """Build the graph and run the pixel optimization loop.

        Returns:
            The optimized canvas as a PIL.Image (values clipped to [0, 1]
            before conversion to uint8).
        """
        self._initialize()
        self._losses()
        self._optimizer()
        with tf.Session() as sess:
            sess.run(tf.global_variables_initializer())
            steps = trange(self.num_steps, desc='', leave=True)
            for step in steps:
                canvas_, loss_dict_, _ = \
                sess.run(fetches=[self.canvas,
                                  self.loss_dict,
                                  self.optim_step_with_constraints],
                         options=config_pb2.RunOptions(report_tensor_allocations_upon_oom=True)
                        )
                # Show all current loss terms in the progress bar description.
                s = ''
                for key in loss_dict_:
                    loss = loss_dict_[key]
                    s += key + f': {loss_dict_[key]:.6f}, '
                steps.set_description(s[:-2])
                steps.refresh()
                if self.streamlit_pbar is not None: self.streamlit_pbar.update(1)
        return Image.fromarray(np.array(np.clip(canvas_, 0, 1) * 255, dtype=np.uint8))

    def _initialize(self):
        """Create the trainable canvas variable and constant content/style
        image tensors."""
        self.canvas = tf.Variable(name='canvas', initial_value=self.canvas_np, dtype=self.dtype)
        self.content_img = tf.constant(name='content_img', value=self.content_img_np, dtype=self.dtype)
        self.style_img = tf.constant(name='style_img', value=self.style_img_np, dtype=self.dtype)

    def _losses(self):
        """Build self.loss_dict with weighted content, style and total
        variation terms."""
        # resize images to save memory
        rendered_canvas_resized = \
            tf.image.resize_nearest_neighbor(images=ops.preprocess_img(self.canvas),
                                             size=(int(self.canvas_height), int(self.canvas_width)))
        content_img_resized = \
            tf.image.resize_nearest_neighbor(images=ops.preprocess_img(self.content_img),
                                             size=(int(self.canvas_height), int(self.canvas_width)))
        style_img_resized = \
            tf.image.resize_nearest_neighbor(images=ops.preprocess_img(self.style_img),
                                             size=(int(self.canvas_height), int(self.canvas_width)))
        self.loss_dict = {}
        self.loss_dict['content'] = ops.content_loss(self.vgg.extract_features(rendered_canvas_resized),
                                                     self.vgg.extract_features(content_img_resized),
                                                     layers=['conv1_2_pool', 'conv2_2_pool', 'conv3_3_pool', 'conv4_3_pool', 'conv5_3_pool'],
                                                     weights=[1, 1, 1, 1, 1])
        self.loss_dict['content'] *= self.content_weight
        self.loss_dict['style'] = ops.style_loss(self.vgg.extract_features(rendered_canvas_resized),
                                                  self.vgg.extract_features(style_img_resized),
                                                  layers=['conv1_1', 'conv2_1', 'conv3_1', 'conv4_1', 'conv5_1'],
                                                  weights=[1, 1, 1, 1, 1])
        self.loss_dict['style'] *= self.style_weight
        # Anisotropic TV: squared differences along rows and columns, each
        # normalized by the corresponding canvas dimension.
        self.loss_dict['tv'] = ((tf.nn.l2_loss(self.canvas[1:, :, :] - self.canvas[:-1, :, :]) / self.canvas.shape.as_list()[0]) +
                                (tf.nn.l2_loss(self.canvas[:, 1:, :] - self.canvas[:, :-1, :]) / self.canvas.shape.as_list()[1]))
        self.loss_dict['tv'] *= self.tv_weight

    def _optimizer(self):
        """Create the Adam train op over the canvas pixels plus a clipping op
        keeping pixel values in [0, 1], grouped into
        self.optim_step_with_constraints."""
        loss = tf.constant(0.0)
        for key in self.loss_dict:
            loss += self.loss_dict[key]
        step_ops = []
        optim_step = tf.train.AdamOptimizer(0.01).minimize(loss=loss, var_list=self.canvas)
        step_ops.append(optim_step)
        # constraint parameters to certain range (applied after the Adam step
        # via the control dependency)
        with tf.control_dependencies(step_ops.copy()):
            step_ops.append(tf.assign(self.canvas, tf.clip_by_value(self.canvas, 0, 1)))
        self.optim_step_with_constraints = tf.group(*step_ops)
| [
"numpy.clip",
"ops.content_loss",
"tensorflow.group",
"numpy.array",
"networks.VGG",
"ops.renderer",
"tensorflow.Session",
"tensorflow.compat.v1.logging.set_verbosity",
"ops.curviture_loss",
"tensorflow.assign",
"tensorflow.clip_by_value",
"tensorflow.square",
"tensorflow.train.AdamOptimizer... | [((395, 457), 'tensorflow.compat.v1.logging.set_verbosity', 'tf.compat.v1.logging.set_verbosity', (['tf.compat.v1.logging.ERROR'], {}), '(tf.compat.v1.logging.ERROR)\n', (429, 457), True, 'import tensorflow as tf\n'), ((21, 46), 'warnings.catch_warnings', 'warnings.catch_warnings', ([], {}), '()\n', (44, 46), False, 'import warnings\n'), ((52, 110), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {'category': 'RuntimeWarning'}), "('ignore', category=RuntimeWarning)\n", (75, 110), False, 'import warnings\n'), ((115, 172), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {'category': 'FutureWarning'}), "('ignore', category=FutureWarning)\n", (138, 172), False, 'import warnings\n'), ((7748, 7879), 'utils.download_weights', 'utils.download_weights', ([], {'url': '"""https://www.dropbox.com/s/hv7b4eajrj7isyq/vgg_weights.pickle?dl=1"""', 'name': '"""vgg_weights.pickle"""'}), "(url=\n 'https://www.dropbox.com/s/hv7b4eajrj7isyq/vgg_weights.pickle?dl=1',\n name='vgg_weights.pickle')\n", (7770, 7879), False, 'import utils\n'), ((7933, 7966), 'networks.VGG', 'networks.VGG', ([], {'ckpt_path': 'ckpt_path'}), '(ckpt_path=ckpt_path)\n', (7945, 7966), False, 'import networks\n'), ((9712, 9959), 'utils.initialize_brushstrokes', 'utils.initialize_brushstrokes', (['self.content_img_np', 'self.num_strokes', 'self.canvas_height', 'self.canvas_width', 'self.length_scale', 'self.width_scale'], {'init': 'self.init', 'init_prob': 'self.init_prob', 'offset': 'self.offset', 'init_width': 'self.init_width'}), '(self.content_img_np, self.num_strokes, self.\n canvas_height, self.canvas_width, self.length_scale, self.width_scale,\n init=self.init, init_prob=self.init_prob, offset=self.offset,\n init_width=self.init_width)\n', (9741, 9959), False, 'import utils\n'), ((10626, 10688), 'tensorflow.Variable', 'tf.Variable', ([], {'name': '"""curve_s"""', 'initial_value': 's', 'dtype': 'self.dtype'}), 
"(name='curve_s', initial_value=s, dtype=self.dtype)\n", (10637, 10688), True, 'import tensorflow as tf\n'), ((10712, 10774), 'tensorflow.Variable', 'tf.Variable', ([], {'name': '"""curve_e"""', 'initial_value': 'e', 'dtype': 'self.dtype'}), "(name='curve_e', initial_value=e, dtype=self.dtype)\n", (10723, 10774), True, 'import tensorflow as tf\n'), ((10798, 10860), 'tensorflow.Variable', 'tf.Variable', ([], {'name': '"""curve_c"""', 'initial_value': 'c', 'dtype': 'self.dtype'}), "(name='curve_c', initial_value=c, dtype=self.dtype)\n", (10809, 10860), True, 'import tensorflow as tf\n'), ((10882, 10946), 'tensorflow.Variable', 'tf.Variable', ([], {'name': '"""color"""', 'initial_value': 'color', 'dtype': 'self.dtype'}), "(name='color', initial_value=color, dtype=self.dtype)\n", (10893, 10946), True, 'import tensorflow as tf\n'), ((10971, 11041), 'tensorflow.Variable', 'tf.Variable', ([], {'name': '"""location"""', 'initial_value': 'location', 'dtype': 'self.dtype'}), "(name='location', initial_value=location, dtype=self.dtype)\n", (10982, 11041), True, 'import tensorflow as tf\n'), ((11063, 11127), 'tensorflow.Variable', 'tf.Variable', ([], {'name': '"""width"""', 'initial_value': 'width', 'dtype': 'self.dtype'}), "(name='width', initial_value=width, dtype=self.dtype)\n", (11074, 11127), True, 'import tensorflow as tf\n'), ((11155, 11231), 'tensorflow.constant', 'tf.constant', ([], {'name': '"""content_img"""', 'value': 'self.content_img_np', 'dtype': 'self.dtype'}), "(name='content_img', value=self.content_img_np, dtype=self.dtype)\n", (11166, 11231), True, 'import tensorflow as tf\n'), ((11257, 11329), 'tensorflow.constant', 'tf.constant', ([], {'name': '"""style_img"""', 'value': 'self.style_img_np', 'dtype': 'self.dtype'}), "(name='style_img', value=self.style_img_np, dtype=self.dtype)\n", (11268, 11329), True, 'import tensorflow as tf\n'), ((12067, 12282), 'ops.sample_quadratic_bezier_curve2', 'ops.sample_quadratic_bezier_curve2', ([], {'s': '(self.curve_s + 
self.location)', 'e': '(self.curve_e + self.location)', 'c': '(self.curve_c + self.location)', 'colors': 'self.color', 'widths': 'self.width', 'num_points': 'self.S', 'dtype': 'self.dtype'}), '(s=self.curve_s + self.location, e=self.\n curve_e + self.location, c=self.curve_c + self.location, colors=self.\n color, widths=self.width, num_points=self.S, dtype=self.dtype)\n', (12101, 12282), False, 'import ops\n'), ((12637, 12787), 'ops.renderer', 'ops.renderer', (['curve_points', 'locs', 'colors', 'widths', 'self.canvas_height', 'self.canvas_width', 'self.K'], {'canvas_color': 'self.canvas_color', 'dtype': 'self.dtype'}), '(curve_points, locs, colors, widths, self.canvas_height, self.\n canvas_width, self.K, canvas_color=self.canvas_color, dtype=self.dtype)\n', (12649, 12787), False, 'import ops\n'), ((14461, 14558), 'ops.content_loss', 'ops.content_loss', (['canvas_feats', 'content_feats'], {'layers': 'layers', 'weights': '[1, 1]', 'scale_by_y': '(True)'}), '(canvas_feats, content_feats, layers=layers, weights=[1, 1],\n scale_by_y=True)\n', (14477, 14558), False, 'import ops\n'), ((15806, 15866), 'ops.curviture_loss', 'ops.curviture_loss', (['self.curve_s', 'self.curve_e', 'self.curve_c'], {}), '(self.curve_s, self.curve_e, self.curve_c)\n', (15824, 15866), False, 'import ops\n'), ((15960, 16048), 'ops.total_variation_loss', 'ops.total_variation_loss', ([], {'x_loc': 'self.location', 's': 'self.curve_s', 'e': 'self.curve_e', 'K': '(10)'}), '(x_loc=self.location, s=self.curve_s, e=self.\n curve_e, K=10)\n', (15984, 16048), False, 'import ops\n'), ((16789, 16805), 'tensorflow.constant', 'tf.constant', (['(0.0)'], {}), '(0.0)\n', (16800, 16805), True, 'import tensorflow as tf\n'), ((18292, 18311), 'tensorflow.group', 'tf.group', (['*step_ops'], {}), '(*step_ops)\n', (18300, 18311), True, 'import tensorflow as tf\n'), ((20672, 20803), 'utils.download_weights', 'utils.download_weights', ([], {'url': 
'"""https://www.dropbox.com/s/hv7b4eajrj7isyq/vgg_weights.pickle?dl=1"""', 'name': '"""vgg_weights.pickle"""'}), "(url=\n 'https://www.dropbox.com/s/hv7b4eajrj7isyq/vgg_weights.pickle?dl=1',\n name='vgg_weights.pickle')\n", (20694, 20803), False, 'import utils\n'), ((20857, 20890), 'networks.VGG', 'networks.VGG', ([], {'ckpt_path': 'ckpt_path'}), '(ckpt_path=ckpt_path)\n', (20869, 20890), False, 'import networks\n'), ((21997, 22071), 'tensorflow.Variable', 'tf.Variable', ([], {'name': '"""canvas"""', 'initial_value': 'self.canvas_np', 'dtype': 'self.dtype'}), "(name='canvas', initial_value=self.canvas_np, dtype=self.dtype)\n", (22008, 22071), True, 'import tensorflow as tf\n'), ((22099, 22175), 'tensorflow.constant', 'tf.constant', ([], {'name': '"""content_img"""', 'value': 'self.content_img_np', 'dtype': 'self.dtype'}), "(name='content_img', value=self.content_img_np, dtype=self.dtype)\n", (22110, 22175), True, 'import tensorflow as tf\n'), ((22201, 22273), 'tensorflow.constant', 'tf.constant', ([], {'name': '"""style_img"""', 'value': 'self.style_img_np', 'dtype': 'self.dtype'}), "(name='style_img', value=self.style_img_np, dtype=self.dtype)\n", (22212, 22273), True, 'import tensorflow as tf\n'), ((24303, 24319), 'tensorflow.constant', 'tf.constant', (['(0.0)'], {}), '(0.0)\n', (24314, 24319), True, 'import tensorflow as tf\n'), ((24792, 24811), 'tensorflow.group', 'tf.group', (['*step_ops'], {}), '(*step_ops)\n', (24800, 24811), True, 'import tensorflow as tf\n'), ((7482, 7515), 'numpy.load', 'np.load', (['draw_curve_position_path'], {}), '(draw_curve_position_path)\n', (7489, 7515), True, 'import numpy as np\n'), ((7556, 7587), 'numpy.load', 'np.load', (['draw_curve_vector_path'], {}), '(draw_curve_vector_path)\n', (7563, 7587), True, 'import numpy as np\n'), ((8106, 8118), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (8116, 8118), True, 'import tensorflow as tf\n'), ((8204, 8247), 'tqdm.trange', 'trange', (['self.num_steps'], {'desc': '""""""', 
'leave': '(True)'}), "(self.num_steps, desc='', leave=True)\n", (8210, 8247), False, 'from tqdm import trange\n'), ((11464, 11560), 'tensorflow.constant', 'tf.constant', ([], {'name': '"""draw_curve_position"""', 'value': 'self.draw_curve_position_np', 'dtype': 'self.dtype'}), "(name='draw_curve_position', value=self.draw_curve_position_np,\n dtype=self.dtype)\n", (11475, 11560), True, 'import tensorflow as tf\n'), ((11594, 11686), 'tensorflow.constant', 'tf.constant', ([], {'name': '"""draw_curve_vector"""', 'value': 'self.draw_curve_vector_np', 'dtype': 'self.dtype'}), "(name='draw_curve_vector', value=self.draw_curve_vector_np,\n dtype=self.dtype)\n", (11605, 11686), True, 'import tensorflow as tf\n'), ((16220, 16362), 'ops.draw_projection_loss', 'ops.draw_projection_loss', (['self.location', 'self.curve_s', 'self.curve_e', 'self.draw_curve_position', 'self.draw_curve_vector', 'self.draw_strength'], {}), '(self.location, self.curve_s, self.curve_e, self.\n draw_curve_position, self.draw_curve_vector, self.draw_strength)\n', (16244, 16362), False, 'import ops\n'), ((21006, 21018), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (21016, 21018), True, 'import tensorflow as tf\n'), ((21104, 21147), 'tqdm.trange', 'trange', (['self.num_steps'], {'desc': '""""""', 'leave': '(True)'}), "(self.num_steps, desc='', leave=True)\n", (21110, 21147), False, 'from tqdm import trange\n'), ((7105, 7126), 'numpy.array', 'np.array', (['content_img'], {}), '(content_img)\n', (7113, 7126), True, 'import numpy as np\n'), ((7166, 7185), 'numpy.array', 'np.array', (['style_img'], {}), '(style_img)\n', (7174, 7185), True, 'import numpy as np\n'), ((8149, 8182), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (8180, 8182), True, 'import tensorflow as tf\n'), ((13182, 13208), 'ops.preprocess_img', 'ops.preprocess_img', (['self.I'], {}), '(self.I)\n', (13200, 13208), False, 'import ops\n'), ((13400, 13426), 'ops.preprocess_img', 
'ops.preprocess_img', (['self.I'], {}), '(self.I)\n', (13418, 13426), False, 'import ops\n'), ((13617, 13653), 'ops.preprocess_img', 'ops.preprocess_img', (['self.content_img'], {}), '(self.content_img)\n', (13635, 13653), False, 'import ops\n'), ((13850, 13886), 'ops.preprocess_img', 'ops.preprocess_img', (['self.content_img'], {}), '(self.content_img)\n', (13868, 13886), False, 'import ops\n'), ((14076, 14110), 'ops.preprocess_img', 'ops.preprocess_img', (['self.style_img'], {}), '(self.style_img)\n', (14094, 14110), False, 'import ops\n'), ((17132, 17171), 'tensorflow.train.AdamOptimizer', 'tf.train.AdamOptimizer', (['self.optim_rate'], {}), '(self.optim_rate)\n', (17154, 17171), True, 'import tensorflow as tf\n'), ((17683, 17729), 'tensorflow.gather', 'tf.gather', (['self.location'], {'axis': '(-1)', 'indices': '[0]'}), '(self.location, axis=-1, indices=[0])\n', (17692, 17729), True, 'import tensorflow as tf\n'), ((17731, 17777), 'tensorflow.gather', 'tf.gather', (['self.location'], {'axis': '(-1)', 'indices': '[1]'}), '(self.location, axis=-1, indices=[1])\n', (17740, 17777), True, 'import tensorflow as tf\n'), ((17951, 17987), 'tensorflow.assign', 'tf.assign', (['self.location', 'coord_clip'], {}), '(self.location, coord_clip)\n', (17960, 17987), True, 'import tensorflow as tf\n'), ((20396, 20412), 'numpy.array', 'np.array', (['canvas'], {}), '(canvas)\n', (20404, 20412), True, 'import numpy as np\n'), ((20452, 20471), 'numpy.array', 'np.array', (['style_img'], {}), '(style_img)\n', (20460, 20471), True, 'import numpy as np\n'), ((21049, 21082), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (21080, 21082), True, 'import tensorflow as tf\n'), ((22425, 22456), 'ops.preprocess_img', 'ops.preprocess_img', (['self.canvas'], {}), '(self.canvas)\n', (22443, 22456), False, 'import ops\n'), ((22644, 22680), 'ops.preprocess_img', 'ops.preprocess_img', (['self.content_img'], {}), '(self.content_img)\n', (22662, 22680), 
False, 'import ops\n'), ((22866, 22900), 'ops.preprocess_img', 'ops.preprocess_img', (['self.style_img'], {}), '(self.style_img)\n', (22884, 22900), False, 'import ops\n'), ((23986, 24047), 'tensorflow.nn.l2_loss', 'tf.nn.l2_loss', (['(self.canvas[1:, :, :] - self.canvas[:-1, :, :])'], {}), '(self.canvas[1:, :, :] - self.canvas[:-1, :, :])\n', (23999, 24047), True, 'import tensorflow as tf\n'), ((24117, 24178), 'tensorflow.nn.l2_loss', 'tf.nn.l2_loss', (['(self.canvas[:, 1:, :] - self.canvas[:, :-1, :])'], {}), '(self.canvas[:, 1:, :] - self.canvas[:, :-1, :])\n', (24130, 24178), True, 'import tensorflow as tf\n'), ((24447, 24475), 'tensorflow.train.AdamOptimizer', 'tf.train.AdamOptimizer', (['(0.01)'], {}), '(0.01)\n', (24469, 24475), True, 'import tensorflow as tf\n'), ((17615, 17649), 'tensorflow.clip_by_value', 'tf.clip_by_value', (['self.color', '(0)', '(1)'], {}), '(self.color, 0, 1)\n', (17631, 17649), True, 'import tensorflow as tf\n'), ((17814, 17862), 'tensorflow.clip_by_value', 'tf.clip_by_value', (['coord_x', '(0)', 'self.canvas_height'], {}), '(coord_x, 0, self.canvas_height)\n', (17830, 17862), True, 'import tensorflow as tf\n'), ((17864, 17911), 'tensorflow.clip_by_value', 'tf.clip_by_value', (['coord_y', '(0)', 'self.canvas_width'], {}), '(coord_y, 0, self.canvas_width)\n', (17880, 17911), True, 'import tensorflow as tf\n'), ((21900, 21922), 'numpy.clip', 'np.clip', (['canvas_', '(0)', '(1)'], {}), '(canvas_, 0, 1)\n', (21907, 21922), True, 'import numpy as np\n'), ((24710, 24745), 'tensorflow.clip_by_value', 'tf.clip_by_value', (['self.canvas', '(0)', '(1)'], {}), '(self.canvas, 0, 1)\n', (24726, 24745), True, 'import tensorflow as tf\n'), ((9004, 9066), 'tensorflow.core.protobuf.config_pb2.RunOptions', 'config_pb2.RunOptions', ([], {'report_tensor_allocations_upon_oom': '(True)'}), '(report_tensor_allocations_upon_oom=True)\n', (9025, 9066), False, 'from tensorflow.core.protobuf import config_pb2\n'), ((9579, 9596), 'numpy.clip', 'np.clip', 
(['I_', '(0)', '(1)'], {}), '(I_, 0, 1)\n', (9586, 9596), True, 'import numpy as np\n'), ((15138, 15193), 'tensorflow.square', 'tf.square', (['(canvas_feats2[layer] - content_feats2[layer])'], {}), '(canvas_feats2[layer] - content_feats2[layer])\n', (15147, 15193), True, 'import tensorflow as tf\n'), ((18083, 18105), 'tensorflow.nn.relu', 'tf.nn.relu', (['self.width'], {}), '(self.width)\n', (18093, 18105), True, 'import tensorflow as tf\n'), ((18178, 18252), 'tensorflow.clip_by_value', 'tf.clip_by_value', (['self.width', '(self.init_width - 0.1)', '(self.init_width + 0.1)'], {}), '(self.width, self.init_width - 0.1, self.init_width + 0.1)\n', (18194, 18252), True, 'import tensorflow as tf\n'), ((21439, 21501), 'tensorflow.core.protobuf.config_pb2.RunOptions', 'config_pb2.RunOptions', ([], {'report_tensor_allocations_upon_oom': '(True)'}), '(report_tensor_allocations_upon_oom=True)\n', (21460, 21501), False, 'from tensorflow.core.protobuf import config_pb2\n'), ((15228, 15261), 'tensorflow.sigmoid', 'tf.sigmoid', (['content_feats2[layer]'], {}), '(content_feats2[layer])\n', (15238, 15261), True, 'import tensorflow as tf\n')] |
import numpy as np
class MultiArmedBandit:
"""
MultiArmedBandit reinforcement learning agent.
Arguments:
epsilon - (float) The probability of randomly exploring the action space
rather than exploiting the best action.
"""
    def __init__(self, epsilon=0.2):
        """Create an epsilon-greedy bandit agent.

        Arguments:
            epsilon - (float) Probability of taking a random exploratory
                action instead of exploiting the current best action.
        """
        self.epsilon = epsilon
def fit(self, env, steps=1000):
"""
Trains the MultiArmedBandit on an OpenAI Gym environment.
Arguments:
env - (Env) An OpenAI Gym environment with discrete actions and
observations. See the OpenAI Gym documentation for example use
cases (https://gym.openai.com/docs/).
steps - (int) The number of actions to perform within the environment
during training.
Returns:
state_action_values - (np.array) The values assigned by the algorithm
to each state-action pair as a 2D numpy array. The dimensionality
of the numpy array should be S x A, where S is the number of
states in the environment and A is the number of possible actions.
rewards - (np.array) A 1D sequence of averaged rewards of length 100.
Let s = np.floor(steps / 100), then rewards[0] should contain the
average reward over the first s steps, rewards[1] should contain
the average reward over the next s steps, etc.
"""
env.reset()
action_values = np.zeros((env.action_space.n, ))
N_actions_performed = np.zeros((env.action_space.n, ), dtype=int)
rewards = np.zeros((100, ))
s = np.floor(steps / 100)
s_count = 0
reward_sum = 0
idx = 0
for step in range(steps):
# generate random num
p = np.random.random()
# check probability
action = env.action_space.sample(
) # your agent here (this takes random actions)
if p >= self.epsilon and len(set(action_values)) != 1:
action = np.argmax(action_values) # take highest Q action
# bandit
observation, reward, done, info = env.step(action)
# update values
N_actions_performed[action] += 1
action_values[action] += 1 / N_actions_performed[action] * (
reward - action_values[action])
reward_sum += reward
# check s
s_count += 1
if s == s_count:
rewards[idx] = reward_sum / (step + 1)
s_count = 0
idx += 1
if done:
observation = env.reset()
# done
return np.repeat([action_values], env.observation_space.n,
axis=0), rewards
def predict(self, env, state_action_values):
"""
Runs prediction on an OpenAI environment using the policy defined by
the MultiArmedBandit algorithm and the state action values. Predictions
are run for exactly one episode. Note that one episode may produce a
variable number of steps.
Returns:
states - (np.array) The sequence of states visited by the agent over
the course of the episode. Does not include the starting state.
Should be of length K, where K is the number of steps taken within
the episode.
actions - (np.array) The sequence of actions taken by the agent over
the course of the episode. Should be of length K, where K is the
number of steps taken within the episode.
rewards - (np.array) The sequence of rewards received by the agent
over the course of the episode. Should be of length K, where K is
the number of steps taken within the episode.
"""
states, actions, rewards = [], [], []
env.reset()
while True:
action = np.argmax(state_action_values[0]) # take highest Q action
# bandit
observation, reward, done, info = env.step(action)
# record data
states.append(observation)
actions.append(action)
rewards.append(reward)
if done:
break
return np.array(states), np.array(actions), np.array(rewards) | [
"numpy.repeat",
"numpy.random.random",
"numpy.floor",
"numpy.argmax",
"numpy.array",
"numpy.zeros"
] | [((1446, 1477), 'numpy.zeros', 'np.zeros', (['(env.action_space.n,)'], {}), '((env.action_space.n,))\n', (1454, 1477), True, 'import numpy as np\n'), ((1509, 1551), 'numpy.zeros', 'np.zeros', (['(env.action_space.n,)'], {'dtype': 'int'}), '((env.action_space.n,), dtype=int)\n', (1517, 1551), True, 'import numpy as np\n'), ((1571, 1587), 'numpy.zeros', 'np.zeros', (['(100,)'], {}), '((100,))\n', (1579, 1587), True, 'import numpy as np\n'), ((1602, 1623), 'numpy.floor', 'np.floor', (['(steps / 100)'], {}), '(steps / 100)\n', (1610, 1623), True, 'import numpy as np\n'), ((1768, 1786), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (1784, 1786), True, 'import numpy as np\n'), ((2658, 2717), 'numpy.repeat', 'np.repeat', (['[action_values]', 'env.observation_space.n'], {'axis': '(0)'}), '([action_values], env.observation_space.n, axis=0)\n', (2667, 2717), True, 'import numpy as np\n'), ((3904, 3937), 'numpy.argmax', 'np.argmax', (['state_action_values[0]'], {}), '(state_action_values[0])\n', (3913, 3937), True, 'import numpy as np\n'), ((4242, 4258), 'numpy.array', 'np.array', (['states'], {}), '(states)\n', (4250, 4258), True, 'import numpy as np\n'), ((4260, 4277), 'numpy.array', 'np.array', (['actions'], {}), '(actions)\n', (4268, 4277), True, 'import numpy as np\n'), ((4279, 4296), 'numpy.array', 'np.array', (['rewards'], {}), '(rewards)\n', (4287, 4296), True, 'import numpy as np\n'), ((2018, 2042), 'numpy.argmax', 'np.argmax', (['action_values'], {}), '(action_values)\n', (2027, 2042), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
import numpy as np
#
# The parameters used in the functions below.
#
# Default parameter values shared by the functional forms below; the names
# follow the notation of Franks (2002). Units depend on the caller's
# non-dimensionalisation -- TODO confirm against the driving model.
standard_parameters = {
    # baseline irradiance parameter
    'irr0':5.0,
    # maximum rate in Michaelis Menten formulation
    'Vmax':10.0,
    # nutrient half saturation in Michaelis Menten formulation
    'nuthalfsat':0.5,
    # multiplicative grazing parameter
    'grazphy':0.25,
    # grazing parameter used in exponential functions
    'grazlambda':0.5,
    # maximum grazing rate
    'grazmax':0.25,
    # phytoplankton mortality rate
    'mort_phy':0.2,
    # zooplankton mortality rate
    'mort_zoo':0.1,
    }
#
# A selection of light response functions. Compare Table 1 in Franks (2002).
#
def lightresponse_linear(irr, parameters):
    """Light response growing linearly with irradiance, irr / irr0."""
    irr0 = parameters['irr0']
    return irr / irr0
def lightresponse_saturating(irr, parameters):
    """Saturating (Monod-type) light response, irr / (irr0 + irr)."""
    denominator = parameters['irr0'] + irr
    return irr / denominator
def lightresponse_exp(irr, parameters):
    """Exponentially saturating light response, 1 - exp(-irr / irr0)."""
    scaled_irr = irr / parameters['irr0']
    return 1.0 - np.exp(-scaled_irr)
def lightresponse_tanh(irr, parameters):
    """Hyperbolic-tangent light response, tanh(irr / irr0).

    Bug fix: the previous version returned tanh(-irr/irr0), which is
    negative for positive irradiance.  The saturating tanh response
    (Jassby & Platt form, cf. Franks 2002, Table 1) uses the positive
    argument, consistent with the other non-negative responses here.
    """
    return np.tanh(irr / parameters['irr0'])
def lightresponse_inhibit(irr, parameters):
    """Light response with photoinhibition, (irr/irr0) * exp(1 - irr/irr0).

    Peaks at irr == irr0 (value 1) and decays for stronger irradiance.
    """
    x = irr / parameters['irr0']
    return x * np.exp(1.0 - x)
#
# A selection of nutrient uptake functions. Compare Table 2 in Franks (2002).
#
def nutrientuptake_michaelismenten(nut, parameters):
    """Michaelis-Menten nutrient uptake, Vmax * nut / (nuthalfsat + nut).

    Bug fix: the numerator was missing the nutrient concentration, i.e.
    the function returned Vmax / (K + N), which *decreases* with nutrient
    availability.  The standard Michaelis-Menten form (Franks 2002,
    Table 2) is Vmax * N / (K + N), rising from 0 and saturating at Vmax.
    """
    return parameters['Vmax'] * nut / (parameters['nuthalfsat'] + nut)
#
# A selection of zooplankton grazing functions. Compare Table 3 in Franks (2002).
#
def grazing_linear(phy, parameters):
    """Zooplankton grazing proportional to phytoplankton concentration."""
    return phy * parameters['grazphy']
def grazing_bilinear(phy, parameters):
    """Grazing rising linearly with phytoplankton, capped at grazmax.

    Bug fix: the previous version called np.min(a, b), where the second
    positional argument of np.min is the *axis*, not another operand --
    on scalars this raises, and it never computed the intended cap.  The
    elementwise minimum of two values is np.minimum(a, b).
    """
    return np.minimum(parameters['grazphy'] * phy, parameters['grazmax'])
def grazing_ivlev(phy, parameters):
    """Ivlev grazing response, grazmax * (1 - exp(-grazlambda * phy))."""
    saturation = 1.0 - np.exp(-parameters['grazlambda'] * phy)
    return parameters['grazmax'] * saturation
#
# A selection of phytoplankton loss functions. Compare Table 4 in Franks (2002).
#
def phytoplanktonloss_linear(phy, parameters):
    """Constant specific phytoplankton loss rate (linear closure)."""
    # the rate does not depend on the standing stock
    return parameters['mort_phy']
def phytoplanktonloss_quadratic(phy, parameters):
    """Stock-dependent specific phytoplankton loss rate (quadratic closure)."""
    return phy * parameters['mort_phy']
#
# A selection of zooplankton loss functions. Compare Table 4 in Franks (2002).
#
def zooplanktonloss_linear(zoo, parameters):
    """Constant specific zooplankton loss rate (linear closure)."""
    # the rate does not depend on the standing stock
    return parameters['mort_zoo']
def zooplanktonloss_quadratic(zoo, parameters):
    """Stock-dependent specific zooplankton loss rate (quadratic closure)."""
    return zoo * parameters['mort_zoo']
#
# A generic function that can be used in place of any of the above in order to
# "switch off" a given segment. Using generic_nomod as the zooplankton grazing
# function, for example, will turn zooplankton grazing to zero.
#
def generic_nomod(*args, **kwargs):
    """Null response: accept any call signature and always return 0.0.

    Useful for switching off a model term, e.g. passing this as the
    grazing function disables zooplankton grazing.
    """
    return 0.0
| [
"numpy.exp",
"numpy.tanh",
"numpy.min"
] | [((1015, 1049), 'numpy.tanh', 'np.tanh', (["(-irr / parameters['irr0'])"], {}), "(-irr / parameters['irr0'])\n", (1022, 1049), True, 'import numpy as np\n'), ((1585, 1643), 'numpy.min', 'np.min', (["(parameters['grazphy'] * phy)", "parameters['grazmax']"], {}), "(parameters['grazphy'] * phy, parameters['grazmax'])\n", (1591, 1643), True, 'import numpy as np\n'), ((930, 963), 'numpy.exp', 'np.exp', (["(-irr / parameters['irr0'])"], {}), "(-irr / parameters['irr0'])\n", (936, 963), True, 'import numpy as np\n'), ((1153, 1175), 'numpy.exp', 'np.exp', (['(1.0 - irr_norm)'], {}), '(1.0 - irr_norm)\n', (1159, 1175), True, 'import numpy as np\n'), ((1718, 1757), 'numpy.exp', 'np.exp', (["(-parameters['grazlambda'] * phy)"], {}), "(-parameters['grazlambda'] * phy)\n", (1724, 1757), True, 'import numpy as np\n')] |
from PyQt5 import QtCore
from datetime import datetime
from nsls2ptycho.core.ptycho_param import Param
import sys, os
import pickle # dump param into disk
import subprocess # call mpirun from shell
from fcntl import fcntl, F_GETFL, F_SETFL
from os import O_NONBLOCK
import numpy as np
import traceback
from nsls2ptycho.core.databroker_api import load_metadata, save_data
from nsls2ptycho.core.utils import use_mpi_machinefile, set_flush_early
class PtychoReconWorker(QtCore.QThread):
    """Qt worker thread that launches the ptychography reconstruction as an
    external mpirun subprocess and relays its progress to the GUI.

    Progress is parsed from the subprocess stdout and emitted through
    ``update_signal`` as ``(iteration, metrics)``.
    """
    update_signal = QtCore.pyqtSignal(int, object) # (iteration number, chi arrays)
    process = None # handle of the running mpirun subprocess, set in recon_api()

    def __init__(self, param:Param=None, parent=None):
        super().__init__(parent)
        self.param = param
        self.return_value = None

    def _parse_message(self, tokens):
        """Parse a split "[INFO] ..." stdout line into (iteration, metrics).

        ``tokens`` is mutated in place: brackets are stripped and emptied
        tokens removed before the 'probe_chi'/'object_chi' values are read.
        Returns ``(it, {'probe_chi': [...], 'object_chi': [...]})``.
        """
        def _parser(current, upper_limit, target_list):
            # values start two tokens after the keyword (skipping the '=')
            for j in range(upper_limit):
                target_list.append(float(tokens[current+2+j]))
        # assuming tokens (stdout line) is split but not yet processed
        it = int(tokens[2])
        # first remove brackets
        empty_index_list = []
        for i, token in enumerate(tokens):
            tokens[i] = token.replace('[', '').replace(']', '')
            if tokens[i] == '':
                empty_index_list.append(i)
        # deleting shifts later indices left, hence the running offset
        counter = 0
        for i in empty_index_list:
            del tokens[i-counter]
            counter += 1
        # next parse based on param and the known format
        prb_list = []
        obj_list = []
        for i, token in enumerate(tokens):
            if token == 'probe_chi':
                if self.param.mode_flag:
                    _parser(i, self.param.prb_mode_num, prb_list)
                #elif self.param.multislice_flag:
                #TODO: maybe multislice will have multiple prb in the future?
                else:
                    _parser(i, 1, prb_list)
            if token == 'object_chi':
                if self.param.mode_flag:
                    _parser(i, self.param.obj_mode_num, obj_list)
                elif self.param.multislice_flag:
                    _parser(i, self.param.slice_num, obj_list)
                else:
                    _parser(i, 1, obj_list)
        # return a dictionary
        result = {'probe_chi':prb_list, 'object_chi':obj_list}
        return it, result

    def _test_stdout_completeness(self, stdout):
        """Count '=' tokens; a complete progress line contains exactly three."""
        counter = 0
        for token in stdout:
            if token == '=':
                counter += 1
        return counter

    def _parse_one_line(self):
        """Read, echo, and split one more stdout line from the subprocess."""
        stdout_2 = self.process.stdout.readline().decode('utf-8')
        print(stdout_2, end='') # because the line already ends with '\n'
        return stdout_2.split()

    def recon_api(self, param:Param, update_fcn=None):
        """Run the reconstruction under mpirun and stream its output.

        ``update_fcn`` (typically ``update_signal.emit``) receives parsed
        per-iteration metrics. Raises if any MPI rank exits nonzero; the
        shared-memory marker file is always removed in the finally block.
        """
        # "1" is just a placeholder to be overwritten soon
        mpirun_command = ["mpirun", "-n", "1", "python", "-W", "ignore", "-m","nsls2ptycho.core.ptycho.recon_ptycho_gui"]
        if param.mpi_file_path == '':
            if param.gpu_flag:
                mpirun_command[2] = str(len(param.gpus))
            else:
                mpirun_command[2] = str(param.processes) if param.processes > 1 else str(1)
        else:
            # regardless if GPU is used or not --- trust users to know this
            mpirun_command = use_mpi_machinefile(mpirun_command, param.mpi_file_path)
        mpirun_command = set_flush_early(mpirun_command)
        # for CuPy v8.0+
        os.environ['CUPY_ACCELERATORS'] = 'cub'
        try:
            self.return_value = None
            with subprocess.Popen(mpirun_command,
                                  stdout=subprocess.PIPE,
                                  stderr=subprocess.PIPE,
                                  env=dict(os.environ, mpi_warn_on_fork='0')) as run_ptycho:
                self.process = run_ptycho # register the subprocess
                # idea: if we attempt to readline from an empty pipe, it will block until
                # at least one line is piped in. However, stderr is usually empty, so reading
                # from it is very likely to block the output until the subprocess ends, which
                # is bad. Thus, we want to set the O_NONBLOCK flag for stderr, see
                # http://eyalarubas.com/python-subproc-nonblock.html
                #
                # Note that it is unclear if readline in Python 3.5+ is guaranteed safe with
                # non-blocking pipes or not. See https://bugs.python.org/issue1175#msg56041
                # and https://stackoverflow.com/questions/375427/
                # If this is a concern, using the asyncio module could be a safer approach?
                # One could also process stdout in one loop and then stderr in another, which
                # will not have the blocking issue.
                flags = fcntl(run_ptycho.stderr, F_GETFL) # first get current stderr flags
                fcntl(run_ptycho.stderr, F_SETFL, flags | O_NONBLOCK)
                while True:
                    stdout = run_ptycho.stdout.readline()
                    stderr = run_ptycho.stderr.readline() # without O_NONBLOCK this will very likely block
                    if (run_ptycho.poll() is not None) and (stdout==b'') and (stderr==b''):
                        break
                    if stdout:
                        stdout = stdout.decode('utf-8')
                        print(stdout, end='') # because the line already ends with '\n'
                        stdout = stdout.split()
                        if len(stdout) > 2 and stdout[0] == "[INFO]" and update_fcn is not None:
                            # TEST: check if stdout is complete by examining the number of "="
                            # TODO: improve this ugly hack...
                            while True:
                                counter = self._test_stdout_completeness(stdout)
                                if counter == 3:
                                    break
                                elif counter < 3:
                                    stdout += self._parse_one_line()
                                else: # counter > 3, we read one more line!
                                    raise Exception("parsing error")
                            it, result = self._parse_message(stdout)
                            #print(result['probe_chi'])
                            update_fcn(it+1, result)
                        elif len(stdout) == 3 and stdout[0] == "shared" and update_fcn is not None:
                            # subprocess announced its shared-memory mmap is ready
                            update_fcn(-1, "init_mmap")
                    if stderr:
                        stderr = stderr.decode('utf-8')
                        print(stderr, file=sys.stderr, end='')
                # get the return value
                self.return_value = run_ptycho.poll()
                if self.return_value != 0:
                    message = "At least one MPI process returned a nonzero value, so the whole job is aborted.\n"
                    message += "If you did not manually terminate it, consult the Traceback above to identify the problem."
                    raise Exception(message)
        except Exception as ex:
            traceback.print_exc()
            #print(ex, file=sys.stderr)
            #raise ex
        finally:
            # clean up temp file
            filepath = param.working_directory + "/." + param.shm_name + ".txt"
            if os.path.isfile(filepath):
                os.remove(filepath)

    def run(self):
        """QThread entry point: run reconstruction and signal the preview."""
        print('Ptycho thread started')
        try:
            self.recon_api(self.param, self.update_signal.emit)
        except IndexError:
            print("[ERROR] IndexError --- most likely a wrong MPI machine file is given?", file=sys.stderr)
        except:
            # whatever happened in the MPI processes will always (!) generate traceback,
            # so do nothing here
            pass
        else:
            # let preview window load results
            if self.param.preview_flag and self.return_value == 0:
                self.update_signal.emit(self.param.n_iterations+1, None)
        finally:
            print('finally?')

    def kill(self):
        """Terminate the mpirun subprocess (if any) and wait for it to exit."""
        if self.process is not None:
            print('killing the subprocess...')
            self.process.terminate()
            self.process.wait()
# a worker that does the rest of hard work for us
# a worker that does the rest of hard work for us
class HardWorker(QtCore.QThread):
    """Generic Qt worker thread that offloads a named heavy task
    ("save_h5" or "fetch_data") so the GUI stays responsive.
    """
    update_signal = QtCore.pyqtSignal(int, object) # connect to MainWindow???

    def __init__(self, task=None, *args, parent=None):
        # task: name of the operation to run; args: forwarded to the task
        super().__init__(parent)
        self.task = task
        self.args = args
        self.exception_handler = None  # optionally set by MainWindow
        #self.update_signal = QtCore.pyqtSignal(int, object) # connect to MainWindow???

    def run(self):
        """Dispatch the requested task; errors go to stderr or the handler."""
        try:
            if self.task == "save_h5":
                self._save_h5(self.update_signal.emit)
            elif self.task == "fetch_data":
                self._fetch_data(self.update_signal.emit)
            # TODO: put other heavy lifting works here
            # TODO: consider merge other worker threads to this one?
        except ValueError as ex:
            # from _fetch_data(), print it and quit
            print(ex, file=sys.stderr)
            print("[ERROR] possible reason: no image available for the selected detector/scan", file=sys.stderr)
        except Exception as ex:
            # use MainWindow's exception handler
            if self.exception_handler is not None:
                self.exception_handler(ex)

    def kill(self):
        # nothing to clean up; provided for interface symmetry with other workers
        pass

    def _save_h5(self, update_fcn=None):
        '''
        Write the scan data to an HDF5 file via save_data().
        args = [db, param, scan_num, roi_width, roi_height, cx, cy, threshold, bad_pixels]
        '''
        print("saving data to h5, this may take a while...")
        save_data(*self.args)
        print("h5 saved.")

    def _fetch_data(self, update_fcn=None):
        '''
        Load scan metadata via load_metadata() and hand it to update_fcn.
        args = [db, scan_id, det_name]
        '''
        if update_fcn is not None:
            print("loading begins, this may take a while...", end='')
            metadata = load_metadata(*self.args)
            # sanity checks
            if metadata['nz'] == 0:
                raise ValueError("nz = 0")
            #print("databroker connected, parsing experimental parameters...", end='')
            update_fcn(0, metadata) # 0 is just a placeholder
class PtychoReconFakeWorker(QtCore.QThread):
    """Drop-in stand-in for PtychoReconWorker that fabricates random
    per-iteration metrics, for exercising the GUI without a real
    reconstruction run.
    """
    update_signal = QtCore.pyqtSignal(int, object)

    def __init__(self, param:Param=None, parent=None):
        super().__init__(parent)
        self.param = param

    def _get_random_message(self, it):
        """Build a fake single-mode "[INFO]" progress line for iteration it."""
        object_chi = np.random.random()
        probe_chi = np.random.random()
        diff_chi = np.random.random()
        return '[INFO] DM {:d} object_chi = {:f} probe_chi = {:f} diff_chi = {:f}'.format(
            it, object_chi, probe_chi, diff_chi)

    def _array_to_str(self, arr):
        """Format an array as space-separated fixed-point values."""
        arrstr = ''
        for v in arr: arrstr += '{:f} '.format(v)
        return arrstr

    def _get_random_message_multi(self, it):
        """Build a fake multi-mode progress line (four values per metric)."""
        object_chi = np.random.random(4)
        probe_chi = np.random.random(4)
        diff_chi = np.random.random(4)
        object_chi_str = self._array_to_str(object_chi)
        probe_chi_str = self._array_to_str(probe_chi)
        diff_chi_str = self._array_to_str(diff_chi)
        return '[INFO] DM {:d} object_chi = {:s} probe_chi = {:s} diff_chi = {:s}'.format(
            it, object_chi_str, probe_chi_str, diff_chi_str)

    def _parse_message(self, message):
        """Parse a progress line into (tag, algorithm, iteration, metrics).

        metrics maps each "name = v1 v2 ..." group to a list of floats.
        """
        message = str(message).replace('[', '').replace(']', '')
        tokens = message.split()
        id, alg, it = tokens[0], tokens[1], int(tokens[2])
        metric_tokens = tokens[3:]
        metric = {}
        name = 'Unknown'
        data = []
        for i in range(len(metric_tokens)):
            token = str(metric_tokens[i])
            if token == '=': continue
            # a token followed by '=' starts a new metric group
            if i < len(metric_tokens) - 2 and metric_tokens[i+1] == '=':
                if len(data): metric[name] = list(data)
                name = token
                data = []
                continue
            data.append(float(token))
        if len(data):
            metric[name] = data
        return id, alg, it, metric

    def run(self):
        """Emit one fake metrics update per iteration, 10 per second."""
        from time import sleep
        update_fcn = self.update_signal.emit
        for it in range(self.param.n_iterations):
            message = self._get_random_message(it)
            _id, _alg, _it, _metric = self._parse_message(message)
            update_fcn(_it+1, _metric)
            sleep(.1)
        print("finished")

    def kill(self):
        # nothing to terminate: run() finishes on its own
        pass
| [
"PyQt5.QtCore.pyqtSignal",
"numpy.random.random",
"time.sleep",
"nsls2ptycho.core.utils.set_flush_early",
"os.path.isfile",
"fcntl.fcntl",
"traceback.print_exc",
"nsls2ptycho.core.databroker_api.save_data",
"nsls2ptycho.core.utils.use_mpi_machinefile",
"nsls2ptycho.core.databroker_api.load_metadat... | [((511, 541), 'PyQt5.QtCore.pyqtSignal', 'QtCore.pyqtSignal', (['int', 'object'], {}), '(int, object)\n', (528, 541), False, 'from PyQt5 import QtCore\n'), ((8500, 8530), 'PyQt5.QtCore.pyqtSignal', 'QtCore.pyqtSignal', (['int', 'object'], {}), '(int, object)\n', (8517, 8530), False, 'from PyQt5 import QtCore\n'), ((10483, 10513), 'PyQt5.QtCore.pyqtSignal', 'QtCore.pyqtSignal', (['int', 'object'], {}), '(int, object)\n', (10500, 10513), False, 'from PyQt5 import QtCore\n'), ((3418, 3449), 'nsls2ptycho.core.utils.set_flush_early', 'set_flush_early', (['mpirun_command'], {}), '(mpirun_command)\n', (3433, 3449), False, 'from nsls2ptycho.core.utils import use_mpi_machinefile, set_flush_early\n'), ((9847, 9868), 'nsls2ptycho.core.databroker_api.save_data', 'save_data', (['*self.args'], {}), '(*self.args)\n', (9856, 9868), False, 'from nsls2ptycho.core.databroker_api import load_metadata, save_data\n'), ((10691, 10709), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (10707, 10709), True, 'import numpy as np\n'), ((10730, 10748), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (10746, 10748), True, 'import numpy as np\n'), ((10768, 10786), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (10784, 10786), True, 'import numpy as np\n'), ((11121, 11140), 'numpy.random.random', 'np.random.random', (['(4)'], {}), '(4)\n', (11137, 11140), True, 'import numpy as np\n'), ((11161, 11180), 'numpy.random.random', 'np.random.random', (['(4)'], {}), '(4)\n', (11177, 11180), True, 'import numpy as np\n'), ((11200, 11219), 'numpy.random.random', 'np.random.random', (['(4)'], {}), '(4)\n', (11216, 11219), True, 'import numpy as np\n'), ((3335, 3391), 'nsls2ptycho.core.utils.use_mpi_machinefile', 'use_mpi_machinefile', (['mpirun_command', 'param.mpi_file_path'], {}), '(mpirun_command, param.mpi_file_path)\n', (3354, 3391), False, 'from nsls2ptycho.core.utils import use_mpi_machinefile, 
set_flush_early\n'), ((7485, 7509), 'os.path.isfile', 'os.path.isfile', (['filepath'], {}), '(filepath)\n', (7499, 7509), False, 'import sys, os\n'), ((10132, 10157), 'nsls2ptycho.core.databroker_api.load_metadata', 'load_metadata', (['*self.args'], {}), '(*self.args)\n', (10145, 10157), False, 'from nsls2ptycho.core.databroker_api import load_metadata, save_data\n'), ((12617, 12627), 'time.sleep', 'sleep', (['(0.1)'], {}), '(0.1)\n', (12622, 12627), False, 'from time import sleep\n'), ((4887, 4920), 'fcntl.fcntl', 'fcntl', (['run_ptycho.stderr', 'F_GETFL'], {}), '(run_ptycho.stderr, F_GETFL)\n', (4892, 4920), False, 'from fcntl import fcntl, F_GETFL, F_SETFL\n'), ((4970, 5023), 'fcntl.fcntl', 'fcntl', (['run_ptycho.stderr', 'F_SETFL', '(flags | O_NONBLOCK)'], {}), '(run_ptycho.stderr, F_SETFL, flags | O_NONBLOCK)\n', (4975, 5023), False, 'from fcntl import fcntl, F_GETFL, F_SETFL\n'), ((7256, 7277), 'traceback.print_exc', 'traceback.print_exc', ([], {}), '()\n', (7275, 7277), False, 'import traceback\n'), ((7527, 7546), 'os.remove', 'os.remove', (['filepath'], {}), '(filepath)\n', (7536, 7546), False, 'import sys, os\n')] |
#!/usr/bin/env python
from __future__ import division, absolute_import, print_function
import numpy as np
from scipy import interpolate, signal
def xkcd_line(x, y, xlim=None, ylim=None, mag=1.0, f1=30, f2=0.05, f3=15):
    """
    Mimic a hand-drawn line from (x, y) data
    Definition
    ----------
    def xkcd_line(x, y, xlim=None, ylim=None, mag=1.0, f1=30, f2=0.05, f3=15):
    Input
    -----
    x, y          array_like; arrays to be modified
    Optional Input
    --------------
    xlim, ylim    data range; the assumed plot range for the modification.
                  If not specified, they will be guessed from the data
    mag           float; the magnitude of the distortion (default: 1.0)
    f1, f2, f3    int, float, int; filtering parameters.
                  f1 gives the size of the window (default: 30)
                  f2 gives the high-frequency cutoff (default: 0.05)
                  f3 gives the size of the filter (default: 15)
    Output
    ------
    x, y          ndarrays; the modified lines
    References
    ----------
    See xkcd below.
    Examples
    --------
    for line in ax.lines:
        x, y = line.get_data()
        x_int, y_int = xkcd_line(x, y, xlim, ylim, mag, f1, f2, f3)
        line.set_data(x_int, y_int)
    License
    -------
    This file is part of the JAMS Python package, distributed under the MIT
    License. The JAMS Python package originates from the former UFZ Python library,
    Department of Computational Hydrosystems, Helmholtz Centre for Environmental
    Research - UFZ, Leipzig, Germany.
    Copyright (c) 2013-2019 <NAME>
    Permission is hereby granted, free of charge, to any person obtaining a copy
    of this software and associated documentation files (the "Software"), to deal
    in the Software without restriction, including without limitation the rights
    to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
    copies of the Software, and to permit persons to whom the Software is
    furnished to do so, subject to the following conditions:
    The above copyright notice and this permission notice shall be included in all
    copies or substantial portions of the Software.
    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
    IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
    AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
    LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
    OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
    SOFTWARE.
    History
    -------
    Written, MC, Mar 2013
    """
    # assure array
    x = np.asarray(x)
    y = np.asarray(y)
    # get limits for rescaling
    if xlim is None: xlim = (x.min(), x.max())
    if ylim is None: ylim = (y.min(), y.max())
    # degenerate ranges: borrow the other axis' limits to avoid /0 below
    if xlim[1] == xlim[0]: xlim = ylim
    if ylim[1] == ylim[0]: ylim = xlim
    # scale the data to [0, 1] so the perturbation is range-independent
    x_scaled = (x - xlim[0]) * 1. / (xlim[1] - xlim[0])
    y_scaled = (y - ylim[0]) * 1. / (ylim[1] - ylim[0])
    # compute the total distance along the path
    dx = x_scaled[1:] - x_scaled[:-1]
    dy = y_scaled[1:] - y_scaled[:-1]
    dist_tot = np.sum(np.sqrt(dx*dx + dy*dy))
    # number of interpolated points is proportional to the distance
    Nu = int(200 * dist_tot)
    u = np.arange(-1, Nu + 1) * 1. / (Nu - 1)
    # interpolate curve at sampled points (spline order limited by point count)
    # k = min(3, len(x) - 1)
    k = min(3, x.size - 1)
    res = interpolate.splprep([x_scaled, y_scaled], s=0, k=k)
    x_int, y_int = interpolate.splev(u, res[0])
    # we perturb perpendicular to the drawn line; central differences give
    # the local tangent direction
    dx = x_int[2:] - x_int[:-2]
    dy = y_int[2:] - y_int[:-2]
    # horizontal or vertical lines
    # np.sign(np.cumsum(np.random.random(dx.size)-0.5)) emulates something like a Brownian motion
    # i.e. auto-correlated random walks around 0; just the sign interests here.
    eps = np.maximum(np.abs(np.amax(x_scaled)-np.amin(x_scaled)), np.abs(np.amax(y_scaled)-np.amin(y_scaled)))/Nu
    if np.all(np.abs(dx) < eps):
        dx = np.sign(np.cumsum(np.random.random(dx.size)-0.5)) * eps
    if np.all(np.abs(dy) < eps):
        dy = np.sign(np.cumsum(np.random.random(dx.size)-0.5)) * eps
    # equal distances
    if np.all(np.sign(dx) == np.sign(dx[0])):
        dx *= np.sign(np.cumsum(np.random.random(dx.size)-0.5))
    if np.all(np.sign(dy) == np.sign(dy[0])):
        dy *= np.sign(np.cumsum(np.random.random(dx.size)-0.5))
    dist = np.sqrt(dx * dx + dy * dy)
    # create a filtered perturbation (low-pass filtered white noise)
    # coeffs = mag * np.random.normal(0, 0.01, len(x_int) - 2)
    coeffs = mag * np.random.normal(0, 0.01, x_int.size - 2)
    b = signal.firwin(f1, f2*dist_tot, window=('kaiser', f3))
    response = signal.lfilter(b, 1, coeffs)
    # displace perpendicular to the tangent: (dy, dx)/dist is the normal
    x_int[1:-1] += response * dy / dist
    y_int[1:-1] += response * dx / dist
    # un-scale data
    x_int = x_int[1:-1] * (xlim[1] - xlim[0]) + xlim[0]
    y_int = y_int[1:-1] * (ylim[1] - ylim[0]) + ylim[0]
    return x_int, y_int
def xkcd(ax,
         mag=1.0,
         f1=50, f2=0.01, f3=15,
         bgcolor='w',
         title_size=None,
         xaxis_loc=None, yaxis_loc=None,
         xaxis_arrow='+', yaxis_arrow='+',
         ax_extend=0.1,
         xlabel_inside=0., ylabel_inside=0.,
         ticks=False,
         xticks_inside=0., yticks_inside=0.,
         ):
    """
    Make axis look hand-drawn
    This adjusts all lines, text, legends, and axes in the figure to look
    like xkcd plots, a webcomic from <NAME>. Other plot elements are not modified.
    Definition
    ----------
    def xkcd(ax,
             mag=1.0,
             f1=50, f2=0.01, f3=15,
             bgcolor='w',
             title_size=None,
             xaxis_loc=None, yaxis_loc=None,
             xaxis_arrow='+', yaxis_arrow='+',
             ax_extend=0.1,
             xlabel_inside=0., ylabel_inside=0.,
             ticks=False,
             xticks_inside=0., yticks_inside=0.,
             ):
    Input
    -----
    ax                     Axes instance the axes instance to be modified.
    Optional Input
    --------------
    mag                    float; the magnitude of the distortion (default: 1.0)
    f1, f2, f3             int, float, int; filtering parameters.
                           f1 gives the size of the window (default: 50)
                           f2 gives the high-frequency cutoff (default: 0.01)
                           f3 gives the size of the filter (default: 15)
    bgcolor                str; color around lines so that axis look brocken,
                           i.e. lines are overdrawn on axis (default: 'w')
    titel_size             float; poitn size of plot title. If None, same size as axis labels.
                           (default: None)
    xaxis_loc, yaxis_log   float; The locations to draw the x and y axes in data coordinates.
                           If not specified, they will be drawn from the bottom left of the plot.
                           (default: None)
    xaxis_arrow, yaxis_arrow str; where to draw arrows on the x/y axes
                           Options are '+', '-', '+-', or '' (default: '+')
    ax_extend              float; How far (fractionally) to extend the drawn axes beyond
                           the original axes limits (default: 0.1)
    xlabel_inside, ylabel_inside float: By how much the labels are shifted (default: 0.0)
                           The last two options are not working how with mc_plot_template
    ticks                  True: change tick labels; False: no tick labels are drawn (default: False)
    xticks_inside, yticks_inside float: By how much the ticks are shifted (default: 0.0)
    Output
    ------
    ax is basically empty and all former elements are redrawn on plot.
    Note
    ----
    For reproducible plots, seed the random number generator before each new plot.
    If a new line was added, the old lines will look the same. The legend will be different though.
    References
    ----------
    This is the modified XKCD plot generator of Jake Vanderplas
    http://nbviewer.ipython.org/url/jakevdp.github.com/downloads/notebooks/XKCD_plots.ipynb
    The idea for this comes from work by <NAME>
    http://www.mail-archive.com/matplotlib-users@lists.sourceforge.net/msg25499.html
    Examples
    --------
    import matplotlib.pylab as plt
    fig = plt.figure(1)
    ax = fig.add_axes([0.1,0.1,0.5,0.5])
    ax.plot(range(10), label='Line')
    ax.set_title('Title')
    ax.set_xlabel('x label')
    ax.set_ylabel('y label')
    ax.legend()
    xkcd(ax)
    License
    -------
    This file is part of the JAMS Python package, distributed under the MIT License.
    Copyright (c) 2013 <NAME> - mc (at) macu (dot) de
    Permission is hereby granted, free of charge, to any person obtaining a copy
    of this software and associated documentation files (the "Software"), to deal
    in the Software without restriction, including without limitation the rights
    to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
    copies of the Software, and to permit persons to whom the Software is
    furnished to do so, subject to the following conditions:
    The above copyright notice and this permission notice shall be included in all
    copies or substantial portions of the Software.
    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
    IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
    AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
    LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
    OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
    SOFTWARE.
    History
    -------
    Written, MC, Mar 2013
    """
    import matplotlib.pylab as plt
    import matplotlib.font_manager as fm
    # remember random state for later resetting
    random_state = np.random.get_state()
    # Get axes aspect
    ext = ax.get_window_extent().extents
    aspect = (ext[3] - ext[1]) / (ext[2] - ext[0])
    xlim = ax.get_xlim()
    ylim = ax.get_ylim()
    xspan = xlim[1] - xlim[0]
    # Bug fix: the y span must be computed from the y limits; the original
    # code used "ylim[1] - xlim[0]", which skews arrow heads, tick and
    # label placement whenever the x and y ranges differ.
    yspan = ylim[1] - ylim[0]
    xax_lim = (xlim[0] - ax_extend * xspan,
               xlim[1] + ax_extend * xspan)
    yax_lim = (ylim[0] - ax_extend * yspan,
               ylim[1] + ax_extend * yspan)
    if xaxis_loc is None: xaxis_loc = ylim[0]
    if yaxis_loc is None: yaxis_loc = xlim[0]
    # Draw axes
    acolor = ax.get_xaxis().get_gridlines()[0].get_color()
    xaxis = plt.Line2D([xax_lim[0], xax_lim[1]], [xaxis_loc, xaxis_loc],
                       linestyle='-', color=acolor)
    yaxis = plt.Line2D([yaxis_loc, yaxis_loc], [yax_lim[0], yax_lim[1]],
                       linestyle='-', color=acolor)
    # adjust the axes
    if ticks:
        for x, xtext in zip(ax.get_xticks(), ax.get_xticklabels()):
            ax.text(x, xaxis_loc - 0.08 * yspan * (2 * xticks_inside - 1), xtext.get_text(),
                    fontsize=xtext.get_size(), ha='center', va='bottom' if xticks_inside else 'top', rotation=0)
        for y, ytext in zip(ax.get_yticks(), ax.get_yticklabels()):
            ax.text(yaxis_loc + 0.02 * xspan * (2 * yticks_inside - 1), y, ytext.get_text(),
                    fontsize=ytext.get_size(), ha='left' if yticks_inside else 'right', va='center', rotation=0)
    # Label axes
    siz = ax.get_xaxis().get_label().get_size()
    ax.text(xax_lim[1], xaxis_loc - 0.2 * yspan * (2 * xlabel_inside - 1), ax.get_xlabel(),
            fontsize=siz, ha='right', va='bottom' if xlabel_inside else 'top', rotation=0)
    ax.text(yaxis_loc + 0.04 * xspan * (2 * ylabel_inside - 1), yax_lim[1], ax.get_ylabel(),
            fontsize=siz, ha='right', va='bottom' if ylabel_inside else 'top', rotation=84)
    # Title - default: same size as axis labels
    if title_size is not None:
        siz2 = title_size
    else:
        siz2 = siz
    ax.text(0.5 * (xax_lim[1] + xax_lim[0]), yax_lim[1], ax.get_title(),
            ha='center', va='bottom', fontsize=siz2)
    # Draw arrow-heads at the end of axes lines
    arr1 = 0.04 * np.array([-1, 0, -1])
    arr2 = 0.03 * np.array([-1, 0, 1])
    arr1[::2] += np.random.normal(0, 0.005 / 2, 2)
    arr2[::2] += np.random.normal(0, 0.005 / 2, 2)
    x, y = xaxis.get_data()
    if '+' in str(xaxis_arrow):
        ax.plot(x[-1] + arr1 * xspan * aspect,
                y[-1] + arr2 * yspan,
                color=acolor, lw=2)
    if '-' in str(xaxis_arrow):
        ax.plot(x[0] - arr1 * xspan * aspect,
                y[0] - arr2 * yspan,
                color=acolor, lw=2)
    x, y = yaxis.get_data()
    if '+' in str(yaxis_arrow):
        ax.plot(x[-1] + arr2 * xspan * aspect**2,
                y[-1] + arr1 * yspan / aspect,
                color=acolor, lw=2)
    if '-' in str(yaxis_arrow):
        ax.plot(x[0] - arr2 * xspan * aspect**2,
                y[0] - arr1 * yspan / aspect,
                color=acolor, lw=2)
    # Set the axis limits
    ax.set_xlim(xax_lim[0] - 0.1 * xspan,
                xax_lim[1] + 0.1 * xspan)
    ax.set_ylim(yax_lim[0] - 0.1 * yspan,
                yax_lim[1] + 0.1 * yspan)
    # The lines: pop the existing ones and redraw them in hand-drawn style
    Nlines = len(ax.lines)
    lines = [xaxis, yaxis] + [ax.lines.pop(0) for i in range(Nlines)]
    for line in lines:
        x, y = line.get_data()
        ls = line.get_linestyle()
        if ls != 'None':
            x_int, y_int = xkcd_line(x, y, xlim, ylim, mag, f1, f2, f3)
        else:
            x_int, y_int = x, y
        # create foreground and background line
        lw = line.get_linewidth()
        line.set_linewidth(2*lw)
        line.set_data(x_int, y_int)
        # White surrounding of line makes them look overplot on axis
        if (line is not xaxis) and (line is not yaxis) and ls != 'None':
            line_bg = plt.Line2D(x_int, y_int, color=bgcolor, linewidth=2*lw+4)
            ax.add_line(line_bg)
        ax.add_line(line)
    # Change all the fonts to humor-sans.
    # from jams.find_in_path import find_in_path
    # fhumor = find_in_path('Humor-Sans.ttf') # in jams_python
    import os
    fhumor = os.path.join(os.path.dirname(__file__), 'Humor-Sans.ttf') # in jams_python/jams
    for text in ax.texts:
        tsize = text.get_size()
        prop = fm.FontProperties(fname=fhumor, size=tsize)
        text.set_fontproperties(prop)
    # modify legend
    leg = ax.get_legend()
    if leg is not None:
        np.random.set_state(random_state) # restate random number generator for reproducible results
        leg.set_frame_on(False)
        for child in leg.get_children():
            if isinstance(child, plt.Line2D):
                x, y = child.get_data()
                child.set_data(xkcd_line(x, y, mag=10.*mag, f1=2*f1, f2=f2/10.))
                child.set_linewidth(2*child.get_linewidth())
            if isinstance(child, plt.Text):
                tsize = child.get_size()
                prop = fm.FontProperties(fname=fhumor, size=tsize)
                child.set_fontproperties(prop)
    # remove standard axis
    ax.set_title('')
    ax.set_xlabel('')
    ax.set_ylabel('')
    ax.set_xticks([])
    ax.set_yticks([])
    ax.set_axis_off()
    return ax
if __name__ == '__main__':
import doctest
doctest.testmod(optionflags=doctest.NORMALIZE_WHITESPACE)
# import numpy as np
# import jams
# from position import position
# pdffile='test_xkcd.pdf'
# usetex = False
# textsize = 13 # standard text size
# lwidth = 1.5 # linewidth
# alwidth = 1.0 # axis line width
# if (pdffile == ''):
# outtype = 'x'
# else:
# outtype = 'pdf'
# import matplotlib as mpl
# if (outtype == 'pdf'):
# mpl.use('PDF') # set directly after import matplotlib
# import matplotlib.pyplot as plt
# from matplotlib.backends.backend_pdf import PdfPages
# # Customize: http://matplotlib.sourceforge.net/users/customizing.html
# mpl.rc('ps', papersize='a4', usedistiller='xpdf') # ps2pdf
# mpl.rc('figure', figsize=(8.27,11.69)) # a4 portrait
# if usetex:
# mpl.rc('text', usetex=True)
# else:
# #mpl.rc('font',**{'family':'sans-serif','sans-serif':['Helvetica']})
# mpl.rc('font',**{'family':'serif','serif':['times']})
# mpl.rc('text.latex', unicode=True)
# mpl.rc('font', size=textsize)
# else:
# import matplotlib.pyplot as plt
# mpl.rc('figure', figsize=(4./5.*8.27,4./5.*11.69)) # a4 portrait
# mpl.rc('font', size=textsize)
# mpl.rc('lines', linewidth=lwidth, color='black')
# mpl.rc('axes', linewidth=alwidth, labelcolor='black')
# mpl.rc('path', simplify=False) # do not remove
# if (outtype == 'pdf'):
# print('Plot PDF ', pdffile)
# pdf_pages = PdfPages(pdffile)
# else:
# print('Plot X')
# figsize = mpl.rcParams['figure.figsize']
# # figsize = [6.616, 9.352]
# ifig = 0
# nrow = 2
# ncol = 1
# # ----------------------------------------------------------------------------------
# # Example 1
# np.random.seed(1)
# iplot = 1
# fig = plt.figure(ifig)
# pos = position(nrow,ncol,iplot,golden=False,figsize=figsize,left=0.1)
# ax = fig.add_axes(pos)
# x = np.linspace(0, 10, 100)
# ax.plot(x, np.sin(x) * np.exp(-0.1 * (x - 5) ** 2), 'b', lw=1, label='sine')
# ax.plot(x, -np.cos(x) * np.exp(-0.1 * (x - 5) ** 2), 'r', lw=1, label='cosine')
# ax.set_title('check it out!')
# ax.set_xlabel('x label')
# ax.set_ylabel('y label')
# ax.legend(loc='upper left', bbox_to_anchor=(0.7,0.4), ncol=1, handlelength=0)
# xkcd(ax, xaxis_loc=0.0, yaxis_loc=1.0,
# xaxis_arrow='+-', yaxis_arrow='+-', xlabel_inside=1., title_size=textsize+2)
# if (outtype == 'pdf'):
# pdf_pages.savefig(fig)
# plt.close()
# # ----------------------------------------------------------------------------------
# # Example 1 with third line
# np.random.seed(1)
# iplot = 1
# fig = plt.figure(ifig)
# pos = position(nrow,ncol,iplot,golden=False,figsize=figsize,left=0.1)
# ax = fig.add_axes(pos)
# x = np.linspace(0, 10, 100)
# ax.plot(x, np.sin(x) * np.exp(-0.1 * (x - 5) ** 2), 'b', lw=1, label='sine')
# ax.plot(x, -np.cos(x) * np.exp(-0.1 * (x - 5) ** 2), 'r', lw=1, label='cosine')
# ax.plot(x, -np.cos(x+1.0) * np.exp(-0.1 * (x - 5) ** 2), 'm', lw=1, label='shift')
# ax.set_title('check it out!')
# ax.set_xlabel('x label')
# ax.set_ylabel('y label')
# ax.legend(loc='upper left', bbox_to_anchor=(0.7,0.4), ncol=1, handlelength=0)
# xkcd(ax, xaxis_loc=0.0, yaxis_loc=1.0,
# xaxis_arrow='+-', yaxis_arrow='+-', xlabel_inside=1., title_size=textsize+2)
# if (outtype == 'pdf'):
# pdf_pages.savefig(fig)
# plt.close()
# # ----------------------------------------------------------------------------------
# # Example 2
# # Some helper functions
# def norm(x, x0, sigma):
# return np.exp(-0.5 * (x - x0) ** 2 / sigma ** 2)
# def sigmoid(x, x0, alpha):
# return 1. / (1. + np.exp(- (x - x0) / alpha))
# # define the curves
# x = np.linspace(0, 1, 100)
# y1 = np.sqrt(norm(x, 0.7, 0.05)) + 0.2 * (1.5 - sigmoid(x, 0.8, 0.05))
# y2 = 0.2 * norm(x, 0.5, 0.2) + np.sqrt(norm(x, 0.6, 0.05)) + 0.1 * (1 - sigmoid(x, 0.75, 0.05))
# y3 = 0.05 + 1.4 * norm(x, 0.85, 0.08)
# y3[x > 0.85] = 0.05 + 1.4 * norm(x[x > 0.85], 0.85, 0.3)
# ifig += 1
# iplot = 1
# fig = plt.figure(ifig)
# ax = fig.add_axes(position(nrow,ncol,iplot,golden=False,figsize=figsize,left=0.1))
# # draw the curves
# ax.plot(x, y1, c='gray')
# ax.plot(x, y2, c='blue')
# ax.plot(x, y3, c='red')
# ax.text(0.3, -0.1, "Yard")
# ax.text(0.5, -0.1, "Steps")
# ax.text(0.7, -0.1, "Door")
# ax.text(0.9, -0.1, "Inside")
# ax.text(0.05, 1.1, "fear that\nthere's\nsomething\nbehind me")
# ax.plot([0.15, 0.2], [1.0, 0.2], '-k', lw=0.5)
# ax.text(0.25, 0.8, "forward\nspeed")
# ax.plot([0.32, 0.35], [0.75, 0.35], '-k', lw=0.5)
# ax.text(0.9, 0.4, "embarrassment")
# ax.plot([0.8, 1.0], [1.05, 0.55], '-k', lw=0.5)
# ax.set_title("Walking back to my\nfront door at night:")
# ax.set_xlim(0, 1)
# ax.set_ylim(0, 1.5)
# # modify all the axes elements in-place
# xkcd(ax)
# if (outtype == 'pdf'):
# pdf_pages.savefig(fig)
# plt.close()
# if (outtype == 'pdf'):
# pdf_pages.close()
# else:
# plt.show()
| [
"numpy.random.get_state",
"numpy.sqrt",
"numpy.random.set_state",
"numpy.array",
"numpy.arange",
"numpy.random.random",
"numpy.asarray",
"scipy.interpolate.splev",
"doctest.testmod",
"numpy.random.normal",
"numpy.abs",
"numpy.amin",
"scipy.signal.firwin",
"os.path.dirname",
"numpy.sign",... | [((3016, 3029), 'numpy.asarray', 'np.asarray', (['x'], {}), '(x)\n', (3026, 3029), True, 'import numpy as np\n'), ((3038, 3051), 'numpy.asarray', 'np.asarray', (['y'], {}), '(y)\n', (3048, 3051), True, 'import numpy as np\n'), ((3858, 3909), 'scipy.interpolate.splprep', 'interpolate.splprep', (['[x_scaled, y_scaled]'], {'s': '(0)', 'k': 'k'}), '([x_scaled, y_scaled], s=0, k=k)\n', (3877, 3909), False, 'from scipy import interpolate, signal\n'), ((3929, 3957), 'scipy.interpolate.splev', 'interpolate.splev', (['u', 'res[0]'], {}), '(u, res[0])\n', (3946, 3957), False, 'from scipy import interpolate, signal\n'), ((4860, 4886), 'numpy.sqrt', 'np.sqrt', (['(dx * dx + dy * dy)'], {}), '(dx * dx + dy * dy)\n', (4867, 4886), True, 'import numpy as np\n'), ((5080, 5135), 'scipy.signal.firwin', 'signal.firwin', (['f1', '(f2 * dist_tot)'], {'window': "('kaiser', f3)"}), "(f1, f2 * dist_tot, window=('kaiser', f3))\n", (5093, 5135), False, 'from scipy import interpolate, signal\n'), ((5153, 5181), 'scipy.signal.lfilter', 'signal.lfilter', (['b', '(1)', 'coeffs'], {}), '(b, 1, coeffs)\n', (5167, 5181), False, 'from scipy import interpolate, signal\n'), ((10897, 10918), 'numpy.random.get_state', 'np.random.get_state', ([], {}), '()\n', (10916, 10918), True, 'import numpy as np\n'), ((11505, 11598), 'matplotlib.pylab.Line2D', 'plt.Line2D', (['[xax_lim[0], xax_lim[1]]', '[xaxis_loc, xaxis_loc]'], {'linestyle': '"""-"""', 'color': 'acolor'}), "([xax_lim[0], xax_lim[1]], [xaxis_loc, xaxis_loc], linestyle='-',\n color=acolor)\n", (11515, 11598), True, 'import matplotlib.pylab as plt\n'), ((11629, 11722), 'matplotlib.pylab.Line2D', 'plt.Line2D', (['[yaxis_loc, yaxis_loc]', '[yax_lim[0], yax_lim[1]]'], {'linestyle': '"""-"""', 'color': 'acolor'}), "([yaxis_loc, yaxis_loc], [yax_lim[0], yax_lim[1]], linestyle='-',\n color=acolor)\n", (11639, 11722), True, 'import matplotlib.pylab as plt\n'), ((13170, 13203), 'numpy.random.normal', 'np.random.normal', (['(0)', '(0.005 / 
2)', '(2)'], {}), '(0, 0.005 / 2, 2)\n', (13186, 13203), True, 'import numpy as np\n'), ((13221, 13254), 'numpy.random.normal', 'np.random.normal', (['(0)', '(0.005 / 2)', '(2)'], {}), '(0, 0.005 / 2, 2)\n', (13237, 13254), True, 'import numpy as np\n'), ((16237, 16294), 'doctest.testmod', 'doctest.testmod', ([], {'optionflags': 'doctest.NORMALIZE_WHITESPACE'}), '(optionflags=doctest.NORMALIZE_WHITESPACE)\n', (16252, 16294), False, 'import doctest\n'), ((3549, 3575), 'numpy.sqrt', 'np.sqrt', (['(dx * dx + dy * dy)'], {}), '(dx * dx + dy * dy)\n', (3556, 3575), True, 'import numpy as np\n'), ((5019, 5060), 'numpy.random.normal', 'np.random.normal', (['(0)', '(0.01)', '(x_int.size - 2)'], {}), '(0, 0.01, x_int.size - 2)\n', (5035, 5060), True, 'import numpy as np\n'), ((13086, 13107), 'numpy.array', 'np.array', (['[-1, 0, -1]'], {}), '([-1, 0, -1])\n', (13094, 13107), True, 'import numpy as np\n'), ((13132, 13152), 'numpy.array', 'np.array', (['[-1, 0, 1]'], {}), '([-1, 0, 1])\n', (13140, 13152), True, 'import numpy as np\n'), ((15115, 15140), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (15130, 15140), False, 'import os\n'), ((15256, 15299), 'matplotlib.font_manager.FontProperties', 'fm.FontProperties', ([], {'fname': 'fhumor', 'size': 'tsize'}), '(fname=fhumor, size=tsize)\n', (15273, 15299), True, 'import matplotlib.font_manager as fm\n'), ((15417, 15450), 'numpy.random.set_state', 'np.random.set_state', (['random_state'], {}), '(random_state)\n', (15436, 15450), True, 'import numpy as np\n'), ((3680, 3701), 'numpy.arange', 'np.arange', (['(-1)', '(Nu + 1)'], {}), '(-1, Nu + 1)\n', (3689, 3701), True, 'import numpy as np\n'), ((4417, 4427), 'numpy.abs', 'np.abs', (['dx'], {}), '(dx)\n', (4423, 4427), True, 'import numpy as np\n'), ((4519, 4529), 'numpy.abs', 'np.abs', (['dy'], {}), '(dy)\n', (4525, 4529), True, 'import numpy as np\n'), ((4643, 4654), 'numpy.sign', 'np.sign', (['dx'], {}), '(dx)\n', (4650, 4654), True, 'import numpy as 
np\n'), ((4658, 4672), 'numpy.sign', 'np.sign', (['dx[0]'], {}), '(dx[0])\n', (4665, 4672), True, 'import numpy as np\n'), ((4753, 4764), 'numpy.sign', 'np.sign', (['dy'], {}), '(dy)\n', (4760, 4764), True, 'import numpy as np\n'), ((4768, 4782), 'numpy.sign', 'np.sign', (['dy[0]'], {}), '(dy[0])\n', (4775, 4782), True, 'import numpy as np\n'), ((14803, 14864), 'matplotlib.pylab.Line2D', 'plt.Line2D', (['x_int', 'y_int'], {'color': 'bgcolor', 'linewidth': '(2 * lw + 4)'}), '(x_int, y_int, color=bgcolor, linewidth=2 * lw + 4)\n', (14813, 14864), True, 'import matplotlib.pylab as plt\n'), ((15920, 15963), 'matplotlib.font_manager.FontProperties', 'fm.FontProperties', ([], {'fname': 'fhumor', 'size': 'tsize'}), '(fname=fhumor, size=tsize)\n', (15937, 15963), True, 'import matplotlib.font_manager as fm\n'), ((4317, 4334), 'numpy.amax', 'np.amax', (['x_scaled'], {}), '(x_scaled)\n', (4324, 4334), True, 'import numpy as np\n'), ((4335, 4352), 'numpy.amin', 'np.amin', (['x_scaled'], {}), '(x_scaled)\n', (4342, 4352), True, 'import numpy as np\n'), ((4362, 4379), 'numpy.amax', 'np.amax', (['y_scaled'], {}), '(y_scaled)\n', (4369, 4379), True, 'import numpy as np\n'), ((4380, 4397), 'numpy.amin', 'np.amin', (['y_scaled'], {}), '(y_scaled)\n', (4387, 4397), True, 'import numpy as np\n'), ((4707, 4732), 'numpy.random.random', 'np.random.random', (['dx.size'], {}), '(dx.size)\n', (4723, 4732), True, 'import numpy as np\n'), ((4817, 4842), 'numpy.random.random', 'np.random.random', (['dx.size'], {}), '(dx.size)\n', (4833, 4842), True, 'import numpy as np\n'), ((4467, 4492), 'numpy.random.random', 'np.random.random', (['dx.size'], {}), '(dx.size)\n', (4483, 4492), True, 'import numpy as np\n'), ((4569, 4594), 'numpy.random.random', 'np.random.random', (['dx.size'], {}), '(dx.size)\n', (4585, 4594), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Dec 16 13:44:19 2018
@author: sven
"""
import scipy.stats as sps
import scipy.spatial as spp
import numpy as np
import copy
from ..utils.utils import MyException
def trim_mean(x,trimming,axis=0):
"""
computes the trimmed mean of array x according to axis.
Input :
x : input data as numpy array
trimming, float : trimming percentage to be used
axis, int or None : Axis along which the trimmed means are computed
Output:
The trimmed mean of x according to axis.
"""
if trimming == 0:
return(np.mean(x,axis=axis))
else:
return(sps.trim_mean(x,trimming,axis=axis))
def trimvar(x,trimming):
"""
computes the trimmed variance of array x .
Input :
x : input data as numpy array
trimming, float : trimming percentage to be used
Output:
The trimmed variance of x.
"""
# division by n
return(sps.trim_mean(np.square(x - sps.trim_mean(x,trimming)),trimming))
def identity(x):
return(x)
def trim_mom(x,y,locest,order,trimming,option,fscorr=True):
"""
computes trimmed comoment between x and y. order represents the order of
the comoment.
input :
x : Input data as matrix
y : Input data as matrix or 1d vector
order, int : order of the comoment
trimming, float : trimming percentage to be used.
option, int : option to select the type of co-moment (order 3: option 1 = com(x,x,y))
fscor, bool: if True, a finite sample correction is applied to the comoment.
output :
the trimmed comoment between x and y
"""
# division by n
if order == 0:
como = 0
elif order == 1:
como = locest(x,trimming)
else:
if order > 2:
iter_stop_2 = option
iter_stop_1 = order - option
else:
iter_stop_1 = 1
iter_stop_2 = 1
if locest == np.median:
trimming = 0
factor = 1
if (x==y).all():
wrapper = abs
power = 1/order
if power == 0.5:
factor = 1.4826
else:
wrapper = identity
power = 1
else:
n = len(x)
wrapper = identity
power = 1
if fscorr:
ntrim = round(n * (1-trimming))
factor = ntrim
factor /= np.product(ntrim - np.arange(max(1,order-2),order))
else:
factor = 1
xc = wrapper(x - locest(x,trimming))
yc = wrapper(y - locest(y,trimming))
factor1 = np.power(xc,iter_stop_1)
factor2 = np.power(yc,iter_stop_2)
como = locest(np.power(np.multiply(factor1,factor2),power),trimming)*factor
# como = sps.trim_mean(np.multiply(x - sps.trim_mean(x,trimming),y - sps.trim_mean(y,trimming)),trimming)*ntrim/(ntrim-1)
if len(como.shape)>1:
como = como[0,0]
else:
if type(como) is np.ndarray:
como = como[0]
return(como)
def double_center_flex(a, center='mean', **kwargs):
"""
Double centered function adapted to accommodate for location types different
from mean.
Input :
a : input data as matrix
center, str : which location estimate to use for centering. either 'mean or 'median'
kwargs :
trimming, float : trimming percentage to be used.
biascorr, bool : if True, bias correction is applied during double centering.
Output :
The double centered version of the matrix a.
"""
# print(kwargs)
if 'trimming' not in kwargs:
trimming = 0
else:
trimming = kwargs.get('trimming')
# print('trimming is: ' + str(trimming))
if 'biascorr' not in kwargs:
biascorr = False
else:
biascorr = kwargs.get('biascorr')
out = copy.deepcopy(a)
dim = np.size(a, 0)
n1 = dim
# mu = np.sum(a) / (dim * dim)
if center=='mean':
mu = trim_mean(a.reshape((dim**2,1)),trimming)
if biascorr:
n1 = np.round(dim*(1-trimming))
# print(n1)
mu *= (n1**2) / ((n1-1) * (n1-2))
mu_cols = trim_mean(a, trimming, axis=0).reshape((1,dim))
mu_rows = trim_mean(a, trimming, axis=1).reshape((dim,1))
if biascorr:
mu_cols *= n1/(n1 - 2)
mu_rows *= n1/(n1 - 2)
mu_cols = np.ones((dim, 1)).dot(mu_cols)
mu_rows = mu_rows.dot(np.ones((1, dim)))
elif center=='median':
mu = np.median(a.reshape((dim**2,1)))
mu_cols = np.median(a,axis=0).reshape((1,dim))
mu_rows = np.median(a,axis=1).reshape((dim,1))
mu_cols = np.ones((dim, 1)).dot(mu_cols)
mu_rows = mu_rows.dot(np.ones((1, dim)))
else:
raise(ValueError('Center should be mean or median'))
# Do one operation at a time, to improve broadcasting memory usage.
out -= mu_rows
out -= mu_cols
out += mu
if biascorr:
out[np.eye(dim, dtype=bool)] = 0
return out,n1
def distance_matrix_centered(x,**kwargs):
"""
Computes the trimmed double centered distance matrix of x.
Input :
x : input data as matrix.
kwargs :
trimming, float : trimming percentage to be used.
biascorr, bool : if True, bias correction is applied during double centering.
center, str : which location estimate to use for centering. either 'mean or 'median'
dmetric, str : which distance metric to use. Default is euclidean distance.
Output :
the trimmed double centered distance matrix of x
"""
if 'trimming' not in kwargs:
trimming = 0
else:
trimming = kwargs.get('trimming')
if 'biascorr' not in kwargs:
biascorr = False
else:
biascorr = kwargs.get('biascorr')
if 'center' not in kwargs:
center = 'mean'
else:
center = kwargs.get('center')
if 'dmetric' not in kwargs:
dmetric = 'euclidean'
else:
dmetric = kwargs.get('dmetric')
dx = spp.distance.squareform(spp.distance.pdist(x,metric=dmetric))
dmx, n1 = double_center_flex(dx,biascorr=biascorr,
trimming=trimming,center=center)
return dmx,n1
def distance_moment(dmx,dmy,**kwargs):
"""
Computes the trimmed distance comoment between x and y based on their distance matrices.
Input :
dmx : distance matrix of x
dmy : distance matrix of y
kwargs :
trimming, float : trimming percentage to be used.
biascorr, bool : if True, bias correction is applied during double centering.
center, str : which location estimate to use for centering. either 'mean or 'median'
dmetric, str : which distance metric to use. Default is euclidean distance.
order, int : order of the comoment to be computed, default is 2 for covariance.
option, int : option to be used during the computation.
Output :
The trimmed distance comoment between x and y
"""
if 'trimming' not in kwargs:
trimming = 0
else:
trimming = kwargs.get('trimming')
if 'biascorr' not in kwargs:
biascorr = False
else:
biascorr = kwargs.get('biascorr')
if 'center' not in kwargs:
center = 'mean'
else:
center = kwargs.get('center')
if 'order' not in kwargs:
order = 2
else:
order = kwargs.get('order')
if order > 2:
if 'option' not in kwargs:
option = 1
else:
option = kwargs.get('option')
iter_stop_2 = option
iter_stop_1 = order - option
else:
option = 0
iter_stop_1 = 1
iter_stop_2 = 1
nx = dmx.shape[0]
ny = dmy.shape[0]
if nx!=ny:
raise(ValueError)
if biascorr:
if trimming == 0:
n1 = nx
elif 'n1' not in kwargs:
raise(MyException('n1 needs to be provided when correcting for bias'))
else:
n1 = kwargs.get('n1')
corr4bias = n1**2/(n1*(n1-3))
else:
corr4bias = 1
if order>2:
i = 1
while i < iter_stop_1:
dmx *= dmx
i += 1
i = 1
while i < iter_stop_2:
dmy *= dmy
i += 1
if center=='mean':
moment = trim_mean((dmx*dmy).reshape((nx**2,1)),trimming)
moment *= corr4bias
moment = moment[0]
moment = (-1)**order*abs(moment)**(1/order)
elif center=='median':
moment = np.median(dmx*dmy)
return(moment)
def difference_divergence(X,Y,**kwargs):
"""
This function computes the (U)Martingale Difference Divergence of Y given X.
input :
X : A matrix or data frame, where rows represent samples, and columns represent variables.
Y : The response variable or matrix.
biascorr, bool : if True, uses U centering to produce an unbiased estimator of MDD
output:
returns the squared martingale difference divergence of Y given X.
"""
if 'trimming' not in kwargs:
trimming = 0
else:
trimming = kwargs.get('trimming')
if 'biascorr' not in kwargs:
biascorr = False
else:
biascorr = kwargs.get('biascorr')
if 'center' not in kwargs:
center = 'mean'
else:
center = kwargs.get('center')
if 'dmetric' not in kwargs:
dmetric = 'euclidean'
else:
dmetric = kwargs.get('dmetric')
A, Adim = distance_matrix_centered(X,biascorr=biascorr,trimming=trimming,center=center)
dy= spp.distance.squareform(spp.distance.pdist(Y.reshape(-1, 1),metric=dmetric)**2)
B,Bdim = double_center_flex(0.5*dy,biascorr=biascorr,trimming=trimming,center=center)
if biascorr:
return(U_inner(A,B,trimming))
else:
return(D_inner(A,B,trimming))
def U_inner(X,Y,trimming=0):
"""
Computes the inner product in the space of U centered matrices, between matrices X and Y.
The matrices have to be square matrices.
"""
nx = X.shape[0]
ny = Y.shape[0]
if nx != ny:
raise(MyException('Please feed x and y data of equal length'))
#((1/(nx*(nx-3))) *(np.sum(arr)))
arr= np.multiply(X,Y)
arr=arr.flatten()
lowercut = int(trimming * (nx**2))
uppercut = (nx**2) - lowercut
atmp = np.partition(arr, (lowercut, uppercut - 1), axis=0)
sl = [slice(None)] * atmp.ndim
sl[0] = slice(lowercut, uppercut)
res= atmp[tuple(sl)]
n = np.sqrt(len(res))
return((1/(n*(n-3)))*np.sum(atmp[tuple(sl)], axis=0))
def D_inner(X,Y,trimming=0):
"""
Computes the inner product in the space of D centered matrices, between Double centered matrices X and Y.
The matrices have to be square matrices.
"""
nx = X.shape[0]
ny = Y.shape[0]
if nx != ny:
raise(MyException('Please feed x and y data of equal length'))
#arr= (1/(nx*nx))*np.multiply(X,Y)
arr= np.multiply(X,Y)
arr=arr.flatten()
lowercut = int(trimming * (nx**2))
uppercut = (nx**2) - lowercut
atmp = np.partition(arr, (lowercut, uppercut - 1), axis=0)
sl = [slice(None)] * atmp.ndim
sl[0] = slice(lowercut, uppercut)
res= atmp[tuple(sl)]
n = np.sqrt(len(res))
return((1/(n*n))*np.sum(res, axis=0))
def MDDM(X,Y):
"""Computes the MDDM(Y|X)
for more details, see the article by <NAME> & <NAME>;
Martingale Difference Divergence Matrix and Its
Application to Dimension Reduction for Stationary
Multivariate Time Series;
Journal of the American Statistical Association; 2018;521;
216--229
Input:
X --- ndarray of shape (n,p)
Y --- ndarray of shape(n,q)
Output:
MDDM(Y|X)
"""
if X.shape[0] != Y.shape[0]:
raise(MyException('Please feed x and y data of equal length'))
n,q = Y.shape
n,p = X.shape
MDDM = np.zeros((q,q))
Y_mean = np.mean(Y,axis=0).reshape(1,-1)
Y_center = Y - np.matmul(np.ones((n,1)),Y_mean)
for i in range(n):
if(p==1):
X_dist = np.abs(X[i]-X)
else:
X_diff= (( X.T - np.vstack(X[i,:])).T)**2
X_sum = np.sum(X_diff,axis=1)
X_dist = np.sqrt(X_sum).reshape(-1,n)
MDDM = MDDM + np.matmul(Y_center[i,:].reshape(q,-1), np.matmul(X_dist,Y_center))
MDDM = (-MDDM)/(n**2)
return(MDDM)
| [
"numpy.mean",
"numpy.multiply",
"numpy.eye",
"numpy.median",
"numpy.ones",
"numpy.abs",
"numpy.power",
"numpy.sqrt",
"scipy.spatial.distance.pdist",
"numpy.size",
"numpy.partition",
"numpy.sum",
"numpy.zeros",
"scipy.stats.trim_mean",
"numpy.matmul",
"numpy.vstack",
"copy.deepcopy",
... | [((4125, 4141), 'copy.deepcopy', 'copy.deepcopy', (['a'], {}), '(a)\n', (4138, 4141), False, 'import copy\n'), ((4157, 4170), 'numpy.size', 'np.size', (['a', '(0)'], {}), '(a, 0)\n', (4164, 4170), True, 'import numpy as np\n'), ((10938, 10955), 'numpy.multiply', 'np.multiply', (['X', 'Y'], {}), '(X, Y)\n', (10949, 10955), True, 'import numpy as np\n'), ((11061, 11112), 'numpy.partition', 'np.partition', (['arr', '(lowercut, uppercut - 1)'], {'axis': '(0)'}), '(arr, (lowercut, uppercut - 1), axis=0)\n', (11073, 11112), True, 'import numpy as np\n'), ((11706, 11723), 'numpy.multiply', 'np.multiply', (['X', 'Y'], {}), '(X, Y)\n', (11717, 11723), True, 'import numpy as np\n'), ((11829, 11880), 'numpy.partition', 'np.partition', (['arr', '(lowercut, uppercut - 1)'], {'axis': '(0)'}), '(arr, (lowercut, uppercut - 1), axis=0)\n', (11841, 11880), True, 'import numpy as np\n'), ((12699, 12715), 'numpy.zeros', 'np.zeros', (['(q, q)'], {}), '((q, q))\n', (12707, 12715), True, 'import numpy as np\n'), ((642, 663), 'numpy.mean', 'np.mean', (['x'], {'axis': 'axis'}), '(x, axis=axis)\n', (649, 663), True, 'import numpy as np\n'), ((689, 726), 'scipy.stats.trim_mean', 'sps.trim_mean', (['x', 'trimming'], {'axis': 'axis'}), '(x, trimming, axis=axis)\n', (702, 726), True, 'import scipy.stats as sps\n'), ((6437, 6474), 'scipy.spatial.distance.pdist', 'spp.distance.pdist', (['x'], {'metric': 'dmetric'}), '(x, metric=dmetric)\n', (6455, 6474), True, 'import scipy.spatial as spp\n'), ((12026, 12045), 'numpy.sum', 'np.sum', (['res'], {'axis': '(0)'}), '(res, axis=0)\n', (12032, 12045), True, 'import numpy as np\n'), ((2814, 2839), 'numpy.power', 'np.power', (['xc', 'iter_stop_1'], {}), '(xc, iter_stop_1)\n', (2822, 2839), True, 'import numpy as np\n'), ((2857, 2882), 'numpy.power', 'np.power', (['yc', 'iter_stop_2'], {}), '(yc, iter_stop_2)\n', (2865, 2882), True, 'import numpy as np\n'), ((4336, 4366), 'numpy.round', 'np.round', (['(dim * (1 - trimming))'], {}), '(dim * (1 - 
trimming))\n', (4344, 4366), True, 'import numpy as np\n'), ((4735, 4752), 'numpy.ones', 'np.ones', (['(1, dim)'], {}), '((1, dim))\n', (4742, 4752), True, 'import numpy as np\n'), ((5274, 5297), 'numpy.eye', 'np.eye', (['dim'], {'dtype': 'bool'}), '(dim, dtype=bool)\n', (5280, 5297), True, 'import numpy as np\n'), ((9099, 9119), 'numpy.median', 'np.median', (['(dmx * dmy)'], {}), '(dmx * dmy)\n', (9108, 9119), True, 'import numpy as np\n'), ((12728, 12746), 'numpy.mean', 'np.mean', (['Y'], {'axis': '(0)'}), '(Y, axis=0)\n', (12735, 12746), True, 'import numpy as np\n'), ((12789, 12804), 'numpy.ones', 'np.ones', (['(n, 1)'], {}), '((n, 1))\n', (12796, 12804), True, 'import numpy as np\n'), ((12879, 12895), 'numpy.abs', 'np.abs', (['(X[i] - X)'], {}), '(X[i] - X)\n', (12885, 12895), True, 'import numpy as np\n'), ((12996, 13018), 'numpy.sum', 'np.sum', (['X_diff'], {'axis': '(1)'}), '(X_diff, axis=1)\n', (13002, 13018), True, 'import numpy as np\n'), ((1048, 1074), 'scipy.stats.trim_mean', 'sps.trim_mean', (['x', 'trimming'], {}), '(x, trimming)\n', (1061, 1074), True, 'import scipy.stats as sps\n'), ((4674, 4691), 'numpy.ones', 'np.ones', (['(dim, 1)'], {}), '((dim, 1))\n', (4681, 4691), True, 'import numpy as np\n'), ((5016, 5033), 'numpy.ones', 'np.ones', (['(1, dim)'], {}), '((1, dim))\n', (5023, 5033), True, 'import numpy as np\n'), ((13151, 13178), 'numpy.matmul', 'np.matmul', (['X_dist', 'Y_center'], {}), '(X_dist, Y_center)\n', (13160, 13178), True, 'import numpy as np\n'), ((4845, 4865), 'numpy.median', 'np.median', (['a'], {'axis': '(0)'}), '(a, axis=0)\n', (4854, 4865), True, 'import numpy as np\n'), ((4900, 4920), 'numpy.median', 'np.median', (['a'], {'axis': '(1)'}), '(a, axis=1)\n', (4909, 4920), True, 'import numpy as np\n'), ((4955, 4972), 'numpy.ones', 'np.ones', (['(dim, 1)'], {}), '((dim, 1))\n', (4962, 4972), True, 'import numpy as np\n'), ((13039, 13053), 'numpy.sqrt', 'np.sqrt', (['X_sum'], {}), '(X_sum)\n', (13046, 13053), True, 'import numpy 
as np\n'), ((2918, 2947), 'numpy.multiply', 'np.multiply', (['factor1', 'factor2'], {}), '(factor1, factor2)\n', (2929, 2947), True, 'import numpy as np\n'), ((12951, 12969), 'numpy.vstack', 'np.vstack', (['X[i, :]'], {}), '(X[i, :])\n', (12960, 12969), True, 'import numpy as np\n')] |
from nutils import mesh, function, solver, util, export, cli, testing
import numpy as np, treelog
from CoolProp.CoolProp import PropsSI
import scipy.special as sc
from matplotlib import pyplot as plt
from scipy.stats import norm
from matplotlib import collections, colors
import pandas as pd
# import seaborn as sns
import matplotlib.pyplot as plt
import math
#################### Doublet model library #########################
#Objects
class Aquifer:
    """Physical, thermal and operational description of a geothermal aquifer.

    All primary quantities are read from the ``aquifer`` configuration
    mapping at construction time; derived quantities (well geometry, mass
    flow rate and porosity-weighted bulk rock+fluid properties) are
    precomputed here.
    """

    def __init__(self, aquifer):
        cfg = aquifer

        # Parameters that may be treated stochastically elsewhere
        self.H = cfg['H']              # aquifer thickness [m]
        self.φ = cfg['porosity']       # porosity [-]
        self.K = cfg['K']              # permeability
        self.Q = cfg['Q']              # pumping rate (negative = extraction)

        # Deterministic geometry and thermal state
        self.dtop = cfg['dtop']        # depth to top of aquifer [m]
        self.dsensor = cfg['dsensor']  # depth of ESP sensor [m]
        self.dpump = cfg['dpump']      # depth of pump [m]
        self.labda = cfg['labda']      # geothermal gradient
        self.Tsur = cfg['Tsurface']    # surface temperature

        # Fluid and rock properties
        self.ρf = self.rhof = cfg['rhof']  # fluid density (two aliases kept for callers)
        self.rhos = cfg['rhos']        # rock density
        self.cpf = cfg['cpf']          # fluid specific heat capacity [J/kg K]
        self.cps = cfg['cps']          # rock specific heat capacity (limestone) [J/kg K]
        self.labdas = cfg['labdas']    # thermal conductivity of solid [W/mK]
        self.labdaf = cfg['labdaf']    # thermal conductivity of fluid [W/mK]
        self.mu = cfg['viscosity']     # fluid viscosity

        # Reference state and well layout
        self.pref = cfg['pref']        # initial reservoir pressure [Pa]
        self.Tref = cfg['Tref']        # initial reservoir temperature [K]
        self.rw = cfg['rw']            # well radius [m]
        self.rmax = cfg['rmax']        # radius of influence [m]
        self.L = cfg['L']              # injector-producer spacing [m]
        self.Tinj = cfg['Tinj']        # reinjection temperature [K]
        self.patm = cfg['patm']        # atmospheric pressure [Pa]
        self.ε = cfg['ε']              # tubing roughness [m]
        self.ct = cfg['ct']            # ct coefficient (presumably total compressibility — confirm)

        # Derived quantities
        self.mdot = self.Q * cfg['rhof']   # mass flow rate [kg/s]
        self.D = 2 * cfg['rw']             # well diameter [m]
        self.Aw = 2 * np.pi * cfg['rw']    # well circumference [m]
        self.g = 9.81                      # gravitational acceleration [m/s2]

        # Porosity-weighted bulk (rock + fluid) properties
        self.ρ = self.φ * self.rhof + (1 - self.φ) * self.rhos
        self.cp = self.φ * self.cpf + (1 - self.φ) * self.cps
        self.λ = self.φ * self.labdaf + (1 - self.φ) * self.labdas
# class Well:
#
# def __init__(self, well, aquifer):
#
# self.Q = well['Q'] # pumping rate from well (negative value = extraction)
# self.mdot = self.Q * aquifer['rho_f']
# self.D = 2 * aquifer['rw']
# self.Aw = 2 * np.pi * aquifer['rw']
class DoubletGenerator:
"""Generates all properties for a doublet
Args:
"""
    def __init__(self, aquifer, sol, params=None):
        """Evaluate all node temperatures, pressures and heat output of the doublet.

        Parameters
        ----------
        aquifer : Aquifer
            Container with the aquifer's physical/operational properties.
        sol : sequence
            Well-test solution; ``sol[0]`` is the pressure and ``sol[1]`` the
            temperature at node 9 (producer at sensor depth).
        params : optional
            Stochastic parameter samples; currently unused (kept for the
            commented-out stochastic branch below).

        Note
        ----
        The node numbering appears to run from the producer bottom (9/10) up
        through the surface installation and back down the injector (1/2) —
        inferred from usage; confirm against the accompanying model sketch.
        """
        # Initialize deterministic parameters
        self.aquifer = aquifer
        self.time = 365*24*60*60 #1 year [s]
        self.H = self.aquifer.H
        self.Q = self.aquifer.Q
        self.alpha = self.aquifer.labdas / ( self.aquifer.rhos * self.aquifer.cps) #thermal diffusion of rock
        self.gamma = 0.577216 #euler constant
        self.pnode9 = sol[0]
        self.Tnode9 = sol[1]
        # broadcast the scalar injection temperature to the shape of Tnode9
        self.Tinj = self.aquifer.Tinj * np.ones_like(self.Tnode9)

        # if params:
        # Stoichastic parameters with effect on well test
        # self.params = params
        # self.H = np.mean(params[0])
        # self.Q = np.mean(params[4])

        # Set lengths in system
        self.lpipe = self.z = self.aquifer.dsensor
        self.dpump = self.aquifer.dpump

        # Set specs
        self.effpump = 0.61 # Efficiency of pump [-]
        self.eta = 0.61 # Efficiency of heat exchanger [-]
        self.Ppump = 2.671e5/2 # Power of pump [W]

        # Evaluate objects within doublet
        self.T_aqinjector = self.Tinj
        self.T_aqproducer = self._get_Tz(self.lpipe)
        self.P_aqproducer = self._get_pgz(self.aquifer.patm, self.lpipe, self.T_aqproducer)
        self.P_aqinjector = self._get_pgz(self.aquifer.patm, self.lpipe, self.Tinj)
        self.ppump = self._get_ppump(self.Ppump, self.Q)

        # Evaluate Tnodes within doublet (order matters: each node builds on the previous)
        self.Tnode10 = self.T_aqproducer # Tref when based on depth of sensor
        self.Tnode8 = self.get_Tnode8(self.Tnode9)
        self.Tnode6 = self.Tnode7 = self.get_Tnode7(self.Tnode9)
        self.Tnode4 = self.Tnode5 = self.Tinj
        self.Tnode3 = self.get_Tnode3(self.Tnode4)
        self.Tnode2 = self.get_Twinj(self.z - self.dpump, self.Tinj)
        self.Tnode1 = self.T_aqproducer

        # Evaluate pnodes within doublet (order matters: each node builds on the previous)
        self.pnode10 = self.P_aqproducer # pref when based on depth
        self.pnode8 = self.get_pnode8(self.pnode9)
        self.pnode6 = self.pnode7 = self.get_pnode7(self.pnode8)
        self.pnode4 = self.pnode5 = self.pnode6
        self.pnode3 = self.get_pnode3(self.pnode4)
        self.pnode2 = self.get_pnode2(self.pnode3)
        self.pnode1 = self.P_aqinjector # pref when based on depth and injection temperature

        # Calculate power output system: heat extracted over the heat exchanger
        self.Phe = self.aquifer.mdot * self.aquifer.cpf * (self.Tnode6 - self.Tinj)
def get_Tw(self, dz, Tw):
Tw = Tw.copy()
dl = 10 # pipe segment [m]
zi = np.linspace(self.z, self.z - dz, dz/dl + 1)
for i in range(len(zi)-1):
Tw -= dl * self._getqw(Tw, zi[i]) / ( self.aquifer.mdot * self.aquifer.cpf )
return Tw
def get_Twinj(self, dz, Tw):
Tw = Tw.copy()
dl = 10 # pipe segment [m]
zi = np.linspace(0, dz, dz/dl + 1)
for i in range(len(zi)-1):
Tw += dl * self._getqw(Tw, zi[i]) / ( self.aquifer.mdot * self.aquifer.cpf )
return Tw
def _getqw(self, Tw, zi):
qw = 4 * math.pi * self.aquifer.labdas * ( Tw - self._get_Tz(zi) ) / math.log( ( 4 * self.alpha * self.time ) / (math.exp(self.gamma) * self.aquifer.rw**2 ) )
return qw
def get_Tnode8(self, Tnode9):
Tnode8 = self.get_Tw(self.z - self.dpump, Tnode9)
return Tnode8
def get_Tnode7(self, Tnode9):
Tnode7 = self.get_Tw(self.z, Tnode9)
return Tnode7
def get_Tnode3(self, Tnode4):
Tnode3 = self.get_Twinj(self.dpump, Tnode4)
return Tnode3
def get_Tnode2(self, Tnode4):
Tnode2 = self.get_Twinj(self.z, Tnode4)
return Tnode2
def get_pnode8(self, pnode9):
pnode8 = pnode9 - self._get_pgz(0, (self.z - self.dpump), self.Tnode9) - self._get_pfriction(self.z - self.dpump)
# print('loss of pressure by height', self._get_pgz(0, (self.z - self.dpump), self.Tnode9))
# print('loss of pressure by friction', self._get_pfriction(self.z - self.dpump))
return pnode8
def get_pnode7(self, pnode8):
pnode7 = pnode8 - self._get_pgz(0, self.dpump, self._get_Tz(self.lpipe)) - self._get_pfriction(self.dpump) + self._get_ppump(self.Ppump, self.Q)
return pnode7
def get_pnode3(self, pnode4):
pnode3 = pnode4 + self._get_pgz(0, self.dpump, self._get_Tz(self.lpipe)) + self._get_pfriction(self.dpump) #+ self._get_ppump(self.Ppump, self.Q)
return pnode3
def get_pnode2(self, pnode3):
pnode2 = pnode3 + self._get_pgz(0, (self.z - self.dpump), self.T_aqinjector) + self._get_pfriction(self.z - self.dpump)
return pnode2
def _get_ppump(self, Ppump, Q):
ppump = Ppump / (Q * self.effpump) # appropiate value is 20e5 Pa
# print('pump added pressure', ppump)
return ppump
def _get_pgz(self, patm, z, T):
""" Computes pressure of the aquifer as a function of the depth, temperature and pressure
Arguments:
z (float): depth (downwards from groundlevel is positive)
Returns:
p (float): value of pressure
"""
pgz = patm + self.aquifer.g * self.aquifer.rhof * z # density as a constant
# pgz = patm + self.aquifer.g * self.rho(np.mean(T)-273, pgz) * z # density as a function of temperature and pressure
return pgz
    def _get_pfriction(self, z):
        """Pressure loss due to wall friction over a pipe length ``z`` [Pa].

        NOTE(review): Darcy-Weisbach gives dp = f * (z / D) * rho * v**2 / 2;
        this expression instead *multiplies* by D and uses v to the first
        power -- confirm the intended friction model before relying on it.
        """
        pfriction = (self._get_f() * self.aquifer.rhof * self.get_vmean(self.Q) * z) / 2 * self.aquifer.D
        return pfriction
def _get_f(self):
f = ( 1.14 - 2 * math.log10( self.aquifer.ε / self.aquifer.D + 21.25 / ( self.get_Re( self.get_vmean(self.Q) )**0.9 ) ) )**-2
return f
def get_vmean(self, Q):
vmean = 4 * Q / ( math.pi * ( self.aquifer.D ** 2 ) )
return vmean
def get_Re(self, vmean):
Re = ( self.aquifer.rhof * vmean ) / self.aquifer.mu
return Re
# Theis solution, temperature and pressure as a function of depth
# def _get_P_wb(self, P_aquifer, T_aquifer):
# """ Computes pressure at wellbore
#
# Arguments:
# d (float): depth (downwards from groundlevel is positive)
# Returns:
# P_wb (float): value of pressure at well bore
# """
# if P_aquifer == self.P_aqproducer:
# Q = -self.Q
# else:
# Q = self.Q
#
# P_wb = P_aquifer + ( ( Q * self.mu(T_aquifer, P_aquifer) ) / ( 2 * math.pi * self.aquifer.K * self.aquifer.H ) ) * np.log ( self.aquifer.L / self.aquifer.rw)
# return P_wb
def _get_Tz(self, z):
""" Computes temperature of the aquifer as a function of the depth
Arguments:
z (float): depth (downwards from groundlevel is positive)
Returns:
T (float): value of temperature
"""
T = self.aquifer.Tsur + z * self.aquifer.labda
return T
# Thermophysical properties
def rho(self, Twater, Pwater):
# rho = (1 + 10e-6 * (-80 * T - 3.3 * T**2 + 0.00175 * T**3 + 489 * p - 2 * T * p + 0.016 * T**2 * p - 1.3e-5 * T**3\
# * p - 0.333 * p**2 - 0.002 * T * p**2) )
rho = PropsSI('D', 'T', Twater, 'P', Pwater, 'IF97::Water')
# rho = self.aquifer.rhof * (1 - 3.17e-4 * (Twater - 298.15) - 2.56e-6 * (Twater - 298.15) ** 2)
return rho
def mu(self, Twater, Pwater):
# mu = 0.1 + 0.333 * saltcontent + (1.65 + 91.9 * saltcontent**3) * math.exp(-(0.42*(saltcontent**0.8 - 0.17)**2 + 0.045) * Twater**0.8)
mu = PropsSI('V', 'T', Twater, 'P', Pwater, 'IF97::Water')
return mu
## Graphical variables for GUI ##
# self.Dx = self.aquifer.L * 3 # domain of x
# self.Dy = - (2 * self.aquifer.dtop + self.aquifer.H) # domain of y
# self.Nx = 24 # number of nodes by x
# self.Ny = 10 # number of nodes by y
# self.nNodes = self.Nx * self.Ny # total number of nodes
# self.ne = (self.Nx - 1) * (self.Ny - 1)
# self.dx = self.Dx / self.Nx # segment length of x
# self.dy = self.Dy / self.Ny # segment length of y
# self.domain = np.array([self.dx, self.dy])
# self.x_grid, self.y_grid = self._make_grid()
# self.x_well, self.y_well = self._construct_well()
# self.nodes_grid = self._make_nodes_grid()
# self.coordinate_grid = self._make_coordinates_grid()
# self.P_grid = self._compute_P_grid()
# self.T_grid = self._compute_T_grid()
# def _get_gaussian_points
# def _compute_T_grid(self):
# T_grid = self._get_T(-self.y_grid)
# # P_grid[self.Ny/2][self.Nx/3] = self.P_wellbore
# # P_grid[5][16] = self.P_wellbore
# # P_grid[4][16] = self.P_wellbore
# T_grid[5][8] = self.Tinj
# T_grid[4][8] = self.Tinj
#
# return T_grid
# def _compute_P_grid(self):
# P_grid = self._get_P(-self.y_grid)
# # P_grid[self.Ny/2][self.Nx/3] = self.P_wellbore
# P_grid[5][16] = self.P_wellbore
# P_grid[4][16] = self.P_wellbore
# P_grid[5][8] = self.P_wellbore
# P_grid[4][8] = self.P_wellbore
#
# return P_grid
# def _make_nodes_grid(self):
# """ Compute a nodes grid for the doublet
#
# Returns:
# x_grid_nodes, y_grid_nodes (np.array): arrays of the domain in x and y direction
# """
# i = np.arange(0, self.Nx+1, 1)
# j = np.arange(0, -self.Ny-1, -1)
#
# i_coords, j_coords = np.meshgrid(i, j)
#
# nodes_grid = np.array([i_coords, j_coords])
#
# return nodes_grid
# def _make_coordinates_grid(self):
# coordinates_grid = self.nodes_grid
#
# coordinates_grid[0,:,:] = self.nodes_grid[0,:,:] * self.domain[0]
# coordinates_grid[1,:,:] = self.nodes_grid[1,:,:] * -self.domain[1]
#
# return coordinates_grid
# def _make_grid(self):
# """ Compute a cartesian grid for the doublet
#
# Returns:
# domain (np.array): array of the domain in x and y direction
# """
# x = np.linspace(0, self.aquifer.L * 3, self.Nx)
# y = np.linspace(0,- (2 * self.aquifer.dtop + self.aquifer.H) , self.Ny)
# x_grid, y_grid = np.meshgrid(x, y)
#
# return x_grid, y_grid
# def _construct_well(self):
# """ Compute two wells for the doublet
#
# Returns:
# x_well, y_well (np.array): array of the x and y of the well
# """
# # x = np.array([[self.aquifer.L * 5 - self.aquifer.L * 0.5], [self.aquifer.L * 5 + self.aquifer.L * 0.5]])
# # y = np.linspace(0,- (self.aquifer.dtop + self.aquifer.H) , (20 * self.Ny) - 1)
# x_well = np.array([[self.x_grid[0][math.floor(self.Nx/3)]], [self.x_grid[0][2*math.floor(self.Nx/3)]]])
# y_well = self.y_grid[math.floor(self.Ny/2)][0] * np.ones(2)
#
# return x_well, y_well
#Forward Analysis
def evaluateDoublet(doublet):
    """Print a summary of the analytical doublet model and return its node
    pressures [MPa] and temperatures [K] along the flow path (nodes 2..9).
    """
    # one big print: adjacent string literals concatenate implicitly, so each
    # label is glued to the previous unit string ("Kg/s\n" "ppump,p/i ", ...)
    print("\r\n############## Analytical values model ##############\n"
          "m_dot: ", doublet.aquifer.mdot, "Kg/s\n"
          "ppump,p/i ", doublet.ppump/1e5, "Bar\n"
          "pnode10/p_aq,p: ", doublet.pnode10/1e5, "Bar\n"
          "pnode9/p_bh,p: ", doublet.pnode9/1e5, "Bar\n"
          "pnode8/p_pu,p: ", doublet.pnode8/1e5, "Bar\n"
          "pnode7/p_out,p: ", doublet.pnode7/1e5, "Bar\n"
          "pnode6/p_in,HE: ", doublet.pnode6/1e5, "Bar\n"
          "pnode5/p_out,HE: ", doublet.pnode5/1e5, "Bar\n"
          "pnode2/p_bh,i: ", doublet.pnode2/1e5, "Bar\n"
          "pnode1/p_aq,i: ", doublet.pnode1/1e5, "Bar\n"
          "Tnode9/T_bh,p: ", doublet.Tnode9-273, "Celcius\n"
          "Tnode8/T_pu,p: ", doublet.Tnode8-273, "Celcius\n"
          "Tnode7/T_in,HE: ", doublet.Tnode7-273, "Celcius\n"
          "Tnode6/T_in,HE: ", doublet.Tnode6-273, "Celcius\n"
          "Tnode5/T_out,HE: ", doublet.Tnode5-273, "Celcius\n"
          "Tnode4/T_in,i: ", doublet.Tnode4-273, "Celcius\n"
          "Tnode3/T_pu,i: ", doublet.Tnode3-273, "Celcius\n"
          "Tnode2/T_bh,i: ", doublet.Tnode2-273, "Celcius\n"
          "Power,HE: ", doublet.Phe/1e6, "MW")

    # pressures are returned in MPa, temperatures in K
    MPA = 1e6
    pnodelist = [doublet.pnode2 / MPA, doublet.pnode3 / MPA, doublet.pnode4 / MPA, doublet.pnode5 / MPA,
                 doublet.pnode6 / MPA, doublet.pnode7 / MPA, doublet.pnode8 / MPA, doublet.pnode9 / MPA]
    Tnodelist = [doublet.Tnode2, doublet.Tnode3, doublet.Tnode4, doublet.Tnode5, doublet.Tnode6, doublet.Tnode7,
                 doublet.Tnode8, doublet.Tnode9]

    return pnodelist, Tnodelist
# ## Finite element thermo-hydraulic model
#
# def DoubletFlow(aquifer, well, doublet, k, porosity, timestep, endtime):
#
# # construct mesh
# nelemsX = 10
# nelemsY = 10
# vertsX = np.linspace(0, well.L, nelemsX + 1)
# vertsY = np.linspace(0, aquifer.H, nelemsY + 1)
# vertsZ = np.linspace(0, aquifer.H, nelemsY + 1)
# topo, geom = mesh.rectilinear([vertsX, vertsY])
# # topo = topo.withboundary(inner='left', outer='right')
#
# bezier = topo.sample('bezier', 3)
# points, vals = bezier.eval([geom, 0])
#
# # # plot
# # plt.figure(figsize=(10, 10))
# # cmap = colors.ListedColormap("limegreen")
# # plt.tripcolor(points[:, 0], points[:, 1], bezier.tri, vals, shading='gouraud', cmap=cmap)
# # ax = plt.gca()
# # ax.add_collection(collections.LineCollection(points[bezier.hull], colors='r', linewidth=2, alpha=1))
#
# # create namespace
# ns = function.Namespace()
# degree = 3
# ns.pbasis = topo.basis('std', degree=degree)
# ns.Tbasis = topo.basis('std', degree=degree - 1)
# ns.p = 'pbasis_n ?lhsp_n'
# ns.T = 'Tbasis_n ?lhsT_n'
# ns.x = geom
# ns.cf = aquifer.Cp_f
# ns.g = aquifer.g
# ns.g_i = '<0, -g>_i'
# ns.uinf = 1, 0
# ns.mdot = well.mdot
# ns.r = well.r
# ns.Awell = well.A_well
# ns.nyy = 0, 1
# ns.pout = doublet.P_aqproducer
# ns.p0 = ns.pout
# ns.Tatm = 20 + 273
# ns.Tin = doublet.well.Tinj
# ns.Tout = doublet.T_HE
# ns.T0 = doublet.T_HE
# ns.ρf = aquifer.rhof
# ns.ρ = ns.ρf #* (1 - 3.17e-4 * (ns.T - 298.15) - 2.56e-6 * (ns.T - 298.15)**2) #no lhsT in lhsp
# ns.lambdl = aquifer.labda_l #'thermal conductivity liquid [W/mK]'
# ns.lambds = aquifer.labda_s #'thermal conductivity solid [W/mK]'
# ns.qh = ns.lambds * aquifer.labda #heat source production rocks [W/m^2]
# k_int_x = k #'intrinsic permeability [m2]'
# k_int_y = k #'intrinsic permeability [m2]'
# k_int= (k_int_x,k_int_y)
# ns.k = (1/aquifer.mu)*np.diag(k_int)
# ns.k1 = k
# ns.u_i = '-k_ij (p_,j - (ρ g_1)_,j)' #darcy velocity
# ns.ur = '-k1 (p_,i)' #darcy velocity, but now simple
# ns.u0 = (ns.mdot / (ns.ρ * ns.Awell))
# ns.qf = -ns.u0
# ns.λ = porosity * ns.lambdl + (1 - porosity) * ns.lambds # heat conductivity λ [W/m/K]
# ns.porosity = porosity
# ns.w = math.sin()
# ns.Ar = aquifer.H * ns.w
#
# # define initial condition for mass balance and darcy's law
# sqr = topo.integral('(p - p0) (p - p0)' @ ns, degree=degree * 2) # set initial temperature to T=T0
# pdofs0 = solver.optimize('lhsp', sqr)
# statep0 = dict(lhsp=pdofs0)
#
# # define dirichlet constraints for hydraulic process
# sqrp = topo.boundary['right'].integral('(p - pout) (p - pout) d:x' @ ns, degree=degree * 2) # set outflow condition to p=p_out
# consp = solver.optimize('lhsp', sqrp, droptol=1e-15)
# # consp = dict(lhsp=consp)
#
# # formulate hydraulic process single field
# resp = topo.integral('(u_i porosity pbasis_n,i) d:x' @ ns, degree=degree*2) # formulation of velocity
# resp -= topo.boundary['left'].integral('pbasis_n qf d:x' @ ns, degree=degree*2) # set inflow boundary to q=u0
# resp += topo.boundary['top,bottom'].integral('(pbasis_n u_i n_i) d:x' @ ns, degree=degree*2) #neumann condition
# pinertia = topo.integral('ρ pbasis_n,i u_i porosity d:x' @ ns, degree=degree*4)
#
# # solve for transient state of pressure
# # lhsp = solver.solve_linear('lhsp', resp, constrain=consp)
#
# # introduce temperature dependent variables
# ns.ρ = ns.ρf * (1 - 3.17e-4 * (ns.T - 298.15) - 2.56e-6 * (ns.T - 298.15)**2)
# ns.lambdl = 4187.6 * (-922.47 + 2839.5 * (ns.T / ns.Tatm) - 1800.7 * (ns.T / ns.Tatm)**2 + 525.77*(ns.T / ns.Tatm)**3 - 73.44*(ns.T / ns.Tatm)**4)
# # ns.cf = 3.3774 - 1.12665e-2 * ns.T + 1.34687e-5 * ns.T**2 # if temperature above T=100 [K]
#
# # define initial condition for thermo process
# sqr = topo.integral('(T - T0) (T - T0)' @ ns, degree=degree * 2) # set initial temperature to T=T0
# Tdofs0 = solver.optimize('lhsT', sqr)
# stateT0 = dict(lhsT=Tdofs0)
#
# # define dirichlet constraints for thermo process
# sqrT = topo.boundary['left'].integral('(T - Tin) (T - Tin) d:x' @ ns, degree=degree*2) # set temperature injection pipe to T=Tin
# # sqrT = topo.boundary['left, bottom, top'].integral('(T - T0) (T - T0) d:x' @ ns, degree=degree*2) #set bottom temperature T=T0
# consT = solver.optimize('lhsT', sqrT, droptol=1e-15)
# consT = dict(lhsT=consT)
#
# # formulate thermo process
# resT = topo.integral('(ρ cf Tbasis_n (u_k T)_,k ) d:x' @ ns, degree=degree*2) # formulation of convection of energy
# resT -= topo.integral('Tbasis_n,i (- λ) T_,i d:x' @ ns, degree=degree*2) # formulation of conductive heat flux
# resT -= topo.boundary['top,bottom'].integral('Tbasis_n qh d:x' @ ns, degree=degree*2) # heat flux on boundary
# # resT -= topo.integral('Tbasis_n qh d:x' @ ns, degree=degree*2) # heat source/sink term within domain
# Tinertia = topo.integral('ρ cf Tbasis_n T d:x' @ ns, degree=degree*4)
#
# def make_plots():
# fig, ax = plt.subplots(2)
#
# ax[0].set(xlabel='X [m]', ylabel='Pressure [Bar]')
# ax[0].set_ylim([min(p/1e5), doublet.P_aqproducer/1e5])
# # ax[0].set_xlim([0, 1000])
# print("wellbore pressure", p[0])
# print("pressure difference", p[0] - doublet.P_aqproducer)
# ax[0].plot(x[:, 0].take(bezier.tri.T, 0), (p/1e5).take(bezier.tri.T, 0))
#
# # ax[1].set(xlabel='X [m]', ylabel='Temperature [Celcius]')
# # ax[1].plot(x[:,0].take(bezier.tri.T, 0), T.take(bezier.tri.T, 0)-273)
#
# fig, axs = plt.subplots(3, sharex=True, sharey=True)
# fig.suptitle('2D Aquifer')
#
# plot0 = axs[0].tripcolor(x[:, 0], x[:, 1], bezier.tri, p / 1e5, vmin=min(p/1e5), vmax=doublet.P_aqproducer/1e5, shading='gouraud', rasterized=True)
# fig.colorbar(plot0, ax=axs[0], label="Darcy p [Bar]")
#
# plot1 = axs[1].tripcolor(x[:, 0], x[:, 1], bezier.tri, u[:, 0], vmin=0, vmax=0.05, shading='gouraud',
# rasterized=True)
# fig.colorbar(plot1, ax=axs[1], label="Darcy Ux [m/s]")
# plt.xlabel('x')
# plt.ylabel('z')
#
# # plot2 = axs[2].tripcolor(x[:, 0], x[:, 1], bezier.tri, T-273, shading='gouraud', rasterized=True)
# # fig.colorbar(plot2, ax=axs[2], label="T [C]")
#
# plt.show()
#
# # Time dependent pressure development
#
# bezier = topo.sample('bezier', 5)
# with treelog.iter.plain(
# 'timestep', solver.impliciteuler(('lhsp'), residual=resp, inertia=pinertia,
# arguments=statep0, timestep=timestep, constrain=consp,
# newtontol=1e-2)) as steps:
# #arguments=dict(lhsp=lhsp, lhsT=Tdofs0)
#
# for istep, lhsp in enumerate(steps):
#
# time = istep * timestep
# # x, u, p, T = bezier.eval(['x_i', 'u_i', 'p', 'T'] @ ns, **state)
# x, p, u = bezier.eval(['x_i', 'p', 'u_i'] @ ns, lhsp=lhsp)
#
# if time >= endtime:
# print(len(x[:, 0]), len(p))
#
# make_plots()
# break
#
# # Time dependent heat transport process
# bezier = topo.sample('bezier', 5)
# with treelog.iter.plain(
# 'timestep', solver.impliciteuler(('lhsT'), residual=resT, inertia=Tinertia,
# arguments=dict(lhsp=lhsp, lhsT=Tdofs0), timestep=timestep, constrain=consT,
# newtontol=1e-2)) as steps:
#
# for istep, lhsT in enumerate(steps):
#
# time = istep * timestep
# # x, u, p, T = bezier.eval(['x_i', 'u_i', 'p', 'T'] @ ns, **state)
# x, p, u, T = bezier.eval(['x_i', 'p', 'u_i', 'T'] @ ns, lhsp=lhsp, lhsT=lhsT)
#
# if time >= endtime:
# print(len(x[:,0]), len(T))
#
# make_plots()
# break
#
# bar = 1e5
# p_inlet = p[0]/bar
# T_prod = T[-1]
#
# return p_inlet, T_prod
#
# # solve for steady state of temperature
# # lhsT = solver.newton('lhsT', resT, constrain=consT, arguments=dict(lhsp=lhsp)).solve(tol=1e-2)
#
#
# #################
# # Postprocessing
# #################
#
# # bezier = topo.sample('bezier', 5)
# # # x, p, u = bezier.eval(['x_i', 'p', 'u_i'] @ ns, lhsp=lhsp)
# # x, p, u, T = bezier.eval(['x_i', 'p', 'u_i', 'T'] @ ns, lhsp=lhsp, lhsT=lhsT)
#
# def add_value_to_plot():
# for i, j in zip(x[:,0], x[:,1]):
# for index in range(len(T)):
# print(T[index], index)
# # axs[2].annotate(T[index], xy=(i, j))
#
# # add_value_to_plot()
# # fig, ax = plt.subplots(4)
# # density = 'True'
# #
# # ax[0].plot(x1,frozen_lognorm.pdf(x1)*(max(x1)-min(x1)))
# # # ax[0].hist(permeability, bins=bin_centers1, density=density, histtype='stepfilled', alpha=0.2)
# # ax[0].set(xlabel='Permeability K [m/s]', ylabel='Probability')
# # ax[0].axvline(x=2.2730989084434785e-08)
# #
# # ax[1].plot(x2, frozen_norm_por.pdf(x2)*(max(x2)-min(x2)))
# # # ax[1].hist(porosity, bins=bin_centers2, density=density, histtype='stepfilled', alpha=0.2)
# # ax[1].set(xlabel='Porosity [-]', ylabel='Probability')
# # ax[1].axvline(x=0.163)
# #
# # ax[2].hist(p_inlet, density=density, bins=50, histtype='stepfilled', alpha=0.2)
# # mu_p = np.mean(p_inlet)
# # # print(mu_p)
# # stddv_p = np.var(p_inlet)**0.5
# # # print(stddv_p)
# # frozen_norm_p = stats.norm(loc=mu_p, scale=stddv_p)
# # x3 = np.linspace(mu_p-3*stddv_p, mu_p+3*stddv_p, 10)
# # # print(frozen_norm_p.pdf(x3))
# # # ax[2].plot(x3,frozen_lognorm_p.pdf(x3))
# # ax[2].plot(x3,frozen_norm_p.pdf(x3))
# # # ax[2].xaxis.set_major_locator(MaxNLocator(integer=True))
# # ax[2].get_xaxis().get_major_formatter().set_useOffset(False)
# # ax[2].set(xlabel='Injector Pressure [Bar]', ylabel='Probability')
# # # plt.xlabel('Inlet Pressure [Bar]')
# # # plt.ylabel('Probability')
# #
# # ax[3].hist(T_prod, density=density, bins=50, histtype='stepfilled', alpha=0.2)
# # mu_T = np.mean(T_prod)
# # stddv_T = np.var(T_prod)**0.5
# # frozen_norm_T = stats.norm(loc=mu_T, scale=stddv_T)
# # x4 = np.linspace(mu_T-3*stddv_T, mu_T+3*stddv_T, 10)
# # # print(frozen_norm_p.pdf(x4))
# # ax[3].plot(x4,frozen_norm_T.pdf(x4))
# # ax[3].set(xlabel='Producer Temperature [Celcius]', ylabel='Probability')
# #
# # # print(ns.u0.eval())
# # # print("velocity horizontal", (u[:,0]))
# # # print((p[0]))
# # plt.subplots_adjust(hspace=1)
# # # plt.show()
# #
# # Confidence_mu = 0.95
# # N_min = (norm.ppf((1 + Confidence_mu)/2) / (1 - Confidence_mu))**2 * (stddv_p / mu_p)**2
# # print("Cdf", norm.ppf((1 + Confidence_mu)/2))
# # print("N_min", N_min)
#
# # fig1, ax1 = plt.subplots(2)
#
# # import numpy as np
# # from scipy import stats
#
# # sns.set(color_codes=True)
#
# # x = np.random.normal(size=100)
# # sns.distplot(x);
# #
# # mean, cov = [0, 1], [(1, .5), (.5, 1)]
# # data = np.random.multivariate_normal(mean, cov, 200)
# # df = pd.DataFrame(data, columns=["x1", "x2"])
# # sns.jointplot(x="x1", y="x2", data=df);
#
# # f, ax = plt.subplots(figsize=(6, 6))
# # sns.kdeplot(x1, x2, ax=ax)
# # sns.rugplot(x1, color="g", ax=ax)
# # sns.rugplot(x2, vertical=True, ax=ax);
#
# # fig1.suptitle('2D Probability plot')
# # triang = tri.Triangulation(x1, x2)
#
# # plot1 = ax1[0].tripcolor(x1, x2, triang, frozen_lognorm.pdf(x1)+frozen_norm_por.pdf(x2), shading='gouraud', rasterized=True)
# # fig1.colorbar(plot1, ax=ax1[0], label="Probability [x]")
#
# # Z = frozen_lognorm.pdf(x1)*frozen_norm_por.pdf(x2)
# # print("permeability", len(x1))
# # print("porosity", len(x2))
# # print("dit is Z", len(Z))
# # fig1, ax1 = plt.subplots()
# # CS = ax1.contour(x1, x2, Z)
# # ax1.clabel(CS, inline=1, fontsize=10)
# # # ax1.set_title('Simplest default with labels')
# #
# # plt.show()
| [
"CoolProp.CoolProp.PropsSI",
"numpy.linspace",
"numpy.ones_like",
"math.exp"
] | [((5426, 5471), 'numpy.linspace', 'np.linspace', (['self.z', '(self.z - dz)', '(dz / dl + 1)'], {}), '(self.z, self.z - dz, dz / dl + 1)\n', (5437, 5471), True, 'import numpy as np, treelog\n'), ((5718, 5749), 'numpy.linspace', 'np.linspace', (['(0)', 'dz', '(dz / dl + 1)'], {}), '(0, dz, dz / dl + 1)\n', (5729, 5749), True, 'import numpy as np, treelog\n'), ((10064, 10117), 'CoolProp.CoolProp.PropsSI', 'PropsSI', (['"""D"""', '"""T"""', 'Twater', '"""P"""', 'Pwater', '"""IF97::Water"""'], {}), "('D', 'T', Twater, 'P', Pwater, 'IF97::Water')\n", (10071, 10117), False, 'from CoolProp.CoolProp import PropsSI\n'), ((10436, 10489), 'CoolProp.CoolProp.PropsSI', 'PropsSI', (['"""V"""', '"""T"""', 'Twater', '"""P"""', 'Pwater', '"""IF97::Water"""'], {}), "('V', 'T', Twater, 'P', Pwater, 'IF97::Water')\n", (10443, 10489), False, 'from CoolProp.CoolProp import PropsSI\n'), ((3384, 3409), 'numpy.ones_like', 'np.ones_like', (['self.Tnode9'], {}), '(self.Tnode9)\n', (3396, 3409), True, 'import numpy as np, treelog\n'), ((6044, 6064), 'math.exp', 'math.exp', (['self.gamma'], {}), '(self.gamma)\n', (6052, 6064), False, 'import math\n')] |
# -*- coding: utf-8 -*-
from __future__ import division
import numpy as np
import matplotlib.pylab as plt
import sys
from scipy import interpolate
from os import makedirs
from os.path import exists
from vampy import vamplot
from vampy import utils
# global matplotlib styling for all figures produced by this script
plt.rcParams.update({
    'axes.labelsize': 9,
    'xtick.labelsize': 9,
    'ytick.labelsize': 9,
    'legend.fontsize': 9,
    'font.family': 'sans-serif',
    'font.serif': ['Arial'],
})
def main(param):
    """Load pressure/velocity CSV results for each arterial segment, resample
    them onto a square time/space grid, and save 3D surface plots.

    Arguments:
        param (str): path of the configuration file understood by
            ``utils.read_config``.
    """
    # read config file (the config dict is named ``cfg`` so it is not
    # shadowed later -- the original reused ``f`` for an interpolator)
    cfg, a, s = utils.read_config(param)
    data_dir = cfg['data_dir']
    plot_dir = cfg['plot_dir']
    suffix = cfg['run_id']
    T = s['T']
    tc = s['tc']
    tf = T * tc  # end time of the final cardiac cycle
    Ru = a['Ru']
    depth = a['depth']
    if not exists("%s/%s" % (plot_dir, suffix)):
        makedirs("%s/%s" % (plot_dir, suffix))

    # a tree of depth 1 consists of a single (root) artery
    length = 1 if depth == 1 else len(Ru)

    # figure dimensions are loop-invariant, so compute them once
    WIDTH = 510  # the number latex spits out
    FACTOR = 1.0  # the fraction of the width you'd like the figure to occupy
    fig_width_pt = WIDTH * FACTOR
    inches_per_pt = 1.0 / 72.27
    golden_ratio = (np.sqrt(5) - 1.0) / 2.0  # because it looks good
    fig_width_in = fig_width_pt * inches_per_pt  # figure width in inches
    fig_height_in = fig_width_in * golden_ratio  # figure height in inches
    fig_dims = [fig_width_in, fig_height_in]  # fig dims as a list

    for pos in range(length):
        # artery length = upstream radius * length-to-radius ratio
        if isinstance(a['Ru'], float):
            L = a['Ru'] * a['lam']
        else:
            L = a['Ru'][pos] * a['lam'][pos]

        P = np.loadtxt("%s/%s/p%d_%s.csv" % (data_dir, suffix, pos, suffix), delimiter=',')
        U = np.loadtxt("%s/%s/u%d_%s.csv" % (data_dir, suffix, pos, suffix), delimiter=',')

        t = np.linspace(tf - T, tf, P.shape[1])
        x = np.linspace(0, L, P.shape[0])
        # build interpolators, then resample both fields on a len(t) x len(t) grid
        interp_p = interpolate.interp2d(t, x, P, kind='linear')
        interp_u = interpolate.interp2d(t, x, U, kind='linear')
        x = np.linspace(0, L, len(t))
        P = interp_p(t, x)
        U = interp_u(t, x)

        vamplot.p3d_plot(fig_dims, t, P, L, pos, suffix, plot_dir)
        vamplot.q3d_plot(fig_dims, t, U, L, pos, suffix, plot_dir)
if __name__ == "__main__":
    # expects exactly one command-line argument: the configuration file path
    script, param = sys.argv
    main(param)
| [
"os.path.exists",
"numpy.sqrt",
"os.makedirs",
"vampy.utils.read_config",
"vampy.vamplot.p3d_plot",
"numpy.linspace",
"vampy.vamplot.q3d_plot",
"numpy.loadtxt",
"scipy.interpolate.interp2d"
] | [((535, 559), 'vampy.utils.read_config', 'utils.read_config', (['param'], {}), '(param)\n', (552, 559), False, 'from vampy import utils\n'), ((756, 792), 'os.path.exists', 'exists', (["('%s/%s' % (plot_dir, suffix))"], {}), "('%s/%s' % (plot_dir, suffix))\n", (762, 792), False, 'from os.path import exists\n'), ((802, 840), 'os.makedirs', 'makedirs', (["('%s/%s' % (plot_dir, suffix))"], {}), "('%s/%s' % (plot_dir, suffix))\n", (810, 840), False, 'from os import makedirs\n'), ((1115, 1194), 'numpy.loadtxt', 'np.loadtxt', (["('%s/%s/p%d_%s.csv' % (data_dir, suffix, pos, suffix))"], {'delimiter': '""","""'}), "('%s/%s/p%d_%s.csv' % (data_dir, suffix, pos, suffix), delimiter=',')\n", (1125, 1194), True, 'import numpy as np\n'), ((1207, 1286), 'numpy.loadtxt', 'np.loadtxt', (["('%s/%s/u%d_%s.csv' % (data_dir, suffix, pos, suffix))"], {'delimiter': '""","""'}), "('%s/%s/u%d_%s.csv' % (data_dir, suffix, pos, suffix), delimiter=',')\n", (1217, 1286), True, 'import numpy as np\n'), ((1299, 1334), 'numpy.linspace', 'np.linspace', (['(tf - T)', 'tf', 'P.shape[1]'], {}), '(tf - T, tf, P.shape[1])\n', (1310, 1334), True, 'import numpy as np\n'), ((1345, 1374), 'numpy.linspace', 'np.linspace', (['(0)', 'L', 'P.shape[0]'], {}), '(0, L, P.shape[0])\n', (1356, 1374), True, 'import numpy as np\n'), ((1385, 1429), 'scipy.interpolate.interp2d', 'interpolate.interp2d', (['t', 'x', 'P'], {'kind': '"""linear"""'}), "(t, x, P, kind='linear')\n", (1405, 1429), False, 'from scipy import interpolate\n'), ((1442, 1486), 'scipy.interpolate.interp2d', 'interpolate.interp2d', (['t', 'x', 'U'], {'kind': '"""linear"""'}), "(t, x, U, kind='linear')\n", (1462, 1486), False, 'from scipy import interpolate\n'), ((2101, 2159), 'vampy.vamplot.p3d_plot', 'vamplot.p3d_plot', (['fig_dims', 't', 'P', 'L', 'pos', 'suffix', 'plot_dir'], {}), '(fig_dims, t, P, L, pos, suffix, plot_dir)\n', (2117, 2159), False, 'from vampy import vamplot\n'), ((2168, 2226), 'vampy.vamplot.q3d_plot', 'vamplot.q3d_plot', 
(['fig_dims', 't', 'U', 'L', 'pos', 'suffix', 'plot_dir'], {}), '(fig_dims, t, U, L, pos, suffix, plot_dir)\n', (2184, 2226), False, 'from vampy import vamplot\n'), ((1807, 1817), 'numpy.sqrt', 'np.sqrt', (['(5)'], {}), '(5)\n', (1814, 1817), True, 'import numpy as np\n')] |
import math
import numpy as np
class Robot:
    """Kinematic model of a four-wheel mecanum-drive robot.

    Wheel layout (top view)::

        w1 <-- track width --> w2
        ^                      |
        |                      |
        wheel base             |
        |                      |
        v                      |
        w3 ------------------- w4

    Attributes:
        inverse_transform_matrix: (4, 3) matrix mapping a body twist
            [vx, vy, wz]^T to the four wheel angular velocities.
        max_wheel_speed: scalar wheel speed produced by the reference command
            [vx, vy, wz] = [1, 1, 0]; used to normalise wheel commands.
    """

    def __init__(self, wheel_base, track_width, wheel_radius, max_v, max_w):
        self.wheel_base = wheel_base
        self.track_width = track_width
        self.wheel_radius = wheel_radius
        self.max_v = max_v  # maximum linear speed
        self.max_w = max_w  # maximum angular speed
        wb = self.wheel_base / 2.0   # half wheel base
        tw = self.track_width / 2.0  # half track width
        r = self.wheel_radius
        # standard mecanum inverse kinematics: wheel_speeds = (1/r) * T @ twist
        T = np.array([[1, -1, -(tw + wb)],
                      [1, 1, (tw + wb)],
                      [1, 1, -(tw + wb)],
                      [1, -1, (tw + wb)]])
        self.inverse_transform_matrix = (1 / r) * T
        # Fastest wheel for the reference twist [1, 1, 0].  np.abs(...).max()
        # returns a scalar; the original python max() over a 2-D array compared
        # whole rows and yielded a 1-element array.
        reference_twist = np.array([[1.0], [1.0], [0.0]])
        self.max_wheel_speed = np.abs(
            np.matmul(self.inverse_transform_matrix, reference_twist)).max()
def compute_motor_velocities(input, robot, max_value=255):
    """Map a body velocity command to four scaled wheel velocities.

    Arguments:
        input: sequence [vx, vy, wz]; anything shorter yields all zeros.
            (The parameter name shadows the builtin but is kept for
            backward compatibility with existing callers.)
        robot (Robot): provides ``inverse_transform_matrix`` and
            ``max_wheel_speed``.
        max_value (int): output magnitude that corresponds to
            ``robot.max_wheel_speed`` (e.g. 255 for 8-bit PWM).
    Returns:
        np.ndarray: four wheel commands, ``raw * max_value / max_wheel_speed``.
    """
    motor_velocities = np.zeros(4)
    if len(input) < 3:
        return motor_velocities
    robot_velocity = np.array([[input[0]], [input[1]], [input[2]]])
    raw_velocities = np.matmul(robot.inverse_transform_matrix, robot_velocity)
    # NOTE(review): this early-out only fires when the *maximum* is zero, so an
    # all-negative command slips through -- presumably max(abs(...)) was meant.
    if raw_velocities.max() == 0.0:
        return motor_velocities
    # (the original also accumulated an unused sum of the raw magnitudes here,
    # shadowing the builtin ``sum`` -- dead code removed)
    for i in range(len(raw_velocities)):
        motor_velocities[i] = raw_velocities[i] * max_value / robot.max_wheel_speed
    return motor_velocities
| [
"numpy.array",
"numpy.zeros",
"numpy.matmul"
] | [((1016, 1027), 'numpy.zeros', 'np.zeros', (['(4)'], {}), '(4)\n', (1024, 1027), True, 'import numpy as np\n'), ((1104, 1150), 'numpy.array', 'np.array', (['[[input[0]], [input[1]], [input[2]]]'], {}), '([[input[0]], [input[1]], [input[2]]])\n', (1112, 1150), True, 'import numpy as np\n'), ((1170, 1227), 'numpy.matmul', 'np.matmul', (['robot.inverse_transform_matrix', 'robot_velocity'], {}), '(robot.inverse_transform_matrix, robot_velocity)\n', (1179, 1227), True, 'import numpy as np\n'), ((631, 721), 'numpy.array', 'np.array', (['[[1, -1, -(tw + wb)], [1, 1, tw + wb], [1, 1, -(tw + wb)], [1, -1, tw + wb]]'], {}), '([[1, -1, -(tw + wb)], [1, 1, tw + wb], [1, 1, -(tw + wb)], [1, -1,\n tw + wb]])\n', (639, 721), True, 'import numpy as np\n'), ((898, 929), 'numpy.array', 'np.array', (['[[1.0], [1.0], [0.0]]'], {}), '([[1.0], [1.0], [0.0]])\n', (906, 929), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
"""
An Eye Tracker can get landmarks of the eyes from an image tensor.
"""
import cv2 as cv
import numpy as np
from config import ConfigOptionMetadata, ConfigOptionPackage
from tracking.eye_tracking import EyeTrackerInput
from tracking.eye_tracking.eye_tracking import EyeTracker
class InfraredEyeTrackerCOP(ConfigOptionPackage):
    """Config option package declaring the options used by InfraredEyeTracker."""

    @staticmethod
    def get_options_metadata() -> list:
        threshold_option = ConfigOptionMetadata(
            int, 'eye_tracking_threshold', 88,
            'The eye tracking threshold if infrared tracking is used.',
        )
        return [threshold_option]
class InfraredEyeTracker(EyeTracker):
    """Eye tracker for infrared images: estimates one pupil centre per eye by
    thresholding dark regions inside the combined eye bounding box."""
    @staticmethod
    def get_required_option_packages() -> list:
        # extend the base tracker's requirements with this tracker's options
        packages = super(InfraredEyeTracker, InfraredEyeTracker).get_required_option_packages()
        packages.extend([InfraredEyeTrackerCOP])
        return packages
    def __init__(self, config):
        super().__init__(config)
        # binarisation threshold separating the dark pupil from its surroundings
        self.eye_tracking_threshold = config['eye_tracking_threshold']
    def track_landmarks(self, input_data: EyeTrackerInput):
        """Return a (2, 2) array [[x_left, y_left], [x_right, y_right]] with the
        estimated pupil centres in image coordinates."""
        super(InfraredEyeTracker, self).track_landmarks(input_data)
        self.input = input_data
        image = np.copy(self.input.image)
        bbox_eye_left = self.input.bbox_eye_left
        bbox_eye_right = self.input.bbox_eye_right
        # combined bounding box spanning both eyes
        x = int(bbox_eye_left["x"])
        y = int(bbox_eye_left["y"])
        w = int((bbox_eye_right["x"] + bbox_eye_right["width"]) - bbox_eye_left["x"])
        h = int(
            bbox_eye_left["height"] if bbox_eye_left["height"] > bbox_eye_right["height"] else bbox_eye_right["height"])
        # assumes the input image is single-channel grayscale -- TODO confirm
        image = cv.equalizeHist(image)
        # roi is a *view* into the equalized grayscale image; writes below
        # modify that array, not the BGR debug image created next
        roi = image[y:y + h, x:x + w]
        image = cv.cvtColor(image, cv.COLOR_GRAY2BGR)
        cv.rectangle(image, (x, y), (x + w, y + h), (0, 0, 255), 2)
        # blank the middle third of the ROI so only the two eye regions stay dark
        roi[:, int(w / 3):int(w / 3) * 2] = 255
        # print_numpy(roi, True, True)
        roi = cv.GaussianBlur(roi, (11, 11), 0)
        thresh = self.eye_tracking_threshold
        # inverse threshold: dark pupils become white blobs
        _, roi = cv.threshold(roi, thresh, 255, cv.THRESH_BINARY_INV)
        kernel = np.ones((5, 5), np.uint8)
        roi = cv.dilate(roi, kernel, iterations=2)
        # split into left/right halves and keep the largest blob in each
        roi_left = roi[:, 0:int(w / 2)]
        roi_right = roi[:, int(w / 2):w]
        roi = cv.cvtColor(roi, cv.COLOR_GRAY2BGR)
        x1 = 0
        y1 = 0
        x2 = 0
        y2 = 0
        contours, _ = cv.findContours(roi_left, cv.RETR_TREE, cv.CHAIN_APPROX_SIMPLE)
        contours = sorted(contours, key=lambda x: cv.contourArea(x), reverse=True)
        for cnt in contours:
            # largest contour only; loop exits after the first iteration
            (x1, y1, w1, h1) = cv.boundingRect(cnt)
            cv.rectangle(roi, (x1, y1), (x1 + w1, y1 + h1), (0, 255, 0), 2)
            # convert to image coordinates; the -15 shifts the estimate toward
            # the nose (heuristic offset)
            y1 += y + int(h1 / 2)
            x1 += x + w1 - 15  # *2
            image[y1 - 3:y1 + 3, x1 - 3:x1 + 3] = np.array([0, 255, 0])
            break
        contours, _ = cv.findContours(roi_right, cv.RETR_TREE, cv.CHAIN_APPROX_SIMPLE)
        contours = sorted(contours, key=lambda x: cv.contourArea(x), reverse=True)
        offset_w = int(w / 2)
        for cnt in contours:
            (x2, y2, w2, h2) = cv.boundingRect(cnt)
            cv.rectangle(roi, (x2 + offset_w, y2), (x2 + w2 + offset_w, y2 + h2), (0, 255, 0), 2)
            # convert to image coordinates; the +15 mirrors the left-eye offset
            y2 += y + int(h2 / 2)
            x2 += x + int(w / 2) + 15
            image[y2 - 3:y2 + 3, x2 - 3:x2 + 3] = np.array([0, 255, 0])
            break
        # fallback: no contour found -> place the estimate at the eye-half centre
        if x1 == 0 and y1 == 0:
            y1 += y + int(h / 2)
            x1 += x + int(w / 4)
        if x2 == 0 and y2 == 0:
            y2 += y + int(h / 2)
            x2 += x + int(w / 4) * 3
        self.tracked_data = np.asarray([[x1, y1], [x2, y2]])
        return self.tracked_data
| [
"cv2.rectangle",
"numpy.copy",
"config.ConfigOptionMetadata",
"numpy.ones",
"cv2.threshold",
"numpy.asarray",
"cv2.contourArea",
"cv2.equalizeHist",
"numpy.array",
"cv2.cvtColor",
"cv2.findContours",
"cv2.dilate",
"cv2.GaussianBlur",
"cv2.boundingRect"
] | [((1193, 1218), 'numpy.copy', 'np.copy', (['self.input.image'], {}), '(self.input.image)\n', (1200, 1218), True, 'import numpy as np\n'), ((1633, 1655), 'cv2.equalizeHist', 'cv.equalizeHist', (['image'], {}), '(image)\n', (1648, 1655), True, 'import cv2 as cv\n'), ((1710, 1747), 'cv2.cvtColor', 'cv.cvtColor', (['image', 'cv.COLOR_GRAY2BGR'], {}), '(image, cv.COLOR_GRAY2BGR)\n', (1721, 1747), True, 'import cv2 as cv\n'), ((1756, 1815), 'cv2.rectangle', 'cv.rectangle', (['image', '(x, y)', '(x + w, y + h)', '(0, 0, 255)', '(2)'], {}), '(image, (x, y), (x + w, y + h), (0, 0, 255), 2)\n', (1768, 1815), True, 'import cv2 as cv\n'), ((1919, 1952), 'cv2.GaussianBlur', 'cv.GaussianBlur', (['roi', '(11, 11)', '(0)'], {}), '(roi, (11, 11), 0)\n', (1934, 1952), True, 'import cv2 as cv\n'), ((2015, 2067), 'cv2.threshold', 'cv.threshold', (['roi', 'thresh', '(255)', 'cv.THRESH_BINARY_INV'], {}), '(roi, thresh, 255, cv.THRESH_BINARY_INV)\n', (2027, 2067), True, 'import cv2 as cv\n'), ((2086, 2111), 'numpy.ones', 'np.ones', (['(5, 5)', 'np.uint8'], {}), '((5, 5), np.uint8)\n', (2093, 2111), True, 'import numpy as np\n'), ((2126, 2162), 'cv2.dilate', 'cv.dilate', (['roi', 'kernel'], {'iterations': '(2)'}), '(roi, kernel, iterations=2)\n', (2135, 2162), True, 'import cv2 as cv\n'), ((2260, 2295), 'cv2.cvtColor', 'cv.cvtColor', (['roi', 'cv.COLOR_GRAY2BGR'], {}), '(roi, cv.COLOR_GRAY2BGR)\n', (2271, 2295), True, 'import cv2 as cv\n'), ((2380, 2443), 'cv2.findContours', 'cv.findContours', (['roi_left', 'cv.RETR_TREE', 'cv.CHAIN_APPROX_SIMPLE'], {}), '(roi_left, cv.RETR_TREE, cv.CHAIN_APPROX_SIMPLE)\n', (2395, 2443), True, 'import cv2 as cv\n'), ((2870, 2934), 'cv2.findContours', 'cv.findContours', (['roi_right', 'cv.RETR_TREE', 'cv.CHAIN_APPROX_SIMPLE'], {}), '(roi_right, cv.RETR_TREE, cv.CHAIN_APPROX_SIMPLE)\n', (2885, 2934), True, 'import cv2 as cv\n'), ((3623, 3655), 'numpy.asarray', 'np.asarray', (['[[x1, y1], [x2, y2]]'], {}), '([[x1, y1], [x2, y2]])\n', (3633, 3655), True, 
'import numpy as np\n'), ((444, 563), 'config.ConfigOptionMetadata', 'ConfigOptionMetadata', (['int', '"""eye_tracking_threshold"""', '(88)', '"""The eye tracking threshold if infrared tracking is used."""'], {}), "(int, 'eye_tracking_threshold', 88,\n 'The eye tracking threshold if infrared tracking is used.')\n", (464, 563), False, 'from config import ConfigOptionMetadata, ConfigOptionPackage\n'), ((2587, 2607), 'cv2.boundingRect', 'cv.boundingRect', (['cnt'], {}), '(cnt)\n', (2602, 2607), True, 'import cv2 as cv\n'), ((2620, 2683), 'cv2.rectangle', 'cv.rectangle', (['roi', '(x1, y1)', '(x1 + w1, y1 + h1)', '(0, 255, 0)', '(2)'], {}), '(roi, (x1, y1), (x1 + w1, y1 + h1), (0, 255, 0), 2)\n', (2632, 2683), True, 'import cv2 as cv\n'), ((2806, 2827), 'numpy.array', 'np.array', (['[0, 255, 0]'], {}), '([0, 255, 0])\n', (2814, 2827), True, 'import numpy as np\n'), ((3108, 3128), 'cv2.boundingRect', 'cv.boundingRect', (['cnt'], {}), '(cnt)\n', (3123, 3128), True, 'import cv2 as cv\n'), ((3141, 3231), 'cv2.rectangle', 'cv.rectangle', (['roi', '(x2 + offset_w, y2)', '(x2 + w2 + offset_w, y2 + h2)', '(0, 255, 0)', '(2)'], {}), '(roi, (x2 + offset_w, y2), (x2 + w2 + offset_w, y2 + h2), (0, \n 255, 0), 2)\n', (3153, 3231), True, 'import cv2 as cv\n'), ((3351, 3372), 'numpy.array', 'np.array', (['[0, 255, 0]'], {}), '([0, 255, 0])\n', (3359, 3372), True, 'import numpy as np\n'), ((2494, 2511), 'cv2.contourArea', 'cv.contourArea', (['x'], {}), '(x)\n', (2508, 2511), True, 'import cv2 as cv\n'), ((2985, 3002), 'cv2.contourArea', 'cv.contourArea', (['x'], {}), '(x)\n', (2999, 3002), True, 'import cv2 as cv\n')] |
#!/usr/bin/env python
# <NAME>; Polar Geospatial Center, University of Minnesota; 2019
from __future__ import division
from lib import script_utils
PYTHON_VERSION_ACCEPTED_MIN = "2.7"  # supports multiple dot notation
# Fail fast when the running interpreter is older than the accepted minimum.
if script_utils.PYTHON_VERSION < script_utils.VersionString(PYTHON_VERSION_ACCEPTED_MIN):
    raise script_utils.VersionError("Python version ({}) is below accepted minimum ({})".format(
        script_utils.PYTHON_VERSION, PYTHON_VERSION_ACCEPTED_MIN))
import argparse
import copy
import glob
import logging
import os
import re
import shutil
import subprocess
import sys
import traceback
import warnings
from time import sleep
if script_utils.PYTHON_VERSION < script_utils.VersionString(3):
from StringIO import StringIO
else:
from io import StringIO
from lib import walk
from lib.script_utils import LOGGER, eprint
from lib.script_utils import ScriptArgumentError, DeveloperError
##############################
## Core globals
SCRIPT_VERSION_NUM = 1.0
# Paths
# Absolute locations of this script, used for building jobscript paths
# and for echoing the invoked command.
SCRIPT_FILE = os.path.realpath(__file__)
SCRIPT_FNAME = os.path.basename(SCRIPT_FILE)
SCRIPT_NAME, SCRIPT_EXT = os.path.splitext(SCRIPT_FNAME)
SCRIPT_DIR = os.path.dirname(SCRIPT_FILE)
SCRIPT_RUNCMD = ' '.join(sys.argv)+'\n'
PYTHON_EXE = 'python -u'
# Detect whether we are running on a PGC host (hostname contains
# 'rookery' or 'nunatak'); HOSTNAME may be unset in the environment.
HOSTNAME = os.getenv('HOSTNAME')
if HOSTNAME is not None:
    HOSTNAME = HOSTNAME.lower()
    RUNNING_AT_PGC = True if True in [s in HOSTNAME for s in ['rookery', 'nunatak']] else False
else:
    RUNNING_AT_PGC = False
LOGGER.setLevel(logging.INFO)
##############################
## Argument globals
# Argument strings
# Command-line option name constants; defined once here so the rest of the
# script can reference argument names without string duplication.
ARGSTR_SRC = 'src'
ARGSTR_DEPTH = '--depth'
ARGSTR_SRC_SUFFIX = '--src-suffix'
ARGSTR_CHECK_METHOD = '--check-method'
ARGSTR_CHECK_SETSM_VALIDRANGE = '--check-setsm-validrange'
ARGSTR_CHECK_SETSM_ALLOW_INVALID = '--check-setsm-allow-invalid'
ARGSTR_CHECKFILE_WRITE_AT_END = '--checkfile-write-at-end'
ARGSTR_CHECKFILE_OFF = '--checkfile-off'
ARGSTR_CHECKFILE = '--checkfile'
ARGSTR_CHECKFILE_ROOT = '--checkfile-root'
ARGSTR_CHECKFILE_ROOT_REGEX = '--checkfile-root-regex'
ARGSTR_CHECK_SPECIAL = '--check-special'
ARGSTR_CHECK_SPECIAL_DEMTYPE = '--check-special-demtype'
ARGSTR_VERIFY_BY_PAIRNAME_DIR = '--verify-by-pairname-dir'
ARGSTR_VERIFY_BY_PAIRNAME_DIR_DEPTH = '--verify-by-pairname-dir-depth'
ARGSTR_INDEX_PAIRNAMES_TO_JSON = '--index-pairnames-to-json'
ARGSTR_VERIFY_QUICK_CHECK = '--verify-quick-check'
ARGSTR_CHECKFILE_EXT = '--checkfile-ext'
ARGSTR_ERRFILE_EXT = '--errfile-ext'
ARGSTR_ALLOW_MISSING_SUFFIX = '--allow-missing-suffix'
ARGSTR_ALLOW_MISSING_ORTHO2 = '--allow-missing-ortho2'
ARGSTR_RETRY_ERRORS = '--retry-errors'
ARGSTR_KEEP_CHECKFILE_WITH_ERRORS = '--keep-checkfile-with-errors'
ARGSTR_SUPPRESS_ERRFILE_EXISTS = '--suppress-errfile-exists'
ARGSTR_SUPPRESS_MISSING_SUFFIX = '--suppress-missing-suffix'
ARGSTR_SUPPRESS_MISSING_CHECKED = '--suppress-missing-checked'
ARGSTR_SUPPRESS_NEW_SOURCE = '--suppress-new-source'
ARGSTR_REMOVE_TYPE = '--remove-type'
ARGSTR_RMWHERE_ERRFILE_EXISTS = '--rmwhere-errfile-exists'
ARGSTR_RMWHERE_MISSING_SUFFIX = '--rmwhere-missing-suffix'
ARGSTR_RMWHERE_MISSING_CHECKED = '--rmwhere-missing-checked'
ARGSTR_RMWHERE_NEW_SOURCE = '--rmwhere-new-source'
ARGSTR_REMOVE_ONLY = '--remove-only'
ARGSTR_STATS_ONLY = '--stats-only'
ARGSTR_SCHEDULER = '--scheduler'
ARGSTR_JOBSCRIPT = '--jobscript'
ARGSTR_JOBNAME = '--jobname'
ARGSTR_TASKS_PER_JOB = '--tasks-per-job'
ARGSTR_SCRATCH = '--scratch'
ARGSTR_WD = '--wd'
ARGSTR_LOGDIR = '--logdir'
ARGSTR_EMAIL = '--email'
ARGSTR_DO_DELETE = '--do-delete'
ARGSTR_DRYRUN = '--dryrun'
ARGSTR_DEBUG = '--debug'
# Argument groups
# Bundles of related argument names, used elsewhere for batch validation
# (e.g. scheduler-incompatible options, removal-condition flags).
ARGGRP_OUTDIR = [ARGSTR_LOGDIR, ARGSTR_SCRATCH]
ARGGRP_BATCH = [ARGSTR_SCHEDULER, ARGSTR_JOBSCRIPT, ARGSTR_TASKS_PER_JOB, ARGSTR_EMAIL]
ARGGRP_CHECK_REGULAR = [ARGSTR_CHECKFILE, ARGSTR_CHECKFILE_ROOT, ARGSTR_CHECKFILE_ROOT_REGEX]
ARGGRP_CHECK_OTHER = [ARGSTR_CHECK_SPECIAL]
ARGGRP_CHECK_ALL = ARGGRP_CHECK_REGULAR + ARGGRP_CHECK_OTHER
ARGGRP_RMWHERE = [
    ARGSTR_RMWHERE_ERRFILE_EXISTS,
    ARGSTR_RMWHERE_MISSING_SUFFIX,
    ARGSTR_RMWHERE_MISSING_CHECKED,
    ARGSTR_RMWHERE_NEW_SOURCE
]
# Arguments that only make sense when at least one ARGGRP_RMWHERE flag is set.
ARGGRP_REQUIRES_RMWHERE = [ARGSTR_DO_DELETE, ARGSTR_REMOVE_ONLY]
# Argument choices
# Valid values for the --check-method option.
ARGCHO_CHECK_METHOD_READ = 'read'
ARGCHO_CHECK_METHOD_CHECKSUM = 'checksum'
ARGCHO_CHECK_METHOD = [
    ARGCHO_CHECK_METHOD_READ,
    ARGCHO_CHECK_METHOD_CHECKSUM
]
# Valid values for the --check-special option (preset file-grouping modes).
ARGCHO_CHECK_SPECIAL_ALL_TOGETHER = 'altogether'
ARGCHO_CHECK_SPECIAL_ALL_SEPARATE = 'separate'
ARGCHO_CHECK_SPECIAL_SCENEPAIRS = 'scenes'
ARGCHO_CHECK_SPECIAL_PAIRNAMES = 'pairnames'
ARGCHO_CHECK_SPECIAL_STRIPSEGMENTS = 'strip-segments'
ARGCHO_CHECK_SPECIAL_STRIPS = 'strips'
ARGCHO_CHECK_SPECIAL_SCENEMETA = 'scene-meta'
ARGCHO_CHECK_SPECIAL_STRIPMETA = 'strip-meta'
ARGCHO_CHECK_SPECIAL_DSP = '2m_dsp_scenes'
ARGCHO_CHECK_SPECIAL = [
    ARGCHO_CHECK_SPECIAL_ALL_TOGETHER,
    ARGCHO_CHECK_SPECIAL_ALL_SEPARATE,
    ARGCHO_CHECK_SPECIAL_SCENEPAIRS,
    ARGCHO_CHECK_SPECIAL_PAIRNAMES,
    ARGCHO_CHECK_SPECIAL_STRIPSEGMENTS,
    ARGCHO_CHECK_SPECIAL_STRIPS,
    ARGCHO_CHECK_SPECIAL_SCENEMETA,
    ARGCHO_CHECK_SPECIAL_STRIPMETA,
    ARGCHO_CHECK_SPECIAL_DSP
]
# Valid values for the --check-special-demtype option.
ARGCHO_CHECK_SPECIAL_DEMTYPE_REGULAR = 'non-lsf'
ARGCHO_CHECK_SPECIAL_DEMTYPE_SMOOTH = 'lsf'
ARGCHO_CHECK_SPECIAL_DEMTYPE_BOTH = 'both'
ARGCHO_CHECK_SPECIAL_DEMTYPE = [
    ARGCHO_CHECK_SPECIAL_DEMTYPE_REGULAR,
    ARGCHO_CHECK_SPECIAL_DEMTYPE_SMOOTH,
    ARGCHO_CHECK_SPECIAL_DEMTYPE_BOTH
]
# Valid values for the --remove-type option.
ARGCHO_REMOVE_TYPE_CHECKFILES = 'checkfiles'
ARGCHO_REMOVE_TYPE_SOURCEFILES = 'sourcefiles'
ARGCHO_REMOVE_TYPE_BOTH = 'both'
ARGCHO_REMOVE_TYPE = [
    ARGCHO_REMOVE_TYPE_CHECKFILES,
    ARGCHO_REMOVE_TYPE_SOURCEFILES,
    ARGCHO_REMOVE_TYPE_BOTH
]
# Argument choice groups
# Groupings of --check-special modes by SETSM product level (scene vs strip),
# used to branch on how source files should be validated.
ARGCHOGRP_CHECK_SPECIAL_SETSM_DEM_SCENELEVEL = [
    ARGCHO_CHECK_SPECIAL_SCENEPAIRS,
    ARGCHO_CHECK_SPECIAL_PAIRNAMES,
    ARGCHO_CHECK_SPECIAL_DSP
]
ARGCHOGRP_CHECK_SPECIAL_SETSM_DEM_STRIPLEVEL = [
    ARGCHO_CHECK_SPECIAL_STRIPSEGMENTS,
    ARGCHO_CHECK_SPECIAL_STRIPS
]
ARGCHOGRP_CHECK_SPECIAL_SETSM_DEM = (
    ARGCHOGRP_CHECK_SPECIAL_SETSM_DEM_SCENELEVEL
    + ARGCHOGRP_CHECK_SPECIAL_SETSM_DEM_STRIPLEVEL
)
# Scene/strip-level groups extended with their metadata-only check modes.
ARGCHOGRP_CHECK_SPECIAL_SETSM_SCENELEVEL = ARGCHOGRP_CHECK_SPECIAL_SETSM_DEM_SCENELEVEL + [
    ARGCHO_CHECK_SPECIAL_SCENEMETA
]
ARGCHOGRP_CHECK_SPECIAL_SETSM_STRIPLEVEL = ARGCHOGRP_CHECK_SPECIAL_SETSM_DEM_STRIPLEVEL + [
    ARGCHO_CHECK_SPECIAL_STRIPMETA
]
ARGCHOGRP_CHECK_SPECIAL_SETSM = (
    ARGCHOGRP_CHECK_SPECIAL_SETSM_SCENELEVEL
    + ARGCHOGRP_CHECK_SPECIAL_SETSM_STRIPLEVEL
)
# Argument choice settings
# Filename suffix sets and checkfile-root regexes that the --check-special
# presets map onto the --src-suffix / --checkfile-root-regex arguments.
CHECK_SPECIAL_DEM_SUFFIX_ORTHO2 = 'ortho2.tif'
CHECK_SPECIAL_DEM_SUFFIX_ORTHO2_10M = 'ortho2_10m.tif'
CHECK_SPECIAL_DEM_SUFFIX_SCENELEVEL_MATCHTAG_SET = {'matchtag_mt.tif', 'meta_mt.txt'}
CHECK_SPECIAL_DEM_SUFFIX_OPTIONAL_SCENELEVEL_SET = {
    'mask.tif',
    'meta_or.txt'
}
CHECK_SPECIAL_DEM_SUFFIX_OPTIONAL_STRIPLEVEL_SET = {
    'bitmask_10m.tif',
    'matchtag_10m.tif',
    'ortho_10m.tif',
    CHECK_SPECIAL_DEM_SUFFIX_ORTHO2_10M,
    'dem_10m.tif',
    'dem_10m_masked.tif',
    'dem_10m_shade.tif',
    'dem_10m_shade_masked.tif',
    'dem_40m_masked.tif',
    'dem_40m_coverage.tif'
}
ARGCHOSET_CHECK_SPECIAL_DEM_SUFFIX_META = 'meta.txt'
ARGCHOSET_CHECK_SPECIAL_DEM_SUFFIX_INFO50CM = 'info50cm.txt'
# '/'-delimited suffix lists, matching the --src-suffix argument format.
ARGCHOSET_CHECK_SPECIAL_DEM_SUFFIX_SCENELEVEL = '/'.join([
    # DEM suffix(es) will be set by --check-special-demtype script argument
    'matchtag.tif',
    'ortho.tif',
    CHECK_SPECIAL_DEM_SUFFIX_ORTHO2,
    ARGCHOSET_CHECK_SPECIAL_DEM_SUFFIX_META,
    '/'.join(CHECK_SPECIAL_DEM_SUFFIX_SCENELEVEL_MATCHTAG_SET),
    '/'.join(CHECK_SPECIAL_DEM_SUFFIX_OPTIONAL_SCENELEVEL_SET)
])
ARGCHOSET_CHECK_SPECIAL_DEM_SUFFIX_STRIPLEVEL = '/'.join([
    'dem.tif',
    'matchtag.tif',
    'ortho.tif',
    CHECK_SPECIAL_DEM_SUFFIX_ORTHO2,
    'bitmask.tif',
    ARGCHOSET_CHECK_SPECIAL_DEM_SUFFIX_META,
    '/'.join(CHECK_SPECIAL_DEM_SUFFIX_OPTIONAL_STRIPLEVEL_SET)
])
ARGCHOSET_CHECK_SPECIAL_DEM_SUFFIX_DSP = '/'.join([
    'matchtag.tif',
    'ortho.tif',
    ARGCHOSET_CHECK_SPECIAL_DEM_SUFFIX_INFO50CM
])
# Regexes whose first group captures the checkfile-root filename prefix.
# ARGCHOSET_CHECK_SPECIAL_DEM_REGEX_SCENELEVEL = re.compile("(?P<scenepairname>(?P<strippairname>(?P<sensor>[A-Z0-9]{4})_(?P<timestamp>\d{8})_(?P<catid1>[A-Z0-9]{16})_(?P<catid2>[A-Z0-9]{16}))_(?P<tile1>R\d+C\d+-)?(?P<order1>\d{12}_\d{2})_(?P<part1>P\d{3})_(?P<tile2>R\d+C\d+-)?(?P<order2>\d{12}_\d{2})_(?P<part2>P\d{3})_(?P<res>\d{1}))_(?P<suffix>[_a-z0-9]+)\.(?P<ext>\w+)")
ARGCHOSET_CHECK_SPECIAL_DEM_REGEX_SCENELEVEL = re.compile("^([A-Z0-9]{4}_\d{8}_[0-9A-F]{16}_[0-9A-F]{16}_(R\d+C\d+-)?\d{12}_\d{2}_P\d{3}_(R\d+C\d+-)?\d{12}_\d{2}_P\d{3}_\d{1}(-\d{2})?)_[a-z0-9_]+\.\w+$")
ARGCHOSET_CHECK_SPECIAL_DEM_REGEX_STRIPLEVEL = re.compile("^([A-Z0-9]{4}_\d{8}_[0-9A-F]{16}_[0-9A-F]{16}).*$")
ARGCHOSET_CHECK_SPECIAL_DEM_REGEX_STRIPSEGMENT = re.compile("^([A-Z0-9]{4}_\d{8}_[0-9A-F]{16}_[0-9A-F]{16}_\d+c?m(_lsf)?_seg\d+)_[a-z0-9_]+\.\w+$")
# Maps each --check-special mode to a list of (argument name, value) pairs
# that the preset applies to the parsed arguments.
ARGCHOSET_CHECK_SPECIAL_SETTING_DICT = {
    ARGCHO_CHECK_SPECIAL_ALL_TOGETHER: [
        (ARGSTR_CHECKFILE_ROOT, "")
    ],
    ARGCHO_CHECK_SPECIAL_ALL_SEPARATE: [
        (ARGSTR_CHECKFILE_ROOT_REGEX, "^(.*)$")
    ],
    ARGCHO_CHECK_SPECIAL_SCENEPAIRS: [
        (ARGSTR_SRC_SUFFIX, ARGCHOSET_CHECK_SPECIAL_DEM_SUFFIX_SCENELEVEL),
        (ARGSTR_CHECKFILE_ROOT_REGEX, ARGCHOSET_CHECK_SPECIAL_DEM_REGEX_SCENELEVEL)
    ],
    ARGCHO_CHECK_SPECIAL_PAIRNAMES: [
        (ARGSTR_SRC_SUFFIX, ARGCHOSET_CHECK_SPECIAL_DEM_SUFFIX_SCENELEVEL),
        (ARGSTR_CHECKFILE_ROOT_REGEX, ARGCHOSET_CHECK_SPECIAL_DEM_REGEX_STRIPLEVEL)
    ],
    ARGCHO_CHECK_SPECIAL_STRIPSEGMENTS: [
        (ARGSTR_SRC_SUFFIX, ARGCHOSET_CHECK_SPECIAL_DEM_SUFFIX_STRIPLEVEL),
        (ARGSTR_CHECKFILE_ROOT_REGEX, ARGCHOSET_CHECK_SPECIAL_DEM_REGEX_STRIPSEGMENT)
    ],
    ARGCHO_CHECK_SPECIAL_STRIPS: [
        (ARGSTR_SRC_SUFFIX, ARGCHOSET_CHECK_SPECIAL_DEM_SUFFIX_STRIPLEVEL),
        (ARGSTR_CHECKFILE_ROOT_REGEX, ARGCHOSET_CHECK_SPECIAL_DEM_REGEX_STRIPLEVEL)
    ],
    ARGCHO_CHECK_SPECIAL_SCENEMETA: [
        (ARGSTR_SRC_SUFFIX, ARGCHOSET_CHECK_SPECIAL_DEM_SUFFIX_META),
        (ARGSTR_CHECKFILE_OFF, True),
    ],
    ARGCHO_CHECK_SPECIAL_STRIPMETA: [
        (ARGSTR_SRC_SUFFIX, ARGCHOSET_CHECK_SPECIAL_DEM_SUFFIX_META),
        (ARGSTR_CHECKFILE_OFF, True),
    ],
    ARGCHO_CHECK_SPECIAL_DSP: [
        (ARGSTR_SRC_SUFFIX, ARGCHOSET_CHECK_SPECIAL_DEM_SUFFIX_DSP),
        (ARGSTR_CHECKFILE_ROOT_REGEX, ARGCHOSET_CHECK_SPECIAL_DEM_REGEX_SCENELEVEL)
    ]
}
# Maps a --check-special mode to the narrower modes it subsumes.
ARGCHOSET_CHECK_SPECIAL_SUBGROUP_DICT = {
    ARGCHO_CHECK_SPECIAL_PAIRNAMES: [
        ARGCHO_CHECK_SPECIAL_SCENEPAIRS
    ],
    ARGCHO_CHECK_SPECIAL_STRIPS: [
        ARGCHO_CHECK_SPECIAL_STRIPSEGMENTS
    ]
}
# DEM suffix(es) selected by the --check-special-demtype argument; the 'both'
# entry is derived by joining the other two values.
ARGCHOSET_CHECK_SPECIAL_DEMTYPE_SUFFIX_DICT = {
    ARGCHO_CHECK_SPECIAL_DEMTYPE_REGULAR: 'dem.tif',
    ARGCHO_CHECK_SPECIAL_DEMTYPE_SMOOTH: 'dem_smooth.tif/smooth_result.txt',
}
ARGCHOSET_CHECK_SPECIAL_DEMTYPE_SUFFIX_DICT[ARGCHO_CHECK_SPECIAL_DEMTYPE_BOTH] = '/'.join(
    sorted(ARGCHOSET_CHECK_SPECIAL_DEMTYPE_SUFFIX_DICT.values()))
# Modes eligible for --index-pairnames-to-json, with the index_setsm.py mode
# string each one maps to.
ARGCHOSET_CHECK_SPECIAL_INDEX_MODE_DICT = {
    ARGCHO_CHECK_SPECIAL_SCENEPAIRS: 'scene',
    ARGCHO_CHECK_SPECIAL_STRIPS: 'strip',
    ARGCHO_CHECK_SPECIAL_DSP: 'scene'
}
# Argument defaults
ARGDEF_SRC_SUFFIX = '.tif'
ARGDEF_DEPTH = script_utils.ARGNUM_POS_INF
ARGDEF_VERIFY_BY_PAIRNAME_DIR_DEPTH = 1
ARGDEF_CHECKFILE_EXT = '.check'
ARGDEF_CHECKERROR_EXT = '.err'
# Default location for task bundle text files built for scheduler jobs.
ARGDEF_SCRATCH = os.path.join(os.path.expanduser('~'), 'scratch', 'task_bundles')
##############################
## Batch settings
# Defaults used when submitting tasks to a job scheduler.
JOBSCRIPT_DIR = os.path.join(SCRIPT_DIR, 'jobscripts')
JOBSCRIPT_INIT = os.path.join(JOBSCRIPT_DIR, 'init.sh')
JOB_ABBREV = 'Check'
# BATCH_ARGDEF_WD = '/local' if RUNNING_AT_PGC else None
BATCH_ARGDEF_WD = None
JOB_WALLTIME_HR = 72
JOB_MEMORY_GB = 40
##############################
## Custom globals
# index_setsm.py is expected in a sibling 'pgcdemtools' checkout next to this repo.
INDEX_SETSM_SCRIPT = os.path.join(SCRIPT_DIR, '..', 'pgcdemtools', 'index_setsm.py')
GDAL_RASTER_SUFFIXES = ['.tif', '.tiff']
# Inclusive valid value ranges per SETSM raster suffix, enforced when the
# --check-setsm-validrange option is given.
SETSM_RASTER_SUFFIX_VALIDRANGE_DICT = {
    '_dem.tif': [-8000, 100000],
    '_dem_smooth.tif': [-8000, 100000],
    '_matchtag.tif': [0, 1],
    '_matchtag_mt.tif': [0, 1],
}
SETSM_META_SUFFIX = '_meta.txt'
SETSM_STRIPMETA_SCENEMETA_SECTION_HEADER = 'Scene Metadata'
SETSM_STRIPMETA_SCENEMETA_ITEM_HEADER_REGEX = re.compile("^\s*scene \d+ name=.*$")
# Populated below with (compiled regex, is-key-value flag, expected match
# count) tuples keyed by meta item name.
SETSM_META_REQUIRED_DICT = dict()
# Regex fragments used to assemble the per-item patterns in
# get_setsm_meta_item_regex().
SETSM_META_KEY_TOKEN_DELIM_RE = '(?: +|_+)'
SETSM_META_SPACE_RE = '[ \t]*?'
SETSM_META_NEWLINE_START_RE = '(?:\r\n|\r|\n)'
SETSM_META_NEWLINE_END_RE = '(?=(?:\r\n|\r|\n))'
SETSM_META_KEY_PREFIX_IMAGE = 'image'.strip().lower()
SETSM_META_KEY_PREFIX_IMAGE_1 = ' '.join([SETSM_META_KEY_PREFIX_IMAGE, str(1)])
SETSM_META_KEY_PREFIX_IMAGE_2 = ' '.join([SETSM_META_KEY_PREFIX_IMAGE, str(2)])
SETSM_META_IMAGE_PREFIX_RE = SETSM_META_KEY_TOKEN_DELIM_RE.join([SETSM_META_KEY_PREFIX_IMAGE, '[12]'])
SETSM_META_WV_CORRECT_SATIDS = ['WV01', 'WV02']
def get_setsm_meta_item_regex(key_str, value_re, allow_missing_image_prefix=False):
    """Build a compiled, case-insensitive regex matching one meta 'key = value' line.

    `key_str` is a human-readable key name whose tokens (split on spaces after
    underscores are normalized to spaces) are joined with the flexible token
    delimiter; None means "match the bare 'image N' prefix alone". The key is
    prefixed with the 'image [12]' pattern, optionally allowing the unprefixed
    form when `allow_missing_image_prefix` is True. `value_re` is the regex
    fragment for the value side of the '=' sign.
    """
    if key_str is None:
        key_pattern = SETSM_META_IMAGE_PREFIX_RE
    else:
        key_pattern = SETSM_META_KEY_TOKEN_DELIM_RE.join(key_str.replace('_', ' ').split())
        prefixed_pattern = SETSM_META_KEY_TOKEN_DELIM_RE.join([SETSM_META_IMAGE_PREFIX_RE, key_pattern])
        if allow_missing_image_prefix:
            key_pattern = '(?:{}|{})'.format(prefixed_pattern, key_pattern)
        else:
            key_pattern = prefixed_pattern
    line_parts = [SETSM_META_NEWLINE_START_RE, key_pattern, '=', value_re, SETSM_META_NEWLINE_END_RE]
    return re.compile(SETSM_META_SPACE_RE.join(line_parts), re.I)
# Flags/counts describing how each required meta item is matched:
# whether the item is a 'key = value' line, and how many occurrences are
# expected (one per image of the pair, or a single entry).
SETSM_META_ITEM_IS_KEY_VALUE = True
SETSM_META_ITEM_IS_NOT_KEY_VALUE = False
SETSM_META_ITEM_COUNT_SINGLE = 1
SETSM_META_ITEM_COUNT_PAIR = 2
# Register each required meta item: (compiled regex, is-key-value, count).
SETSM_META_KEY = 'Image path'
SETSM_META_KEY_IMAGE_PATH = SETSM_META_KEY
SETSM_META_ITEM_RE = get_setsm_meta_item_regex(None, "[\d\w_\-/]+\.tif")
SETSM_META_REQUIRED_DICT[SETSM_META_KEY] = (SETSM_META_ITEM_RE, SETSM_META_ITEM_IS_KEY_VALUE, SETSM_META_ITEM_COUNT_PAIR)
SETSM_META_KEYGRP_GSD = [
    # 'Mean_row_GSD',
    # 'Mean_col_GSD',
    # 'Mean_GSD'
]
# Non-negative decimal items that may appear without the 'image N' prefix.
for SETSM_META_KEY in SETSM_META_KEYGRP_GSD + [
    'Mean_sun_azimuth_angle',
    'Mean_sun_elevation',
    'Mean_sat_azimuth_angle'
]:
    SETSM_META_VALUE_RE = "\d+\.?\d*"
    SETSM_META_ITEM_RE = get_setsm_meta_item_regex(SETSM_META_KEY, SETSM_META_VALUE_RE, allow_missing_image_prefix=True)
    SETSM_META_REQUIRED_DICT[SETSM_META_KEY] = (SETSM_META_ITEM_RE, SETSM_META_ITEM_IS_KEY_VALUE, SETSM_META_ITEM_COUNT_PAIR)
# Sat elevation may be negative, so it gets its own signed-value pattern.
SETSM_META_KEY = 'Mean_sat_elevation'
SETSM_META_ITEM_RE = get_setsm_meta_item_regex(SETSM_META_KEY, "\-?\d+\.?\d*", allow_missing_image_prefix=True)
SETSM_META_REQUIRED_DICT[SETSM_META_KEY] = (SETSM_META_ITEM_RE, SETSM_META_ITEM_IS_KEY_VALUE, SETSM_META_ITEM_COUNT_PAIR)
# Decimal-valued items that always carry the 'image N' prefix.
for SETSM_META_KEY in [
    'effbw',
    'abscalfact'
]:
    SETSM_META_VALUE_RE = "\d+\.?\d*"
    SETSM_META_ITEM_RE = get_setsm_meta_item_regex(SETSM_META_KEY, SETSM_META_VALUE_RE)
    SETSM_META_REQUIRED_DICT[SETSM_META_KEY] = (SETSM_META_ITEM_RE, SETSM_META_ITEM_IS_KEY_VALUE, SETSM_META_ITEM_COUNT_PAIR)
# Integer-valued items.
for SETSM_META_KEY in [
    'tdi',
    'min',
    'max'
]:
    SETSM_META_VALUE_RE = "\d+"
    SETSM_META_ITEM_RE = get_setsm_meta_item_regex(SETSM_META_KEY, SETSM_META_VALUE_RE)
    SETSM_META_REQUIRED_DICT[SETSM_META_KEY] = (SETSM_META_ITEM_RE, SETSM_META_ITEM_IS_KEY_VALUE, SETSM_META_ITEM_COUNT_PAIR)
SETSM_META_KEY = 'wv_correct'
SETSM_META_KEY_WV_CORRECT = SETSM_META_KEY
SETSM_META_ITEM_RE = get_setsm_meta_item_regex(SETSM_META_KEY, "[01]")
SETSM_META_REQUIRED_DICT[SETSM_META_KEY] = (SETSM_META_ITEM_RE, SETSM_META_ITEM_IS_KEY_VALUE, SETSM_META_ITEM_COUNT_PAIR)
SETSM_META_KEY = 'ASP build ID'
SETSM_META_ITEM_RE = get_setsm_meta_item_regex(SETSM_META_KEY, "(?:[0-9A-F]+)?")
SETSM_META_REQUIRED_DICT[SETSM_META_KEY] = (SETSM_META_ITEM_RE, SETSM_META_ITEM_IS_KEY_VALUE, SETSM_META_ITEM_COUNT_PAIR)
SETSM_META_REQUIRED_KEY_SORTED_LIST = sorted(SETSM_META_REQUIRED_DICT.keys())
# Drop the loop/scratch names so they do not linger as module globals.
del SETSM_META_KEY, SETSM_META_ITEM_RE, SETSM_META_VALUE_RE
# Full-content pattern for 50cm info files; \Z anchors at end of the text.
INFO50CM_RE = re.compile(
    """scenedemid=[A-Z][A-Z0-9]{2}\d{1}_\d{8}_[A-Z0-9]{16}_[A-Z0-9]{16}_(?:R\d+C\d+-)?\d{12}_\d{2}_P\d{3}_(?:R\d+C\d+-)?\d{12}_\d{2}_P\d{3}_0(?:-\d{2})?
stripdemid=[A-Z][A-Z0-9]{2}\d{1}_\d{8}_[A-Z0-9]{16}_[A-Z0-9]{16}_50cm_v\d{6}
filesz_dem=(\d+\.\d+([eE][-\+]?\d+)?)
filesz_lsf=((?:\d+\.\d+)?([eE][-\+]?\d+)?)
filesz_mt=(\d+\.\d+([eE][-\+]?\d+)?)
filesz_or=(\d+\.\d+([eE][-\+]?\d+)?)
filesz_or2=((?:\d+\.\d+)?([eE][-\+]?\d+)?)
\Z"""
)
##############################
class SETSMMetaParseError(Exception):
    """Raised when a SETSM meta text file cannot be parsed as expected.

    Fix: the original called ``super(Exception, self).__init__(msg)``, which
    names the wrong class in the super() proxy (it starts the MRO lookup
    *after* Exception). The proxy must name the class being defined.
    """
    def __init__(self, msg=""):
        # Two-argument form kept for Python 2.7 compatibility (see
        # PYTHON_VERSION_ACCEPTED_MIN at top of file).
        super(SETSMMetaParseError, self).__init__(msg)
class RasterFileReadError(Exception):
    """Raised when a source raster file cannot be opened or read cleanly.

    Fix: the original called ``super(Exception, self).__init__(msg)``, which
    names the wrong class in the super() proxy (it starts the MRO lookup
    *after* Exception). The proxy must name the class being defined.
    """
    def __init__(self, msg=""):
        # Two-argument form kept for Python 2.7 compatibility (see
        # PYTHON_VERSION_ACCEPTED_MIN at top of file).
        super(RasterFileReadError, self).__init__(msg)
def argparser_init():
    """Construct and return the argparse.ArgumentParser for this script.

    Defines the positional source path plus all optional arguments:
    file selection, integrity-check method, checkfile behavior, removal
    policy flags, and job-scheduler submission settings. Only builds the
    parser; parsing and cross-argument validation happen in the caller.
    """
    parser = argparse.ArgumentParser(
        formatter_class=script_utils.RawTextArgumentDefaultsHelpFormatter,
        description=' '.join([
            "Check existence and integrity of data files in batch."
        ])
    )
    # Positional arguments
    parser.add_argument(
        ARGSTR_SRC,
        type=script_utils.ARGTYPE_PATH(
            argstr=ARGSTR_SRC,
            abspath_fn=os.path.abspath,
            existcheck_fn=os.path.exists,
            existcheck_reqval=True),
        help=' '.join([
            "Path to source file directory or single input file to check.",
            "Accepts a task bundle text file listing paths to checkfile root paths."
        ])
    )
    # Optional arguments
    parser.add_argument(
        ARGSTR_DEPTH,
        type=script_utils.ARGTYPE_NUM(
            argstr=ARGSTR_DEPTH,
            numeric_type=int,
            allow_neg=False,
            allow_zero=False,
            allow_inf=True),
        default=ARGDEF_DEPTH,
        help=' '.join([
            "Depth of recursive search into source directory for files to check.",
        ])
    )
    parser.add_argument(
        ARGSTR_SRC_SUFFIX,
        type=str,
        default=ARGDEF_SRC_SUFFIX,
        help=' '.join([
            "'/'-delimited list of accepted source file suffixes to be checked.",
        ])
    )
    parser.add_argument(
        ARGSTR_CHECK_METHOD,
        type=str,
        choices=ARGCHO_CHECK_METHOD,
        default=ARGCHO_CHECK_METHOD_CHECKSUM,
        help=' '.join([
            "Method used to check integrity of source rasters.",
            "\nIf '{}', simply attempt to read raster band(s).".format(ARGCHO_CHECK_METHOD_READ),
            "\nIf '{}', attempt to compute checksum of each raster band.".format(ARGCHO_CHECK_METHOD_CHECKSUM),
            "\n"
        ])
    )
    parser.add_argument(
        ARGSTR_CHECK_SETSM_VALIDRANGE,
        action='store_true',
        help=' '.join([
            "After successfully opening a source raster ending with a filename suffix listed in",
            "script 'Custom globals' dictionary variable SETSM_RASTER_SUFFIX_VALIDRANGE_DICT, check that all",
            "non-NoData values fall the raster band fall within the corresponding numerical range",
            "(inclusive)."
        ])
    )
    parser.add_argument(
        ARGSTR_CHECKFILE_OFF,
        action='store_true',
        help=' '.join([
            "Ignore existing checkfiles and check all files, saving error files but not checkfiles."
        ])
    )
    # Checkfile grouping options (mutually exclusive; validated by caller).
    parser.add_argument(
        ARGSTR_CHECKFILE,
        type=script_utils.ARGTYPE_PATH(
            argstr=ARGSTR_CHECKFILE,
            existcheck_fn=os.path.isdir,
            existcheck_reqval=False),
        default=None,
        help=' '.join([
            "Path to single checkfile (which may already exist) used to store filenames of",
            "passing source file(s) selected by arguments {} and {}.".format(ARGSTR_SRC, ARGSTR_SRC_SUFFIX),
            "Due to the issue of multiple processes attempting to write to a text file at once,",
            "this argument is incompatible with job scheduler options.",
        ])
    )
    parser.add_argument(
        ARGSTR_CHECKFILE_ROOT,
        type=str,
        default=None,
        help=' '.join([
            "Filename prefix by which to group source files for checking.",
            "The default path of the checkfile becomes '[{}]/[{}].[{}]'".format(ARGSTR_SRC, ARGSTR_CHECKFILE_ROOT, ARGSTR_CHECKFILE_EXT),
            "Use only if argument {} is a directory.".format(ARGSTR_SRC),
            "Due to the issue of multiple processes attempting to write to a text file at once,",
            "this argument is incompatible with job scheduler options."
        ])
    )
    parser.add_argument(
        ARGSTR_CHECKFILE_ROOT_REGEX,
        type=str,
        default=None,
        help=' '.join([
            "Regex for filename prefix by which to group source files for checking.",
            "Regex must contain one group for matching, which becomes the filename prefix for"
            "a single bundle of source files to check.",
            "The default path of each checkfile thus becomes '[{}]/[regex match group].[{}]'".format(ARGSTR_SRC, ARGSTR_CHECKFILE_ROOT, ARGSTR_CHECKFILE_EXT),
            "Use only if argument {} is a directory.".format(ARGSTR_SRC),
            "In the context of the job scheduler {} option, each unique regex match group becomes".format(ARGSTR_SCHEDULER),
            "a single task by passing it as the {} argument to a 'fork' of this batch script".format(ARGSTR_CHECKFILE_ROOT)
        ])
    )
    parser.add_argument(
        ARGSTR_CHECK_SPECIAL,
        type=str,
        choices=ARGCHO_CHECK_SPECIAL,
        default=None,
        help=' '.join([
            "Popular options for quickly setting {} and {} arguments.".format(ARGSTR_SRC_SUFFIX, ARGSTR_CHECKFILE_ROOT_REGEX),
        ])
    )
    parser.add_argument(
        ARGSTR_CHECK_SPECIAL_DEMTYPE,
        type=str,
        choices=ARGCHO_CHECK_SPECIAL_DEMTYPE,
        default=ARGCHO_CHECK_SPECIAL_DEMTYPE_BOTH,
        help=' '.join([
            "Used in conjunction with argument {}, this determines which DEM file suffix(es)".format(ARGSTR_CHECK_SPECIAL),
            "are set for argument {} source file selection".format(ARGSTR_SRC_SUFFIX)
        ])
    )
    parser.add_argument(
        ARGSTR_VERIFY_BY_PAIRNAME_DIR,
        action='store_true',
        help=' '.join([
            "Use PAIRNAME DIRECTORIES as check groups, writing check and error files next to ",
            "the pairname directories."
        ])
    )
    parser.add_argument(
        ARGSTR_VERIFY_BY_PAIRNAME_DIR_DEPTH,
        type=script_utils.ARGTYPE_NUM(
            argstr=ARGSTR_VERIFY_BY_PAIRNAME_DIR_DEPTH,
            numeric_type=int,
            allow_neg=False,
            allow_zero=True,
            allow_inf=True),
        default=ARGDEF_VERIFY_BY_PAIRNAME_DIR_DEPTH,
        help=' '.join([
            "Depth of recursive search into source directory for PAIRNAME DIRECTORIES to check.",
        ])
    )
    parser.add_argument(
        ARGSTR_INDEX_PAIRNAMES_TO_JSON,
        action='store_true',
        help=' '.join([
            "Build index .json file alongside each PAIRNAME DIRECTORY after completely successful check.",
            "Only applicable when {} option is also provided, and {} must be set to one of {}.".format(
                ARGSTR_VERIFY_BY_PAIRNAME_DIR,ARGSTR_CHECK_SPECIAL, sorted(ARGCHOSET_CHECK_SPECIAL_INDEX_MODE_DICT.keys())
            ),
            "Requires the 'pgcdemtools' repo to exist at alongside this repo."
        ])
    )
    parser.add_argument(
        '-vqc', ARGSTR_VERIFY_QUICK_CHECK,
        action='store_true',
        help=' '.join([
            "Scan {} directory for PAIRNAME DIRECTORIES and verify that all necessary checkfiles".format(ARGSTR_SRC),
            "and JSON files have built successfully for processing region to be deemed complete",
            "and ready for writing JSON indices to applicable database(s)."
        ])
    )
    parser.add_argument(
        ARGSTR_CHECKFILE_EXT,
        type=str,
        default=ARGDEF_CHECKFILE_EXT,
        help=' '.join([
            "File extension of checkfile(s), unless argument {} is used, in which case the extension".format(ARGSTR_CHECKFILE),
            "is considered to be included/excluded in the provided checkfile file path."
        ])
    )
    parser.add_argument(
        ARGSTR_ERRFILE_EXT,
        type=str,
        default=ARGDEF_CHECKERROR_EXT,
        help=' '.join([
            "File extension of error files created when source files are deemed invalid during",
            "checking procedures, containing error messages describing issues with the source file.",
            "The full file path of an error file is constructed by simply appending this string",
            "to the full file path of the corresponding source file."
        ])
    )
    parser.add_argument(
        ARGSTR_CHECKFILE_WRITE_AT_END,
        action='store_true',
        help=' '.join([
            "Write list of passing source files to check file at end of processing all "
            "source files in check group, instead of appending as soon as each pass check."
        ])
    )
    parser.add_argument(
        ARGSTR_ALLOW_MISSING_SUFFIX,
        action='store_true',
        help=' '.join([
            "Allow checking of check groups that are missing source file suffixes."
        ])
    )
    parser.add_argument(
        ARGSTR_ALLOW_MISSING_ORTHO2,
        action='store_true',
        help=' '.join([
            "Allow checking of SETSM DEM check groups that are missing the '{}' source file.".format(
                CHECK_SPECIAL_DEM_SUFFIX_ORTHO2
            )
        ])
    )
    parser.add_argument(
        ARGSTR_RETRY_ERRORS,
        action='store_true',
        help=' '.join([
            "Attempt checking source files & groups with existing error files."
        ])
    )
    parser.add_argument(
        ARGSTR_KEEP_CHECKFILE_WITH_ERRORS,
        action='store_true',
        help=' '.join([
            "Continue writing group checkfile after errors in source files have been discovered."
        ])
    )
    # Warning-suppression flags.
    parser.add_argument(
        ARGSTR_SUPPRESS_ERRFILE_EXISTS,
        action='store_true',
        help=' '.join([
            "Suppress printing all cases of existing error files among check group source files."
        ])
    )
    parser.add_argument(
        ARGSTR_SUPPRESS_MISSING_SUFFIX,
        action='store_true',
        help=' '.join([
            "Suppress printing all cases of source file suffixes missing from check group."
        ])
    )
    parser.add_argument(
        ARGSTR_SUPPRESS_MISSING_CHECKED,
        action='store_true',
        help=' '.join([
            "Suppress printing all files that are listed in checkfiles but cannot be found in source directory."
        ])
    )
    parser.add_argument(
        ARGSTR_SUPPRESS_NEW_SOURCE,
        action='store_true',
        help=' '.join([
            "Suppress printing all new source files that are to be added to existing checkfiles."
        ])
    )
    # File-removal policy options.
    parser.add_argument(
        ARGSTR_REMOVE_TYPE,
        type=str,
        choices=ARGCHO_REMOVE_TYPE,
        default=ARGCHO_REMOVE_TYPE_CHECKFILES,
        help=' '.join([
            "Specify which files can be removed by the following arguments:",
            ARGSTR_RMWHERE_ERRFILE_EXISTS,
            ARGSTR_RMWHERE_MISSING_SUFFIX,
            ARGSTR_RMWHERE_MISSING_CHECKED,
            ARGSTR_RMWHERE_NEW_SOURCE
        ])
    )
    parser.add_argument(
        ARGSTR_RMWHERE_ERRFILE_EXISTS,
        action='store_true',
        help=' '.join([
            "Remove existing check/source files when error files exist among check group source files.",
            "Use {} argument to specify which files can be removed.".format(ARGSTR_REMOVE_TYPE)
        ])
    )
    parser.add_argument(
        ARGSTR_RMWHERE_MISSING_SUFFIX,
        action='store_true',
        help=' '.join([
            "Remove existing check/source files when source file suffixes are missing from check group.",
            "Use {} argument to specify which files can be removed.".format(ARGSTR_REMOVE_TYPE)
        ])
    )
    parser.add_argument(
        ARGSTR_RMWHERE_MISSING_CHECKED,
        action='store_true',
        help=' '.join([
            "Remove existing check/source files when files listed in checkfile cannot be found in source directory.",
            "Use {} argument to specify which files can be removed.".format(ARGSTR_REMOVE_TYPE)
        ])
    )
    parser.add_argument(
        ARGSTR_RMWHERE_NEW_SOURCE,
        action='store_true',
        help=' '.join([
            "Remove existing check/source files when new source files are to be added to checkfile.",
            "Use {} argument to specify which files can be removed.".format(ARGSTR_REMOVE_TYPE)
        ])
    )
    parser.add_argument(
        ARGSTR_REMOVE_ONLY,
        action='store_true',
        help="Scan check/source files and possibly perform removal actions, then exit."
    )
    parser.add_argument(
        ARGSTR_STATS_ONLY,
        action='store_true',
        help="Scan check/source files and report task completion status, then exit."
    )
    # Job-scheduler submission options.
    parser.add_argument(
        ARGSTR_SCHEDULER,
        type=str,
        choices=script_utils.SCHED_SUPPORTED,
        default=None,
        help="Submit tasks to job scheduler."
    )
    parser.add_argument(
        ARGSTR_JOBSCRIPT,
        type=script_utils.ARGTYPE_PATH(
            argstr=ARGSTR_JOBSCRIPT,
            existcheck_fn=os.path.isfile,
            existcheck_reqval=True),
        default=None,
        help=' '.join([
            "Script to run in job submission to scheduler.",
            "(default scripts are found in {})".format(JOBSCRIPT_DIR)
        ])
    )
    parser.add_argument(
        ARGSTR_JOBNAME,
        type=str,
        default=JOB_ABBREV,
        help="Prefix for names of jobs submitted to scheduler."
    )
    parser.add_argument(
        ARGSTR_TASKS_PER_JOB,
        type=int,
        default=None,
        help=' '.join([
            "Number of tasks to bundle into a single job.",
            "(requires {} option)".format(ARGSTR_SCHEDULER)
        ])
    )
    parser.add_argument(
        ARGSTR_SCRATCH,
        type=script_utils.ARGTYPE_PATH(
            argstr=ARGSTR_SCRATCH,
            existcheck_fn=os.path.isfile,
            existcheck_reqval=False),
        default=ARGDEF_SCRATCH,
        help="Scratch directory to build task bundle text files."
    )
    parser.add_argument(
        ARGSTR_WD,
        type=script_utils.ARGTYPE_PATH(
            argstr=ARGSTR_WD,
            existcheck_fn=os.path.isdir,
            existcheck_reqval=True),
        default=None,
        help=' '.join([
            "Copy source files to this directory before checking, run checks on these copies,",
            "then clean up the copies before moving on.",
            "At PGC, this argument is meant to be used with {} argument to minimize the impact of".format(ARGSTR_SCHEDULER),
            "file I/O on the network."
        ])
    )
    parser.add_argument(
        ARGSTR_LOGDIR,
        type=script_utils.ARGTYPE_PATH(
            argstr=ARGSTR_LOGDIR,
            existcheck_fn=os.path.isfile,
            existcheck_reqval=False),
        default=None,
        help=' '.join([
            "Directory to which standard output/error log files will be written for batch job runs.",
            "\nIf not provided, default scheduler (or jobscript #CONDOPT_) options will be used.",
            "\n**Note:** Due to implementation difficulties, this directory will also become the",
            "working directory for the job process. Since relative path inputs are always changed",
            "to absolute paths in this script, this should not be an issue."
        ])
    )
    parser.add_argument(
        ARGSTR_EMAIL,
        type=script_utils.ARGTYPE_BOOL_PLUS(
            parse_fn=str),
        nargs='?',
        help="Send email to user upon end or abort of the LAST SUBMITTED task."
    )
    parser.add_argument(
        ARGSTR_DO_DELETE,
        action='store_true',
        help="Perform file removal actions."
    )
    parser.add_argument(
        ARGSTR_DRYRUN,
        action='store_true',
        help="Print actions without executing."
    )
    parser.add_argument(
        ARGSTR_DEBUG,
        action='store_true',
        help="Change logger from INFO to DEBUG level."
    )
    return parser
def endswith_one_of_coll(check_string, string_ending_coll, case_sensitive=True, return_match=False):
    """Report whether `check_string` ends with any ending in `string_ending_coll`.

    Endings are tried in iteration order and the first match wins. When
    `case_sensitive` is False, a lowercased comparison is also attempted.
    If `return_match` is True, returns the matching ending (or None on no
    match); otherwise returns a plain boolean.
    """
    for ending in string_ending_coll:
        matched = check_string.endswith(ending)
        if not matched and not case_sensitive:
            matched = check_string.lower().endswith(ending.lower())
        if matched:
            return ending if return_match else True
    return None if return_match else False
def ends_one_of_coll(string_ending, string_coll, case_sensitive=True, return_match=False):
    """Report whether any string in `string_coll` ends with `string_ending`.

    Strings are tried in iteration order and the first match wins. When
    `case_sensitive` is False, a lowercased comparison is also attempted.
    If `return_match` is True, returns the matching string (or None on no
    match); otherwise returns a plain boolean.
    """
    for candidate in string_coll:
        matched = candidate.endswith(string_ending)
        if not matched and not case_sensitive:
            matched = candidate.lower().endswith(string_ending.lower())
        if matched:
            return candidate if return_match else True
    return None if return_match else False
def checkfile_incomplete(args,
                         checkfile_root, checkfile_ext, errfile_ext, src_suffixes,
                         src_rasters=None, return_incomplete_src_rasters=False,
                         srcfile_count=None, errfile_count=None,
                         missing_suffix_flag=None, checkfile_removed_flag=None,
                         warn_missing_suffix=True, warn_errfile_exists=True,
                         warn_missing_checked=True, warn_new_source=True):
    """Determine whether a "check group" of source files still needs checking.

    Also emits warnings about new/missing/errored source files and, when the
    relevant removal (RMWHERE) arguments are set, removes the group's
    checkfile and/or source files.

    Parameters
    ----------
    args : script_utils.ArgumentPasser
        Parsed script arguments, queried via `args.get(...)` for the many
        ARGSTR_* options controlling warning and removal behavior.
    checkfile_root : str
        Path root of the check group. When `checkfile_ext` is None, this is
        taken to be the full checkfile path itself.
    checkfile_ext : str or None
        Extension appended to `checkfile_root` to build the checkfile path,
        or None when `checkfile_root` is already the full checkfile path.
    errfile_ext : str
        Extension used for per-source and per-group error files.
    src_suffixes : list of str
        Accepted source filename suffixes for this check group.
    src_rasters : collection of str, optional
        Basenames of the group's source files. If None, they are discovered
        by globbing `checkfile_root+'*'` and filtering on `src_suffixes`.
    return_incomplete_src_rasters : bool
        If True, return the list of source filenames still to be checked
        instead of a boolean.
    srcfile_count, errfile_count : single-element lists, optional
        Out-parameters; element 0 is set to the number of source files /
        error files found for the group.
    missing_suffix_flag, checkfile_removed_flag : single-element lists, optional
        Out-parameters; element 0 is set True when an expected source suffix
        is missing / when the group's checkfile was removed.
    warn_missing_suffix, warn_errfile_exists, warn_missing_checked, warn_new_source : bool
        Toggles for the corresponding warning printouts.

    Returns
    -------
    int, list of str, or bool
        -1 if this group's source files were removed; otherwise the list of
        source filenames still to be checked (when
        `return_incomplete_src_rasters` is True) or a truthy/falsy value
        indicating whether checking remains to be done.

    Raises
    ------
    DeveloperError
        If `checkfile_ext` is None but `src_rasters` was not provided
        (source files cannot be located from a bare checkfile path).
    """
    if checkfile_ext is not None:
        checkfile = checkfile_root+checkfile_ext
        checkgroup_errfile = checkfile_root+errfile_ext
    else:
        # `checkfile_root` is already a full checkfile path; there is no
        # single group-level error file in this mode.
        checkfile = checkfile_root
        checkgroup_errfile = None
    if checkfile_ext is None and src_rasters is None:
        raise DeveloperError("Checkfile {}; cannot locate corresponding source files when checkfile"
                             "is a full file path (assuming argument {} was provided)".format(checkfile, ARGSTR_CHECKFILE))
    checkfile_dir = os.path.dirname(checkfile) if not os.path.isdir(checkfile_root) else checkfile_root
    checkfile_exists = os.path.isfile(checkfile)
    # Convert to a set for fast difference operations against the list of
    # already-checked filenames read from the checkfile.
    if src_rasters is not None and type(src_rasters) is list:
        src_rasters = set(src_rasters)
    checkfname = os.path.basename(checkfile)
    # NOTE(review): presumably cross-track pairnames have a digit as the
    # second character of the group name -- confirm against naming convention.
    check_group_is_xtrack = checkfname[1].isdigit()
    # Source files must be enumerated if the caller wants them back, or if
    # any warning/removal option that inspects them is active.
    find_src_rasters = (    return_incomplete_src_rasters
                         or warn_missing_suffix or args.get(ARGSTR_RMWHERE_MISSING_SUFFIX)
                         or warn_errfile_exists or args.get(ARGSTR_RMWHERE_ERRFILE_EXISTS))
    delete_files = False
    if checkfile_exists and not args.get(ARGSTR_CHECKFILE_OFF):
        # Compare the checkfile's recorded source list against the current
        # source selection to find new and missing source files.
        with open(checkfile, 'r') as checkfile_fp:
            src_rasters_checked = set(checkfile_fp.read().splitlines())
        if src_rasters is None:
            src_rasters = {os.path.basename(f) for f in glob.glob(checkfile_root+'*') if endswith_one_of_coll(f, src_suffixes)}
        src_rasters_to_check = src_rasters.difference(src_rasters_checked)
        if src_rasters_to_check:
            warnings.warn("There are more (new?) source files to be added to an existing checkfile")
            if warn_new_source:
                eprint("Checkfile {}; {} more (new?) source files are to be added to existing checkfile".format(
                    checkfile, len(src_rasters_to_check)))
                for f in sorted(list(src_rasters_to_check)):
                    eprint(f)
            delete_files = (delete_files or args.get(ARGSTR_RMWHERE_NEW_SOURCE))
        src_rasters_checked_missing = src_rasters_checked.difference(src_rasters)
        if src_rasters_checked_missing:
            warnings.warn("Files listed in a checkfile were not captured in source selection")
            if warn_missing_checked:
                eprint("Checkfile {}; {} source files listed in checkfile are missing from source selection:".format(
                    checkfile, len(src_rasters_checked_missing)))
                for f in sorted(list(src_rasters_checked_missing)):
                    eprint(f)
            delete_files = (delete_files or args.get(ARGSTR_RMWHERE_MISSING_CHECKED))
    elif return_incomplete_src_rasters or find_src_rasters:
        # No usable checkfile: every discovered source file needs checking.
        if src_rasters is None:
            src_rasters = {os.path.basename(f) for f in glob.glob(checkfile_root+'*') if endswith_one_of_coll(f, src_suffixes)}
        src_rasters_to_check = src_rasters
    else:
        # Source enumeration not needed; just signal "checking remains".
        src_rasters_to_check = True
    if src_rasters is not None:
        # Scan for missing source file suffixes, possibly once per
        # check-special subgroup in addition to the whole-group pass (None).
        check_special_missing_subgroups = [None]
        if args.get(ARGSTR_CHECK_SPECIAL) is not None and args.get(ARGSTR_CHECK_SPECIAL) in ARGCHOSET_CHECK_SPECIAL_SUBGROUP_DICT:
            check_special_missing_subgroups = check_special_missing_subgroups + ARGCHOSET_CHECK_SPECIAL_SUBGROUP_DICT[args.get(ARGSTR_CHECK_SPECIAL)]
        if type(srcfile_count) is list and len(srcfile_count) == 1:
            srcfile_count[0] = len(src_rasters)
        for check_special_option in check_special_missing_subgroups:
            if check_special_option is None:
                # Whole group treated as a single subgroup.
                cssgroup_ffileroot_srcfname_dict = {checkfile_root: src_rasters}
                src_suffixes_subgroup = src_suffixes
            else:
                # Partition source files into subgroups using the subgroup's
                # own suffix list and checkfile-root regex settings.
                cssgroup_ffileroot_srcfname_dict = dict()
                for check_special_set_argstr, check_special_set_value in ARGCHOSET_CHECK_SPECIAL_SETTING_DICT[check_special_option]:
                    if check_special_set_argstr == ARGSTR_SRC_SUFFIX:
                        # NOTE(review): this rebinds the outer `src_suffixes`,
                        # but the missing-suffix scan below reads
                        # `src_suffixes_subgroup` (set only in the
                        # check_special_option-is-None pass) -- possibly this
                        # should assign `src_suffixes_subgroup` instead; confirm.
                        src_suffixes = [s.strip() for s in check_special_set_value.split('/')]
                    elif check_special_set_argstr == ARGSTR_CHECKFILE_ROOT_REGEX:
                        subgroup_root_regex = check_special_set_value
                        for srcfname in src_rasters:
                            match = re.match(subgroup_root_regex, srcfname)
                            if match is None:
                                eprint("No regex match for check special subgroup {}='{}' setting {}='{}' with filename: {}".format(
                                    ARGSTR_CHECK_SPECIAL, check_special_option, ARGSTR_CHECKFILE_ROOT_REGEX, subgroup_root_regex.pattern, srcfname
                                ))
                            else:
                                # Group filenames under the regex-derived
                                # subgroup checkfile root.
                                cf_root_name = match.group(1)
                                cf_root_full = os.path.join(checkfile_dir, cf_root_name)
                                if cf_root_full not in cssgroup_ffileroot_srcfname_dict:
                                    cssgroup_ffileroot_srcfname_dict[cf_root_full] = set()
                                cssgroup_ffileroot_srcfname_dict[cf_root_full].add(srcfname)
                    else:
                        eprint("No option to handle check special subgroup {}={} setting {}={}, exiting".format(
                            ARGSTR_CHECK_SPECIAL, check_special_option, check_special_set_argstr, check_special_set_value
                        ))
                        sys.exit(1)
            for checkfile_root_subgroup, src_rasters_subgroup in cssgroup_ffileroot_srcfname_dict.items():
                # if (   (len(src_rasters_subgroup) == 1 and src_rasters_subgroup.pop().endswith('meta.txt'))
                #     and (check_special_option is not None and check_special_option == ARGCHO_CHECK_SPECIAL_SCENEPAIRS)):
                #     warnings.showwarning = script_utils.showwarning_stdout
                #     warnings.warn("Stray metadata file detected in check special 'scene' subgroup."
                #                   " Stray metadata files are ignored for the purpose of flagging"
                #                   " higher-level check special groups as incomplete due to missing suffixes.")
                #     warnings.showwarning = script_utils.showwarning_stderr
                #     continue
                missing_suffixes = [s for s in src_suffixes_subgroup if not ends_one_of_coll(s, src_rasters_subgroup)]
                if missing_suffixes and args.get(ARGSTR_CHECK_SPECIAL) is not None:
                    # Prune suffixes that are allowed to be absent for the
                    # active check-special mode before declaring the
                    # group incomplete.
                    missing_suffixes_set = set(missing_suffixes)
                    if args.get(ARGSTR_CHECK_SPECIAL) in ARGCHOGRP_CHECK_SPECIAL_SETSM_DEM_SCENELEVEL:
                        if CHECK_SPECIAL_DEM_SUFFIX_SCENELEVEL_MATCHTAG_SET.issubset(missing_suffixes_set):
                            missing_suffixes_set.difference_update(CHECK_SPECIAL_DEM_SUFFIX_SCENELEVEL_MATCHTAG_SET)
                        missing_suffixes_set.difference_update(CHECK_SPECIAL_DEM_SUFFIX_OPTIONAL_SCENELEVEL_SET)
                    if missing_suffixes_set and args.get(ARGSTR_CHECK_SPECIAL) in ARGCHOGRP_CHECK_SPECIAL_SETSM_DEM:
                        # Missing ortho2 rasters are tolerated for in-track
                        # groups, or for cross-track when explicitly allowed.
                        if (    (   CHECK_SPECIAL_DEM_SUFFIX_ORTHO2 in missing_suffixes_set
                                 or CHECK_SPECIAL_DEM_SUFFIX_ORTHO2_10M in missing_suffixes_set)
                            and ((not check_group_is_xtrack) or args.get(ARGSTR_ALLOW_MISSING_ORTHO2))):
                            if CHECK_SPECIAL_DEM_SUFFIX_ORTHO2 in missing_suffixes_set:
                                missing_suffixes_set.remove(CHECK_SPECIAL_DEM_SUFFIX_ORTHO2)
                            if CHECK_SPECIAL_DEM_SUFFIX_ORTHO2_10M in missing_suffixes_set:
                                missing_suffixes_set.remove(CHECK_SPECIAL_DEM_SUFFIX_ORTHO2_10M)
                    if missing_suffixes_set and args.get(ARGSTR_CHECK_SPECIAL) in ARGCHOGRP_CHECK_SPECIAL_SETSM_DEM_STRIPLEVEL:
                        missing_suffixes_set.difference_update(CHECK_SPECIAL_DEM_SUFFIX_OPTIONAL_STRIPLEVEL_SET)
                    # Keep original ordering while applying the pruned set.
                    missing_suffixes = [s for s in missing_suffixes if s in missing_suffixes_set]
                if missing_suffixes:
                    warnings.warn("Source file suffixes for a check group were not found")
                    missing_suffix_errmsg = (
                        "Check {}group {}; missing the following source file suffixes: {}".format(
                            "special '{}' sub".format(check_special_option)*(check_special_option is not None),
                            checkfile_root_subgroup, missing_suffixes
                        )
                    )
                    if args.get(ARGSTR_VERIFY_BY_PAIRNAME_DIR) and checkgroup_errfile is not None:
                        if not args.get(ARGSTR_DRYRUN):
                            # Record the incompleteness in the group error file.
                            with open(checkgroup_errfile, 'a') as checkgroup_errfile_fp:
                                checkgroup_errfile_fp.write(missing_suffix_errmsg+'\n')
                    if warn_missing_suffix:
                        eprint(missing_suffix_errmsg)
                    if type(missing_suffix_flag) is list and len(missing_suffix_flag) == 1:
                        missing_suffix_flag[0] = True
                        delete_files = (delete_files or args.get(ARGSTR_RMWHERE_MISSING_SUFFIX))
                        if missing_suffix_flag[0] and check_special_option is None:
                            break
        # Collect per-source error files plus the group-level error file.
        src_raster_errfnames = [f+errfile_ext for f in src_rasters if os.path.isfile(os.path.join(checkfile_dir, f+errfile_ext))]
        if checkgroup_errfile is not None and os.path.isfile(checkgroup_errfile):
            src_raster_errfnames.append(checkgroup_errfile)
        if src_raster_errfnames:
            warnings.warn("Error files were found among source files for a check group")
            if warn_errfile_exists:
                eprint("Check group {}; {} error files were found among source selection:".format(
                    checkfile_root, len(src_raster_errfnames)))
                for f in sorted(list(src_raster_errfnames)):
                    eprint(f)
            if type(errfile_count) is list and len(errfile_count) == 1:
                errfile_count[0] = len(src_raster_errfnames)
            delete_files = (delete_files or args.get(ARGSTR_RMWHERE_ERRFILE_EXISTS))
    # Removal is a dry run unless --do-delete was given without --dryrun.
    delete_dryrun = (args.get(ARGSTR_DRYRUN) or not args.get(ARGSTR_DO_DELETE))
    if (    (delete_files and checkfile_exists)
        and args.get(ARGSTR_REMOVE_TYPE) in [ARGCHO_REMOVE_TYPE_CHECKFILES, ARGCHO_REMOVE_TYPE_BOTH]):
        eprint("Removing checkfile"+" (dryrun)"*delete_dryrun)
        cmd = "rm {}".format(checkfile)
        if args.get(ARGSTR_DO_DELETE):
            eprint(cmd)
        if not delete_dryrun:
            os.remove(checkfile)
        if type(checkfile_removed_flag) is list and len(checkfile_removed_flag) == 1:
            checkfile_removed_flag[0] = True
        # With the checkfile gone, the whole group must be (re)checked.
        src_rasters_to_check = src_rasters
    if (    delete_files
        and args.get(ARGSTR_REMOVE_TYPE) in [ARGCHO_REMOVE_TYPE_SOURCEFILES, ARGCHO_REMOVE_TYPE_BOTH]):
        eprint("Removing source files"+" (dryrun)"*delete_dryrun)
        srcfnames_to_remove = list(src_rasters) + src_raster_errfnames
        for fn in srcfnames_to_remove:
            srcfile_to_remove = os.path.join(checkfile_dir, fn)
            cmd = "rm {}".format(srcfile_to_remove)
            if args.get(ARGSTR_DO_DELETE):
                eprint(cmd)
            if not delete_dryrun:
                os.remove(srcfile_to_remove)
        # Sentinel: tells the caller this group's source files were removed.
        return -1
    return list(src_rasters_to_check) if return_incomplete_src_rasters else bool(src_rasters_to_check)
def main():
global LOGGER
# Invoke argparse argument parsing.
arg_parser = argparser_init()
try:
args = script_utils.ArgumentPasser(PYTHON_EXE, SCRIPT_FILE, arg_parser, sys.argv)
except ScriptArgumentError as e:
arg_parser.error(e)
## Further parse/adjust argument values.
src = args.get(ARGSTR_SRC)
search_depth = args.get(ARGSTR_DEPTH)
verify_by_pairname_dir_depth = args.get(ARGSTR_VERIFY_BY_PAIRNAME_DIR_DEPTH)
checkfile_ext = args.get(ARGSTR_CHECKFILE_EXT)
errfile_ext = args.get(ARGSTR_ERRFILE_EXT)
allow_missing_suffix = args.get(ARGSTR_ALLOW_MISSING_SUFFIX)
retry_errors = args.get(ARGSTR_RETRY_ERRORS)
warn_errfile_exists = (not args.get(ARGSTR_SUPPRESS_ERRFILE_EXISTS) or args.get(ARGSTR_RMWHERE_ERRFILE_EXISTS))
warn_missing_suffix = (not args.get(ARGSTR_SUPPRESS_MISSING_SUFFIX) or args.get(ARGSTR_RMWHERE_MISSING_SUFFIX))
warn_missing_checked = (not args.get(ARGSTR_SUPPRESS_MISSING_CHECKED) or args.get(ARGSTR_RMWHERE_MISSING_CHECKED))
warn_new_source = (not args.get(ARGSTR_SUPPRESS_NEW_SOURCE) or args.get(ARGSTR_RMWHERE_NEW_SOURCE))
try_removal = (True in args.get(ARGGRP_RMWHERE))
allow_remove_checkfiles = args.get(ARGSTR_REMOVE_TYPE) in [ARGCHO_REMOVE_TYPE_CHECKFILES, ARGCHO_REMOVE_TYPE_BOTH]
allow_remove_sourcefiles = args.get(ARGSTR_REMOVE_TYPE) in [ARGCHO_REMOVE_TYPE_SOURCEFILES, ARGCHO_REMOVE_TYPE_BOTH]
delete_dryrun = (args.get(ARGSTR_DRYRUN) or not args.get(ARGSTR_DO_DELETE))
if args.get(ARGSTR_DEBUG):
LOGGER.setLevel(logging.DEBUG)
verifying_strips = (args.get(ARGSTR_VERIFY_BY_PAIRNAME_DIR) and args.get(ARGSTR_CHECK_SPECIAL) == ARGCHO_CHECK_SPECIAL_STRIPS)
if args.get(ARGSTR_SCHEDULER) is not None:
if args.get(ARGSTR_JOBSCRIPT) is None:
jobscript_default = os.path.join(JOBSCRIPT_DIR, 'head_{}.sh'.format(args.get(ARGSTR_SCHEDULER)))
if not os.path.isfile(jobscript_default):
arg_parser.error(
"Default jobscript ({}) does not exist, ".format(jobscript_default)
+ "please specify one with {} argument".format(ARGSTR_JOBSCRIPT))
else:
args.set(ARGSTR_JOBSCRIPT, jobscript_default)
print("argument {} set automatically to: {}".format(ARGSTR_JOBSCRIPT, args.get(ARGSTR_JOBSCRIPT)))
## Validate argument values.
argstr_mutexl_checkfile = [
ARGSTR_CHECKFILE,
ARGSTR_CHECKFILE_ROOT,
ARGSTR_CHECKFILE_ROOT_REGEX,
ARGSTR_CHECK_SPECIAL
]
argstr_incompat_sched = [ARGSTR_CHECKFILE, ARGSTR_CHECKFILE_ROOT]
if args.get(argstr_mutexl_checkfile).count(None) < (len(argstr_mutexl_checkfile)-1):
arg_parser.error("Only one of the following checkfile arguments may be provided: {}".format(argstr_mutexl_checkfile))
if args.get(ARGSTR_CHECK_SPECIAL) is not None:
check_special_option = args.get(ARGSTR_CHECK_SPECIAL)
for check_special_set_argstr, check_special_set_value in ARGCHOSET_CHECK_SPECIAL_SETTING_DICT[check_special_option]:
if args.provided(check_special_set_argstr):
continue
if check_special_option in ARGCHOGRP_CHECK_SPECIAL_SETSM_DEM_SCENELEVEL and check_special_set_argstr == ARGSTR_SRC_SUFFIX:
check_special_set_value = '/'.join([
ARGCHOSET_CHECK_SPECIAL_DEMTYPE_SUFFIX_DICT[args.get(ARGSTR_CHECK_SPECIAL_DEMTYPE)],
check_special_set_value
])
args.set(check_special_set_argstr, check_special_set_value)
print("via provided argument {}={}, argument {} set automatically to: '{}'".format(
ARGSTR_CHECK_SPECIAL, args.get(ARGSTR_CHECK_SPECIAL),
check_special_set_argstr, args.get(check_special_set_argstr)))
if args.get(ARGSTR_INDEX_PAIRNAMES_TO_JSON):
if not args.get(ARGSTR_VERIFY_BY_PAIRNAME_DIR):
arg_parser.error("{} option can only be used in conjuction with {} option".format(
ARGSTR_INDEX_PAIRNAMES_TO_JSON, ARGSTR_VERIFY_BY_PAIRNAME_DIR
))
if args.get(ARGSTR_CHECK_SPECIAL) not in ARGCHOSET_CHECK_SPECIAL_INDEX_MODE_DICT:
arg_parser.error("{} option requires {} must be set to one of {}".format(
ARGSTR_INDEX_PAIRNAMES_TO_JSON, ARGSTR_CHECK_SPECIAL, sorted(ARGCHOSET_CHECK_SPECIAL_INDEX_MODE_DICT.keys())
))
if not os.path.isfile(INDEX_SETSM_SCRIPT):
arg_parser.error(
"{} option requires the 'pgcdemtools' repo to exist alongside this repo, "
"but SETSM indexing script does not exist: {}".format(
ARGSTR_INDEX_PAIRNAMES_TO_JSON, SCRIPT_DIR, INDEX_SETSM_SCRIPT)
)
for removal_argstr in ARGGRP_REQUIRES_RMWHERE:
if args.get(removal_argstr) and not try_removal:
arg_parser.error("{} option can only be used in conjunction with one of the following "
"removal arguments: {}".format(removal_argstr, ARGGRP_RMWHERE))
if args.get(ARGSTR_SCHEDULER) is not None and args.get(argstr_incompat_sched).count(None) < len(argstr_incompat_sched):
arg_parser.error("{} option is incompatible with the following arguments: {}".format(
ARGSTR_SCHEDULER, argstr_incompat_sched
))
if args.get(ARGSTR_TASKS_PER_JOB) is not None and not args.get(ARGSTR_SCHEDULER):
arg_parser.error("{} option requires {} option".format(ARGSTR_TASKS_PER_JOB, ARGSTR_SCHEDULER))
src_suffixes = [s.strip() for s in args.get(ARGSTR_SRC_SUFFIX).split('/')]
if ( endswith_one_of_coll(SETSM_META_SUFFIX, src_suffixes, case_sensitive=False)
and args.get(ARGSTR_CHECK_SPECIAL) not in ARGCHOGRP_CHECK_SPECIAL_SETSM):
arg_parser.error("argument {} suffix '{}' that could match SETSM meta suffix '{}' "
"may only be provided when argument {} is set to one of the following SETSM options: {}".format(
ARGSTR_SRC_SUFFIX, endswith_one_of_coll(SETSM_META_SUFFIX, src_suffixes, case_sensitive=False, return_match=True),
SETSM_META_SUFFIX, ARGSTR_CHECK_SPECIAL, ARGCHOGRP_CHECK_SPECIAL_SETSM
))
checkfile_root_regex = (re.compile(args.get(ARGSTR_CHECKFILE_ROOT_REGEX))
if args.get(ARGSTR_CHECKFILE_ROOT_REGEX) is not None else None)
if args.get(ARGSTR_VERIFY_QUICK_CHECK):
## Do quick verification check and exit
print("\nDoing verification quick check...")
if not os.path.isdir(args.get(ARGSTR_SRC)):
arg_parser.error("{} must be a directory when {} option is provided".format(
ARGSTR_SRC, ARGSTR_VERIFY_QUICK_CHECK
))
srcdir = args.get(ARGSTR_SRC)
pairname_dir_list = []
for root, dnames, fnames in walk.walk(srcdir, maxdepth=verify_by_pairname_dir_depth):
for dn in dnames:
if re.match(ARGCHOSET_CHECK_SPECIAL_DEM_REGEX_STRIPLEVEL, dn) is not None:
pairname_dir = os.path.join(root, dn)
pairname_dir_list.append(pairname_dir)
pairname_dir_num_total = len(pairname_dir_list)
if len(pairname_dir_list) == 0:
eprint("ERROR: No pairname directories were found with {} and {}={}".format(
ARGSTR_VERIFY_BY_PAIRNAME_DIR, ARGSTR_VERIFY_BY_PAIRNAME_DIR_DEPTH, verify_by_pairname_dir_depth
))
sys.exit(1)
else:
print("Found {} pairname directories within {}".format(pairname_dir_num_total, srcdir))
print('')
pairname_dir_not_done_list = []
pairname_dir_empty_list = []
for pairname_dir in pairname_dir_list:
pairname_errfile = pairname_dir+errfile_ext
pnamedir_checkfile = pairname_dir+checkfile_ext
pnamedir_jsonfile = pairname_dir+'.json'
pnamedir_errfile_exists = os.path.isfile(pairname_errfile)
pnamedir_checkfile_exists = os.path.isfile(pnamedir_checkfile)
pnamedir_jsonfile_exists = os.path.isfile(pnamedir_jsonfile)
if pnamedir_errfile_exists or not (pnamedir_checkfile_exists and pnamedir_jsonfile_exists):
for _, _, srcfname_list in walk.walk(pairname_dir, maxdepth=1):
break
if len(srcfname_list) == 0:
print("WARNING: Pairname directory is empty: {}".format(pairname_dir))
pairname_dir_empty_list.append(pairname_dir)
if pnamedir_checkfile_exists or pnamedir_jsonfile_exists:
print("ERROR: Empty pairname directory has a checkfile or JSON file: {}".format(pairname_dir))
else:
continue
elif len(srcfname_list) == 1 and verifying_strips:
single_strip_fname = srcfname_list[0]
if single_strip_fname.endswith('.fin'):
if pnamedir_jsonfile_exists:
print("ERROR: Pairname directory with lone strip finfile has JSON file: {}".format(pnamedir_jsonfile))
elif not pnamedir_checkfile_exists:
continue
else:
with open(pnamedir_checkfile, 'r') as check_strips_fin_fp:
strip_finfname = check_strips_fin_fp.read().strip()
if strip_finfname == single_strip_fname:
continue
else:
print("ERROR: Solo strip finfile in pairname directory checkfile ({}) "
"does not match existing lone strip finfile ({}): {}".format(
strip_finfname, single_strip_fname, pnamedir_checkfile
))
print("Pairname directory containing {} files, where {}, has not passed verification: {}".format(
len(srcfname_list),
"(errfile {}, checkfile {}, JSON {})".format(
*['exists' if file_exists else 'DNE' for file_exists in [
pnamedir_errfile_exists,
pnamedir_checkfile_exists,
pnamedir_jsonfile_exists
]]
),
pairname_errfile if pnamedir_errfile_exists else pairname_dir
))
pairname_dir_not_done_list.append(pairname_dir)
print('')
if len(pairname_dir_not_done_list) == 0:
print("All pairname directories have passed verification!")
else:
print("{} pairname directories have not yet passed verification:\n {}".format(
len(pairname_dir_not_done_list), '\n '.join(pairname_dir_not_done_list)
))
if len(pairname_dir_empty_list) != 0:
print("{} pairname directories are empty:\n {}".format(
len(pairname_dir_empty_list), '\n '.join(pairname_dir_empty_list)
))
sys.exit(0)
## Scan source dir/file input to determine which source files should be checked.
checkffileroot_srcfnamechecklist_dict = None
srcffile_checklist = None
num_srcfiles = 0
num_checkgroups = None
srcfile_count = [None]
errfile_count = [None]
missing_suffix_flag = [False]
checkfile_removed_flag = [False]
print("-----")
if not args.get(ARGSTR_CHECKFILE_OFF):
print("Checkfile extension: {}".format(checkfile_ext))
print("Error file extension: {}".format(errfile_ext))
print("Accepted source file suffixes: {}".format(src_suffixes))
print("-----")
print("Any check group warnings would appear here:")
srcdir = None
if os.path.isdir(src):
srcdir = src
if ( args.get(ARGSTR_CHECKFILE_ROOT_REGEX) is not None
and args.get(ARGSTR_CHECK_SPECIAL) != ARGCHO_CHECK_SPECIAL_ALL_SEPARATE):
checkffileroot_srcfnamechecklist_dict = dict()
if args.get(ARGSTR_VERIFY_BY_PAIRNAME_DIR):
pairname_dir_list = []
if re.match(ARGCHOSET_CHECK_SPECIAL_DEM_REGEX_STRIPLEVEL, os.path.basename(srcdir)) is not None:
pairname_dir_list.append(srcdir)
else:
for root, dnames, fnames in walk.walk(srcdir, maxdepth=verify_by_pairname_dir_depth):
for dn in dnames:
if re.match(ARGCHOSET_CHECK_SPECIAL_DEM_REGEX_STRIPLEVEL, dn) is not None:
pairname_dir = os.path.join(root, dn)
pairname_dir_list.append(pairname_dir)
if len(pairname_dir_list) == 0:
eprint("No pairname directories were found with {} and {}={}".format(
ARGSTR_VERIFY_BY_PAIRNAME_DIR, ARGSTR_VERIFY_BY_PAIRNAME_DIR_DEPTH, verify_by_pairname_dir_depth
))
for pairname_dir in pairname_dir_list:
srcfname_list = []
for _, _, srcfname_list in walk.walk(pairname_dir, maxdepth=1):
break
if len(srcfname_list) == 1 and verifying_strips:
single_strip_fname = srcfname_list[0]
if single_strip_fname.endswith('.fin'):
strip_finfname = single_strip_fname
with open(pairname_dir+'.check', 'w') as check_strips_fin_fp:
check_strips_fin_fp.write(strip_finfname)
continue
for srcfname in srcfname_list:
if endswith_one_of_coll(srcfname, src_suffixes):
match = re.match(checkfile_root_regex, srcfname)
if match is None:
eprint("No regex match for filename matching suffix criteria in source directory: {}".format(srcfname))
else:
if pairname_dir not in checkffileroot_srcfnamechecklist_dict:
checkffileroot_srcfnamechecklist_dict[pairname_dir] = []
checkffileroot_srcfnamechecklist_dict[pairname_dir].append(srcfname)
else:
for root, dnames, fnames in walk.walk(srcdir, maxdepth=search_depth):
for srcfname in fnames:
if endswith_one_of_coll(srcfname, src_suffixes):
match = re.match(checkfile_root_regex, srcfname)
if match is None:
eprint("No regex match for filename matching suffix criteria in source directory: {}".format(srcfname))
else:
cf_root_name = match.group(1)
cf_root_full = os.path.join(root, cf_root_name)
if cf_root_full not in checkffileroot_srcfnamechecklist_dict:
checkffileroot_srcfnamechecklist_dict[cf_root_full] = []
checkffileroot_srcfnamechecklist_dict[cf_root_full].append(srcfname)
elif args.get(ARGSTR_CHECKFILE_ROOT) is not None:
checkffileroot_srcfnamechecklist_dict = dict()
cf_root_full = os.path.join(srcdir, args.get(ARGSTR_CHECKFILE_ROOT))
checkffileroot_srcfnamechecklist_dict[cf_root_full] = [
os.path.basename(f) for f in glob.glob(cf_root_full+'*') if endswith_one_of_coll(f, src_suffixes)]
else: # if argument --checkfile was provided or if each source raster is allotted a checkfile
srcffile_checklist = []
for root, dnames, fnames in walk.walk(srcdir, maxdepth=search_depth):
for srcfname in fnames:
if endswith_one_of_coll(srcfname, src_suffixes):
srcffile_checklist.append(os.path.join(root, srcfname))
missing_suffixes = [s for s in src_suffixes if not ends_one_of_coll(s, srcffile_checklist)]
if missing_suffixes:
warnings.warn("Source file suffixes were not found")
if warn_missing_suffix:
eprint("Source directory is missing the following file suffixes: {}".format(missing_suffixes))
missing_suffix_flag[0] = True
elif os.path.isfile(src):
if src.endswith('.txt') and not src.endswith((ARGCHOSET_CHECK_SPECIAL_DEM_SUFFIX_META,
ARGCHOSET_CHECK_SPECIAL_DEM_SUFFIX_INFO50CM)):
bundle_file = src
task_list = script_utils.read_task_bundle(bundle_file)
if args.get(ARGSTR_CHECK_SPECIAL) == ARGCHO_CHECK_SPECIAL_ALL_SEPARATE:
srcffile_checklist = task_list
if args.get(ARGSTR_CHECKFILE_ROOT) is not None:
srcffile_checklist = [srcffile for srcffile in srcffile_checklist if
os.path.basename(srcffile.startswith(ARGSTR_CHECKFILE_ROOT))]
elif args.get(ARGSTR_CHECKFILE_ROOT_REGEX) is not None:
srcffile_checklist = [srcffile for srcffile in srcffile_checklist if
re.match(checkfile_root_regex, os.path.basename(srcffile)) is not None]
else:
argstr_incompat_srcfile_cfroots = [ARGSTR_CHECKFILE, ARGSTR_CHECKFILE_ROOT]
if args.get(argstr_incompat_srcfile_cfroots).count(None) < len(argstr_incompat_srcfile_cfroots):
arg_parser.error("argument {} text file containing checkfile roots is "
"incompatible with the following arguments: {}".format(
ARGSTR_SRC, argstr_incompat_srcfile_cfroots
))
checkffileroot_list = task_list
if args.get(ARGSTR_VERIFY_BY_PAIRNAME_DIR):
checkffileroot_srcfnamechecklist_dict = dict()
pairname_dir_list = []
if verify_by_pairname_dir_depth == 0:
for cff_root in checkffileroot_list:
if not os.path.isdir(cff_root):
warnings.warn("Path in source text file is not an existing directory ({})".format(ARGSTR_VERIFY_BY_PAIRNAME_DIR))
eprint("Path in source text file is not an existing directory: {}".format(cff_root))
elif not re.match(ARGCHOSET_CHECK_SPECIAL_DEM_REGEX_STRIPLEVEL, os.path.basename(cff_root)) is not None:
warnings.warn("Directory name in source text file does not match pairname regex ({})".format(ARGSTR_VERIFY_BY_PAIRNAME_DIR))
eprint("Directory name in source text file does not match pairname regex: {}".format(cff_root))
else:
pairname_dir_list.append(cff_root)
else:
for cff_root in checkffileroot_list:
for root, dnames, fnames in walk.walk(cff_root, maxdepth=verify_by_pairname_dir_depth):
for dn in dnames:
if re.match(ARGCHOSET_CHECK_SPECIAL_DEM_REGEX_STRIPLEVEL, dn) is not None:
pairname_dir = os.path.join(root, dn)
pairname_dir_list.append(pairname_dir)
if len(pairname_dir_list) == 0:
eprint("No pairname directories were found with {} and {}={}".format(
ARGSTR_VERIFY_BY_PAIRNAME_DIR, ARGSTR_VERIFY_BY_PAIRNAME_DIR_DEPTH, verify_by_pairname_dir_depth
))
for pairname_dir in pairname_dir_list:
srcfname_list = []
for _, _, srcfname_list in walk.walk(pairname_dir, maxdepth=1):
break
if len(srcfname_list) == 1 and verifying_strips:
single_strip_file = srcfname_list[0]
if single_strip_file.endswith('.fin'):
strip_finfile = single_strip_file
with open(pairname_dir+'.check', 'w') as check_strips_fin_fp:
check_strips_fin_fp.write(strip_finfile)
continue
for srcfname in srcfname_list:
if endswith_one_of_coll(srcfname, src_suffixes):
match = re.match(checkfile_root_regex, srcfname)
if match is None:
eprint("No regex match for filename matching suffix criteria in source directory: {}".format(srcfname))
else:
if pairname_dir not in checkffileroot_srcfnamechecklist_dict:
checkffileroot_srcfnamechecklist_dict[pairname_dir] = []
checkffileroot_srcfnamechecklist_dict[pairname_dir].append(srcfname)
else:
srcffiles = []
for cff_root in checkffileroot_list:
srcffiles.extend(glob.glob(cff_root+'*'))
if args.get(ARGSTR_CHECKFILE) is not None:
srcffile_checklist = srcffiles
elif args.get(ARGSTR_CHECKFILE_ROOT_REGEX) is not None:
checkffileroot_srcfnamechecklist_dict = dict()
for srcffile in srcffiles:
if endswith_one_of_coll(srcffile, src_suffixes):
srcfdir, srcfname = os.path.split(srcffile)
match = re.match(checkfile_root_regex, srcfname)
if match is None:
eprint("No regex match for file matching suffix criteria pulled from "
"source text file containing checkfile roots: {}".format(srcffile))
else:
cf_root_name = match.group(1)
cf_root_full = os.path.join(srcfdir, cf_root_name)
if cf_root_full not in checkffileroot_srcfnamechecklist_dict:
checkffileroot_srcfnamechecklist_dict[cf_root_full] = []
checkffileroot_srcfnamechecklist_dict[cf_root_full].append(srcfname)
else:
checkffileroot_srcfnamechecklist_dict = {cf_root_full: None for cf_root_full in checkffileroot_list}
# num_srcfiles = None
else:
argstr_incompat_srcfile = [ARGSTR_CHECKFILE_ROOT, ARGSTR_CHECKFILE_ROOT_REGEX, ARGSTR_CHECK_SPECIAL]
if args.get(argstr_incompat_srcfile).count(None) < len(argstr_incompat_srcfile):
arg_parser.error("argument {} source file is incompatible with the following arguments: {}".format(
ARGSTR_SRC, argstr_incompat_srcfile
))
srcffile_checklist = [src]
warn_missing_checked = False
warn_missing_suffix = False
else:
args.set(ARGSTR_CHECKFILE_ROOT, src)
srcdir = os.path.dirname(src)
print("via non-(directory/file) argument {}, argument {} set automatically to: '{}'".format(
ARGSTR_SRC, ARGSTR_CHECKFILE_ROOT, args.get(ARGSTR_CHECKFILE_ROOT)))
checkffileroot_srcfnamechecklist_dict = dict()
cf_root_full = args.get(ARGSTR_CHECKFILE_ROOT)
checkffileroot_srcfnamechecklist_dict[cf_root_full] = [
os.path.basename(f) for f in glob.glob(cf_root_full+'*') if endswith_one_of_coll(f, src_suffixes)]
num_srcfiles_to_check = None
num_checkgroups_to_check = None
num_srcfiles_to_run = None
num_checkgroups_to_run = None
num_srcfiles_err_exist = 0
num_srcfiles_err_skip = 0
num_checkgroups_err_exist = 0
num_checkgroups_err_skip = 0
num_srcfiles_suf_skip = 0
num_checkgroups_suf_miss = 0
num_checkgroups_suf_skip = 0
num_srcfiles_removed = 0
num_checkgroups_removed = 0
num_checkfiles_removed = 0
check_items = None
if checkffileroot_srcfnamechecklist_dict is not None:
num_checkgroups = len(checkffileroot_srcfnamechecklist_dict.keys())
return_incomplete_src_rasters = (args.get(ARGSTR_SCHEDULER) is None)
if return_incomplete_src_rasters:
num_srcfiles_to_check = 0
num_srcfiles_to_run = 0
num_checkgroups_to_check = 0
num_checkgroups_to_run = 0
for cff_root in checkffileroot_srcfnamechecklist_dict:
cff_root_src_rasters = checkffileroot_srcfnamechecklist_dict[cff_root]
checkgroup_errfile = cff_root+errfile_ext
srcfile_count[0] = None
errfile_count[0] = None
missing_suffix_flag[0] = False
checkfile_removed_flag[0] = False
checkffileroot_srcfnamechecklist_dict[cff_root] = checkfile_incomplete(args,
cff_root, checkfile_ext, errfile_ext, src_suffixes,
checkffileroot_srcfnamechecklist_dict[cff_root], return_incomplete_src_rasters,
srcfile_count, errfile_count,
missing_suffix_flag, checkfile_removed_flag,
warn_missing_suffix, warn_errfile_exists,
warn_missing_checked, warn_new_source
)
if checkfile_removed_flag[0]:
num_checkfiles_removed += 1
cff_root_src_rasters_to_check = checkffileroot_srcfnamechecklist_dict[cff_root]
if type(cff_root_src_rasters_to_check) is int and cff_root_src_rasters_to_check == -1:
checkffileroot_srcfnamechecklist_dict[cff_root] = None
num_checkgroups -= 1
num_checkgroups_removed += 1
num_srcfiles_removed += srcfile_count[0]
continue
elif srcfile_count[0] is not None:
num_srcfiles += srcfile_count[0]
if ( cff_root_src_rasters is not None
and ( errfile_count[0] is None
or (not retry_errors and args.get(ARGSTR_CHECKFILE_OFF) and type(cff_root_src_rasters_to_check) is list))):
cff_dir = os.path.join(os.path.dirname(cff_root))
if os.path.isfile(checkgroup_errfile):
srcfname_errlist = cff_root_src_rasters
else:
srcfname_errlist = [fn for fn in cff_root_src_rasters if os.path.isfile(os.path.join(cff_dir, fn+errfile_ext))]
errfile_count[0] = len(srcfname_errlist)
if errfile_count[0] is not None:
num_srcfiles_err_exist += errfile_count[0]
if cff_root_src_rasters_to_check:
num_checkgroups_to_check += 1
if type(cff_root_src_rasters_to_check) is list:
num_srcfiles_to_check_this_group = len(cff_root_src_rasters_to_check)
num_srcfiles_to_check += num_srcfiles_to_check_this_group
else:
num_srcfiles_to_check_this_group = None
if ( (not allow_missing_suffix and missing_suffix_flag[0])
or (not retry_errors and errfile_count[0])):
cff_root_src_rasters_to_check_backup = cff_root_src_rasters_to_check
if not retry_errors and errfile_count[0]:
if args.get(ARGSTR_CHECKFILE_OFF):
if type(cff_root_src_rasters_to_check) is list:
cff_root_src_rasters_to_check = list(set(cff_root_src_rasters_to_check).difference(set(srcfname_errlist)))
num_srcfiles_err_skip += (num_srcfiles_to_check_this_group - len(cff_root_src_rasters_to_check))
if len(cff_root_src_rasters_to_check) == 0:
if num_srcfiles_to_check_this_group > 0:
num_checkgroups_err_skip += 1
else:
if type(cff_root_src_rasters_to_check) is list:
cff_root_src_rasters_to_check = []
num_srcfiles_err_skip += num_srcfiles_to_check_this_group
num_checkgroups_err_exist += 1
if num_srcfiles_to_check_this_group > 0:
num_checkgroups_err_skip += 1
else:
num_checkgroups_err_exist += 1
if cff_root_src_rasters_to_check:
cff_root_src_rasters_to_check = False
num_checkgroups_err_skip += 1
checkffileroot_srcfnamechecklist_dict[cff_root] = cff_root_src_rasters_to_check
if not allow_missing_suffix and missing_suffix_flag[0]:
if type(cff_root_src_rasters_to_check_backup) is list:
cff_root_src_rasters_to_check = []
num_srcfiles_suf_skip += num_srcfiles_to_check_this_group
num_checkgroups_suf_miss += 1
if num_srcfiles_to_check_this_group > 0:
num_checkgroups_suf_skip += 1
else:
num_checkgroups_suf_miss += 1
if cff_root_src_rasters_to_check_backup:
cff_root_src_rasters_to_check = False
num_checkgroups_suf_skip += 1
checkffileroot_srcfnamechecklist_dict[cff_root] = cff_root_src_rasters_to_check
checkffileroot_srcfnamechecklist_dict = {
cff_root: f_list for cff_root, f_list in checkffileroot_srcfnamechecklist_dict.items() if f_list}
check_items = checkffileroot_srcfnamechecklist_dict
num_checkgroups_to_run = len(checkffileroot_srcfnamechecklist_dict.keys())
if num_checkgroups_to_run == 0:
num_srcfiles_to_run = 0
elif type(next(iter(checkffileroot_srcfnamechecklist_dict))) is list:
num_srcfiles_to_run = sum([len(file_list) for file_list in checkffileroot_srcfnamechecklist_dict.values()])
elif srcffile_checklist is not None:
num_srcfiles = len(srcffile_checklist)
srcffile_errlist = [f for f in srcffile_checklist if os.path.isfile(f+errfile_ext)]
num_srcfiles_err_exist = len(srcffile_errlist)
if args.get(ARGSTR_CHECKFILE_OFF):
num_srcfiles_to_check = len(srcffile_checklist)
else:
if args.get(ARGSTR_CHECKFILE):
num_checkgroups = 1
srcffile_checklist = checkfile_incomplete(args,
args.get(ARGSTR_CHECKFILE), None, errfile_ext, src_suffixes,
srcffile_checklist, True,
srcfile_count, errfile_count,
missing_suffix_flag, checkfile_removed_flag,
warn_missing_suffix, warn_errfile_exists,
warn_missing_checked, warn_new_source
)
else:
num_checkgroups = num_srcfiles
srcffile_checklist = [f for f in srcffile_checklist if not os.path.isfile(f+checkfile_ext)]
num_srcfiles_to_check = len(srcffile_checklist)
num_checkgroups_to_check = 1 if (args.get(ARGSTR_CHECKFILE) and num_srcfiles_to_check > 0) else num_srcfiles_to_check
if num_srcfiles_err_exist > 0 and errfile_count[0] is None:
warnings.warn("Error files were found among source files")
if warn_errfile_exists:
eprint("{} error files were found among source selection:".format(num_srcfiles_err_exist))
for fn in sorted(list(srcffile_errlist)):
eprint(fn+errfile_ext)
if not retry_errors and num_srcfiles_err_exist > 0:
if args.get(ARGSTR_CHECKFILE):
srcffile_checklist = []
num_srcfiles_err_skip = num_srcfiles_to_check
num_checkgroups_err_skip = num_checkgroups_to_check
else:
srcffile_checklist = list(set(srcffile_checklist).difference(set(srcffile_errlist)))
num_srcfiles_err_skip = num_srcfiles_to_check - len(srcffile_checklist)
num_checkgroups_err_skip = num_srcfiles_err_skip
if not allow_missing_suffix and missing_suffix_flag[0]:
srcffile_checklist = []
num_srcfiles_suf_skip = num_srcfiles_to_check
num_checkgroups_suf_skip = num_checkgroups_to_check
check_items = srcffile_checklist
num_srcfiles_to_run = len(check_items)
num_checkgroups_to_run = 1 if (args.get(ARGSTR_CHECKFILE) and num_srcfiles_to_run > 0) else num_srcfiles_to_run
else:
raise DeveloperError("Neither `checkffileroot_srcfnamechecklist_dict` "
"nor `srcffile_checklist` have been initialized")
num_errfiles_walk = 0
print("-----")
if not args.get(ARGSTR_CHECKFILE_OFF):
print("Checkfile extension: {}".format(checkfile_ext))
print("Error file extension: {}".format(errfile_ext))
print("Accepted source file suffixes: {}".format(src_suffixes))
if try_removal:
print("-----")
print("{} :: {}{}".format(
ARGSTR_REMOVE_TYPE, args.get(ARGSTR_REMOVE_TYPE),
" ({} and {})".format(ARGCHO_REMOVE_TYPE_CHECKFILES, ARGCHO_REMOVE_TYPE_SOURCEFILES)*(
args.get(ARGSTR_REMOVE_TYPE) == ARGCHO_REMOVE_TYPE_BOTH)))
if allow_remove_checkfiles:
print("Number of checkfiles removed: {}".format(num_checkfiles_removed))
if allow_remove_sourcefiles:
print("Number of check groups removed: {}".format(num_checkgroups_removed))
print("Total number of source files removed: {}".format(num_srcfiles_removed))
if delete_dryrun:
print("(dryrun; must turn on {} and turn off {} to do delete)".format(ARGSTR_DO_DELETE, ARGSTR_DRYRUN))
if args.get(ARGSTR_REMOVE_ONLY):
sys.exit(0)
print("-----")
if os.path.isdir(src):
for root, dnames, fnames in walk.walk(src, maxdepth=search_depth):
for srcfname in fnames:
if srcfname.endswith(errfile_ext):
num_errfiles_walk += 1
print("{} existing error files found within source directory".format(num_errfiles_walk))
print("{} existing error files found among source selection".format(num_srcfiles_err_exist))
if num_srcfiles is not None or num_srcfiles_to_check is not None:
print("Number of source files: {}{}{}{}{}".format(
num_srcfiles if num_srcfiles is not None else '',
', ' if (num_srcfiles is not None and num_srcfiles_to_check is not None) else '',
'{} to check'.format(num_srcfiles_to_check) if num_srcfiles_to_check is not None else '',
' ({} skipped due to missing suffix)'.format(num_srcfiles_suf_skip) if num_srcfiles_suf_skip else '',
' ({} skipped due to existing error file)'.format(num_srcfiles_err_skip) if num_srcfiles_err_skip else ''
))
if num_checkgroups is not None:
print("Number of check groups: {}{}{}, {} to check{}{}".format(
num_checkgroups,
' ({} with missing suffix)'.format(num_checkgroups_suf_miss) if num_checkgroups_suf_miss else '',
' ({} with existing error file)'.format(num_checkgroups_err_exist) if num_checkgroups_err_exist else '',
num_checkgroups_to_check,
' ({} skipped due to missing suffix)'.format(num_checkgroups_suf_skip) if num_checkgroups_suf_skip else '',
' ({} skipped due to existing error file)'.format(num_checkgroups_err_skip) if num_checkgroups_err_skip else ''
))
if args.get(ARGSTR_STATS_ONLY):
sys.exit(0)
print("--> Will run: {}{}{}".format(
'{} check groups'.format(num_checkgroups_to_run) if num_checkgroups_to_run is not None else '',
', ' if (num_srcfiles_to_run is not None and num_checkgroups_to_run is not None) else '',
'{} source files'.format(num_srcfiles_to_run) if num_srcfiles_to_run is not None else '',
))
if ( (checkffileroot_srcfnamechecklist_dict is not None and len(checkffileroot_srcfnamechecklist_dict) == 0)
or (srcffile_checklist is not None and len(srcffile_checklist) == 0)):
sys.exit(0)
# elif args.get(ARGSTR_DRYRUN) and args.get(ARGSTR_SCHEDULER) is not None:
# print("Exiting dryrun")
# sys.exit(0)
# Pause for user review.
print("-----")
wait_seconds = 5
print("Sleeping {} seconds before task submission".format(wait_seconds))
sleep(wait_seconds)
print("-----")
## Create output directories if they don't already exist.
if not args.get(ARGSTR_DRYRUN):
for dir_argstr, dir_path in list(zip(ARGGRP_OUTDIR, args.get_as_list(ARGGRP_OUTDIR))):
if dir_path is not None and not os.path.isdir(dir_path):
print("Creating argument {} directory: {}".format(dir_argstr, dir_path))
os.makedirs(dir_path)
if args.get(ARGSTR_CHECKFILE):
checkfile_dir = os.path.dirname(args.get(ARGSTR_CHECKFILE))
if not os.path.isdir(checkfile_dir):
print("Creating directory to contain output checkfile: {}".format(checkfile_dir))
os.makedirs(checkfile_dir)
## Check rasters.
if check_items is checkffileroot_srcfnamechecklist_dict:
check_items_sorted = sorted(checkffileroot_srcfnamechecklist_dict.keys())
elif check_items is srcffile_checklist:
check_items.sort()
check_items_sorted = check_items
if args.get(ARGSTR_SCHEDULER) is not None:
# Check rasters in batch.
tasks_per_job = args.get(ARGSTR_TASKS_PER_JOB)
check_units = (check_items_sorted if tasks_per_job is None else
script_utils.write_task_bundles(check_items_sorted, tasks_per_job,
args.get(ARGSTR_SCRATCH),
'{}_{}'.format(JOB_ABBREV, ARGSTR_SRC)))
jobnum_fmt = script_utils.get_jobnum_fmtstr(check_units)
last_job_email = args.get(ARGSTR_EMAIL)
args_batch = args
args_single = copy.deepcopy(args)
args_single.unset(ARGGRP_BATCH)
if args.get(ARGSTR_WD) is None and BATCH_ARGDEF_WD is not None:
args_single.set(ARGSTR_WD, BATCH_ARGDEF_WD)
print("argument {} set to default value for batch run with {} option: {}".format(
ARGSTR_WD, ARGSTR_SCHEDULER, args_single.get(ARGSTR_WD)
))
if check_items is srcffile_checklist:
args_single.set(ARGSTR_CHECK_SPECIAL, ARGCHO_CHECK_SPECIAL_ALL_SEPARATE)
if args.get(ARGSTR_CHECK_SPECIAL) is not None:
args_single.unset(ARGGRP_CHECK_REGULAR)
if args.get(ARGSTR_VERIFY_BY_PAIRNAME_DIR):
args_single.set(ARGSTR_VERIFY_BY_PAIRNAME_DIR_DEPTH, 0)
job_name_prefix = args.get(ARGSTR_JOBNAME)
job_num = 0
num_jobs = len(check_units)
for unit in check_units:
job_num += 1
args_single.set(ARGSTR_SRC, unit)
if last_job_email and job_num == num_jobs:
args_single.set(ARGSTR_EMAIL, last_job_email)
cmd_single = args_single.get_cmd()
job_name = job_name_prefix+jobnum_fmt.format(job_num)
cmd = args_single.get_jobsubmit_cmd(
args_batch.get(ARGSTR_SCHEDULER),
jobscript=args_batch.get(ARGSTR_JOBSCRIPT),
jobname=job_name, time_hr=JOB_WALLTIME_HR, memory_gb=JOB_MEMORY_GB, email=args.get(ARGSTR_EMAIL),
envvars=[args_batch.get(ARGSTR_JOBSCRIPT), JOB_ABBREV, cmd_single, PYTHON_VERSION_ACCEPTED_MIN],
hold=True
)
if args_batch.get(ARGSTR_DRYRUN):
print(cmd)
else:
subprocess.call(cmd, shell=True, cwd=args_batch.get(ARGSTR_LOGDIR))
else:
error_trace = None
try:
# Check rasters in serial.
if check_items is checkffileroot_srcfnamechecklist_dict:
for i, cff_root in enumerate(check_items_sorted):
checkfile_dir = os.path.dirname(cff_root) if not os.path.isdir(cff_root) else cff_root
cf_rasterffile_list = [os.path.join(checkfile_dir, rasterfname) for rasterfname in
checkffileroot_srcfnamechecklist_dict[cff_root]]
cf_rasterffile_list.sort()
checkfile = cff_root+checkfile_ext
print("Check group ({}/{}), {} files to check: {}*".format(
i+1, num_checkgroups_to_check, len(cf_rasterffile_list), cff_root))
if not args.get(ARGSTR_DRYRUN):
check_rasters(cf_rasterffile_list, checkfile, args)
elif check_items is srcffile_checklist:
for i, src_rasterffile in enumerate(check_items_sorted):
checkfile = src_rasterffile+checkfile_ext
print("Check source file ({}/{}): {}".format(i+1, num_srcfiles_to_check, src_rasterffile))
if not args.get(ARGSTR_DRYRUN):
check_rasters(src_rasterffile, checkfile, args)
except KeyboardInterrupt:
raise
except Exception as e:
with script_utils.capture_stdout_stderr() as out:
traceback.print_exc()
caught_out, caught_err = out
error_trace = caught_err
eprint(error_trace)
if e.__class__ is ImportError:
print("\nFailed to import necessary module(s)")
print("If running on a Linux system where the jobscripts/init.sh file has been properly"
" set up, try running the following command to activate a working environment"
" in your current shell session:\n{}".format("source {} {}".format(JOBSCRIPT_INIT, JOB_ABBREV)))
print('')
if type(args.get(ARGSTR_EMAIL)) is str:
# Send email notification of script completion.
email_body = SCRIPT_RUNCMD
if error_trace is not None:
email_status = "ERROR"
email_body += "\n{}\n".format(error_trace)
else:
email_status = "COMPLETE"
email_subj = "{} - {}".format(email_status, SCRIPT_FNAME)
script_utils.send_email(args.get(ARGSTR_EMAIL), email_subj, email_body)
if error_trace is not None:
sys.exit(1)
def check_rasters(raster_ffiles, checkfile, args):
    """Validate one source file, or a group of source files, and record results.

    Files that pass are recorded in `checkfile`; files that fail get a
    sidecar error file (or an entry in a group-level error file). Depending
    on file suffix, a file is checked as a SETSM metadata text file, an
    info50cm.txt file, a GDAL-readable raster (checksum / full read /
    SETSM valid-range), or — for any other suffix — passes by existing.

    Parameters:
        raster_ffiles: A single source file path (str) or a list of paths
            forming one check group that shares a single group checkfile.
        checkfile: Path of the checkfile to write/append for passing files.
            Overridden by the ARGSTR_CHECKFILE script argument when set.
        args: Parsed script-argument container (supports .get(ARGSTR_*)).

    Side effects: creates/appends/removes checkfiles and error files on
    disk, may copy rasters to a working directory, and may invoke the
    external SETSM index script through a shell subprocess.
    """
    # Function-local imports: numpy/GDAL are only needed when checks run.
    import numpy as np
    from osgeo import gdal
    # Make GDAL raise RuntimeError instead of returning None on failure,
    # which the try/except blocks below rely on.
    gdal.UseExceptions()
    # An explicit checkfile argument overrides the caller-provided path.
    if args.get(ARGSTR_CHECKFILE) is not None:
        checkfile = args.get(ARGSTR_CHECKFILE)
    # In verify-by-pairname-dir mode, derive a group-level error file path
    # from the checkfile name and remove any stale one from a prior run.
    if args.get(ARGSTR_VERIFY_BY_PAIRNAME_DIR):
        checkgroup_errfile = checkfile.replace(args.get(ARGSTR_CHECKFILE_EXT), args.get(ARGSTR_ERRFILE_EXT))
        if checkgroup_errfile == checkfile:
            # Replacement was a no-op (extension not present) — no group error file.
            checkgroup_errfile = None
        elif os.path.isfile(checkgroup_errfile):
            LOGGER.info("Removing existing check group error file: {}".format(checkgroup_errfile))
            try:
                os.remove(checkgroup_errfile)
            except:
                # NOTE(review): bare except — removal is best-effort; consider narrowing to OSError.
                traceback.print_exc()
    else:
        checkgroup_errfile = None
    # Flags controlling whether and when the checkfile is written.
    checkfile_write = (not args.get(ARGSTR_CHECKFILE_OFF))
    checkfile_write_at_end = args.get(ARGSTR_CHECKFILE_WRITE_AT_END)
    checkfile_exists = os.path.isfile(checkfile)
    if checkfile_exists:
        LOGGER.info("Checkfile already exists: {}".format(checkfile))
    raster_ffile_list_pass = []
    file_check_failure_count = 0
    raster_ffile_list = raster_ffiles
    checkfile_group_fp = None
    # Normalize input to a list; for group mode, reconcile against any
    # existing (partially written) group checkfile.
    if type(raster_ffiles) is not list:
        # Input is a single source file to check.
        raster_ffile_list = [raster_ffiles]
    else:
        # Input is a list of source files in a single check group.
        raster_ffile_list = raster_ffiles
        if checkfile_write:
            if checkfile_exists:
                # Files already listed in the group checkfile must not be
                # submitted again — treat a re-submission as a caller bug.
                with open(checkfile, 'r') as checkfile_group_fp:
                    rasters_checked = checkfile_group_fp.read().splitlines()
                raster_ffile_list_pass.extend(rasters_checked)
                rasters_checked = set(rasters_checked)
                rasters_to_check = set([os.path.basename(f) for f in raster_ffile_list])
                rasters_already_checked = rasters_checked.intersection(rasters_to_check)
                if len(rasters_already_checked) > 0:
                    raise DeveloperError("The following source files have already been checked: {}".format(
                        rasters_already_checked))
            if not checkfile_write_at_end:
                # Incremental mode: append each passing filename as it is checked.
                LOGGER.info("Opening group checkfile in append mode: {}".format(checkfile))
                checkfile_group_fp = open(checkfile, 'a')
    # Check each input source file.
    for raster_ffile in raster_ffile_list:
        raster_ffile_err = raster_ffile+args.get(ARGSTR_ERRFILE_EXT)
        # Remove any stale per-file error file from a previous run.
        if os.path.isfile(raster_ffile_err):
            LOGGER.info("Removing existing error file: {}".format(raster_ffile_err))
            try:
                os.remove(raster_ffile_err)
            except:
                # NOTE(review): bare except — best-effort removal; consider narrowing to OSError.
                traceback.print_exc()
        errmsg_list = []
        if not os.path.isfile(raster_ffile):
            errmsg_print_and_list(errmsg_list,
                "Source file to check does not exist: {}".format(raster_ffile))
        else:
            # --- SETSM metadata text file checks ---
            if raster_ffile.endswith(SETSM_META_SUFFIX) or raster_ffile.lower().endswith(SETSM_META_SUFFIX.lower()):
                meta_ffile = raster_ffile
                if args.get(ARGSTR_CHECK_SPECIAL) in ARGCHOGRP_CHECK_SPECIAL_SETSM_SCENELEVEL:
                    LOGGER.debug("Checking SETSM scene metadata file: {}".format(meta_ffile))
                    try:
                        with open(meta_ffile, 'r') as scenemeta_fp:
                            meta_errmsg_list = check_setsm_meta(scenemeta_fp)
                        errmsg_list = meta_errmsg_list
                    except RuntimeError as e:
                        errmsg_print_and_list(errmsg_list,
                            "Text file read error: {}".format(e))
                elif args.get(ARGSTR_CHECK_SPECIAL) in ARGCHOGRP_CHECK_SPECIAL_SETSM_STRIPLEVEL:
                    # Strip metadata embeds multiple per-scene metadata sections;
                    # split them out and run check_setsm_meta on each section,
                    # prefixing resulting errors with the scene name header.
                    LOGGER.debug("Checking SETSM strip metadata file: {}".format(meta_ffile))
                    try:
                        with open(meta_ffile, 'r') as stripmeta_fp:
                            in_scenemeta_section = False
                            current_scenemeta_name = None
                            scenemeta_txt = ''
                            for line in stripmeta_fp:
                                if not in_scenemeta_section:
                                    if line.strip() == SETSM_STRIPMETA_SCENEMETA_SECTION_HEADER:
                                        in_scenemeta_section = True
                                elif re.match(SETSM_STRIPMETA_SCENEMETA_ITEM_HEADER_REGEX, line) is not None:
                                    # A new scene header: flush the previous scene's text first.
                                    if current_scenemeta_name is not None:
                                        meta_errmsg_list = check_setsm_meta(StringIO(scenemeta_txt))
                                        errmsg_list.extend(["{}: {}".format(current_scenemeta_name, err) for err in meta_errmsg_list])
                                        scenemeta_txt = ''
                                    current_scenemeta_name = line.strip()
                                elif current_scenemeta_name is not None:
                                    scenemeta_txt += line
                            # Flush the final scene section after the file ends.
                            if current_scenemeta_name is not None:
                                meta_errmsg_list = check_setsm_meta(StringIO(scenemeta_txt))
                                errmsg_list.extend(["{}: {}".format(current_scenemeta_name, err) for err in meta_errmsg_list])
                    except RuntimeError as e:
                        errmsg_print_and_list(errmsg_list,
                            "Text file read error: {}".format(e))
                else:
                    errmsg_print_and_list(errmsg_list, ' '.join([
                        "SETSM metadata text file (matching suffix '{}') could not be checked".format(SETSM_META_SUFFIX),
                        "because script argument {} is not one of the following SETSM options: {}".format(
                            ARGSTR_CHECK_SPECIAL, ARGCHOGRP_CHECK_SPECIAL_SETSM)
                    ]))
            # --- info50cm.txt content check (whole-file regex match) ---
            elif raster_ffile.endswith(ARGCHOSET_CHECK_SPECIAL_DEM_SUFFIX_INFO50CM):
                info50cm_ffile = raster_ffile
                LOGGER.debug("Checking info50cm.txt file: {}".format(info50cm_ffile))
                try:
                    with open(info50cm_ffile, 'r') as info50cm_fp:
                        info50cm_text = info50cm_fp.read()
                    if re.match(INFO50CM_RE, info50cm_text) is None:
                        errmsg_print_and_list(errmsg_list,
                            "info50cm file contents do not match expected pattern:\n{}".format(INFO50CM_RE.pattern))
                except RuntimeError as e:
                    errmsg_print_and_list(errmsg_list,
                        "Text file read error: {}".format(e))
            # --- GDAL raster checks (checksum / full read / SETSM valid-range) ---
            elif endswith_one_of_coll(raster_ffile, GDAL_RASTER_SUFFIXES, case_sensitive=False):
                working_on_copy = False
                raster_ffile_wd = None
                try:
                    # Optionally operate on a local working-directory copy of
                    # the raster (ARGSTR_WD); removed in the finally clause.
                    if args.get(ARGSTR_WD) is not None:
                        raster_ffile_wd = os.path.join(args.get(ARGSTR_WD), os.path.basename(raster_ffile))
                        LOGGER.debug("Copying source raster to working directory: {} -> {}".format(raster_ffile, raster_ffile_wd))
                        try:
                            shutil.copy2(raster_ffile, raster_ffile_wd)
                            raster_ffile = raster_ffile_wd
                            working_on_copy = True
                        except shutil.SameFileError as e:
                            # Source already lives in the working directory.
                            raster_ffile_wd = None
                            LOGGER.debug(e)
                    LOGGER.debug("Checking raster: {}".format(raster_ffile))
                    # Determine whether this raster gets a SETSM valid-range
                    # check, keyed by its recognized filename suffix.
                    setsm_suffix = None
                    if args.get(ARGSTR_CHECK_SETSM_VALIDRANGE):
                        for suffix in SETSM_RASTER_SUFFIX_VALIDRANGE_DICT:
                            if raster_ffile.endswith(suffix):
                                setsm_suffix = suffix
                                break
                    try:
                        ds = gdal.Open(raster_ffile, gdal.GA_ReadOnly)
                    except RuntimeError as e:
                        errmsg_print_and_list(errmsg_list,
                            "Raster file read error: {}".format(e))
                        # Skip the rest of the raster checks for this file.
                        raise RasterFileReadError()
                    num_bands = ds.RasterCount
                    LOGGER.debug("{} bands{}".format(
                        num_bands, ', check SETSM suffix: {}'.format(setsm_suffix) if setsm_suffix is not None else ''))
                    if setsm_suffix is not None and num_bands > 1:
                        errmsg_print_and_list(errmsg_list, ' '.join([
                            "SETSM raster has {} bands, more than expected (1 band).".format(num_bands),
                            "All bands will be checked for valid SETSM data range."
                        ]))
                    for band_index in range(num_bands):
                        band_num = band_index + 1
                        band = ds.GetRasterBand(band_num)
                        LOGGER.debug("Processing Band {}".format(band_num))
                        if args.get(ARGSTR_CHECK_METHOD) == ARGCHO_CHECK_METHOD_CHECKSUM:
                            try:
                                LOGGER.debug("Doing checksum")
                                checksum = band.Checksum()
                                LOGGER.debug("Checksum succeeded: {}".format(checksum))
                            except RuntimeError as e:
                                errmsg_print_and_list(errmsg_list,
                                    "Band {} checksum error: {}".format(band_num, e))
                        # A full data read is needed either for the 'read'
                        # check method or to run the SETSM valid-range check.
                        if args.get(ARGSTR_CHECK_METHOD) == ARGCHO_CHECK_METHOD_READ or setsm_suffix is not None:
                            try:
                                LOGGER.debug("Reading band data array")
                                data_array = band.ReadAsArray()
                                LOGGER.debug("Data read succeeded")
                            except RuntimeError as e:
                                errmsg_print_and_list(errmsg_list,
                                    "Band {} data read error: {}".format(band_num, e))
                                LOGGER.debug("Continuing to next band")
                                continue
                            if setsm_suffix is not None:
                                # Values outside [valid_min, valid_max], excluding
                                # NoData cells, are flagged as invalid.
                                valid_range = SETSM_RASTER_SUFFIX_VALIDRANGE_DICT[setsm_suffix]
                                nodata_val = band.GetNoDataValue()
                                LOGGER.debug("Checking SETSM suffix '{}' valid range: {} (NoData value: {})".format(
                                    setsm_suffix, valid_range, nodata_val))
                                valid_min, valid_max = valid_range
                                data_array_invalid = np.logical_or(data_array < valid_min, data_array > valid_max)
                                if nodata_val is not None:
                                    data_array_nodata = (np.isnan(data_array) if np.isnan(nodata_val)
                                                         else (data_array == nodata_val))
                                    data_array_invalid[data_array_nodata] = False
                                if not np.any(data_array_invalid):
                                    LOGGER.debug("SETSM check succeeded")
                                else:
                                    errmsg_print_and_list(errmsg_list,
                                        "Band {} failed SETSM suffix '{}' valid range check of {}".format(
                                            band_num, setsm_suffix, valid_range))
                                    # Report where the invalid values occur, in both
                                    # image (i, j) and georeferenced (x, y) coordinates.
                                    shape = (ds.RasterYSize, ds.RasterXSize)
                                    geo_trans = ds.GetGeoTransform()
                                    grid_x = geo_trans[0] + np.arange(shape[1]) * geo_trans[1]
                                    grid_y = geo_trans[3] + np.arange(shape[0]) * geo_trans[5]
                                    invalid_image_coords = [(i, j) for i, j in np.argwhere(data_array_invalid)]
                                    invalid_geo_coords = [(grid_x[j], grid_y[i]) for i, j in invalid_image_coords]
                                    invalid_values = [v for v in data_array[np.where(data_array_invalid)]]
                                    errmsg_setsm_details_list = [
                                        "Invalid (i, j) image coordinates: {}".format(invalid_image_coords),
                                        "Invalid (x, y) georeferenced coordinates: {}".format(invalid_geo_coords),
                                        "Invalid values: {}".format(invalid_values)
                                    ]
                                    for line in errmsg_setsm_details_list:
                                        LOGGER.error(line)
                                    errmsg_list.extend(errmsg_setsm_details_list)
                except RasterFileReadError:
                    # Open failed; error already recorded above.
                    pass
                except:
                    raise
                finally:
                    # Always clean up the working-directory copy, if one was made.
                    if args.get(ARGSTR_WD) is not None and working_on_copy and raster_ffile_wd is not None:
                        LOGGER.debug("Removing working copy of source raster: {}".format(raster_ffile_wd))
                        os.remove(raster_ffile_wd)
            else:
                # File to check is neither a raster nor a SETSM metadata file.
                # As long as the file exists, it passes.
                pass
        # --- Record this file's result (error file vs. checkfile) ---
        if len(errmsg_list) > 0:
            file_check_failure_count += 1
            LOGGER.error("Source file failed check(s): {}".format(raster_ffile))
            if checkgroup_errfile is not None:
                LOGGER.debug("Appending to check group error file: {}".format(checkgroup_errfile))
                with open(checkgroup_errfile, 'a') as raster_ffile_err_fp:
                    raster_ffile_err_fp.write("--- {} ---\n".format(raster_ffile))
                    for line in errmsg_list:
                        raster_ffile_err_fp.write(line+'\n')
            else:
                LOGGER.info("Writing{} error file: {}".format(
                    ' over existing' if os.path.isfile(raster_ffile_err) else '', raster_ffile_err))
                with open(raster_ffile_err, 'w') as raster_ffile_err_fp:
                    for line in errmsg_list:
                        raster_ffile_err_fp.write(line+'\n')
            # Unless told to keep it, stop writing (and delete) the checkfile
            # once any file in the group has failed.
            if checkfile_write and not args.get(ARGSTR_KEEP_CHECKFILE_WITH_ERRORS):
                if checkfile_group_fp is not None:
                    checkfile_group_fp.close()
                if os.path.isfile(checkfile):
                    LOGGER.info("Removing checkfile after encountering source file errors: {}".format(checkfile))
                    os.remove(checkfile)
                if checkfile_write:
                    LOGGER.info("No longer writing to checkfile after encountering source file errors: {}".format(checkfile))
                    LOGGER.info("To continue writing to checkfile despite encountering source file errors, "
                                "pass the {} script argument".format(ARGSTR_KEEP_CHECKFILE_WITH_ERRORS))
                checkfile_write = False
        else:
            LOGGER.debug("Source file passed check(s)")
            raster_ffile_list_pass.append(raster_ffile)
            if checkfile_write and not checkfile_write_at_end:
                if checkfile_group_fp is None:
                    # Single-file mode: an empty checkfile marks the pass.
                    LOGGER.debug("Writing single checkfile: {}".format(checkfile))
                    with open(checkfile, 'w'):
                        pass
                else:
                    LOGGER.debug("Adding filename to group checkfile list: {}".format(checkfile))
                    checkfile_group_fp.write(os.path.basename(raster_ffile)+'\n')
    if checkfile_group_fp is not None:
        checkfile_group_fp.close()
    LOGGER.info("{} of {} source files passed checks{}".format(
        len(raster_ffile_list_pass), len(raster_ffile_list),
        ", {} source files failed checks".format(file_check_failure_count) if file_check_failure_count > 0 else ''
    ))
    # --- Deferred group checkfile write (write-at-end mode) ---
    if checkfile_write and checkfile_write_at_end:
        if args.get(ARGSTR_INDEX_PAIRNAMES_TO_JSON):
            # Build a JSON index for the pairname directory before writing
            # the group checkfile; a failed index run vetoes the checkfile.
            pairname_dir = checkfile.replace(args.get(ARGSTR_CHECKFILE_EXT), '')
            pairname_rootdir = os.path.dirname(pairname_dir)
            if not os.path.isdir(pairname_dir):
                errmsg_list = []
                errmsg_print_and_list(errmsg_list,
                    "Pairname directory does not exist in expected location: {}".format(pairname_dir)
                )
                # NOTE(review): "requesed" typo in the message below is preserved
                # here; fixing it would change runtime output.
                errmsg_print_and_list(errmsg_list,
                    "Cannot generate JSON index file for pairname directory as requesed by {} option".format(ARGSTR_INDEX_PAIRNAMES_TO_JSON)
                )
                if checkgroup_errfile is not None:
                    LOGGER.debug("Appending to check group error file: {}".format(checkgroup_errfile))
                    with open(checkgroup_errfile, 'a') as raster_ffile_err_fp:
                        for line in errmsg_list:
                            raster_ffile_err_fp.write(line+'\n')
            else:
                index_mode = ARGCHOSET_CHECK_SPECIAL_INDEX_MODE_DICT[args.get(ARGSTR_CHECK_SPECIAL)]
                index_setsm_cmd = """ python {} {} {} --mode {} --write-json --overwrite --skip-region-lookup --np """.format(
                    INDEX_SETSM_SCRIPT, pairname_dir, pairname_rootdir, index_mode
                )
                LOGGER.info("Running command to create JSON index file for pairname dir: {}".format(index_setsm_cmd))
                index_setsm_rc = subprocess.call(index_setsm_cmd, shell=True)
                if index_setsm_rc != 0:
                    LOGGER.error("Index script returned non-zero exit status ({}); will not write checkfile".format(index_setsm_rc))
                    checkfile_write = False
        if checkfile_write:
            LOGGER.info("Writing group checkfile: {}".format(checkfile))
            with open(checkfile, 'w') as checkfile_group_fp:
                for raster_ffile in raster_ffile_list_pass:
                    checkfile_group_fp.write(os.path.basename(raster_ffile)+'\n')
    if checkgroup_errfile is not None and os.path.isfile(checkgroup_errfile):
        LOGGER.info("Check group error file exists: {}".format(checkgroup_errfile))
def check_setsm_meta(meta_fp):
    """Validate the contents of a SETSM metadata text stream.

    Reads (and closes) *meta_fp*, then scans the full text for every
    required metadata item in SETSM_META_REQUIRED_KEY_SORTED_LIST,
    checking: occurrence counts against the required count, duplicate
    items/keys, consistent use of the Image 1/2 key prefixes, satID
    parsing from image paths, GSD values (must be < 1.5), and wv_correct
    flags (must be 1 for satIDs in SETSM_META_WV_CORRECT_SATIDS).

    Parameters:
        meta_fp: An open, readable file-like object containing the
            metadata text. It is consumed and closed by this function.

    Returns:
        A list of error-message strings; an empty list means the metadata
        passed all checks.
    """
    errmsg_list = []
    meta_txt_buf = meta_fp.read()
    meta_fp.close()
    # satID and wv_correct values are collected per image while scanning
    # keys, then cross-checked against each other after the loop.
    image1_satID = None
    image2_satID = None
    image1_wv_correct_value = None
    image2_wv_correct_value = None
    for meta_key in SETSM_META_REQUIRED_KEY_SORTED_LIST:
        # Each required item carries its search regex, whether it is a
        # "key = value" style item, and how many instances must appear.
        item_regex, item_is_key_value, item_req_count = SETSM_META_REQUIRED_DICT[meta_key]
        search_message = "Searching metadata text for item '{}' (item regex = {})".format(meta_key, repr(item_regex.pattern))
        LOGGER.debug(search_message)
        errmsg_list_this_key = []
        item_matches_stripped = [item.strip() for item in re.findall(item_regex, meta_txt_buf)]
        num_matches = len(item_matches_stripped)
        match_results = "Item '{}'; {} of {} instances found: {}".format(
            meta_key, num_matches, item_req_count, item_matches_stripped)
        LOGGER.debug(match_results)
        if num_matches != item_req_count:
            errmsg_print_and_list(errmsg_list_this_key, match_results)
        if not item_is_key_value:
            # Plain items: only require that matches are unique (case-insensitive).
            if len(set([item.lower() for item in item_matches_stripped])) < len(item_matches_stripped):
                errmsg_print_and_list(errmsg_list_this_key,
                    "Item '{}'; duplicate items found: {}""".format(meta_key, item_matches_stripped))
        else:
            # Key/value items: split each match on '=' into [key, value].
            item_matches_parts = [[s.strip() for s in item.split('=')] for item in item_matches_stripped]
            split_issue = False
            for item_matches_index, item_parts in enumerate(item_matches_parts):
                if len(item_parts) != 2:
                    split_issue = True
                    errmsg_print_and_list(errmsg_list_this_key,
                        "Key/value item '{}'; splitting item string by '=' character did not result in two parts: {}".format(
                            meta_key, item_matches_stripped[item_matches_index]))
            if not split_issue:
                # Normalize keys: lowercase, underscores to spaces, collapsed whitespace.
                item_keys_norm = [' '.join(item_parts[0].lower().replace('_', ' ').split()) for item_parts in item_matches_parts]
                item_values = [item_parts[1] for item_parts in item_matches_parts]
                item_keys_contains_image_prefix_count = [
                    key.startswith(SETSM_META_KEY_PREFIX_IMAGE) for key in item_keys_norm
                ].count(True)
                # Keys must either all carry an Image 1/2 prefix or none of them.
                if 0 < item_keys_contains_image_prefix_count < len(item_keys_norm):
                    errmsg_print_and_list(errmsg_list_this_key,
                        "Key/value item '{}'; item matches are inconsistent "
                        "in starting with Image 1/2 prefix: {}".format(meta_key, item_matches_stripped))
                elif item_keys_contains_image_prefix_count > 0:
                    if len(set(item_keys_norm)) < len(item_keys_norm):
                        errmsg_print_and_list(errmsg_list_this_key,
                            "Key/value item '{}'; duplicate keys found: {}".format(meta_key, item_matches_stripped))
                    if meta_key == SETSM_META_KEY_IMAGE_PATH:
                        # Extract the 4-character satID prefix from each image
                        # path's basename, one per Image 1/2 key.
                        for item_matches_index in range(len(item_matches_stripped)):
                            satID = os.path.basename(item_values[item_matches_index])[0:4].upper()
                            if item_keys_norm[item_matches_index] == SETSM_META_KEY_PREFIX_IMAGE_1:
                                if image1_satID is not None:
                                    errmsg_print_and_list(errmsg_list_this_key,
                                        "Key/value item '{}'; two {} keys found: {}".format(
                                            meta_key, SETSM_META_KEY_PREFIX_IMAGE_1, item_matches_stripped))
                                    break
                                image1_satID = satID
                            elif item_keys_norm[item_matches_index] == SETSM_META_KEY_PREFIX_IMAGE_2:
                                if image2_satID is not None:
                                    errmsg_print_and_list(errmsg_list_this_key,
                                        "Key/value item '{}'; two {} keys found: {}".format(
                                            meta_key, SETSM_META_KEY_PREFIX_IMAGE_2, item_matches_stripped))
                                    break
                                image2_satID = satID
                        if image1_satID is None or image2_satID is None:
                            errmsg_print_and_list(errmsg_list_this_key,
                                "Key/value item '{}'; could not parse satID for {}{}{}: {}".format(
                                    meta_key,
                                    SETSM_META_KEY_PREFIX_IMAGE_1 if image1_satID is None else '',
                                    ' or ' if image1_satID is None and image2_satID is None else '',
                                    SETSM_META_KEY_PREFIX_IMAGE_2 if image2_satID is None else '',
                                    item_matches_stripped))
                    elif meta_key in SETSM_META_KEYGRP_GSD:
                        # GSD values must be below 1.5 (presumably meters — TODO confirm units).
                        for item_matches_index, value in enumerate(item_values):
                            if float(value) >= 1.5:
                                errmsg_print_and_list(errmsg_list_this_key,
                                    "Key/value item '{}'; value {} >= 1.5: {}".format(
                                        meta_key, value, item_matches_stripped[item_matches_index]))
                    elif meta_key == SETSM_META_KEY_WV_CORRECT:
                        # Record each image's wv_correct flag for the post-loop check.
                        for item_matches_index in range(len(item_matches_stripped)):
                            wv_correct = int(item_values[item_matches_index])
                            if item_keys_norm[item_matches_index].startswith(SETSM_META_KEY_PREFIX_IMAGE_1):
                                if image1_wv_correct_value is not None:
                                    errmsg_print_and_list(errmsg_list_this_key,
                                        "Key/value item '{}'; two {} keys found: {}".format(
                                            meta_key, SETSM_META_KEY_PREFIX_IMAGE_1, item_matches_stripped))
                                    break
                                image1_wv_correct_value = wv_correct
                            elif item_keys_norm[item_matches_index].startswith(SETSM_META_KEY_PREFIX_IMAGE_2):
                                if image2_wv_correct_value is not None:
                                    errmsg_print_and_list(errmsg_list_this_key,
                                        "Key/value item '{}'; two {} keys found: {}".format(
                                            meta_key, SETSM_META_KEY_PREFIX_IMAGE_2, item_matches_stripped))
                                    break
                                image2_wv_correct_value = wv_correct
                        if image1_wv_correct_value is None or image2_wv_correct_value is None:
                            errmsg_print_and_list(errmsg_list_this_key,
                                "Key/value item '{}'; could not parse wv_correct value for {}{}{}: {}".format(
                                    meta_key,
                                    SETSM_META_KEY_PREFIX_IMAGE_1 if image1_wv_correct_value is None else '',
                                    ' or ' if image1_wv_correct_value is None and image2_wv_correct_value is None else '',
                                    SETSM_META_KEY_PREFIX_IMAGE_2 if image2_wv_correct_value is None else '',
                                    item_matches_stripped))
        # Prepend the search context to this key's errors before collecting them.
        if len(errmsg_list_this_key) > 0:
            errmsg_list_this_key.insert(0, search_message)
            errmsg_list.extend(errmsg_list_this_key)
    # Cross-check: sensors that require wv_correct must report a flag value of 1.
    if image1_satID in SETSM_META_WV_CORRECT_SATIDS and image1_wv_correct_value != 1:
        errmsg_print_and_list(errmsg_list, "Image 1 with satID '{}' requires wv_correct application, but {}{}".format(image1_satID,
            'Image 1 {} meta key was not found'.format(SETSM_META_KEY_WV_CORRECT) if image1_wv_correct_value is None else '',
            'Image 1 {} flag value is {}'.format(SETSM_META_KEY_WV_CORRECT, image1_wv_correct_value) if image1_wv_correct_value is not None else ''))
    if image2_satID in SETSM_META_WV_CORRECT_SATIDS and image2_wv_correct_value != 1:
        errmsg_print_and_list(errmsg_list, "Image 2 with satID '{}' requires wv_correct application, but {}{}".format(image2_satID,
            'Image 2 {} meta key was not found'.format(SETSM_META_KEY_WV_CORRECT) if image2_wv_correct_value is None else '',
            'Image 2 {} flag value is {}'.format(SETSM_META_KEY_WV_CORRECT, image2_wv_correct_value) if image2_wv_correct_value is not None else ''))
    return errmsg_list
def errmsg_print_and_list(errmsg_list, errmsg):
    """Report *errmsg* and collect it for later use.

    The message is first emitted through the module logger at ERROR level,
    then appended to *errmsg_list*, which is mutated in place so the caller
    accumulates every reported error. Returns None.
    """
    LOGGER.error(errmsg)
    errmsg_list.append(errmsg)
# Script entry point: delegate to main() only when executed directly
# (not when this module is imported).
if __name__ == '__main__':
    main()
| [
"osgeo.gdal.Open",
"lib.walk.walk",
"re.compile",
"lib.script_utils.eprint",
"lib.script_utils.LOGGER.setLevel",
"time.sleep",
"lib.script_utils.LOGGER.debug",
"lib.script_utils.VersionString",
"sys.exit",
"copy.deepcopy",
"lib.script_utils.ArgumentPasser",
"numpy.arange",
"lib.script_utils.... | [((1014, 1040), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (1030, 1040), False, 'import os\n'), ((1056, 1085), 'os.path.basename', 'os.path.basename', (['SCRIPT_FILE'], {}), '(SCRIPT_FILE)\n', (1072, 1085), False, 'import os\n'), ((1112, 1142), 'os.path.splitext', 'os.path.splitext', (['SCRIPT_FNAME'], {}), '(SCRIPT_FNAME)\n', (1128, 1142), False, 'import os\n'), ((1156, 1184), 'os.path.dirname', 'os.path.dirname', (['SCRIPT_FILE'], {}), '(SCRIPT_FILE)\n', (1171, 1184), False, 'import os\n'), ((1262, 1283), 'os.getenv', 'os.getenv', (['"""HOSTNAME"""'], {}), "('HOSTNAME')\n", (1271, 1283), False, 'import os\n'), ((1471, 1500), 'lib.script_utils.LOGGER.setLevel', 'LOGGER.setLevel', (['logging.INFO'], {}), '(logging.INFO)\n', (1486, 1500), False, 'from lib.script_utils import LOGGER, eprint\n'), ((8442, 8623), 're.compile', 're.compile', (['"""^([A-Z0-9]{4}_\\\\d{8}_[0-9A-F]{16}_[0-9A-F]{16}_(R\\\\d+C\\\\d+-)?\\\\d{12}_\\\\d{2}_P\\\\d{3}_(R\\\\d+C\\\\d+-)?\\\\d{12}_\\\\d{2}_P\\\\d{3}_\\\\d{1}(-\\\\d{2})?)_[a-z0-9_]+\\\\.\\\\w+$"""'], {}), "(\n '^([A-Z0-9]{4}_\\\\d{8}_[0-9A-F]{16}_[0-9A-F]{16}_(R\\\\d+C\\\\d+-)?\\\\d{12}_\\\\d{2}_P\\\\d{3}_(R\\\\d+C\\\\d+-)?\\\\d{12}_\\\\d{2}_P\\\\d{3}_\\\\d{1}(-\\\\d{2})?)_[a-z0-9_]+\\\\.\\\\w+$'\n )\n", (8452, 8623), False, 'import re\n'), ((8648, 8712), 're.compile', 're.compile', (['"""^([A-Z0-9]{4}_\\\\d{8}_[0-9A-F]{16}_[0-9A-F]{16}).*$"""'], {}), "('^([A-Z0-9]{4}_\\\\d{8}_[0-9A-F]{16}_[0-9A-F]{16}).*$')\n", (8658, 8712), False, 'import re\n'), ((8761, 8874), 're.compile', 're.compile', (['"""^([A-Z0-9]{4}_\\\\d{8}_[0-9A-F]{16}_[0-9A-F]{16}_\\\\d+c?m(_lsf)?_seg\\\\d+)_[a-z0-9_]+\\\\.\\\\w+$"""'], {}), "(\n '^([A-Z0-9]{4}_\\\\d{8}_[0-9A-F]{16}_[0-9A-F]{16}_\\\\d+c?m(_lsf)?_seg\\\\d+)_[a-z0-9_]+\\\\.\\\\w+$'\n )\n", (8771, 8874), False, 'import re\n'), ((11472, 11510), 'os.path.join', 'os.path.join', (['SCRIPT_DIR', '"""jobscripts"""'], {}), "(SCRIPT_DIR, 'jobscripts')\n", 
(11484, 11510), False, 'import os\n'), ((11528, 11566), 'os.path.join', 'os.path.join', (['JOBSCRIPT_DIR', '"""init.sh"""'], {}), "(JOBSCRIPT_DIR, 'init.sh')\n", (11540, 11566), False, 'import os\n'), ((11781, 11844), 'os.path.join', 'os.path.join', (['SCRIPT_DIR', '""".."""', '"""pgcdemtools"""', '"""index_setsm.py"""'], {}), "(SCRIPT_DIR, '..', 'pgcdemtools', 'index_setsm.py')\n", (11793, 11844), False, 'import os\n'), ((12203, 12241), 're.compile', 're.compile', (['"""^\\\\s*scene \\\\d+ name=.*$"""'], {}), "('^\\\\s*scene \\\\d+ name=.*$')\n", (12213, 12241), False, 'import re\n'), ((15958, 16451), 're.compile', 're.compile', (['"""scenedemid=[A-Z][A-Z0-9]{2}\\\\d{1}_\\\\d{8}_[A-Z0-9]{16}_[A-Z0-9]{16}_(?:R\\\\d+C\\\\d+-)?\\\\d{12}_\\\\d{2}_P\\\\d{3}_(?:R\\\\d+C\\\\d+-)?\\\\d{12}_\\\\d{2}_P\\\\d{3}_0(?:-\\\\d{2})?\nstripdemid=[A-Z][A-Z0-9]{2}\\\\d{1}_\\\\d{8}_[A-Z0-9]{16}_[A-Z0-9]{16}_50cm_v\\\\d{6}\nfilesz_dem=(\\\\d+\\\\.\\\\d+([eE][-\\\\+]?\\\\d+)?)\nfilesz_lsf=((?:\\\\d+\\\\.\\\\d+)?([eE][-\\\\+]?\\\\d+)?)\nfilesz_mt=(\\\\d+\\\\.\\\\d+([eE][-\\\\+]?\\\\d+)?)\nfilesz_or=(\\\\d+\\\\.\\\\d+([eE][-\\\\+]?\\\\d+)?)\nfilesz_or2=((?:\\\\d+\\\\.\\\\d+)?([eE][-\\\\+]?\\\\d+)?)\n\\\\Z"""'], {}), '(\n """scenedemid=[A-Z][A-Z0-9]{2}\\\\d{1}_\\\\d{8}_[A-Z0-9]{16}_[A-Z0-9]{16}_(?:R\\\\d+C\\\\d+-)?\\\\d{12}_\\\\d{2}_P\\\\d{3}_(?:R\\\\d+C\\\\d+-)?\\\\d{12}_\\\\d{2}_P\\\\d{3}_0(?:-\\\\d{2})?\nstripdemid=[A-Z][A-Z0-9]{2}\\\\d{1}_\\\\d{8}_[A-Z0-9]{16}_[A-Z0-9]{16}_50cm_v\\\\d{6}\nfilesz_dem=(\\\\d+\\\\.\\\\d+([eE][-\\\\+]?\\\\d+)?)\nfilesz_lsf=((?:\\\\d+\\\\.\\\\d+)?([eE][-\\\\+]?\\\\d+)?)\nfilesz_mt=(\\\\d+\\\\.\\\\d+([eE][-\\\\+]?\\\\d+)?)\nfilesz_or=(\\\\d+\\\\.\\\\d+([eE][-\\\\+]?\\\\d+)?)\nfilesz_or2=((?:\\\\d+\\\\.\\\\d+)?([eE][-\\\\+]?\\\\d+)?)\n\\\\Z"""\n )\n', (15968, 16451), False, 'import re\n'), ((255, 310), 'lib.script_utils.VersionString', 'script_utils.VersionString', (['PYTHON_VERSION_ACCEPTED_MIN'], {}), '(PYTHON_VERSION_ACCEPTED_MIN)\n', (281, 310), False, 
'from lib import script_utils\n'), ((685, 714), 'lib.script_utils.VersionString', 'script_utils.VersionString', (['(3)'], {}), '(3)\n', (711, 714), False, 'from lib import script_utils\n'), ((11352, 11375), 'os.path.expanduser', 'os.path.expanduser', (['"""~"""'], {}), "('~')\n", (11370, 11375), False, 'import os\n'), ((13462, 13487), 're.compile', 're.compile', (['item_re', 're.I'], {}), '(item_re, re.I)\n', (13472, 13487), False, 'import re\n'), ((34086, 34111), 'os.path.isfile', 'os.path.isfile', (['checkfile'], {}), '(checkfile)\n', (34100, 34111), False, 'import os\n'), ((34231, 34258), 'os.path.basename', 'os.path.basename', (['checkfile'], {}), '(checkfile)\n', (34247, 34258), False, 'import os\n'), ((57201, 57219), 'os.path.isdir', 'os.path.isdir', (['src'], {}), '(src)\n', (57214, 57219), False, 'import os\n'), ((80146, 80164), 'os.path.isdir', 'os.path.isdir', (['src'], {}), '(src)\n', (80159, 80164), False, 'import os\n'), ((82760, 82779), 'time.sleep', 'sleep', (['wait_seconds'], {}), '(wait_seconds)\n', (82765, 82779), False, 'from time import sleep\n'), ((88940, 88960), 'osgeo.gdal.UseExceptions', 'gdal.UseExceptions', ([], {}), '()\n', (88958, 88960), False, 'from osgeo import gdal\n'), ((89760, 89785), 'os.path.isfile', 'os.path.isfile', (['checkfile'], {}), '(checkfile)\n', (89774, 89785), False, 'import os\n'), ((115906, 115926), 'lib.script_utils.LOGGER.error', 'LOGGER.error', (['errmsg'], {}), '(errmsg)\n', (115918, 115926), False, 'from lib.script_utils import LOGGER, eprint\n'), ((33979, 34005), 'os.path.dirname', 'os.path.dirname', (['checkfile'], {}), '(checkfile)\n', (33994, 34005), False, 'import os\n'), ((45370, 45444), 'lib.script_utils.ArgumentPasser', 'script_utils.ArgumentPasser', (['PYTHON_EXE', 'SCRIPT_FILE', 'arg_parser', 'sys.argv'], {}), '(PYTHON_EXE, SCRIPT_FILE, arg_parser, sys.argv)\n', (45397, 45444), False, 'from lib import script_utils\n'), ((46791, 46821), 'lib.script_utils.LOGGER.setLevel', 'LOGGER.setLevel', 
(['logging.DEBUG'], {}), '(logging.DEBUG)\n', (46806, 46821), False, 'from lib.script_utils import LOGGER, eprint\n'), ((52131, 52187), 'lib.walk.walk', 'walk.walk', (['srcdir'], {'maxdepth': 'verify_by_pairname_dir_depth'}), '(srcdir, maxdepth=verify_by_pairname_dir_depth)\n', (52140, 52187), False, 'from lib import walk\n'), ((56495, 56506), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (56503, 56506), False, 'import sys\n'), ((61947, 61966), 'os.path.isfile', 'os.path.isfile', (['src'], {}), '(src)\n', (61961, 61966), False, 'import os\n'), ((80202, 80239), 'lib.walk.walk', 'walk.walk', (['src'], {'maxdepth': 'search_depth'}), '(src, maxdepth=search_depth)\n', (80211, 80239), False, 'from lib import walk\n'), ((81897, 81908), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (81905, 81908), False, 'import sys\n'), ((82461, 82472), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (82469, 82472), False, 'import sys\n'), ((84250, 84293), 'lib.script_utils.get_jobnum_fmtstr', 'script_utils.get_jobnum_fmtstr', (['check_units'], {}), '(check_units)\n', (84280, 84293), False, 'from lib import script_utils\n'), ((84391, 84410), 'copy.deepcopy', 'copy.deepcopy', (['args'], {}), '(args)\n', (84404, 84410), False, 'import copy\n'), ((91362, 91394), 'os.path.isfile', 'os.path.isfile', (['raster_ffile_err'], {}), '(raster_ffile_err)\n', (91376, 91394), False, 'import os\n'), ((107323, 107357), 'os.path.isfile', 'os.path.isfile', (['checkgroup_errfile'], {}), '(checkgroup_errfile)\n', (107337, 107357), False, 'import os\n'), ((107953, 107981), 'lib.script_utils.LOGGER.debug', 'LOGGER.debug', (['search_message'], {}), '(search_message)\n', (107965, 107981), False, 'from lib.script_utils import LOGGER, eprint\n'), ((108323, 108350), 'lib.script_utils.LOGGER.debug', 'LOGGER.debug', (['match_results'], {}), '(match_results)\n', (108335, 108350), False, 'from lib.script_utils import LOGGER, eprint\n'), ((17008, 17138), 'lib.script_utils.ARGTYPE_PATH', 'script_utils.ARGTYPE_PATH', 
([], {'argstr': 'ARGSTR_SRC', 'abspath_fn': 'os.path.abspath', 'existcheck_fn': 'os.path.exists', 'existcheck_reqval': '(True)'}), '(argstr=ARGSTR_SRC, abspath_fn=os.path.abspath,\n existcheck_fn=os.path.exists, existcheck_reqval=True)\n', (17033, 17138), False, 'from lib import script_utils\n'), ((17474, 17593), 'lib.script_utils.ARGTYPE_NUM', 'script_utils.ARGTYPE_NUM', ([], {'argstr': 'ARGSTR_DEPTH', 'numeric_type': 'int', 'allow_neg': '(False)', 'allow_zero': '(False)', 'allow_inf': '(True)'}), '(argstr=ARGSTR_DEPTH, numeric_type=int, allow_neg=\n False, allow_zero=False, allow_inf=True)\n', (17498, 17593), False, 'from lib import script_utils\n'), ((19284, 19393), 'lib.script_utils.ARGTYPE_PATH', 'script_utils.ARGTYPE_PATH', ([], {'argstr': 'ARGSTR_CHECKFILE', 'existcheck_fn': 'os.path.isdir', 'existcheck_reqval': '(False)'}), '(argstr=ARGSTR_CHECKFILE, existcheck_fn=os.path.\n isdir, existcheck_reqval=False)\n', (19309, 19393), False, 'from lib import script_utils\n'), ((22405, 22545), 'lib.script_utils.ARGTYPE_NUM', 'script_utils.ARGTYPE_NUM', ([], {'argstr': 'ARGSTR_VERIFY_BY_PAIRNAME_DIR_DEPTH', 'numeric_type': 'int', 'allow_neg': '(False)', 'allow_zero': '(True)', 'allow_inf': '(True)'}), '(argstr=ARGSTR_VERIFY_BY_PAIRNAME_DIR_DEPTH,\n numeric_type=int, allow_neg=False, allow_zero=True, allow_inf=True)\n', (22429, 22545), False, 'from lib import script_utils\n'), ((29279, 29388), 'lib.script_utils.ARGTYPE_PATH', 'script_utils.ARGTYPE_PATH', ([], {'argstr': 'ARGSTR_JOBSCRIPT', 'existcheck_fn': 'os.path.isfile', 'existcheck_reqval': '(True)'}), '(argstr=ARGSTR_JOBSCRIPT, existcheck_fn=os.path.\n isfile, existcheck_reqval=True)\n', (29304, 29388), False, 'from lib import script_utils\n'), ((30099, 30207), 'lib.script_utils.ARGTYPE_PATH', 'script_utils.ARGTYPE_PATH', ([], {'argstr': 'ARGSTR_SCRATCH', 'existcheck_fn': 'os.path.isfile', 'existcheck_reqval': '(False)'}), '(argstr=ARGSTR_SCRATCH, existcheck_fn=os.path.\n isfile, existcheck_reqval=False)\n', 
(30124, 30207), False, 'from lib import script_utils\n'), ((30402, 30502), 'lib.script_utils.ARGTYPE_PATH', 'script_utils.ARGTYPE_PATH', ([], {'argstr': 'ARGSTR_WD', 'existcheck_fn': 'os.path.isdir', 'existcheck_reqval': '(True)'}), '(argstr=ARGSTR_WD, existcheck_fn=os.path.isdir,\n existcheck_reqval=True)\n', (30427, 30502), False, 'from lib import script_utils\n'), ((30979, 31086), 'lib.script_utils.ARGTYPE_PATH', 'script_utils.ARGTYPE_PATH', ([], {'argstr': 'ARGSTR_LOGDIR', 'existcheck_fn': 'os.path.isfile', 'existcheck_reqval': '(False)'}), '(argstr=ARGSTR_LOGDIR, existcheck_fn=os.path.\n isfile, existcheck_reqval=False)\n', (31004, 31086), False, 'from lib import script_utils\n'), ((31720, 31764), 'lib.script_utils.ARGTYPE_BOOL_PLUS', 'script_utils.ARGTYPE_BOOL_PLUS', ([], {'parse_fn': 'str'}), '(parse_fn=str)\n', (31750, 31764), False, 'from lib import script_utils\n'), ((34013, 34042), 'os.path.isdir', 'os.path.isdir', (['checkfile_root'], {}), '(checkfile_root)\n', (34026, 34042), False, 'import os\n'), ((35045, 35138), 'warnings.warn', 'warnings.warn', (['"""There are more (new?) source files to be added to an existing checkfile"""'], {}), "(\n 'There are more (new?) 
source files to be added to an existing checkfile')\n", (35058, 35138), False, 'import warnings\n'), ((35645, 35732), 'warnings.warn', 'warnings.warn', (['"""Files listed in a checkfile were not captured in source selection"""'], {}), "(\n 'Files listed in a checkfile were not captured in source selection')\n", (35658, 35732), False, 'import warnings\n'), ((43087, 43121), 'os.path.isfile', 'os.path.isfile', (['checkgroup_errfile'], {}), '(checkgroup_errfile)\n', (43101, 43121), False, 'import os\n'), ((43228, 43304), 'warnings.warn', 'warnings.warn', (['"""Error files were found among source files for a check group"""'], {}), "('Error files were found among source files for a check group')\n", (43241, 43304), False, 'import warnings\n'), ((44070, 44128), 'lib.script_utils.eprint', 'eprint', (["('Removing checkfile' + ' (dryrun)' * delete_dryrun)"], {}), "('Removing checkfile' + ' (dryrun)' * delete_dryrun)\n", (44076, 44128), False, 'from lib.script_utils import LOGGER, eprint\n'), ((44647, 44708), 'lib.script_utils.eprint', 'eprint', (["('Removing source files' + ' (dryrun)' * delete_dryrun)"], {}), "('Removing source files' + ' (dryrun)' * delete_dryrun)\n", (44653, 44708), False, 'from lib.script_utils import LOGGER, eprint\n'), ((49715, 49749), 'os.path.isfile', 'os.path.isfile', (['INDEX_SETSM_SCRIPT'], {}), '(INDEX_SETSM_SCRIPT)\n', (49729, 49749), False, 'import os\n'), ((52753, 52764), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (52761, 52764), False, 'import sys\n'), ((53229, 53261), 'os.path.isfile', 'os.path.isfile', (['pairname_errfile'], {}), '(pairname_errfile)\n', (53243, 53261), False, 'import os\n'), ((53302, 53336), 'os.path.isfile', 'os.path.isfile', (['pnamedir_checkfile'], {}), '(pnamedir_checkfile)\n', (53316, 53336), False, 'import os\n'), ((53376, 53409), 'os.path.isfile', 'os.path.isfile', (['pnamedir_jsonfile'], {}), '(pnamedir_jsonfile)\n', (53390, 53409), False, 'import os\n'), ((69163, 69183), 'os.path.dirname', 'os.path.dirname', 
(['src'], {}), '(src)\n', (69178, 69183), False, 'import os\n'), ((78839, 78961), 'lib.script_utils.DeveloperError', 'DeveloperError', (['"""Neither `checkffileroot_srcfnamechecklist_dict` nor `srcffile_checklist` have been initialized"""'], {}), "(\n 'Neither `checkffileroot_srcfnamechecklist_dict` nor `srcffile_checklist` have been initialized'\n )\n", (78853, 78961), False, 'from lib.script_utils import ScriptArgumentError, DeveloperError\n'), ((80108, 80119), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (80116, 80119), False, 'import sys\n'), ((83308, 83336), 'os.path.isdir', 'os.path.isdir', (['checkfile_dir'], {}), '(checkfile_dir)\n', (83321, 83336), False, 'import os\n'), ((83444, 83470), 'os.makedirs', 'os.makedirs', (['checkfile_dir'], {}), '(checkfile_dir)\n', (83455, 83470), False, 'import os\n'), ((88821, 88832), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (88829, 88832), False, 'import sys\n'), ((89308, 89342), 'os.path.isfile', 'os.path.isfile', (['checkgroup_errfile'], {}), '(checkgroup_errfile)\n', (89322, 89342), False, 'import os\n'), ((91641, 91669), 'os.path.isfile', 'os.path.isfile', (['raster_ffile'], {}), '(raster_ffile)\n', (91655, 91669), False, 'import os\n'), ((104263, 104306), 'lib.script_utils.LOGGER.debug', 'LOGGER.debug', (['"""Source file passed check(s)"""'], {}), "('Source file passed check(s)')\n", (104275, 104306), False, 'from lib.script_utils import LOGGER, eprint\n'), ((105375, 105404), 'os.path.dirname', 'os.path.dirname', (['pairname_dir'], {}), '(pairname_dir)\n', (105390, 105404), False, 'import os\n'), ((34823, 34842), 'os.path.basename', 'os.path.basename', (['f'], {}), '(f)\n', (34839, 34842), False, 'import os\n'), ((44228, 44239), 'lib.script_utils.eprint', 'eprint', (['cmd'], {}), '(cmd)\n', (44234, 44239), False, 'from lib.script_utils import LOGGER, eprint\n'), ((44290, 44310), 'os.remove', 'os.remove', (['checkfile'], {}), '(checkfile)\n', (44299, 44310), False, 'import os\n'), ((44859, 44890), 
'os.path.join', 'os.path.join', (['checkfile_dir', 'fn'], {}), '(checkfile_dir, fn)\n', (44871, 44890), False, 'import os\n'), ((47176, 47209), 'os.path.isfile', 'os.path.isfile', (['jobscript_default'], {}), '(jobscript_default)\n', (47190, 47209), False, 'import os\n'), ((53557, 53592), 'lib.walk.walk', 'walk.walk', (['pairname_dir'], {'maxdepth': '(1)'}), '(pairname_dir, maxdepth=1)\n', (53566, 53592), False, 'from lib import walk\n'), ((59851, 59891), 'lib.walk.walk', 'walk.walk', (['srcdir'], {'maxdepth': 'search_depth'}), '(srcdir, maxdepth=search_depth)\n', (59860, 59891), False, 'from lib import walk\n'), ((61295, 61335), 'lib.walk.walk', 'walk.walk', (['srcdir'], {'maxdepth': 'search_depth'}), '(srcdir, maxdepth=search_depth)\n', (61304, 61335), False, 'from lib import walk\n'), ((62218, 62260), 'lib.script_utils.read_task_bundle', 'script_utils.read_task_bundle', (['bundle_file'], {}), '(bundle_file)\n', (62247, 62260), False, 'from lib import script_utils\n'), ((69552, 69571), 'os.path.basename', 'os.path.basename', (['f'], {}), '(f)\n', (69568, 69571), False, 'import os\n'), ((72304, 72338), 'os.path.isfile', 'os.path.isfile', (['checkgroup_errfile'], {}), '(checkgroup_errfile)\n', (72318, 72338), False, 'import os\n'), ((77532, 77590), 'warnings.warn', 'warnings.warn', (['"""Error files were found among source files"""'], {}), "('Error files were found among source files')\n", (77545, 77590), False, 'import warnings\n'), ((83168, 83189), 'os.makedirs', 'os.makedirs', (['dir_path'], {}), '(dir_path)\n', (83179, 83189), False, 'import os\n'), ((87794, 87813), 'lib.script_utils.eprint', 'eprint', (['error_trace'], {}), '(error_trace)\n', (87800, 87813), False, 'from lib.script_utils import LOGGER, eprint\n'), ((91514, 91541), 'os.remove', 'os.remove', (['raster_ffile_err'], {}), '(raster_ffile_err)\n', (91523, 91541), False, 'import os\n'), ((103635, 103660), 'os.path.isfile', 'os.path.isfile', (['checkfile'], {}), '(checkfile)\n', (103649, 103660), 
False, 'import os\n'), ((105425, 105452), 'os.path.isdir', 'os.path.isdir', (['pairname_dir'], {}), '(pairname_dir)\n', (105438, 105452), False, 'import os\n'), ((106713, 106757), 'subprocess.call', 'subprocess.call', (['index_setsm_cmd'], {'shell': '(True)'}), '(index_setsm_cmd, shell=True)\n', (106728, 106757), False, 'import subprocess\n'), ((108075, 108111), 're.findall', 're.findall', (['item_regex', 'meta_txt_buf'], {}), '(item_regex, meta_txt_buf)\n', (108085, 108111), False, 'import re\n'), ((34852, 34883), 'glob.glob', 'glob.glob', (["(checkfile_root + '*')"], {}), "(checkfile_root + '*')\n", (34861, 34883), False, 'import glob\n'), ((35419, 35428), 'lib.script_utils.eprint', 'eprint', (['f'], {}), '(f)\n', (35425, 35428), False, 'from lib.script_utils import LOGGER, eprint\n'), ((36037, 36046), 'lib.script_utils.eprint', 'eprint', (['f'], {}), '(f)\n', (36043, 36046), False, 'from lib.script_utils import LOGGER, eprint\n'), ((36253, 36272), 'os.path.basename', 'os.path.basename', (['f'], {}), '(f)\n', (36269, 36272), False, 'import os\n'), ((41700, 41770), 'warnings.warn', 'warnings.warn', (['"""Source file suffixes for a check group were not found"""'], {}), "('Source file suffixes for a check group were not found')\n", (41713, 41770), False, 'import warnings\n'), ((42996, 43040), 'os.path.join', 'os.path.join', (['checkfile_dir', '(f + errfile_ext)'], {}), '(checkfile_dir, f + errfile_ext)\n', (43008, 43040), False, 'import os\n'), ((43585, 43594), 'lib.script_utils.eprint', 'eprint', (['f'], {}), '(f)\n', (43591, 43594), False, 'from lib.script_utils import LOGGER, eprint\n'), ((45014, 45025), 'lib.script_utils.eprint', 'eprint', (['cmd'], {}), '(cmd)\n', (45020, 45025), False, 'from lib.script_utils import LOGGER, eprint\n'), ((45084, 45112), 'os.remove', 'os.remove', (['srcfile_to_remove'], {}), '(srcfile_to_remove)\n', (45093, 45112), False, 'import os\n'), ((52238, 52296), 're.match', 're.match', (['ARGCHOSET_CHECK_SPECIAL_DEM_REGEX_STRIPLEVEL', 
'dn'], {}), '(ARGCHOSET_CHECK_SPECIAL_DEM_REGEX_STRIPLEVEL, dn)\n', (52246, 52296), False, 'import re\n'), ((52345, 52367), 'os.path.join', 'os.path.join', (['root', 'dn'], {}), '(root, dn)\n', (52357, 52367), False, 'import os\n'), ((57785, 57841), 'lib.walk.walk', 'walk.walk', (['srcdir'], {'maxdepth': 'verify_by_pairname_dir_depth'}), '(srcdir, maxdepth=verify_by_pairname_dir_depth)\n', (57794, 57841), False, 'from lib import walk\n'), ((58552, 58587), 'lib.walk.walk', 'walk.walk', (['pairname_dir'], {'maxdepth': '(1)'}), '(pairname_dir, maxdepth=1)\n', (58561, 58587), False, 'from lib import walk\n'), ((61016, 61035), 'os.path.basename', 'os.path.basename', (['f'], {}), '(f)\n', (61032, 61035), False, 'import os\n'), ((61679, 61731), 'warnings.warn', 'warnings.warn', (['"""Source file suffixes were not found"""'], {}), "('Source file suffixes were not found')\n", (61692, 61731), False, 'import warnings\n'), ((69581, 69610), 'glob.glob', 'glob.glob', (["(cf_root_full + '*')"], {}), "(cf_root_full + '*')\n", (69590, 69610), False, 'import glob\n'), ((72258, 72283), 'os.path.dirname', 'os.path.dirname', (['cff_root'], {}), '(cff_root)\n', (72273, 72283), False, 'import os\n'), ((76360, 76391), 'os.path.isfile', 'os.path.isfile', (['(f + errfile_ext)'], {}), '(f + errfile_ext)\n', (76374, 76391), False, 'import os\n'), ((83038, 83061), 'os.path.isdir', 'os.path.isdir', (['dir_path'], {}), '(dir_path)\n', (83051, 83061), False, 'import os\n'), ((87621, 87657), 'lib.script_utils.capture_stdout_stderr', 'script_utils.capture_stdout_stderr', ([], {}), '()\n', (87655, 87657), False, 'from lib import script_utils\n'), ((87682, 87703), 'traceback.print_exc', 'traceback.print_exc', ([], {}), '()\n', (87701, 87703), False, 'import traceback\n'), ((89476, 89505), 'os.remove', 'os.remove', (['checkgroup_errfile'], {}), '(checkgroup_errfile)\n', (89485, 89505), False, 'import os\n'), ((91578, 91599), 'traceback.print_exc', 'traceback.print_exc', ([], {}), '()\n', (91597, 
91599), False, 'import traceback\n'), ((103796, 103816), 'os.remove', 'os.remove', (['checkfile'], {}), '(checkfile)\n', (103805, 103816), False, 'import os\n'), ((36282, 36313), 'glob.glob', 'glob.glob', (["(checkfile_root + '*')"], {}), "(checkfile_root + '*')\n", (36291, 36313), False, 'import glob\n'), ((42546, 42575), 'lib.script_utils.eprint', 'eprint', (['missing_suffix_errmsg'], {}), '(missing_suffix_errmsg)\n', (42552, 42575), False, 'from lib.script_utils import LOGGER, eprint\n'), ((57623, 57647), 'os.path.basename', 'os.path.basename', (['srcdir'], {}), '(srcdir)\n', (57639, 57647), False, 'import os\n'), ((61045, 61074), 'glob.glob', 'glob.glob', (["(cf_root_full + '*')"], {}), "(cf_root_full + '*')\n", (61054, 61074), False, 'import glob\n'), ((77812, 77836), 'lib.script_utils.eprint', 'eprint', (['(fn + errfile_ext)'], {}), '(fn + errfile_ext)\n', (77818, 77836), False, 'from lib.script_utils import LOGGER, eprint\n'), ((86428, 86453), 'os.path.dirname', 'os.path.dirname', (['cff_root'], {}), '(cff_root)\n', (86443, 86453), False, 'import os\n'), ((86542, 86582), 'os.path.join', 'os.path.join', (['checkfile_dir', 'rasterfname'], {}), '(checkfile_dir, rasterfname)\n', (86554, 86582), False, 'import os\n'), ((89542, 89563), 'traceback.print_exc', 'traceback.print_exc', ([], {}), '()\n', (89561, 89563), False, 'import traceback\n'), ((38959, 38970), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (38967, 38970), False, 'import sys\n'), ((59243, 59283), 're.match', 're.match', (['checkfile_root_regex', 'srcfname'], {}), '(checkfile_root_regex, srcfname)\n', (59251, 59283), False, 'import re\n'), ((60046, 60086), 're.match', 're.match', (['checkfile_root_regex', 'srcfname'], {}), '(checkfile_root_regex, srcfname)\n', (60054, 60086), False, 'import re\n'), ((65578, 65613), 'lib.walk.walk', 'walk.walk', (['pairname_dir'], {'maxdepth': '(1)'}), '(pairname_dir, maxdepth=1)\n', (65587, 65613), False, 'from lib import walk\n'), ((86461, 86484), 'os.path.isdir', 
'os.path.isdir', (['cff_root'], {}), '(cff_root)\n', (86474, 86484), False, 'import os\n'), ((90642, 90661), 'os.path.basename', 'os.path.basename', (['f'], {}), '(f)\n', (90658, 90661), False, 'import os\n'), ((95244, 95280), 're.match', 're.match', (['INFO50CM_RE', 'info50cm_text'], {}), '(INFO50CM_RE, info50cm_text)\n', (95252, 95280), False, 'import re\n'), ((103194, 103226), 'os.path.isfile', 'os.path.isfile', (['raster_ffile_err'], {}), '(raster_ffile_err)\n', (103208, 103226), False, 'import os\n'), ((104797, 104827), 'os.path.basename', 'os.path.basename', (['raster_ffile'], {}), '(raster_ffile)\n', (104813, 104827), False, 'import os\n'), ((107243, 107273), 'os.path.basename', 'os.path.basename', (['raster_ffile'], {}), '(raster_ffile)\n', (107259, 107273), False, 'import os\n'), ((37788, 37827), 're.match', 're.match', (['subgroup_root_regex', 'srcfname'], {}), '(subgroup_root_regex, srcfname)\n', (37796, 37827), False, 'import re\n'), ((57916, 57974), 're.match', 're.match', (['ARGCHOSET_CHECK_SPECIAL_DEM_REGEX_STRIPLEVEL', 'dn'], {}), '(ARGCHOSET_CHECK_SPECIAL_DEM_REGEX_STRIPLEVEL, dn)\n', (57924, 57974), False, 'import re\n'), ((58035, 58057), 'os.path.join', 'os.path.join', (['root', 'dn'], {}), '(root, dn)\n', (58047, 58057), False, 'import os\n'), ((60412, 60444), 'os.path.join', 'os.path.join', (['root', 'cf_root_name'], {}), '(root, cf_root_name)\n', (60424, 60444), False, 'import os\n'), ((61496, 61524), 'os.path.join', 'os.path.join', (['root', 'srcfname'], {}), '(root, srcfname)\n', (61508, 61524), False, 'import os\n'), ((64746, 64804), 'lib.walk.walk', 'walk.walk', (['cff_root'], {'maxdepth': 'verify_by_pairname_dir_depth'}), '(cff_root, maxdepth=verify_by_pairname_dir_depth)\n', (64755, 64804), False, 'from lib import walk\n'), ((67033, 67058), 'glob.glob', 'glob.glob', (["(cff_root + '*')"], {}), "(cff_root + '*')\n", (67042, 67058), False, 'import glob\n'), ((72514, 72553), 'os.path.join', 'os.path.join', (['cff_dir', '(fn + 
errfile_ext)'], {}), '(cff_dir, fn + errfile_ext)\n', (72526, 72553), False, 'import os\n'), ((77227, 77260), 'os.path.isfile', 'os.path.isfile', (['(f + checkfile_ext)'], {}), '(f + checkfile_ext)\n', (77241, 77260), False, 'import os\n'), ((96950, 96991), 'osgeo.gdal.Open', 'gdal.Open', (['raster_ffile', 'gdal.GA_ReadOnly'], {}), '(raster_ffile, gdal.GA_ReadOnly)\n', (96959, 96991), False, 'from osgeo import gdal\n'), ((102302, 102328), 'os.remove', 'os.remove', (['raster_ffile_wd'], {}), '(raster_ffile_wd)\n', (102311, 102328), False, 'import os\n'), ((38332, 38373), 'os.path.join', 'os.path.join', (['checkfile_dir', 'cf_root_name'], {}), '(checkfile_dir, cf_root_name)\n', (38344, 38373), False, 'import os\n'), ((63796, 63819), 'os.path.isdir', 'os.path.isdir', (['cff_root'], {}), '(cff_root)\n', (63809, 63819), False, 'import os\n'), ((66308, 66348), 're.match', 're.match', (['checkfile_root_regex', 'srcfname'], {}), '(checkfile_root_regex, srcfname)\n', (66316, 66348), False, 'import re\n'), ((95956, 95986), 'os.path.basename', 'os.path.basename', (['raster_ffile'], {}), '(raster_ffile)\n', (95972, 95986), False, 'import os\n'), ((96176, 96219), 'shutil.copy2', 'shutil.copy2', (['raster_ffile', 'raster_ffile_wd'], {}), '(raster_ffile, raster_ffile_wd)\n', (96188, 96219), False, 'import shutil\n'), ((110599, 110648), 'os.path.basename', 'os.path.basename', (['item_values[item_matches_index]'], {}), '(item_values[item_matches_index])\n', (110615, 110648), False, 'import os\n'), ((67504, 67527), 'os.path.split', 'os.path.split', (['srcffile'], {}), '(srcffile)\n', (67517, 67527), False, 'import os\n'), ((67568, 67608), 're.match', 're.match', (['checkfile_root_regex', 'srcfname'], {}), '(checkfile_root_regex, srcfname)\n', (67576, 67608), False, 'import re\n'), ((94110, 94133), 'io.StringIO', 'StringIO', (['scenemeta_txt'], {}), '(scenemeta_txt)\n', (94118, 94133), False, 'from io import StringIO\n'), ((96467, 96482), 'lib.script_utils.LOGGER.debug', 
'LOGGER.debug', (['e'], {}), '(e)\n', (96479, 96482), False, 'from lib.script_utils import LOGGER, eprint\n'), ((98192, 98222), 'lib.script_utils.LOGGER.debug', 'LOGGER.debug', (['"""Doing checksum"""'], {}), "('Doing checksum')\n", (98204, 98222), False, 'from lib.script_utils import LOGGER, eprint\n'), ((98757, 98796), 'lib.script_utils.LOGGER.debug', 'LOGGER.debug', (['"""Reading band data array"""'], {}), "('Reading band data array')\n", (98769, 98796), False, 'from lib.script_utils import LOGGER, eprint\n'), ((98893, 98928), 'lib.script_utils.LOGGER.debug', 'LOGGER.debug', (['"""Data read succeeded"""'], {}), "('Data read succeeded')\n", (98905, 98928), False, 'from lib.script_utils import LOGGER, eprint\n'), ((99786, 99847), 'numpy.logical_or', 'np.logical_or', (['(data_array < valid_min)', '(data_array > valid_max)'], {}), '(data_array < valid_min, data_array > valid_max)\n', (99799, 99847), True, 'import numpy as np\n'), ((62883, 62909), 'os.path.basename', 'os.path.basename', (['srcffile'], {}), '(srcffile)\n', (62899, 62909), False, 'import os\n'), ((64895, 64953), 're.match', 're.match', (['ARGCHOSET_CHECK_SPECIAL_DEM_REGEX_STRIPLEVEL', 'dn'], {}), '(ARGCHOSET_CHECK_SPECIAL_DEM_REGEX_STRIPLEVEL, dn)\n', (64903, 64953), False, 'import re\n'), ((65022, 65044), 'os.path.join', 'os.path.join', (['root', 'dn'], {}), '(root, dn)\n', (65034, 65044), False, 'import os\n'), ((68032, 68067), 'os.path.join', 'os.path.join', (['srcfdir', 'cf_root_name'], {}), '(srcfdir, cf_root_name)\n', (68044, 68067), False, 'import os\n'), ((93327, 93386), 're.match', 're.match', (['SETSM_STRIPMETA_SCENEMETA_ITEM_HEADER_REGEX', 'line'], {}), '(SETSM_STRIPMETA_SCENEMETA_ITEM_HEADER_REGEX, line)\n', (93335, 93386), False, 'import re\n'), ((99169, 99208), 'lib.script_utils.LOGGER.debug', 'LOGGER.debug', (['"""Continuing to next band"""'], {}), "('Continuing to next band')\n", (99181, 99208), False, 'from lib.script_utils import LOGGER, eprint\n'), ((100221, 100247), 'numpy.any', 
'np.any', (['data_array_invalid'], {}), '(data_array_invalid)\n', (100227, 100247), True, 'import numpy as np\n'), ((100285, 100322), 'lib.script_utils.LOGGER.debug', 'LOGGER.debug', (['"""SETSM check succeeded"""'], {}), "('SETSM check succeeded')\n", (100297, 100322), False, 'from lib.script_utils import LOGGER, eprint\n'), ((64176, 64202), 'os.path.basename', 'os.path.basename', (['cff_root'], {}), '(cff_root)\n', (64192, 64202), False, 'import os\n'), ((99988, 100008), 'numpy.isnan', 'np.isnan', (['nodata_val'], {}), '(nodata_val)\n', (99996, 100008), True, 'import numpy as np\n'), ((99964, 99984), 'numpy.isnan', 'np.isnan', (['data_array'], {}), '(data_array)\n', (99972, 99984), True, 'import numpy as np\n'), ((101817, 101835), 'lib.script_utils.LOGGER.error', 'LOGGER.error', (['line'], {}), '(line)\n', (101829, 101835), False, 'from lib.script_utils import LOGGER, eprint\n'), ((93551, 93574), 'io.StringIO', 'StringIO', (['scenemeta_txt'], {}), '(scenemeta_txt)\n', (93559, 93574), False, 'from io import StringIO\n'), ((100824, 100843), 'numpy.arange', 'np.arange', (['shape[1]'], {}), '(shape[1])\n', (100833, 100843), True, 'import numpy as np\n'), ((100919, 100938), 'numpy.arange', 'np.arange', (['shape[0]'], {}), '(shape[0])\n', (100928, 100938), True, 'import numpy as np\n'), ((101034, 101065), 'numpy.argwhere', 'np.argwhere', (['data_array_invalid'], {}), '(data_array_invalid)\n', (101045, 101065), True, 'import numpy as np\n'), ((101258, 101286), 'numpy.where', 'np.where', (['data_array_invalid'], {}), '(data_array_invalid)\n', (101266, 101286), True, 'import numpy as np\n')] |
"""Functionals used to define the dual sets."""
import sympy
import numpy
from .symbolic import subs, x, t, PiecewiseFunction, sym_sum, to_sympy, to_float
from .vectors import vdot
from .calculus import derivative, jacobian_component, grad, diff, div
from .basis_function import BasisFunction
from . import mappings
class BaseFunctional:
    """Base class for the functionals that make up a dual set."""

    def __init__(self, entity=(None, None), mapping="identity"):
        self.entity = entity
        self.mapping = mapping

    def eval(self, fun, symbolic=True):
        """Apply the functional to a function.

        Subclasses must override this method.
        """
        raise NotImplementedError

    def dof_point(self):
        """Return the point in the cell associated with this DOF.

        The base implementation yields an all-None tuple of the right length.
        """
        # NOTE(review): relies on self.reference being set by a subclass — confirm.
        return tuple(None for _ in range(self.reference.gdim))

    def dof_direction(self):
        """Return the direction of the DOF, or None if it has no direction."""
        return None

    def entity_dim(self):
        """Return the dimension of the entity this DOF is associated with."""
        return self.entity[0]

    def perform_mapping(self, fs, map, inverse_map, tdim):
        """Push a list of functions forward onto a cell using this DOF's mapping."""
        mapping_fn = getattr(mappings, self.mapping)
        return [mapping_fn(f, map, inverse_map, tdim) for f in fs]

    # Subclasses may supply quadrature data and a human-readable name.
    get_points_and_weights = None
    name = None
class PointEvaluation(BaseFunctional):
    """Evaluation of a function at a single fixed point."""

    def __init__(self, point, entity=(None, None), mapping="identity"):
        super().__init__(entity, mapping)
        self.point = point

    def eval(self, function, symbolic=True):
        """Evaluate the function at the stored point."""
        result = subs(function, x, self.point)
        return result if symbolic else to_float(result)

    def dof_point(self):
        """Return the point associated with this DOF."""
        return self.point

    def get_points_and_weights(self, max_order=None):
        """Return quadrature points and weights that represent this functional."""
        points = numpy.array([self.point])
        weights = numpy.array([1])
        return points, weights

    name = "Point evaluation"
class WeightedPointEvaluation(BaseFunctional):
    """Evaluation of a function at a point, scaled by a fixed weight."""

    def __init__(self, point, weight, entity=(None, None), mapping="identity"):
        super().__init__(entity, mapping)
        self.point = point
        self.weight = weight

    def eval(self, function, symbolic=True):
        """Evaluate the function at the stored point and multiply by the weight."""
        result = self.weight * subs(function, x, self.point)
        return result if symbolic else to_float(result)

    def dof_point(self):
        """Return the point associated with this DOF."""
        return self.point

    def get_points_and_weights(self, max_order=None):
        """Return quadrature points and weights that represent this functional."""
        points = numpy.array([self.point])
        weights = numpy.array([self.weight])
        return points, weights

    name = "Weighted point evaluation"
class DerivativePointEvaluation(BaseFunctional):
    """A point evaluation of a given partial derivative.

    `derivative` is a tuple giving the order of differentiation in each
    coordinate direction.
    """
    def __init__(self, point, derivative, entity=(None, None), mapping=None):
        super().__init__(entity, mapping)
        self.point = point
        self.derivative = derivative
    def eval(self, function, symbolic=True):
        """Apply the functional to a function."""
        # Differentiate j times with respect to each coordinate x[i].
        for i, j in zip(x, self.derivative):
            for k in range(j):
                function = diff(function, i)
        value = subs(function, x, self.point)
        if symbolic:
            return value
        else:
            return to_float(value)
    def dof_point(self):
        """Get the location of the DOF in the cell."""
        return self.point
    def perform_mapping(self, fs, map, inverse_map, tdim):
        """Map functions to a cell.

        If an explicit mapping was given, defer to the base class. Otherwise
        each group of tdim basis functions (taken by striding through fs) is
        combined with the rows of the Jacobian of the cell map.
        """
        if self.mapping is not None:
            return super().perform_mapping(fs, map, inverse_map, tdim)
        out = []
        # Jacobian of the map from the reference cell to the physical cell.
        J = sympy.Matrix([[diff(map[i], x[j]) for j in range(tdim)] for i in range(tdim)])
        for dofs in zip(*[fs[i::tdim] for i in range(tdim)]):
            for i in range(tdim):
                out.append(sym_sum(a * b for a, b in zip(dofs, J.row(i))))
        # Pull the result back to the reference cell coordinates.
        return [subs(b, x, inverse_map) for b in out]
    name = "Point derivative evaluation"
class PointDirectionalDerivativeEvaluation(BaseFunctional):
    """A point evaluation of a derivative in a fixed direction."""

    def __init__(self, point, direction, entity=(None, None), mapping="identity"):
        super().__init__(entity, mapping)
        self.point = point
        self.dir = direction

    def eval(self, function, symbolic=True):
        """Evaluate the directional derivative of a function at the stored point."""
        f = function
        if isinstance(f, PiecewiseFunction):
            # Pick the piece whose domain contains the evaluation point.
            f = f.get_piece(self.point)
        result = subs(derivative(f, self.dir), x, self.point)
        return result if symbolic else to_float(result)

    def dof_point(self):
        """Return the point associated with this DOF."""
        return self.point

    def dof_direction(self):
        """Return the direction of differentiation for this DOF."""
        return self.dir

    name = "Point evaluation of directional derivative"
class PointNormalDerivativeEvaluation(PointDirectionalDerivativeEvaluation):
    """A point evaluation of the derivative normal to an edge."""

    def __init__(self, point, edge, entity=(None, None), mapping="identity"):
        # The differentiation direction is the normal of the given edge.
        normal = edge.normal()
        super().__init__(point, normal, entity=entity, mapping=mapping)
        self.reference = edge

    name = "Point evaluation of normal derivative"
class PointComponentSecondDerivativeEvaluation(BaseFunctional):
    """A point evaluation of one component of a second derivative."""

    def __init__(self, point, component, entity=(None, None), mapping="identity"):
        super().__init__(entity, mapping)
        self.point = point
        self.component = component

    def eval(self, function, symbolic=True):
        """Evaluate the chosen Jacobian component of the function at the stored point."""
        component_fn = jacobian_component(function, self.component)
        result = subs(component_fn, x, self.point)
        return result if symbolic else to_float(result)

    def dof_point(self):
        """Return the point associated with this DOF."""
        return self.point

    name = "Point evaluation of Jacobian component"
class PointInnerProduct(BaseFunctional):
    """An evaluation of an inner product at a point."""

    def __init__(self, point, lvec, rvec, entity=(None, None), mapping="identity"):
        super().__init__(entity, mapping)
        self.point = point
        # Left and right vectors of the inner product lvec^T M rvec.
        self.lvec = lvec
        self.rvec = rvec

    def eval(self, function, symbolic=True):
        """Apply the functional to a function."""
        v = subs(function, x, self.point)
        tdim = len(self.lvec)
        # The function must be a flattened tdim x tdim matrix.
        assert len(function) == tdim ** 2
        # lvec^T M rvec, computed row by row over the flattened matrix.
        value = vdot(self.lvec,
                     tuple(vdot(v[tdim * i: tdim * (i + 1)], self.rvec)
                           for i in range(0, tdim)))
        if symbolic:
            return value
        else:
            return to_float(value)

    def dof_point(self):
        """Get the location of the DOF in the cell."""
        return self.point

    def dof_direction(self):
        """Get the direction of the DOF."""
        # A single direction only makes sense when both vectors agree.
        if self.rvec != self.lvec:
            return None
        return self.lvec

    name = "Point inner product"
class DotPointEvaluation(BaseFunctional):
    """A point evaluation in a given direction."""

    def __init__(self, point, vector, entity=(None, None), mapping="identity"):
        super().__init__(entity, mapping)
        self.point = point
        self.vector = vector

    def eval(self, function, symbolic=True):
        """Apply the functional to a function."""
        # Evaluate the function and the direction vector at the point,
        # then take their dot product.
        f_at_point = subs(function, x, self.point)
        v_at_point = subs(self.vector, x, self.point)
        value = vdot(f_at_point, v_at_point)
        return value if symbolic else to_float(value)

    def dof_point(self):
        """Get the location of the DOF in the cell."""
        return self.point

    def dof_direction(self):
        """Get the direction of the DOF."""
        return self.vector

    name = "Dot point evaluation"
class IntegralAgainst(BaseFunctional):
    """An integral against a function."""

    def __init__(self, reference, f, entity=(None, None), mapping="identity"):
        super().__init__(entity, mapping)
        self.reference = reference
        if isinstance(f, BasisFunction):
            f = f.get_function()
        # Express the moment function in the entity's parameters (t).
        f = subs(f, x, t)
        if isinstance(f, tuple):
            # Vector- and matrix-valued moments are pushed forward onto the
            # subentity with a (double) contravariant map.
            if len(f) == self.reference.tdim:
                self.f = mappings.contravariant(
                    f, reference.get_map_to_self(), reference.get_inverse_map_to_self(),
                    reference.tdim)
            else:
                assert len(f) == self.reference.tdim ** 2
                self.f = mappings.double_contravariant(
                    f, reference.get_map_to_self(), reference.get_inverse_map_to_self(),
                    reference.tdim)
        else:
            self.f = f

    def dof_point(self):
        """Get the location of the DOF in the cell."""
        # Vertex average (midpoint) of the reference entity.
        return tuple(sympy.Rational(sum(i), len(i)) for i in zip(*self.reference.vertices))

    def eval(self, function, symbolic=True):
        """Apply the functional to a function."""
        # Parametrise the entity: point = origin + sum_j axes_j * t_j.
        point = [i for i in self.reference.origin]
        for i, a in enumerate(zip(*self.reference.axes)):
            for j, k in zip(a, t):
                point[i] += j * k
        integrand = self.dot(subs(function, x, point))
        value = self.reference.integral(integrand)
        if symbolic:
            return value
        else:
            return to_float(value)

    def dot(self, function):
        """Dot a function with the moment function."""
        return vdot(function, self.f)

    name = "Integral against"
class IntegralOfDivergenceAgainst(BaseFunctional):
    """An integral of the divergence against a function."""

    def __init__(self, reference, f, entity=(None, None), mapping="identity"):
        super().__init__(entity, mapping)
        self.reference = reference
        if isinstance(f, BasisFunction):
            f = f.get_function()
        # Express the moment function in the entity's parameters (t).
        self.f = subs(f, x, t)

    def dof_point(self):
        """Get the location of the DOF in the cell."""
        # Vertex average (midpoint) of the reference entity.
        return tuple(sympy.Rational(sum(i), len(i)) for i in zip(*self.reference.vertices))

    def eval(self, function, symbolic=True):
        """Apply the functional to a function."""
        # Parametrise the entity: point = origin + sum_j axes_j * t_j.
        point = [i for i in self.reference.origin]
        for i, a in enumerate(zip(*self.reference.axes)):
            for j, k in zip(a, t):
                point[i] += j * k
        # Integrate the divergence of the function weighted by f.
        integrand = self.dot(subs(div(function), x, point))
        value = self.reference.integral(integrand)
        if symbolic:
            return value
        else:
            return to_float(value)

    def dot(self, function):
        """Dot a function with the moment function."""
        return function * self.f

    name = "Integral of divergence against"
class IntegralOfDirectionalMultiderivative(BaseFunctional):
    """An integral of a directional derivative of a scalar function."""

    def __init__(self, reference, directions, orders, scale=1, entity=(None, None),
                 mapping="identity"):
        super().__init__(entity, mapping)
        self.reference = reference
        # directions[i] is differentiated along orders[i] times.
        self.directions = directions
        self.orders = orders
        self.scale = scale

    def dof_point(self):
        """Get the location of the DOF in the cell."""
        # Vertex average (midpoint) of the reference entity.
        return tuple(sympy.Rational(sum(i), len(i)) for i in zip(*self.reference.vertices))

    def eval(self, function, symbolic=True):
        """Apply the functional to a function."""
        for dir, o in zip(self.directions, self.orders):
            for i in range(o):
                # One directional derivative: sum_j d_j * d(function)/dx_j.
                function = sum(d * diff(function, x[j]) for j, d in enumerate(dir))
        # Parametrise the entity: point = origin + sum_j axes_j * t_j.
        point = [i for i in self.reference.origin]
        for i, a in enumerate(zip(*self.reference.axes)):
            for j, k in zip(a, t):
                point[i] += j * k
        integrand = self.scale * subs(function, x, point)
        value = self.reference.integral(integrand)
        if symbolic:
            return value
        else:
            return to_float(value)

    def perform_mapping(self, fs, map, inverse_map, tdim):
        """Map functions to a cell."""
        # Mapping is only implemented for the zeroth-order (no derivative) case.
        if sum(self.orders) > 0:
            raise NotImplementedError("Mapping high order derivatives not implemented")
        return super().perform_mapping(fs, map, inverse_map, tdim)

    name = "Integral of a directional derivative"
class IntegralMoment(BaseFunctional):
    """An integral moment."""

    def __init__(self, reference, f, dof, entity=(None, None), mapping="identity"):
        super().__init__(entity, mapping)
        self.reference = reference
        # The DOF of the moment space that this functional corresponds to.
        self.dof = dof
        if isinstance(f, BasisFunction):
            f = f.get_function()
        # Express the moment function in the entity's parameters (t).
        f = subs(f, x, t)
        if isinstance(f, tuple):
            # Vector- and matrix-valued moments are pushed forward onto the
            # subentity with a (double) contravariant map.
            if len(f) == self.reference.tdim:
                self.f = mappings.contravariant(
                    f, reference.get_map_to_self(), reference.get_inverse_map_to_self(),
                    reference.tdim)
            else:
                assert len(f) == self.reference.tdim ** 2
                self.f = mappings.double_contravariant(
                    f, reference.get_map_to_self(), reference.get_inverse_map_to_self(),
                    reference.tdim)
        else:
            self.f = f

    def eval(self, function, symbolic=True):
        """Apply the functional to a function."""
        # Parametrise the entity: point = origin + sum_j axes_j * t_j.
        point = [i for i in self.reference.origin]
        for i, a in enumerate(zip(*self.reference.axes)):
            for j, k in zip(a, t):
                point[i] += j * k
        integrand = self.dot(subs(function, x, point))
        if isinstance(integrand, PiecewiseFunction):
            # Integrate the piece containing the entity midpoint.
            integrand = integrand.get_piece(self.reference.midpoint())
        value = self.reference.integral(to_sympy(integrand))
        if symbolic:
            return value
        else:
            return to_float(value)

    def dot(self, function):
        """Dot a function with the moment function."""
        return vdot(function, self.f)

    def dof_point(self):
        """Get the location of the DOF in the cell."""
        p = self.dof.dof_point()
        # Map the point from the subentity onto the cell.
        return tuple(
            o + sum(self.reference.axes[j][i] * c for j, c in enumerate(p))
            for i, o in enumerate(self.reference.origin)
        )

    def dof_direction(self):
        """Get the direction of the DOF."""
        p = self.dof.dof_direction()
        if p is None:
            return None
        # Map the direction from the subentity onto the cell.
        return tuple(
            sum(self.reference.axes[j][i] * c for j, c in enumerate(p))
            for i in range(self.reference.gdim)
        )

    name = "Integral moment"
class VecIntegralMoment(IntegralMoment):
    """An integral moment applied to a component of a vector."""

    def __init__(self, reference, f, dot_with, dof, entity=(None, None), mapping="identity"):
        super().__init__(reference, f, dof, entity=entity, mapping=mapping)
        # Vector the integrand is dotted with before weighting by f.
        self.dot_with = dot_with

    def dot(self, function):
        """Dot a function with the moment function."""
        return vdot(function, self.dot_with) * self.f

    def dof_direction(self):
        """Get the direction of the DOF."""
        return self.dot_with

    name = "Vector integral moment"
class DerivativeIntegralMoment(IntegralMoment):
    """An integral moment of the derivative of a scalar function."""

    def __init__(self, reference, f, dot_with, dof, entity=(None, None), mapping="identity"):
        super().__init__(reference, f, dof, entity=entity, mapping=mapping)
        # Vector the gradient is dotted with before weighting by f.
        self.dot_with = dot_with

    def dot(self, function):
        """Dot a function with the moment function."""
        return vdot(function, self.dot_with) * self.f

    def dof_direction(self):
        """Get the direction of the DOF."""
        return self.dot_with

    def eval(self, function, symbolic=True):
        """Apply the functional to a function."""
        # Parametrise the entity: point = origin + sum_j axes_j * t_j.
        point = [i for i in self.reference.origin]
        for i, a in enumerate(zip(*self.reference.axes)):
            for j, k in zip(a, t):
                point[i] += j * k
        # Integrate grad(function) . dot_with weighted by f.
        integrand = self.dot(subs(grad(function, self.reference.gdim), x, point))
        value = self.reference.integral(integrand)
        if symbolic:
            return value
        else:
            return to_float(value)

    name = "Derivative integral moment"
class DivergenceIntegralMoment(IntegralMoment):
    """An integral moment of the divergence of a vector function."""

    def __init__(self, reference, f, dof, entity=(None, None), mapping="identity"):
        super().__init__(reference, f, dof, entity=entity, mapping=mapping)

    def eval(self, function, symbolic=True):
        """Apply the functional to a function."""
        # Parametrise the entity: point = origin + sum_j axes_j * t_j.
        point = [i for i in self.reference.origin]
        for i, a in enumerate(zip(*self.reference.axes)):
            for j, k in zip(a, t):
                point[i] += j * k
        # Integrate div(function) weighted by the moment function.
        integrand = self.dot(subs(div(function), x, point))
        value = self.reference.integral(integrand)
        if symbolic:
            return value
        else:
            return to_float(value)

    name = "Integral moment of divergence"
class TangentIntegralMoment(VecIntegralMoment):
    """An integral moment in the tangential direction."""

    def __init__(self, reference, f, dof, entity=(None, None), mapping="covariant"):
        # Dot the function with the tangent of the reference entity.
        super().__init__(reference, f, reference.tangent(), dof, entity=entity, mapping=mapping)

    name = "Tangential integral moment"
class NormalIntegralMoment(VecIntegralMoment):
    """An integral moment in the normal direction."""

    def __init__(self, reference, f, dof, entity=(None, None), mapping="contravariant"):
        # Dot the function with the normal of the reference entity.
        super().__init__(reference, f, reference.normal(), dof, entity=entity, mapping=mapping)

    name = "Normal integral moment"
class NormalDerivativeIntegralMoment(DerivativeIntegralMoment):
    """An integral moment in the normal direction."""

    def __init__(self, reference, f, dof, entity=(None, None), mapping="identity"):
        # Dot the gradient with the normal of the reference entity.
        super().__init__(reference, f, reference.normal(), dof, entity=entity, mapping=mapping)

    name = "Normal derivative integral moment"
class InnerProductIntegralMoment(IntegralMoment):
    """An integral moment of the inner product with a vector."""

    def __init__(self, reference, f, inner_with_left, inner_with_right, dof,
                 entity=(None, None), mapping="identity"):
        super().__init__(reference, f, dof, entity=entity, mapping=mapping)
        # Vectors applied on each side of the inner product l^T M r.
        self.inner_with_left = inner_with_left
        self.inner_with_right = inner_with_right

    def dot(self, function):
        """Take the inner product of a function with the moment direction."""
        tdim = len(self.inner_with_left)
        # l^T M r over the flattened matrix, scaled by the moment function
        # and the entity Jacobian.
        return vdot(self.inner_with_left,
                    tuple(vdot(function[tdim * i: tdim * (i + 1)], self.inner_with_right)
                          for i in range(0, tdim))) * self.f * self.reference.jacobian()

    def dof_direction(self):
        """Get the direction of the DOF."""
        # A single direction only makes sense when both vectors agree.
        if self.inner_with_left != self.inner_with_right:
            return None
        return self.inner_with_left

    name = "Inner product integral moment"
class NormalInnerProductIntegralMoment(InnerProductIntegralMoment):
    """An integral moment of the inner product with the normal direction."""

    def __init__(self, reference, f, dof, entity=(None, None), mapping="double_contravariant"):
        # Use the entity normal on both sides of the inner product.
        super().__init__(reference, f, reference.normal(), reference.normal(), dof, entity=entity,
                         mapping=mapping)

    name = "Normal inner product integral moment"
| [
"numpy.array"
] | [((1947, 1972), 'numpy.array', 'numpy.array', (['[self.point]'], {}), '([self.point])\n', (1958, 1972), False, 'import numpy\n'), ((1974, 1990), 'numpy.array', 'numpy.array', (['[1]'], {}), '([1])\n', (1985, 1990), False, 'import numpy\n'), ((2801, 2826), 'numpy.array', 'numpy.array', (['[self.point]'], {}), '([self.point])\n', (2812, 2826), False, 'import numpy\n'), ((2828, 2854), 'numpy.array', 'numpy.array', (['[self.weight]'], {}), '([self.weight])\n', (2839, 2854), False, 'import numpy\n')] |
from flask import Flask
from flask import Flask, render_template,Response,request ,make_response, session
import pandas as pd
from werkzeug.utils import secure_filename
import matplotlib.pyplot as plt
from darkflow.net.build import TFNet
import numpy as np
import label_image
from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas
from matplotlib.figure import Figure
import cv2
import pytesseract
from PIL import Image
import random
import pickle
import os
from os.path import isfile, join
app = Flask(__name__,static_folder = "templates")
#**************** Modify These Functions ****************#
def processFaceAndEyeImage(imagePath):
    """Annotate faces and eyes in an image and return HTML showing both results."""
    img = cv2.imread(imagePath, 0)  # load as greyscale
    # Haar cascade classifiers trained on positive (face/eye) and
    # negative samples.
    face_cascade = cv2.CascadeClassifier('haarcascade_frontalface_alt2.xml')
    eye_cascade = cv2.CascadeClassifier('haarcascade_eye.xml')

    # Draw a white box around every detected face.
    result1 = img.copy()
    for (x, y, w, h) in face_cascade.detectMultiScale(result1):
        cv2.rectangle(result1, (x, y), (x + w, y + h), (255, 255, 255), 5)

    # Draw a thinner white box around every detected eye.
    result2 = img.copy()
    for (x, y, w, h) in eye_cascade.detectMultiScale(result2, scaleFactor=1.2, minNeighbors=5):
        cv2.rectangle(result2, (x, y), (x + w, y + h), (255, 255, 255), 2)

    img1 = "result1face.jpg"
    img2 = "result2eye.jpg"
    cv2.imwrite('templates/' + img1, result1)
    cv2.imwrite('templates/' + img2, result2)
    # The random query string busts the browser cache.
    return ("<img src='templates/" + img1 + "?=" + str(random.randint(0, 100000000000000000000))
            + "'></img><br><img src='templates/" + img2
            + "'></img><br><br><a href='http://127.0.0.1:5000/'>Home</a>")
#The function will return above line and show it to user.
def processFaceAndEyeVideo(videoPath):
    """Detect faces and eyes in every frame of a video.

    Draws a box around each detected face, searches each face region for
    eyes, writes the annotated frames to ``templates/faceandeyeVideo.avi``
    and returns an HTML download link.
    """
    # Haar cascades trained on positive (face/eye) and negative samples.
    face_cascade = cv2.CascadeClassifier('haarcascade_frontalface_alt2.xml')
    eye_cascade = cv2.CascadeClassifier('haarcascade_eye.xml')
    cap = cv2.VideoCapture(videoPath)
    codecformat = cv2.VideoWriter_fourcc(*'XVID')
    size = (
        int(cap.get(cv2.CAP_PROP_FRAME_WIDTH)),
        int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
    )
    out = cv2.VideoWriter('templates/faceandeyeVideo.avi', codecformat, 20.0, size)
    while True:
        ret, img = cap.read()
        if ret == False:  # end of stream
            break
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        faces = face_cascade.detectMultiScale(gray, 1.3, 5)
        for (x, y, w, h) in faces:
            cv2.rectangle(img, (x, y), (x + w, y + h), (255, 255, 0), 2)
            roi_gray = gray[y:y + h, x:x + w]
            roi_color = img[y:y + h, x:x + w]
            # Search for eyes only inside the detected face region.
            eyes = eye_cascade.detectMultiScale(roi_gray)
            for (ex, ey, ew, eh) in eyes:
                cv2.rectangle(roi_color, (ex, ey), (ex + ew, ey + eh), (0, 127, 255), 2)
        out.write(img)
    # Fix: release the reader and writer so the output file is finalised
    # (they were previously never released). Also removed two nested helper
    # functions that were defined but never called.
    cap.release()
    out.release()
    return "<a style='font-size:35px;font-weight:900;text-align:center;' href='templates/faceandeyeVideo.avi' download>Download Video</a><br><br><a href='http://127.0.0.1:5000/'>Home</a>"
#This is the code for face and image video. You need to change this...just like this...for all functions
def processCelebrityImage(imagePath, gender):
    """Recognise a trained celebrity face in an image.

    Loads the LBPH model for *gender* ("male" or "female"), annotates each
    detected face with the predicted name, saves the annotated image and
    returns an HTML snippet. Returns "Something went wrong" for an unknown
    gender and "No Face Found" when no face is detected.
    """
    face_cascade = cv2.CascadeClassifier('haarcascade_frontalface_alt2.xml')
    recognizer = cv2.face.LBPHFaceRecognizer_create()
    if gender == "male":
        recognizer.read("male.yml")
        print("Male")
    elif gender == "female":
        recognizer.read("female.yml")
        print("Female")
    else:
        return "Something went wrong"
    # Invert the name->id mapping saved during training.
    labels = {"person_name": 1}
    with open("labels.pickle", 'rb') as f:
        og_labels = pickle.load(f)
        labels = {v: k for k, v in og_labels.items()}
    frame = cv2.imread(imagePath)
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    faces = face_cascade.detectMultiScale(gray, scaleFactor=1.5, minNeighbors=5)
    # Fix: bind the output filename up front -- it was previously assigned
    # only inside the confidence branch, risking a NameError.
    img_item = "my-image.png"
    for (x, y, w, h) in faces:
        roi_gray = gray[y:y + h, x:x + w]
        roi_color = frame[y:y + h, x:x + w]
        id_, conf = recognizer.predict(roi_gray)
        print(id_)
        print(labels[id_])
        if 45 <= conf <= 85:
            # Dump the recognised face crop for debugging.
            cv2.imwrite(img_item, roi_color)
        cv2.rectangle(frame, (x, y), (x + w, y + h), (255, 255, 255), 5)
        # Fix: draw the predicted name once (it was drawn twice before).
        cv2.putText(frame, labels[id_], (x, y), cv2.FONT_HERSHEY_SIMPLEX, 1,
                    (255, 255, 255), 2, cv2.LINE_AA)
    if len(faces) == 0:
        return "No Face Found"
    cv2.imwrite('templates/' + img_item, frame)
    # The random query string busts the browser cache.
    return "<img src='templates/" + img_item + "?=" + str(random.randint(0, 100000000000000000000)) + "'></img><br><br><a href='http://127.0.0.1:5000/'>Home</a>"
def processCelebrityVideo(videoPath, gender):
    """Recognise trained celebrity faces in every frame of a video.

    Loads the LBPH model for *gender*, labels each confidently recognised
    face, writes the annotated frames to ``templates/celebrityVideo.avi``
    and returns an HTML download link.
    """
    face_cascade = cv2.CascadeClassifier('haarcascade_frontalface_alt2.xml')
    recognizer = cv2.face.LBPHFaceRecognizer_create()
    if gender == "male":
        recognizer.read("male.yml")
    elif gender == "female":
        recognizer.read("female.yml")
    else:
        return "Something went wrong"
    # Invert the name->id mapping saved during training.
    labels = {"person_name": 1}
    with open("labels.pickle", 'rb') as f:
        og_labels = pickle.load(f)
        labels = {v: k for k, v in og_labels.items()}
    cap = cv2.VideoCapture(videoPath)
    codecformat = cv2.VideoWriter_fourcc(*'XVID')
    size = (
        int(cap.get(cv2.CAP_PROP_FRAME_WIDTH)),
        int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
    )
    out = cv2.VideoWriter('templates/celebrityVideo.avi', codecformat, 20.0, size)
    while True:
        ret, frame = cap.read()
        if ret == False:  # end of stream
            break
        print(ret)
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        faces = face_cascade.detectMultiScale(gray, scaleFactor=1.5, minNeighbors=5)
        print(faces)
        for (x, y, w, h) in faces:
            roi_gray = gray[y:y + h, x:x + w]
            roi_color = frame[y:y + h, x:x + w]
            id_, conf = recognizer.predict(roi_gray)
            if 45 <= conf <= 85:
                # Label only confidently recognised faces.
                print(id_)
                print(labels[id_])
                cv2.putText(frame, labels[id_], (x, y), cv2.FONT_HERSHEY_SIMPLEX,
                            1, (255, 255, 255), 2, cv2.LINE_AA)
            cv2.rectangle(frame, (x, y), (x + w, y + h), (255, 0, 0), 2)
        out.write(frame)
    # Fix: release the reader and writer so the output file is finalised
    # (they were previously never released).
    cap.release()
    out.release()
    return "<a style='font-size:35px;font-weight:900;text-align:center;' href='templates/celebrityVideo.avi' download>Download Video</a> <br><br><a href='http://127.0.0.1:5000/'>Home</a></button>"
def processObjectImage(imagePath):
    """Run YOLO object detection on an image.

    Draws a box and label for the first detection, saves the result to
    ``templates/object_detection.jpg`` and returns an HTML snippet.
    """
    options = {
        'model': 'cfg/yolo.cfg',
        'load': 'bin/yolov2.weights',
        'threshold': 0.3,
        'gpu': 1.0
    }
    tfnet = TFNet(options)
    img = cv2.imread(imagePath, cv2.IMREAD_COLOR)
    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    # use YOLO to predict the image
    result = tfnet.return_predict(img)
    # Fix: result[0] raised an IndexError when nothing was detected.
    if not result:
        return "No objects detected<br><br><a href='http://127.0.0.1:5000/'>Home</a>"
    tl = (result[0]['topleft']['x'], result[0]['topleft']['y'])
    br = (result[0]['bottomright']['x'], result[0]['bottomright']['y'])
    label = result[0]['label']
    # add the box and label and display it
    img = cv2.rectangle(img, tl, br, (0, 255, 0), 7)
    img = cv2.putText(img, label, tl, cv2.FONT_HERSHEY_COMPLEX, 1, (0, 0, 0), 2)
    cv2.imwrite('templates/object_detection.jpg', img)
    return "<img src='templates/object_detection.jpg?=" + str(random.randint(0, 100000000000000000000)) + "'></img><br><br><button style='font-size:20px;font-weight:900;color:black;background-color:lightblue;border:0;padding:20px 10px;'><a href='http://127.0.0.1:5000/'>Home</a></button>"
def processObjectVideo(videoPath):
    """Run YOLO object detection on every frame of a video.

    Writes annotated frames to ``templates/objectoutput.avi`` and returns
    an HTML download link.
    """
    option = {
        'model': 'cfg/yolo.cfg',
        'load': 'bin/yolov2.weights',
        'threshold': 0.15,
        'gpu': 1.0
    }
    tfnet = TFNet(option)
    capture = cv2.VideoCapture(videoPath)
    # Five random colours; zip() below therefore draws at most five
    # detections per frame.
    colors = [tuple(255 * np.random.rand(3)) for i in range(5)]
    size = (
        int(capture.get(cv2.CAP_PROP_FRAME_WIDTH)),
        int(capture.get(cv2.CAP_PROP_FRAME_HEIGHT))
    )
    codecformat = cv2.VideoWriter_fourcc(*'XVID')
    out = cv2.VideoWriter('templates/objectoutput.avi', codecformat, 20.0, size)
    while capture.isOpened():
        ret, frame = capture.read()
        if not ret:  # end of stream
            break
        results = tfnet.return_predict(frame)
        for color, result in zip(colors, results):
            tl = (result['topleft']['x'], result['topleft']['y'])
            br = (result['bottomright']['x'], result['bottomright']['y'])
            label = result['label']
            frame = cv2.rectangle(frame, tl, br, color, 7)
            frame = cv2.putText(frame, label, tl, cv2.FONT_HERSHEY_COMPLEX, 1, (0, 0, 0), 2)
        out.write(frame)
    # Fix: release the reader and writer so the output file is finalised
    # (they were previously never released).
    capture.release()
    out.release()
    return "<a style='font-size:35px;font-weight:900;text-align:center;' href='templates/objectoutput.avi' download>Download Video</a><br><br><button style='font-size:20px;font-weight:900;color:black;background-color:lightblue;border:0;padding:20px 10px;'><a href='http://127.0.0.1:5000/'>Home</a></button> "
def processReadTextImage(imagePath):
    """OCR the image at *imagePath* and return the extracted text plus a Home button."""
    # Use the tesseract binary found on PATH.
    pytesseract.pytesseract.tesseract_cmd = 'tesseract'
    page = Image.open(imagePath)
    extracted = pytesseract.image_to_string(page)
    return extracted + "<br><br><button style='font-size:20px;font-weight:900;color:black;background-color:lightblue;border:0;padding:20px 10px;'><a href='http://127.0.0.1:5000/'>Home</a></button>"
def processFacialImage(imagePath):
    """Recognise trained faces in an image.

    Uses the LBPH model in ``face-trainner2.yml``, annotates each detected
    face with the predicted name, saves the annotated image and returns an
    HTML snippet, or "No Face Found" when no face is detected.
    """
    face_cascade = cv2.CascadeClassifier('haarcascade_frontalface_alt2.xml')
    recognizer = cv2.face.LBPHFaceRecognizer_create()
    recognizer.read("face-trainner2.yml")
    # Invert the name->id mapping saved during training.
    labels = {"person_name": 1}
    with open("labels.pickle", 'rb') as f:
        og_labels = pickle.load(f)
        labels = {v: k for k, v in og_labels.items()}
    frame = cv2.imread(imagePath)
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    faces = face_cascade.detectMultiScale(gray, scaleFactor=1.5, minNeighbors=5)
    # Fix: bind the output filename up front -- it was previously assigned
    # only inside the confidence branch, risking a NameError.
    img_item = "my-image.png"
    for (x, y, w, h) in faces:
        roi_gray = gray[y:y + h, x:x + w]
        roi_color = frame[y:y + h, x:x + w]
        id_, conf = recognizer.predict(roi_gray)
        print(id_)
        print(labels[id_])
        if 45 <= conf <= 85:
            # Dump the recognised face crop for debugging.
            cv2.imwrite(img_item, roi_color)
        cv2.rectangle(frame, (x, y), (x + w, y + h), (255, 255, 255), 5)
        # Fix: draw the predicted name once (it was drawn twice before).
        cv2.putText(frame, labels[id_], (x, y), cv2.FONT_HERSHEY_SIMPLEX, 1,
                    (255, 255, 255), 2, cv2.LINE_AA)
    if len(faces) == 0:
        return "No Face Found"
    cv2.imwrite('templates/' + img_item, frame)
    # The random query string busts the browser cache.
    return "<img src='templates/" + img_item + "?=" + str(random.randint(0, 100000000000000000000)) + "'></img><br><br><button style='font-size:20px;font-weight:900;color:black;background-color:lightblue;border:0;padding:20px 10px;'><a href='http://127.0.0.1:5000/'>Home</a></button>"
def processFacialVideo(videoPath):
    """Recognise trained faces in every frame of a video.

    Uses the LBPH model in ``face-trainner.yml``, labels each confidently
    recognised face, writes annotated frames to
    ``templates/facialVideo.avi`` and returns an HTML download link.
    """
    face_cascade = cv2.CascadeClassifier('haarcascade_frontalface_alt2.xml')
    recognizer = cv2.face.LBPHFaceRecognizer_create()
    recognizer.read("face-trainner.yml")
    # Invert the name->id mapping saved during training.
    labels = {"person_name": 1}
    with open("labels.pickle", 'rb') as f:
        og_labels = pickle.load(f)
        labels = {v: k for k, v in og_labels.items()}
    cap = cv2.VideoCapture(videoPath)
    codecformat = cv2.VideoWriter_fourcc(*'XVID')
    size = (
        int(cap.get(cv2.CAP_PROP_FRAME_WIDTH)),
        int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
    )
    out = cv2.VideoWriter('templates/facialVideo.avi', codecformat, 20.0, size)
    while True:
        ret, frame = cap.read()
        if not ret:
            # Fix: stop at end of stream -- cvtColor(None) used to crash here
            # because the read result was never checked.
            break
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        faces = face_cascade.detectMultiScale(gray, scaleFactor=1.5, minNeighbors=5)
        for (x, y, w, h) in faces:
            roi_gray = gray[y:y + h, x:x + w]
            roi_color = frame[y:y + h, x:x + w]
            id_, conf = recognizer.predict(roi_gray)
            if 45 <= conf <= 85:
                # Label only confidently recognised faces.
                print(id_)
                print(labels[id_])
                cv2.putText(frame, labels[id_], (x, y), cv2.FONT_HERSHEY_SIMPLEX,
                            1, (255, 255, 255), 2, cv2.LINE_AA)
            cv2.rectangle(frame, (x, y), (x + w, y + h), (255, 0, 0), 2)
        out.write(frame)
    # Fix: release the reader and writer so the output file is finalised.
    cap.release()
    out.release()
    return "<a style='font-size:35px;font-weight:900;text-align:center;' href='templates/facialVideo.avi' download>Download Video</a><br><br><button style='font-size:20px;font-weight:900;color:black;background-color:lightblue;border:0;padding:20px 10px;'><a href='http://127.0.0.1:5000/'>Home</a></button> "
def processFacialErImage(imagePath):
    """Detect faces in an image and classify each one's facial expression.

    Detection runs on a downscaled copy for speed; coordinates are scaled
    back up before drawing. Returns annotated-image HTML, or
    "No Face Found" when no face is detected.
    """
    size = 4  # downscale factor used for detection
    classifier = cv2.CascadeClassifier('haarcascade_frontalface_alt.xml')
    im = cv2.imread(imagePath, 0)
    # Resize the image to speed up detection.
    mini = cv2.resize(im, (int(im.shape[1] / size), int(im.shape[0] / size)))
    faces = classifier.detectMultiScale(mini)
    for f in faces:
        (x, y, w, h) = [v * size for v in f]  # scale the coordinates back up
        cv2.rectangle(im, (x, y), (x + w, y + h), (0, 255, 0), 4)
        sub_face = im[y:y + h, x:x + w]
        FaceFileName = "test.jpg"
        # Fix: actually save the cropped face before classifying -- the
        # write was commented out, so label_image read a stale/missing file.
        cv2.imwrite(FaceFileName, sub_face)
        text = label_image.main(FaceFileName)
        text = text.title()  # Title Case looks better on the image
        cv2.putText(im, text, (x, y), cv2.FONT_HERSHEY_TRIPLEX, 1, (255, 0, 0), 2)
    if len(faces) == 0:
        return "No Face Found"
    cv2.imwrite('templates/' + FaceFileName, im)
    # Fix: the Home link pointed at an invalid host ("1192.168.127.12");
    # use the same address as every other handler.
    return "<img src='templates/" + FaceFileName + "?=" + str(random.randint(0, 100000000000000000000)) + "'></img><br><br><button style='font-size:20px;font-weight:900;color:black;background-color:lightblue;border:0;padding:20px 10px;'><a href='http://127.0.0.1:5000/'>Home</a></button>"
def processActivityImage(videoPath):
    """Placeholder for activity recognition on an image.

    NOTE: the parameter actually receives an image path (the name is a
    copy-paste leftover). Processing is not implemented yet; a fixed
    string is returned.
    """
    return "Activity Image Output"
def processActivityVideo(videoPath):
    """Placeholder for activity recognition on a video.

    Processing is not implemented yet; a fixed string is returned.
    """
    return "Activity Video Output"
#******************Flask App Code Starts ****************
@app.route('/')
def index():
    """Serve the landing page."""
    return render_template('/index.html')
@app.route('/imageorvideo.html')
def imageorvideo():
    """Render the image/video chooser for the selected feature.

    Celebrity mode additionally tells the template to show the
    male/female selector.
    """
    dowhat = request.args.get('dowhat')
    # "yes" makes the template render the gender picker.
    maleorfemale = "yes" if dowhat == "celebrity" else ""
    return render_template('/imageorvideo.html', dowhat=dowhat, maleorfemale=maleorfemale)
@app.route('/faceandeye.html', methods = ['POST', 'GET'])
def faceandeye():
    """Upload endpoint for face & eye detection.

    POST: save the upload and dispatch to the image or video processor by
    file extension. GET: fall back to the home page.
    """
    if request.method == 'POST':
        f = request.files['fileToUpload']
        # Fix: save and process the same (sanitised) path; the file was
        # previously saved under secure_filename() but read under the raw
        # name, which breaks for names that secure_filename rewrites.
        filePath = secure_filename(f.filename)
        f.save(filePath)
        # Fix: exact, case-insensitive extension match instead of substring
        # tests (e.g. "x.jpgz" previously passed the "jpg" in ... check).
        extension = filePath.rsplit(".", 1)[-1].lower()
        if extension in ("jpeg", "jpg", "png"):
            return processFaceAndEyeImage(filePath)
        elif extension in ("mp4", "wmv", "mkv", "webm", "avi"):
            return processFaceAndEyeVideo(filePath)
        else:
            return "Invalid File uploaded"
    else:
        return render_template('/index.html')
@app.route('/celebrity.html', methods = ['POST', 'GET'])
def celebrity():
    """Upload endpoint for celebrity recognition (image or video).

    The gender form field selects which trained model the processors load.
    """
    if request.method == 'POST':
        f = request.files['fileToUpload']
        # Fix: save and process the same (sanitised) path; the file was
        # previously saved under secure_filename() but read under the raw name.
        filePath = secure_filename(f.filename)
        f.save(filePath)
        # Fix: exact, case-insensitive extension match instead of substring tests.
        extension = filePath.rsplit(".", 1)[-1].lower()
        if extension in ("jpeg", "jpg", "png"):
            return processCelebrityImage(filePath, request.form['gender'])
        elif extension in ("mp4", "wmv", "mkv", "webm", "avi"):
            return processCelebrityVideo(filePath, request.form['gender'])
        else:
            return "Invalid File uploaded"
    else:
        return render_template('/index.html')
@app.route('/object.html', methods = ['POST', 'GET'])
def object():
    """Upload endpoint for YOLO object detection (image or video).

    NOTE: the function name shadows the ``object`` builtin, but renaming it
    would change the registered Flask endpoint, so it is kept.
    """
    if request.method == 'POST':
        f = request.files['fileToUpload']
        # Fix: save and process the same (sanitised) path; the file was
        # previously saved under secure_filename() but read under the raw name.
        filePath = secure_filename(f.filename)
        f.save(filePath)
        # Fix: exact, case-insensitive extension match instead of substring tests.
        extension = filePath.rsplit(".", 1)[-1].lower()
        if extension in ("jpeg", "jpg", "png"):
            return processObjectImage(filePath)
        elif extension in ("mp4", "wmv", "mkv", "webm", "avi"):
            return processObjectVideo(filePath)
        else:
            return "Invalid File uploaded"
    else:
        return render_template('/index.html')
@app.route('/readtext.html', methods = ['POST', 'GET'])
def readtext():
    """Upload endpoint for OCR. Only image uploads are supported."""
    if request.method == 'POST':
        f = request.files['fileToUpload']
        # Fix: save and process the same (sanitised) path; the file was
        # previously saved under secure_filename() but read under the raw name.
        filePath = secure_filename(f.filename)
        f.save(filePath)
        # Fix: exact, case-insensitive extension match instead of substring tests.
        extension = filePath.rsplit(".", 1)[-1].lower()
        if extension in ("jpeg", "jpg", "png"):
            return processReadTextImage(filePath)
        else:
            return "Invalid File uploaded"
    else:
        return render_template('/index.html')
@app.route('/facial.html', methods = ['POST', 'GET'])
def facial():
    """Upload endpoint for face recognition (image or video)."""
    if request.method == 'POST':
        f = request.files['fileToUpload']
        # Fix: save and process the same (sanitised) path; the file was
        # previously saved under secure_filename() but read under the raw name.
        filePath = secure_filename(f.filename)
        f.save(filePath)
        # Fix: exact, case-insensitive extension match instead of substring tests.
        extension = filePath.rsplit(".", 1)[-1].lower()
        if extension in ("jpeg", "jpg", "png"):
            return processFacialImage(filePath)
        elif extension in ("mp4", "wmv", "mkv", "webm", "avi"):
            return processFacialVideo(filePath)
        else:
            return "Invalid File uploaded"
    else:
        return render_template('/index.html')
@app.route('/facialer.html', methods = ['POST', 'GET'])
def facialer():
    """Upload endpoint for facial expression recognition (images only)."""
    if request.method == 'POST':
        f = request.files['fileToUpload']
        # Fix: save and process the same (sanitised) path; the file was
        # previously saved under secure_filename() but read under the raw name.
        filePath = secure_filename(f.filename)
        f.save(filePath)
        # Fix: exact, case-insensitive extension match instead of substring tests.
        extension = filePath.rsplit(".", 1)[-1].lower()
        if extension in ("jpeg", "jpg", "png"):
            return processFacialErImage(filePath)
        else:
            return "Invalid File uploaded"
    else:
        return render_template('/index.html')
@app.route('/activity.html', methods = ['POST', 'GET'])
def activity():
    """Upload endpoint for activity recognition (image or video)."""
    if request.method == 'POST':
        f = request.files['fileToUpload']
        # Fix: save and process the same (sanitised) path; the file was
        # previously saved under secure_filename() but read under the raw name.
        filePath = secure_filename(f.filename)
        f.save(filePath)
        # Fix: exact, case-insensitive extension match instead of substring tests.
        extension = filePath.rsplit(".", 1)[-1].lower()
        if extension in ("jpeg", "jpg", "png"):
            output = processActivityImage(filePath)
            return render_template('/activity.html', output=output)
        elif extension in ("mp4", "wmv", "mkv", "webm", "avi"):
            output = processActivityVideo(filePath)
            return render_template('/activity.html', output=output)
        else:
            return "Invalid File uploaded"
    else:
        return render_template('/index.html')
if __name__ == "__main__":
    # Development server only; use a production WSGI server for deployment.
    app.run(debug=True)
    #port = int(os.environ.get("PORT", 5000))
#app.run(host='0.0.0.0', port=port) | [
"cv2.rectangle",
"flask.render_template",
"flask.request.args.get",
"numpy.random.rand",
"flask.Flask",
"cv2.face.LBPHFaceRecognizer_create",
"werkzeug.utils.secure_filename",
"cv2.CascadeClassifier",
"label_image.main",
"cv2.VideoWriter",
"cv2.VideoWriter_fourcc",
"random.randint",
"pickle.... | [((538, 580), 'flask.Flask', 'Flask', (['__name__'], {'static_folder': '"""templates"""'}), "(__name__, static_folder='templates')\n", (543, 580), False, 'from flask import Flask, render_template, Response, request, make_response, session\n'), ((791, 815), 'cv2.imread', 'cv2.imread', (['imagePath', '(0)'], {}), '(imagePath, 0)\n', (801, 815), False, 'import cv2\n'), ((1027, 1084), 'cv2.CascadeClassifier', 'cv2.CascadeClassifier', (['"""haarcascade_frontalface_alt2.xml"""'], {}), "('haarcascade_frontalface_alt2.xml')\n", (1048, 1084), False, 'import cv2\n'), ((1352, 1396), 'cv2.CascadeClassifier', 'cv2.CascadeClassifier', (['"""haarcascade_eye.xml"""'], {}), "('haarcascade_eye.xml')\n", (1373, 1396), False, 'import cv2\n'), ((1729, 1770), 'cv2.imwrite', 'cv2.imwrite', (["('templates/' + img1)", 'result1'], {}), "('templates/' + img1, result1)\n", (1740, 1770), False, 'import cv2\n'), ((1770, 1811), 'cv2.imwrite', 'cv2.imwrite', (["('templates/' + img2)", 'result2'], {}), "('templates/' + img2, result2)\n", (1781, 1811), False, 'import cv2\n'), ((2313, 2370), 'cv2.CascadeClassifier', 'cv2.CascadeClassifier', (['"""haarcascade_frontalface_alt2.xml"""'], {}), "('haarcascade_frontalface_alt2.xml')\n", (2334, 2370), False, 'import cv2\n'), ((2606, 2650), 'cv2.CascadeClassifier', 'cv2.CascadeClassifier', (['"""haarcascade_eye.xml"""'], {}), "('haarcascade_eye.xml')\n", (2627, 2650), False, 'import cv2\n'), ((2879, 2906), 'cv2.VideoCapture', 'cv2.VideoCapture', (['videoPath'], {}), '(videoPath)\n', (2895, 2906), False, 'import cv2\n'), ((2927, 2958), 'cv2.VideoWriter_fourcc', 'cv2.VideoWriter_fourcc', (["*'XVID'"], {}), "(*'XVID')\n", (2949, 2958), False, 'import cv2\n'), ((3068, 3141), 'cv2.VideoWriter', 'cv2.VideoWriter', (['"""templates/faceandeyeVideo.avi"""', 'codecformat', '(20.0)', 'size'], {}), "('templates/faceandeyeVideo.avi', codecformat, 20.0, size)\n", (3083, 3141), False, 'import cv2\n'), ((4470, 4527), 'cv2.CascadeClassifier', 
'cv2.CascadeClassifier', (['"""haarcascade_frontalface_alt2.xml"""'], {}), "('haarcascade_frontalface_alt2.xml')\n", (4491, 4527), False, 'import cv2\n'), ((4545, 4581), 'cv2.face.LBPHFaceRecognizer_create', 'cv2.face.LBPHFaceRecognizer_create', ([], {}), '()\n', (4579, 4581), False, 'import cv2\n'), ((4937, 4958), 'cv2.imread', 'cv2.imread', (['imagePath'], {}), '(imagePath)\n', (4947, 4958), False, 'import cv2\n'), ((4970, 5009), 'cv2.cvtColor', 'cv2.cvtColor', (['frame', 'cv2.COLOR_BGR2GRAY'], {}), '(frame, cv2.COLOR_BGR2GRAY)\n', (4982, 5009), False, 'import cv2\n'), ((6092, 6149), 'cv2.CascadeClassifier', 'cv2.CascadeClassifier', (['"""haarcascade_frontalface_alt2.xml"""'], {}), "('haarcascade_frontalface_alt2.xml')\n", (6113, 6149), False, 'import cv2\n'), ((6165, 6201), 'cv2.face.LBPHFaceRecognizer_create', 'cv2.face.LBPHFaceRecognizer_create', ([], {}), '()\n', (6199, 6201), False, 'import cv2\n'), ((6519, 6546), 'cv2.VideoCapture', 'cv2.VideoCapture', (['videoPath'], {}), '(videoPath)\n', (6535, 6546), False, 'import cv2\n'), ((6565, 6596), 'cv2.VideoWriter_fourcc', 'cv2.VideoWriter_fourcc', (["*'XVID'"], {}), "(*'XVID')\n", (6587, 6596), False, 'import cv2\n'), ((6706, 6778), 'cv2.VideoWriter', 'cv2.VideoWriter', (['"""templates/celebrityVideo.avi"""', 'codecformat', '(20.0)', 'size'], {}), "('templates/celebrityVideo.avi', codecformat, 20.0, size)\n", (6721, 6778), False, 'import cv2\n'), ((8246, 8260), 'darkflow.net.build.TFNet', 'TFNet', (['options'], {}), '(options)\n', (8251, 8260), False, 'from darkflow.net.build import TFNet\n'), ((8269, 8308), 'cv2.imread', 'cv2.imread', (['imagePath', 'cv2.IMREAD_COLOR'], {}), '(imagePath, cv2.IMREAD_COLOR)\n', (8279, 8308), False, 'import cv2\n'), ((8317, 8353), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_BGR2RGB'], {}), '(img, cv2.COLOR_BGR2RGB)\n', (8329, 8353), False, 'import cv2\n'), ((8643, 8685), 'cv2.rectangle', 'cv2.rectangle', (['img', 'tl', 'br', '(0, 255, 0)', '(7)'], {}), '(img, tl, br, (0, 
255, 0), 7)\n', (8656, 8685), False, 'import cv2\n'), ((8694, 8764), 'cv2.putText', 'cv2.putText', (['img', 'label', 'tl', 'cv2.FONT_HERSHEY_COMPLEX', '(1)', '(0, 0, 0)', '(2)'], {}), '(img, label, tl, cv2.FONT_HERSHEY_COMPLEX, 1, (0, 0, 0), 2)\n', (8705, 8764), False, 'import cv2\n'), ((8770, 8820), 'cv2.imwrite', 'cv2.imwrite', (['"""templates/object_detection.jpg"""', 'img'], {}), "('templates/object_detection.jpg', img)\n", (8781, 8820), False, 'import cv2\n'), ((9271, 9284), 'darkflow.net.build.TFNet', 'TFNet', (['option'], {}), '(option)\n', (9276, 9284), False, 'from darkflow.net.build import TFNet\n'), ((9299, 9326), 'cv2.VideoCapture', 'cv2.VideoCapture', (['videoPath'], {}), '(videoPath)\n', (9315, 9326), False, 'import cv2\n'), ((9514, 9545), 'cv2.VideoWriter_fourcc', 'cv2.VideoWriter_fourcc', (["*'XVID'"], {}), "(*'XVID')\n", (9536, 9545), False, 'import cv2\n'), ((9554, 9624), 'cv2.VideoWriter', 'cv2.VideoWriter', (['"""templates/objectoutput.avi"""', 'codecformat', '(20.0)', 'size'], {}), "('templates/objectoutput.avi', codecformat, 20.0, size)\n", (9569, 9624), False, 'import cv2\n'), ((10483, 10504), 'PIL.Image.open', 'Image.open', (['imagePath'], {}), '(imagePath)\n', (10493, 10504), False, 'from PIL import Image\n'), ((10570, 10602), 'pytesseract.image_to_string', 'pytesseract.image_to_string', (['img'], {}), '(img)\n', (10597, 10602), False, 'import pytesseract\n'), ((10851, 10908), 'cv2.CascadeClassifier', 'cv2.CascadeClassifier', (['"""haarcascade_frontalface_alt2.xml"""'], {}), "('haarcascade_frontalface_alt2.xml')\n", (10872, 10908), False, 'import cv2\n'), ((10926, 10962), 'cv2.face.LBPHFaceRecognizer_create', 'cv2.face.LBPHFaceRecognizer_create', ([], {}), '()\n', (10960, 10962), False, 'import cv2\n'), ((11167, 11188), 'cv2.imread', 'cv2.imread', (['imagePath'], {}), '(imagePath)\n', (11177, 11188), False, 'import cv2\n'), ((11200, 11239), 'cv2.cvtColor', 'cv2.cvtColor', (['frame', 'cv2.COLOR_BGR2GRAY'], {}), '(frame, 
cv2.COLOR_BGR2GRAY)\n', (11212, 11239), False, 'import cv2\n'), ((12011, 12054), 'cv2.imwrite', 'cv2.imwrite', (["('templates/' + img_item)", 'frame'], {}), "('templates/' + img_item, frame)\n", (12022, 12054), False, 'import cv2\n'), ((12437, 12494), 'cv2.CascadeClassifier', 'cv2.CascadeClassifier', (['"""haarcascade_frontalface_alt2.xml"""'], {}), "('haarcascade_frontalface_alt2.xml')\n", (12458, 12494), False, 'import cv2\n'), ((12510, 12546), 'cv2.face.LBPHFaceRecognizer_create', 'cv2.face.LBPHFaceRecognizer_create', ([], {}), '()\n', (12544, 12546), False, 'import cv2\n'), ((12746, 12773), 'cv2.VideoCapture', 'cv2.VideoCapture', (['videoPath'], {}), '(videoPath)\n', (12762, 12773), False, 'import cv2\n'), ((12790, 12821), 'cv2.VideoWriter_fourcc', 'cv2.VideoWriter_fourcc', (["*'XVID'"], {}), "(*'XVID')\n", (12812, 12821), False, 'import cv2\n'), ((12931, 13000), 'cv2.VideoWriter', 'cv2.VideoWriter', (['"""templates/facialVideo.avi"""', 'codecformat', '(20.0)', 'size'], {}), "('templates/facialVideo.avi', codecformat, 20.0, size)\n", (12946, 13000), False, 'import cv2\n'), ((14350, 14406), 'cv2.CascadeClassifier', 'cv2.CascadeClassifier', (['"""haarcascade_frontalface_alt.xml"""'], {}), "('haarcascade_frontalface_alt.xml')\n", (14371, 14406), False, 'import cv2\n'), ((14416, 14440), 'cv2.imread', 'cv2.imread', (['imagePath', '(0)'], {}), '(imagePath, 0)\n', (14426, 14440), False, 'import cv2\n'), ((15369, 15413), 'cv2.imwrite', 'cv2.imwrite', (["('templates/' + FaceFileName)", 'im'], {}), "('templates/' + FaceFileName, im)\n", (15380, 15413), False, 'import cv2\n'), ((16345, 16375), 'flask.render_template', 'render_template', (['"""/index.html"""'], {}), "('/index.html')\n", (16360, 16375), False, 'from flask import Flask, render_template, Response, request, make_response, session\n'), ((16445, 16471), 'flask.request.args.get', 'request.args.get', (['"""dowhat"""'], {}), "('dowhat')\n", (16461, 16471), False, 'from flask import Flask, render_template, Response, 
request, make_response, session\n'), ((3352, 3389), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_BGR2GRAY'], {}), '(img, cv2.COLOR_BGR2GRAY)\n', (3364, 3389), False, 'import cv2\n'), ((4861, 4875), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (4872, 4875), False, 'import pickle\n'), ((5493, 5525), 'cv2.imwrite', 'cv2.imwrite', (['img_item', 'roi_color'], {}), '(img_item, roi_color)\n', (5504, 5525), False, 'import cv2\n'), ((5530, 5594), 'cv2.rectangle', 'cv2.rectangle', (['frame', '(x, y)', '(x + w, y + h)', '(255, 255, 255)', '(5)'], {}), '(frame, (x, y), (x + w, y + h), (255, 255, 255), 5)\n', (5543, 5594), False, 'import cv2\n'), ((5712, 5776), 'cv2.putText', 'cv2.putText', (['frame', 'name', '(x, y)', 'font', '(1)', 'color', '(2)', 'cv2.LINE_AA'], {}), '(frame, name, (x, y), font, 1, color, 2, cv2.LINE_AA)\n', (5723, 5776), False, 'import cv2\n'), ((5780, 5823), 'cv2.imwrite', 'cv2.imwrite', (["('templates/' + img_item)", 'frame'], {}), "('templates/' + img_item, frame)\n", (5791, 5823), False, 'import cv2\n'), ((6445, 6459), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (6456, 6459), False, 'import pickle\n'), ((6932, 6971), 'cv2.cvtColor', 'cv2.cvtColor', (['frame', 'cv2.COLOR_BGR2GRAY'], {}), '(frame, cv2.COLOR_BGR2GRAY)\n', (6944, 6971), False, 'import cv2\n'), ((11091, 11105), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (11102, 11105), False, 'import pickle\n'), ((11723, 11755), 'cv2.imwrite', 'cv2.imwrite', (['img_item', 'roi_color'], {}), '(img_item, roi_color)\n', (11734, 11755), False, 'import cv2\n'), ((11760, 11824), 'cv2.rectangle', 'cv2.rectangle', (['frame', '(x, y)', '(x + w, y + h)', '(255, 255, 255)', '(5)'], {}), '(frame, (x, y), (x + w, y + h), (255, 255, 255), 5)\n', (11773, 11824), False, 'import cv2\n'), ((11942, 12006), 'cv2.putText', 'cv2.putText', (['frame', 'name', '(x, y)', 'font', '(1)', 'color', '(2)', 'cv2.LINE_AA'], {}), '(frame, name, (x, y), font, 1, color, 2, cv2.LINE_AA)\n', (11953, 12006), False, 
'import cv2\n'), ((12674, 12688), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (12685, 12688), False, 'import pickle\n'), ((13056, 13095), 'cv2.cvtColor', 'cv2.cvtColor', (['frame', 'cv2.COLOR_BGR2GRAY'], {}), '(frame, cv2.COLOR_BGR2GRAY)\n', (13068, 13095), False, 'import cv2\n'), ((14812, 14869), 'cv2.rectangle', 'cv2.rectangle', (['im', '(x, y)', '(x + w, y + h)', '(0, 255, 0)', '(4)'], {}), '(im, (x, y), (x + w, y + h), (0, 255, 0), 4)\n', (14825, 14869), False, 'import cv2\n'), ((15072, 15102), 'label_image.main', 'label_image.main', (['FaceFileName'], {}), '(FaceFileName)\n', (15088, 15102), False, 'import label_image\n'), ((15268, 15322), 'cv2.putText', 'cv2.putText', (['im', 'text', '(x, y)', 'font', '(1)', '(255, 0, 0)', '(2)'], {}), '(im, text, (x, y), font, 1, (255, 0, 0), 2)\n', (15279, 15322), False, 'import cv2\n'), ((16541, 16620), 'flask.render_template', 'render_template', (['"""/imageorvideo.html"""'], {'dowhat': 'dowhat', 'maleorfemale': 'maleorfemale'}), "('/imageorvideo.html', dowhat=dowhat, maleorfemale=maleorfemale)\n", (16556, 16620), False, 'from flask import Flask, render_template, Response, request, make_response, session\n'), ((16639, 16708), 'flask.render_template', 'render_template', (['"""/imageorvideo.html"""'], {'dowhat': 'dowhat', 'maleorfemale': '""""""'}), "('/imageorvideo.html', dowhat=dowhat, maleorfemale='')\n", (16654, 16708), False, 'from flask import Flask, render_template, Response, request, make_response, session\n'), ((17548, 17578), 'flask.render_template', 'render_template', (['"""/index.html"""'], {}), "('/index.html')\n", (17563, 17578), False, 'from flask import Flask, render_template, Response, request, make_response, session\n'), ((18442, 18472), 'flask.render_template', 'render_template', (['"""/index.html"""'], {}), "('/index.html')\n", (18457, 18472), False, 'from flask import Flask, render_template, Response, request, make_response, session\n'), ((19216, 19246), 'flask.render_template', 
'render_template', (['"""/index.html"""'], {}), "('/index.html')\n", (19231, 19246), False, 'from flask import Flask, render_template, Response, request, make_response, session\n'), ((19776, 19806), 'flask.render_template', 'render_template', (['"""/index.html"""'], {}), "('/index.html')\n", (19791, 19806), False, 'from flask import Flask, render_template, Response, request, make_response, session\n'), ((20550, 20580), 'flask.render_template', 'render_template', (['"""/index.html"""'], {}), "('/index.html')\n", (20565, 20580), False, 'from flask import Flask, render_template, Response, request, make_response, session\n'), ((21106, 21136), 'flask.render_template', 'render_template', (['"""/index.html"""'], {}), "('/index.html')\n", (21121, 21136), False, 'from flask import Flask, render_template, Response, request, make_response, session\n'), ((21876, 21906), 'flask.render_template', 'render_template', (['"""/index.html"""'], {}), "('/index.html')\n", (21891, 21906), False, 'from flask import Flask, render_template, Response, request, make_response, session\n'), ((1225, 1292), 'cv2.rectangle', 'cv2.rectangle', (['face_img', '(x, y)', '(x + w, y + h)', '(255, 255, 255)', '(5)'], {}), '(face_img, (x, y), (x + w, y + h), (255, 255, 255), 5)\n', (1238, 1292), False, 'import cv2\n'), ((1567, 1634), 'cv2.rectangle', 'cv2.rectangle', (['face_img', '(x, y)', '(x + w, y + h)', '(255, 255, 255)', '(2)'], {}), '(face_img, (x, y), (x + w, y + h), (255, 255, 255), 2)\n', (1580, 1634), False, 'import cv2\n'), ((2511, 2579), 'cv2.rectangle', 'cv2.rectangle', (['face_img', '(x, y)', '(x + w, y + h)', '(255, 255, 255)', '(10)'], {}), '(face_img, (x, y), (x + w, y + h), (255, 255, 255), 10)\n', (2524, 2579), False, 'import cv2\n'), ((2790, 2858), 'cv2.rectangle', 'cv2.rectangle', (['face_img', '(x, y)', '(x + w, y + h)', '(255, 255, 255)', '(10)'], {}), '(face_img, (x, y), (x + w, y + h), (255, 255, 255), 10)\n', (2803, 2858), False, 'import cv2\n'), ((3585, 3645), 'cv2.rectangle', 
'cv2.rectangle', (['img', '(x, y)', '(x + w, y + h)', '(255, 255, 0)', '(2)'], {}), '(img, (x, y), (x + w, y + h), (255, 255, 0), 2)\n', (3598, 3645), False, 'import cv2\n'), ((5395, 5459), 'cv2.putText', 'cv2.putText', (['frame', 'name', '(x, y)', 'font', '(1)', 'color', '(2)', 'cv2.LINE_AA'], {}), '(frame, name, (x, y), font, 1, color, 2, cv2.LINE_AA)\n', (5406, 5459), False, 'import cv2\n'), ((7587, 7656), 'cv2.rectangle', 'cv2.rectangle', (['frame', '(x, y)', '(end_cord_x, end_cord_y)', 'color', 'stroke'], {}), '(frame, (x, y), (end_cord_x, end_cord_y), color, stroke)\n', (7600, 7656), False, 'import cv2\n'), ((11625, 11689), 'cv2.putText', 'cv2.putText', (['frame', 'name', '(x, y)', 'font', '(1)', 'color', '(2)', 'cv2.LINE_AA'], {}), '(frame, name, (x, y), font, 1, color, 2, cv2.LINE_AA)\n', (11636, 11689), False, 'import cv2\n'), ((13761, 13830), 'cv2.rectangle', 'cv2.rectangle', (['frame', '(x, y)', '(end_cord_x, end_cord_y)', 'color', 'stroke'], {}), '(frame, (x, y), (end_cord_x, end_cord_y), color, stroke)\n', (13774, 13830), False, 'import cv2\n'), ((16962, 16987), 'werkzeug.utils.secure_filename', 'secure_filename', (['filePath'], {}), '(filePath)\n', (16977, 16987), False, 'from werkzeug.utils import secure_filename\n'), ((17762, 17787), 'werkzeug.utils.secure_filename', 'secure_filename', (['filePath'], {}), '(filePath)\n', (17777, 17787), False, 'from werkzeug.utils import secure_filename\n'), ((18648, 18673), 'werkzeug.utils.secure_filename', 'secure_filename', (['filePath'], {}), '(filePath)\n', (18663, 18673), False, 'from werkzeug.utils import secure_filename\n'), ((19426, 19451), 'werkzeug.utils.secure_filename', 'secure_filename', (['filePath'], {}), '(filePath)\n', (19441, 19451), False, 'from werkzeug.utils import secure_filename\n'), ((19980, 20005), 'werkzeug.utils.secure_filename', 'secure_filename', (['filePath'], {}), '(filePath)\n', (19995, 20005), False, 'from werkzeug.utils import secure_filename\n'), ((20758, 20783), 
'werkzeug.utils.secure_filename', 'secure_filename', (['filePath'], {}), '(filePath)\n', (20773, 20783), False, 'from werkzeug.utils import secure_filename\n'), ((21314, 21339), 'werkzeug.utils.secure_filename', 'secure_filename', (['filePath'], {}), '(filePath)\n', (21329, 21339), False, 'from werkzeug.utils import secure_filename\n'), ((21545, 21593), 'flask.render_template', 'render_template', (['"""/activity.html"""'], {'output': 'output'}), "('/activity.html', output=output)\n", (21560, 21593), False, 'from flask import Flask, render_template, Response, request, make_response, session\n'), ((3897, 3969), 'cv2.rectangle', 'cv2.rectangle', (['roi_color', '(ex, ey)', '(ex + ew, ey + eh)', '(0, 127, 255)', '(2)'], {}), '(roi_color, (ex, ey), (ex + ew, ey + eh), (0, 127, 255), 2)\n', (3910, 3969), False, 'import cv2\n'), ((5919, 5959), 'random.randint', 'random.randint', (['(0)', '(100000000000000000000)'], {}), '(0, 100000000000000000000)\n', (5933, 5959), False, 'import random\n'), ((7411, 7480), 'cv2.putText', 'cv2.putText', (['frame', 'name', '(x, y)', 'font', '(1)', 'color', 'stroke', 'cv2.LINE_AA'], {}), '(frame, name, (x, y), font, 1, color, stroke, cv2.LINE_AA)\n', (7422, 7480), False, 'import cv2\n'), ((8878, 8918), 'random.randint', 'random.randint', (['(0)', '(100000000000000000000)'], {}), '(0, 100000000000000000000)\n', (8892, 8918), False, 'import random\n'), ((9351, 9368), 'numpy.random.rand', 'np.random.rand', (['(3)'], {}), '(3)\n', (9365, 9368), True, 'import numpy as np\n'), ((9955, 9993), 'cv2.rectangle', 'cv2.rectangle', (['frame', 'tl', 'br', 'color', '(7)'], {}), '(frame, tl, br, color, 7)\n', (9968, 9993), False, 'import cv2\n'), ((10007, 10079), 'cv2.putText', 'cv2.putText', (['frame', 'label', 'tl', 'cv2.FONT_HERSHEY_COMPLEX', '(1)', '(0, 0, 0)', '(2)'], {}), '(frame, label, tl, cv2.FONT_HERSHEY_COMPLEX, 1, (0, 0, 0), 2)\n', (10018, 10079), False, 'import cv2\n'), ((12150, 12190), 'random.randint', 'random.randint', (['(0)', 
'(100000000000000000000)'], {}), '(0, 100000000000000000000)\n', (12164, 12190), False, 'import random\n'), ((13519, 13588), 'cv2.putText', 'cv2.putText', (['frame', 'name', '(x, y)', 'font', '(1)', 'color', 'stroke', 'cv2.LINE_AA'], {}), '(frame, name, (x, y), font, 1, color, stroke, cv2.LINE_AA)\n', (13530, 13588), False, 'import cv2\n'), ((15465, 15505), 'random.randint', 'random.randint', (['(0)', '(100000000000000000000)'], {}), '(0, 100000000000000000000)\n', (15479, 15505), False, 'import random\n'), ((21765, 21813), 'flask.render_template', 'render_template', (['"""/activity.html"""'], {'output': 'output'}), "('/activity.html', output=output)\n", (21780, 21813), False, 'from flask import Flask, render_template, Response, request, make_response, session\n'), ((1855, 1895), 'random.randint', 'random.randint', (['(0)', '(100000000000000000000)'], {}), '(0, 100000000000000000000)\n', (1869, 1895), False, 'import random\n')] |
import re
from abc import ABC, abstractmethod
from unittest.mock import MagicMock
import numpy as np
import pint.errors
import pytest
from openscm_units import unit_registry as ur
from openscm_twolayermodel.errors import ModelStateError, UnitError
class ModelTester(ABC):
tmodel = None
parameters = None
@abstractmethod
def test_init(self):
"""
Test the model initialises as intended
"""
pass
def test_init_no_units(self):
"""
Test error thrown if the model is initiliased with a unitless
quantity
"""
for parameter in self.parameters.keys():
error_msg = "{} must be a pint.Quantity".format(parameter)
with pytest.raises(TypeError, match=error_msg):
self.tmodel(**{parameter: 34.3})
@abstractmethod
def test_init_wrong_units(self):
"""
Test error thrown if the model is initiliased with wrong units
for a quantity
"""
# e.g.
for parameter, value in self.parameters.items():
error_msg = "{} units must be {}".format(parameter, value.units)
with pytest.raises(TypeError, match=error_msg):
self.tmodel(**{parameter: 34.3 * ur("kg")})
def test_run(self):
test = self.tmodel()
test.step = MagicMock()
test.run()
test.step.assert_called()
class TwoLayerVariantTester(ModelTester):
def test_init_wrong_units(self):
helper = self.tmodel()
for parameter in self.parameters.keys():
tinp = 34.3 * ur("kg")
default = getattr(helper, parameter)
try:
tinp.to(default.units)
except pint.errors.DimensionalityError:
pass
error_msg = re.escape("Wrong units for `{}`".format(parameter))
with pytest.raises(UnitError, match=error_msg):
self.tmodel(**{parameter: tinp})
def test_set_erf(self, check_equal_pint):
terf = np.array([0, 1, 2]) * ur("W/m^2")
res = self.tmodel()
res.erf = terf
check_equal_pint(res.erf, terf)
def test_set_erf_unitless_error(self, check_equal_pint):
terf = np.array([0, 1, 2])
res = self.tmodel()
with pytest.raises(TypeError, match="erf must be a pint.Quantity"):
res.erf = terf
def test_reset_not_set_error(self):
error_msg = "The model's drivers have not been set yet, call :meth:`self.set_drivers` first."
with pytest.raises(ModelStateError, match=error_msg):
self.tmodel().reset()
| [
"numpy.array",
"unittest.mock.MagicMock",
"openscm_units.unit_registry",
"pytest.raises"
] | [((1341, 1352), 'unittest.mock.MagicMock', 'MagicMock', ([], {}), '()\n', (1350, 1352), False, 'from unittest.mock import MagicMock\n'), ((2235, 2254), 'numpy.array', 'np.array', (['[0, 1, 2]'], {}), '([0, 1, 2])\n', (2243, 2254), True, 'import numpy as np\n'), ((2031, 2050), 'numpy.array', 'np.array', (['[0, 1, 2]'], {}), '([0, 1, 2])\n', (2039, 2050), True, 'import numpy as np\n'), ((2053, 2064), 'openscm_units.unit_registry', 'ur', (['"""W/m^2"""'], {}), "('W/m^2')\n", (2055, 2064), True, 'from openscm_units import unit_registry as ur\n'), ((2297, 2358), 'pytest.raises', 'pytest.raises', (['TypeError'], {'match': '"""erf must be a pint.Quantity"""'}), "(TypeError, match='erf must be a pint.Quantity')\n", (2310, 2358), False, 'import pytest\n'), ((2543, 2590), 'pytest.raises', 'pytest.raises', (['ModelStateError'], {'match': 'error_msg'}), '(ModelStateError, match=error_msg)\n', (2556, 2590), False, 'import pytest\n'), ((730, 771), 'pytest.raises', 'pytest.raises', (['TypeError'], {'match': 'error_msg'}), '(TypeError, match=error_msg)\n', (743, 771), False, 'import pytest\n'), ((1164, 1205), 'pytest.raises', 'pytest.raises', (['TypeError'], {'match': 'error_msg'}), '(TypeError, match=error_msg)\n', (1177, 1205), False, 'import pytest\n'), ((1595, 1603), 'openscm_units.unit_registry', 'ur', (['"""kg"""'], {}), "('kg')\n", (1597, 1603), True, 'from openscm_units import unit_registry as ur\n'), ((1877, 1918), 'pytest.raises', 'pytest.raises', (['UnitError'], {'match': 'error_msg'}), '(UnitError, match=error_msg)\n', (1890, 1918), False, 'import pytest\n'), ((1256, 1264), 'openscm_units.unit_registry', 'ur', (['"""kg"""'], {}), "('kg')\n", (1258, 1264), True, 'from openscm_units import unit_registry as ur\n')] |
import functools
import json
import uuid
from typing import Any, Dict, List, Optional, Sequence, Set
import numpy as np
import pandas as pd
import xarray as xr
from starfish.constants import Indices, AugmentedEnum
from starfish.intensity_table import IntensityTable
class Codebook(xr.DataArray):
"""Codebook for an image-based transcriptomics experiment
The codebook is a three dimensional tensor whose values are the expected intensity of a spot
for each code in each hybridization round and each color channel. This class supports the
construction of synthetic codebooks for testing, and exposes decode methods to assign gene
identifiers to spots. This codebook provides an in-memory representation of the codebook
defined in the spaceTx format.
The codebook is a subclass of xarray, and exposes the complete public API of that package in
addition to the methods and constructors listed below.
Constructors
------------
from_code_array(code_array, n_hyb, n_ch)
construct a codebook from a spaceTx-spec array of codewords
from_json(json_codebook, n_hyb, n_ch)
load a codebook from a spaceTx spec-compliant json file
synthetic_one_hot_codebook
Construct a codebook of random codes where only one channel is on per hybridization round.
This is the typical codebook format for in-situ sequencing and non-multiplex smFISH
experiments.
Methods
-------
decode_euclidean(intensities)
find the closest code for each spot in intensities by euclidean distance
decode_per_channel_maximum(intensities)
find codes that match the per-channel max intensity for each spot in intensities
code_length()
return the total length of the codes in the codebook
Attributes
----------
Constants.CODEWORD
name of codeword field in spaceTx spec
Constants.GENE
name of gene specifier in spaceTx spec
Constants.VALUE
name of value specifier in SpaceTx spec
Examples
--------
>>> from starfish.util.synthesize import SyntheticData
>>> sd = SyntheticData(n_ch=3, n_hyb=4, n_codes=2)
>>> sd.codebook()
<xarray.Codebook (gene_name: 2, c: 3, h: 4)>
array([[[0, 0, 0, 0],
[0, 0, 1, 1],
[1, 1, 0, 0]],
[[1, 0, 0, 0],
[0, 0, 1, 0],
[0, 1, 0, 1]]], dtype=uint8)
Coordinates:
* gene_name (gene_name) object 08b1a822-a1b4-4e06-81ea-8a4bd2b004a9 ...
* c (c) int64 0 1 2
* h (h) int64 0 1 2 3
See Also
--------
TODO <link to spaceTx format>
"""
    class Constants(AugmentedEnum):
        """Field names used by the spaceTx codebook json specification."""
        CODEWORD = 'codeword'  # key holding the list of {hyb, ch, value} entries for a code
        GENE = 'gene_name'     # key holding the gene identifier for a code
        VALUE = 'v'            # key holding the intensity value inside a codeword entry
@property
def code_length(self) -> int:
"""return the length of codes in this codebook"""
return int(np.dot(*self.shape[1:]))
@classmethod
def _empty_codebook(cls, code_names: Sequence[str], n_ch: int, n_hyb: int):
"""create an empty codebook of shape (code_names, n_ch, n_hyb)
Parameters
----------
code_names : Sequence[str]
the genes to be coded
n_ch : int
number of channels used to build the codes
n_hyb : int
number of hybridization rounds used to build the codes
Examples
--------
>>> from starfish.codebook import Codebook
>>> Codebook._empty_codebook(['ACTA', 'ACTB'], n_ch=3, n_hyb=2)
<xarray.Codebook (gene_name: 2, c: 3, h: 2)>
array([[[0, 0],
[0, 0],
[0, 0]],
[[0, 0],
[0, 0],
[0, 0]]], dtype=uint8)
Coordinates:
* gene_name (gene_name) object 'ACTA' 'ACTB'
* c (c) int64 0 1 2
* h (h) int64 0 1
Returns
-------
Codebook :
codebook whose values are all zero
"""
codes_index = pd.Index(code_names, name=Codebook.Constants.GENE.value)
return cls(
data=np.zeros((codes_index.shape[0], n_ch, n_hyb), dtype=np.uint8),
coords=(
codes_index,
pd.Index(np.arange(n_ch), name=Indices.CH.value),
pd.Index(np.arange(n_hyb), name=Indices.HYB.value),
)
)
@classmethod
def from_code_array(
cls, code_array: List[Dict[str, Any]],
n_hyb: Optional[int]=None, n_ch: Optional[int]=None) -> "Codebook":
"""construct a codebook from a spaceTx-spec array of codewords
Parameters
----------
code_array : List[Dict[str, Any]]
Array of dictionaries, each containing a codeword and gene_name
n_hyb : Optional[int]
The number of hybridization rounds used in the codes. Will be inferred if not provided
n_ch : Optional[int]
The number of channels used in the codes. Will be inferred if not provided
Examples
--------
>>> from starfish.constants import Indices
>>> from starfish.codebook import Codebook
>>> import tempfile
>>> import json
>>> import os
>>> dir_ = tempfile.mkdtemp()
>>> codebook = [
>>> {
>>> Codebook.Constants.CODEWORD.value: [
>>> {Indices.HYB.value: 0, Indices.CH.value: 3, Codebook.Constants.VALUE.value: 1},
>>> {Indices.HYB.value: 1, Indices.CH.value: 3, Codebook.Constants.VALUE.value: 1},
>>> ],
>>> Codebook.Constants.GENE.value: "ACTB_human"
>>> },
>>> {
>>> Codebook.Constants.CODEWORD.value: [
>>> {Indices.HYB.value: 0, Indices.CH.value: 3, Codebook.Constants.VALUE.value: 1},
>>> {Indices.HYB.value: 1, Indices.CH.value: 1, Codebook.Constants.VALUE.value: 1},
>>> ],
>>> Codebook.Constants.GENE.value: "ACTB_mouse"
>>> },
>>> ]
>>> json_codebook = os.path.join(dir_, 'codebook.json')
>>> with open(json_codebook, 'w') as f:
>>> json.dump(codebook, f)
<xarray.Codebook (gene_name: 2, c: 4, h: 2)>
array([[[0, 0],
[0, 0],
[0, 0],
[1, 1]],
[[0, 0],
[0, 1],
[0, 0],
[1, 0]]], dtype=uint8)
Coordinates:
* gene_name (gene_name) object 'ACTB_human' 'ACTB_mouse'
* c (c) int64 0 1 2 3
* h (h) int64 0 1
Codebook.from_json(json_codebook)
Returns
-------
Codebook :
Codebook with shape (genes, channels, hybridization_rounds)
"""
# guess the max hyb and channel if not provided, otherwise check provided values are valid
max_hyb, max_ch = 0, 0
for code in code_array:
for entry in code[Codebook.Constants.CODEWORD.value]:
max_hyb = max(max_hyb, entry[Indices.HYB])
max_ch = max(max_ch, entry[Indices.CH])
# set n_ch and n_hyb if either were not provided
n_hyb = n_hyb if n_hyb is not None else max_hyb + 1
n_ch = n_ch if n_ch is not None else max_ch + 1
# raise errors if provided n_hyb or n_ch are out of range
if max_hyb + 1 > n_hyb:
raise ValueError(
f'code detected that requires a hybridization value ({max_hyb + 1}) that is '
f'greater than provided n_hyb: {n_hyb}')
if max_ch + 1 > n_ch:
raise ValueError(
f'code detected that requires a channel value ({max_ch + 1}) that is greater '
f'than provided n_hyb: {n_ch}')
# verify codebook structure and fields
for code in code_array:
if not isinstance(code, dict):
raise ValueError(f'codebook must be an array of dictionary codes. Found: {code}.')
# verify all necessary fields are present
required_fields = {Codebook.Constants.CODEWORD.value, Codebook.Constants.GENE.value}
missing_fields = required_fields.difference(code)
if missing_fields:
raise ValueError(
f'Each entry of codebook must contain {required_fields}. Missing fields: '
f'{missing_fields}')
gene_names = [w[Codebook.Constants.GENE.value] for w in code_array]
code_data = cls._empty_codebook(gene_names, n_ch, n_hyb)
# fill the codebook
for code_dict in code_array:
codeword = code_dict[Codebook.Constants.CODEWORD.value]
gene = code_dict[Codebook.Constants.GENE.value]
for entry in codeword:
code_data.loc[gene, entry[Indices.CH.value], entry[Indices.HYB.value]] = entry[
Codebook.Constants.VALUE.value]
return code_data
    @classmethod
    def from_json(
            cls, json_codebook: str, n_hyb: Optional[int]=None, n_ch: Optional[int]=None
    ) -> "Codebook":
        """Load a codebook from a spaceTx spec-compliant json file

        Parameters
        ----------
        json_codebook : str
            path to json file containing a spaceTx codebook
        n_hyb : Optional[int]
            The number of hybridization rounds used in the codes. Will be inferred if not provided
        n_ch : Optional[int]
            The number of channels used in the codes. Will be inferred if not provided

        Examples
        --------
        >>> import json
        >>> import os
        >>> import tempfile
        >>> from starfish.constants import Indices
        >>> from starfish.codebook import Codebook
        >>> codebook = [
        >>>     {
        >>>         Codebook.Constants.CODEWORD.value: [
        >>>             {Indices.HYB.value: 0, Indices.CH.value: 3, Codebook.Constants.VALUE.value: 1},
        >>>             {Indices.HYB.value: 1, Indices.CH.value: 3, Codebook.Constants.VALUE.value: 1},
        >>>         ],
        >>>         Codebook.Constants.GENE.value: "ACTB_human"
        >>>     },
        >>>     {
        >>>         Codebook.Constants.CODEWORD.value: [
        >>>             {Indices.HYB.value: 0, Indices.CH.value: 3, Codebook.Constants.VALUE.value: 1},
        >>>             {Indices.HYB.value: 1, Indices.CH.value: 1, Codebook.Constants.VALUE.value: 1},
        >>>         ],
        >>>         Codebook.Constants.GENE.value: "ACTB_mouse"
        >>>     },
        >>> ]
        >>> # from_json takes a file path, so write the codebook to disk first
        >>> json_codebook = os.path.join(tempfile.mkdtemp(), 'codebook.json')
        >>> with open(json_codebook, 'w') as f:
        >>>     json.dump(codebook, f)
        >>> Codebook.from_json(json_codebook)
        <xarray.Codebook (gene_name: 2, c: 4, h: 2)>
        array([[[0, 0],
                [0, 0],
                [0, 0],
                [1, 1]],
               [[0, 0],
                [0, 1],
                [0, 0],
                [1, 0]]], dtype=uint8)
        Coordinates:
          * gene_name  (gene_name) object 'ACTB_human' 'ACTB_mouse'
          * c          (c) int64 0 1 2 3
          * h          (h) int64 0 1

        Returns
        -------
        Codebook :
            Codebook with shape (genes, channels, hybridization_rounds)

        """
        with open(json_codebook, 'r') as f:
            code_array = json.load(f)
        return cls.from_code_array(code_array, n_hyb, n_ch)
def to_json(self, filename: str) -> None:
"""save a codebook to json
Notes
-----
This enforces the following typing of codebooks:
ch, hyb: int
value: float
gene: str
Parameters
----------
filename : str
filename
"""
code_array = []
for gene in self[self.Constants.GENE.value]:
codeword = []
for ch in self[Indices.CH.value]:
for hyb in self[Indices.HYB.value]:
if self.loc[gene, ch, hyb]:
codeword.append(
{
Indices.CH.value: int(ch),
Indices.HYB.value: int(hyb),
self.Constants.VALUE.value: float(self.loc[gene, ch, hyb])
})
code_array.append({
self.Constants.CODEWORD.value: codeword,
self.Constants.GENE.value: str(gene.values)
})
with open(filename, 'w') as f:
json.dump(code_array, f)
    def decode_euclidean(self, intensities: IntensityTable) -> IntensityTable:
        """Assign the closest gene by euclidean distance to each feature in an intensity table

        Both the codebook and the features are divided by their summed intensity before
        distances are computed, so patterns rather than magnitudes are compared. The
        input table is modified in place and also returned.

        Parameters
        ----------
        intensities : IntensityTable
            features to be decoded

        Returns
        -------
        IntensityTable :
            intensity table containing additional data variables for gene assignments and feature
            qualities
        """
        def _min_euclidean_distance(observation: xr.DataArray, codes: Codebook) -> np.ndarray:
            """find the code with the closest euclidean distance to observation

            Parameters
            ----------
            observation : xr.DataArray
                2-dimensional DataArray of shape (n_ch, n_hyb)
            codes :
                Codebook containing codes to compare to observation

            Returns
            -------
            np.ndarray :
                1-d vector containing the distance of each code to observation
            """
            squared_diff = (codes - observation) ** 2
            code_distances = np.sqrt(squared_diff.sum((Indices.CH, Indices.HYB)))
            # order of codes changes here (automated sorting on the reshaping?)
            return code_distances

        # normalize both the intensities and the codebook
        norm_intensities = intensities.groupby(IntensityTable.Constants.FEATURES.value).apply(
            lambda x: x / x.sum())
        norm_codes = self.groupby(Codebook.Constants.GENE.value).apply(lambda x: x / x.sum())
        # calculate pairwise euclidean distance between codes and features
        func = functools.partial(_min_euclidean_distance, codes=norm_codes)
        distances = norm_intensities.groupby(IntensityTable.Constants.FEATURES.value).apply(func)
        # calculate quality of each decoded spot: 1 minus the distance to its closest
        # code, so a perfect match scores 1
        qualities = 1 - distances.min(Codebook.Constants.GENE.value)
        qualities_index = pd.Index(qualities)
        # identify genes associated with closest codes
        closest_code_index = distances.argmin(Codebook.Constants.GENE.value)
        gene_ids = distances.indexes[
            Codebook.Constants.GENE.value].values[closest_code_index.values]
        gene_index = pd.Index(gene_ids)
        # set new values on the intensity table in-place
        intensities[IntensityTable.Constants.GENE.value] = (
            IntensityTable.Constants.FEATURES.value, gene_index)
        intensities[IntensityTable.Constants.QUALITY.value] = (
            IntensityTable.Constants.FEATURES.value, qualities_index)
        return intensities
    def decode_per_hyb_max(self, intensities: IntensityTable) -> IntensityTable:
        """decode each feature by selecting the per-hybridization round max-valued channel

        Notes
        -----
        If no code matches the per-channel max of a feature, it will be assigned np.nan instead
        of a gene value

        Parameters
        ----------
        intensities : IntensityTable
            features to be decoded

        Returns
        -------
        IntensityTable :
            intensity table containing additional data variables for gene assignments
        """
        def _view_row_as_element(array: np.ndarray) -> np.ndarray:
            """view an entire code as a single element

            This view allows vectors (codes) to be compared for equality without need for multiple
            comparisons by casting the data in each code to a structured dtype that registers as
            a single value

            Parameters
            ----------
            array : np.ndarray
                2-dimensional numpy array of shape (n_observations, (n_ch * n_hyb)) where
                observations may be either features or codes.

            Returns
            -------
            np.ndarray :
                1-dimensional vector of shape n_observations
            """
            nrows, ncols = array.shape
            # one structured field per column, so a whole row compares as a single value
            dtype = {'names': ['f{}'.format(i) for i in range(ncols)],
                     'formats': ncols * [array.dtype]}
            return array.view(dtype)

        # reduce features and codes to the index of the brightest channel per round
        max_channels = intensities.argmax(Indices.CH.value)
        codes = self.argmax(Indices.CH.value)
        # flatten each code/feature to one row and compare rows wholesale
        a = _view_row_as_element(codes.values.reshape(self.shape[0], -1))
        b = _view_row_as_element(max_channels.values.reshape(intensities.shape[0], -1))
        # features whose max-channel pattern matches no code stay nan
        genes = np.empty(intensities.shape[0], dtype=object)
        genes.fill(np.nan)
        for i in np.arange(a.shape[0]):
            genes[np.where(a[i] == b)[0]] = codes['gene_name'][i]
        gene_index = pd.Index(genes.astype('U'))
        intensities[IntensityTable.Constants.GENE.value] = (
            IntensityTable.Constants.FEATURES.value, gene_index)
        return intensities
@classmethod
def synthetic_one_hot_codebook(
cls, n_hyb: int, n_channel: int, n_codes: int, gene_names: Optional[Sequence]=None
) -> "Codebook":
"""Generate codes where one channel is "on" in each hybridization round
Parameters
----------
n_hyb : int
number of hybridization rounds per code
n_channel : int
number of channels per code
n_codes : int
number of codes to generate
gene_names : Optional[List[str]]
if provided, names for genes in codebook
Examples
--------
>>> from starfish.codebook import Codebook
>>> Codebook.synthetic_one_hot_codebook(n_hyb=2, n_channel=3, n_codes=2)
<xarray.Codebook (gene_name: 2, c: 3, h: 2)>
array([[[0, 1],
[0, 0],
[1, 0]],
[[1, 1],
[0, 0],
[0, 0]]], dtype=uint8)
Coordinates:
* gene_name (gene_name) object b25180dc-8af5-48f1-bff4-b5649683516d ...
* c (c) int64 0 1 2
* h (h) int64 0 1
Returns
-------
List[Dict] :
list of codewords
"""
# TODO ambrosejcarr: clean up this code, generate Codebooks directly using _empty_codebook
# construct codes; this can be slow when n_codes is large and n_codes ~= n_possible_codes
codes: Set = set()
while len(codes) < n_codes:
codes.add(tuple([np.random.randint(0, n_channel) for _ in np.arange(n_hyb)]))
# construct codewords from code
codewords = [
[
{
Indices.HYB.value: h, Indices.CH.value: c, 'v': 1
} for h, c in enumerate(code)
] for code in codes
]
# make a codebook from codewords
if gene_names is None:
# use a reverse-sorted list of integers as codewords
gene_names = [uuid.uuid4() for _ in range(n_codes)]
assert n_codes == len(gene_names)
codebook = [{Codebook.Constants.CODEWORD.value: w, Codebook.Constants.GENE.value: g}
for w, g in zip(codewords, gene_names)]
return cls.from_code_array(codebook, n_hyb=n_hyb, n_ch=n_channel)
| [
"numpy.where",
"json.dump",
"uuid.uuid4",
"pandas.Index",
"numpy.dot",
"numpy.zeros",
"numpy.empty",
"functools.partial",
"numpy.random.randint",
"json.load",
"numpy.arange"
] | [((4004, 4060), 'pandas.Index', 'pd.Index', (['code_names'], {'name': 'Codebook.Constants.GENE.value'}), '(code_names, name=Codebook.Constants.GENE.value)\n', (4012, 4060), True, 'import pandas as pd\n'), ((14123, 14183), 'functools.partial', 'functools.partial', (['_min_euclidean_distance'], {'codes': 'norm_codes'}), '(_min_euclidean_distance, codes=norm_codes)\n', (14140, 14183), False, 'import functools\n'), ((14427, 14446), 'pandas.Index', 'pd.Index', (['qualities'], {}), '(qualities)\n', (14435, 14446), True, 'import pandas as pd\n'), ((14716, 14734), 'pandas.Index', 'pd.Index', (['gene_ids'], {}), '(gene_ids)\n', (14724, 14734), True, 'import pandas as pd\n'), ((16881, 16925), 'numpy.empty', 'np.empty', (['intensities.shape[0]'], {'dtype': 'object'}), '(intensities.shape[0], dtype=object)\n', (16889, 16925), True, 'import numpy as np\n'), ((16971, 16992), 'numpy.arange', 'np.arange', (['a.shape[0]'], {}), '(a.shape[0])\n', (16980, 16992), True, 'import numpy as np\n'), ((2882, 2905), 'numpy.dot', 'np.dot', (['*self.shape[1:]'], {}), '(*self.shape[1:])\n', (2888, 2905), True, 'import numpy as np\n'), ((11247, 11259), 'json.load', 'json.load', (['f'], {}), '(f)\n', (11256, 11259), False, 'import json\n'), ((12422, 12446), 'json.dump', 'json.dump', (['code_array', 'f'], {}), '(code_array, f)\n', (12431, 12446), False, 'import json\n'), ((4098, 4159), 'numpy.zeros', 'np.zeros', (['(codes_index.shape[0], n_ch, n_hyb)'], {'dtype': 'np.uint8'}), '((codes_index.shape[0], n_ch, n_hyb), dtype=np.uint8)\n', (4106, 4159), True, 'import numpy as np\n'), ((19272, 19284), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (19282, 19284), False, 'import uuid\n'), ((17012, 17031), 'numpy.where', 'np.where', (['(a[i] == b)'], {}), '(a[i] == b)\n', (17020, 17031), True, 'import numpy as np\n'), ((4236, 4251), 'numpy.arange', 'np.arange', (['n_ch'], {}), '(n_ch)\n', (4245, 4251), True, 'import numpy as np\n'), ((4302, 4318), 'numpy.arange', 'np.arange', (['n_hyb'], {}), 
'(n_hyb)\n', (4311, 4318), True, 'import numpy as np\n'), ((18794, 18825), 'numpy.random.randint', 'np.random.randint', (['(0)', 'n_channel'], {}), '(0, n_channel)\n', (18811, 18825), True, 'import numpy as np\n'), ((18835, 18851), 'numpy.arange', 'np.arange', (['n_hyb'], {}), '(n_hyb)\n', (18844, 18851), True, 'import numpy as np\n')] |
import os
import lmdb
import glob
import numpy as np
from pathlib import Path
from typing import Callable, List, Dict
from core.utils.data_utils.data_writter import write_json, write_episode_lmdb
from core.utils.others.image_helper import save_image, is_image
def default_post_process_fn(observations):
    """Default observation post-processing: keep image-like entries as sensor data.

    Returns a ``(sensor_data, others)`` pair; ``others`` is always empty here.
    """
    sensor_data = {key: value for key, value in observations.items() if is_image(value)}
    others = {}
    return sensor_data, others
class BenchmarkDatasetSaver():
    """
    Benchmark dataset saver in DI-drive. It can save dataset in standard benchmark dataset form
    defined in DI-drive. User can pass a post-process function to specialize 'sensor_data' and
    'others' saved in dataset.

    :Arguments:
        - save_dir (str): Dataset folder path.
        - obs_cfg (Dict): Observation config dict in simulator.
        - post_process_fn (Callable, optional): Post-process function defined by user. Defaults to None.

    :Interfaces: make_dataset_path, save_episodes_data, make_index
    """

    def __init__(self, save_dir: str, obs_cfg: Dict, post_process_fn: Callable = None):
        """
        Store the saving configuration.

        :Arguments:
            - save_dir (str): Dataset folder path.
            - obs_cfg (Dict): Observation config dict in simulator.
            - post_process_fn (Callable, optional): Function mapping an observation dict to a
              ``(sensor_data, others)`` pair. Defaults to None, in which case only image-like
              observations are kept as sensor data.
        """
        self._save_dir = save_dir
        self._obs_cfg = obs_cfg
        self._post_process_fn = post_process_fn
        if self._post_process_fn is None:
            self._post_process_fn = default_post_process_fn

    def save_episodes_data(self, episodes_data: List, start_episode: int = 0):
        """
        Save data from several episodes sampled from collector, with 'env_param' and 'data' key
        saved in each episode.

        :Arguments:
            - episodes_data (List): Saved data of episodes.
            - start_episode (int, optional): Episode count to start numbering from. Defaults to 0.
        """
        for episode, episode_data in enumerate(episodes_data):
            data = list()
            episode_path = Path(self._save_dir).joinpath('episode_%05d' % (start_episode + episode))
            BenchmarkDatasetSaver._make_episode_path(episode_path, episode_data['env_param'])
            for idx, frame_data in enumerate(episode_data['data']):
                observations = frame_data['obs']
                actions = frame_data['action']
                # some collectors provide no post-noise "real" actions; mirror the raw
                # ones so the measurement vector layout stays fixed
                if 'real_steer' not in actions:
                    actions['real_steer'] = actions['steer']
                    actions['real_throttle'] = actions['throttle']
                    actions['real_brake'] = actions['brake']
                measurements = [
                    observations['tick'],
                    observations['timestamp'],
                    observations['forward_vector'],
                    observations['acceleration'],
                    observations['location'],
                    observations['speed'],
                    observations['command'],
                    actions['steer'],
                    actions['throttle'],
                    actions['brake'],
                    actions['real_steer'],
                    actions['real_throttle'],
                    actions['real_brake'],
                    observations['tl_state'],
                    observations['tl_dis'],
                ]
                # promote 0-d (scalar) arrays to 1-d so they can be concatenated
                measurements = [x if x.shape != () else np.float32([x]) for x in measurements]
                measurements = np.concatenate(measurements, 0)
                sensor_data, others = self._post_process_fn(observations)
                data.append((measurements, sensor_data, others))
            BenchmarkDatasetSaver._save_episode_data(episode_path, data)

    def make_dataset_path(self, dataset_metainfo: Dict):
        """
        Make dataset folder and write dataset meta infomation into a json file.

        :Arguments:
            - dataset_metainfo (Dict): the metainfo of datasets
        """
        if not os.path.exists(self._save_dir):
            os.makedirs(self._save_dir)
        obs_name = ['rgb', 'depth', 'segmentation']
        obs_metainfo = {}
        for obs_item in self._obs_cfg:
            if obs_item.type in obs_name:
                type_name = obs_item.type
                # Bug fix: ``pop`` returns the removed *value*, so the previous
                # ``obs_item.copy().pop('type')`` stored the type string instead of the
                # config. Copy first, then drop the redundant 'type' key.
                obs_item = obs_item.copy()
                obs_item.pop('type')
                obs_metainfo.update({type_name: obs_item})
        dataset_metainfo.update({'obs': obs_metainfo})
        write_json(os.path.join(self._save_dir, 'metainfo.json'), dataset_metainfo)

    @staticmethod
    def _make_episode_path(episode_path, env_params):
        # create the episode folder and record the environment parameters beside the data
        os.makedirs(episode_path, exist_ok=True)
        write_json(os.path.join(episode_path, 'episode_metainfo.json'), env_params)

    @staticmethod
    def _save_episode_data(episode_path, data):
        # measurements and 'others' go into an lmdb; each sensor image is saved as a png
        write_episode_lmdb(episode_path, data)
        for i, x in enumerate(data):
            sensor_data = x[1]
            for k, v in sensor_data.items():
                save_image(os.path.join(episode_path, "%s_%05d.png" % (k, i)), v)

    def make_index(self, command_index: int = 11):
        """
        Make an index txt file to save all the command of each frame in dataset.

        :Arguments:
            - command_index (int, optional): The index of command in 'measurements.lmdb'. Defaults to 11.
        """
        index_path = os.path.join(self._save_dir, 'index.txt')
        episode_list = sorted(glob.glob('%s/episode*' % self._save_dir))
        with open(index_path, 'w') as index_f:
            for episode_path in episode_list:
                eph = os.path.split(episode_path)[-1]
                txn = lmdb.open(os.path.join(episode_path, 'measurements.lmdb')).begin(write=False)
                n = int(txn.get('len'.encode()))
                for i in range(n):
                    measurements = np.frombuffer(txn.get(('measurements_%05d' % i).encode()), np.float32)
                    # one line per frame: "<episode>,<frame>,<command>"
                    index_f.write('%s,%d,%d\n' % (eph, i, int(measurements[command_index])))
| [
"core.utils.data_utils.data_writter.write_episode_lmdb",
"os.path.exists",
"os.makedirs",
"pathlib.Path",
"core.utils.others.image_helper.is_image",
"os.path.join",
"os.path.split",
"numpy.concatenate",
"numpy.float32",
"glob.glob"
] | [((398, 413), 'core.utils.others.image_helper.is_image', 'is_image', (['value'], {}), '(value)\n', (406, 413), False, 'from core.utils.others.image_helper import save_image, is_image\n'), ((4619, 4659), 'os.makedirs', 'os.makedirs', (['episode_path'], {'exist_ok': '(True)'}), '(episode_path, exist_ok=True)\n', (4630, 4659), False, 'import os\n'), ((4819, 4857), 'core.utils.data_utils.data_writter.write_episode_lmdb', 'write_episode_lmdb', (['episode_path', 'data'], {}), '(episode_path, data)\n', (4837, 4857), False, 'from core.utils.data_utils.data_writter import write_json, write_episode_lmdb\n'), ((5358, 5399), 'os.path.join', 'os.path.join', (['self._save_dir', '"""index.txt"""'], {}), "(self._save_dir, 'index.txt')\n", (5370, 5399), False, 'import os\n'), ((5423, 5464), 'glob.glob', 'glob.glob', (["('%s/episode*' % self._save_dir)"], {}), "('%s/episode*' % self._save_dir)\n", (5432, 5464), False, 'import glob\n'), ((4009, 4039), 'os.path.exists', 'os.path.exists', (['self._save_dir'], {}), '(self._save_dir)\n', (4023, 4039), False, 'import os\n'), ((4053, 4080), 'os.makedirs', 'os.makedirs', (['self._save_dir'], {}), '(self._save_dir)\n', (4064, 4080), False, 'import os\n'), ((4473, 4518), 'os.path.join', 'os.path.join', (['self._save_dir', '"""metainfo.json"""'], {}), "(self._save_dir, 'metainfo.json')\n", (4485, 4518), False, 'import os\n'), ((4679, 4730), 'os.path.join', 'os.path.join', (['episode_path', '"""episode_metainfo.json"""'], {}), "(episode_path, 'episode_metainfo.json')\n", (4691, 4730), False, 'import os\n'), ((3503, 3534), 'numpy.concatenate', 'np.concatenate', (['measurements', '(0)'], {}), '(measurements, 0)\n', (3517, 3534), True, 'import numpy as np\n'), ((2091, 2111), 'pathlib.Path', 'Path', (['self._save_dir'], {}), '(self._save_dir)\n', (2095, 2111), False, 'from pathlib import Path\n'), ((4998, 5048), 'os.path.join', 'os.path.join', (['episode_path', "('%s_%05d.png' % (k, i))"], {}), "(episode_path, '%s_%05d.png' % (k, i))\n", (5010, 
5048), False, 'import os\n'), ((5625, 5652), 'os.path.split', 'os.path.split', (['episode_path'], {}), '(episode_path)\n', (5638, 5652), False, 'import os\n'), ((3433, 3448), 'numpy.float32', 'np.float32', (['[x]'], {}), '([x])\n', (3443, 3448), True, 'import numpy as np\n'), ((5689, 5736), 'os.path.join', 'os.path.join', (['episode_path', '"""measurements.lmdb"""'], {}), "(episode_path, 'measurements.lmdb')\n", (5701, 5736), False, 'import os\n')] |
import pandas as pd
import numpy as np
import scanpy.api as sc
from pathlib import Path
# Reference values (presumably exported from Seurat — see filename) checked in next to this test.
FILE = Path(__file__).parent / Path('_scripts/seurat_hvg.csv')
def test_higly_variable_genes_compare_to_seurat():
    """Check that seurat-flavored HVG selection reproduces Seurat's reference output."""
    seurat_hvg_info = pd.read_csv(FILE, sep=' ')
    pbmc = sc.datasets.pbmc68k_reduced()
    pbmc.X = pbmc.raw.X
    pbmc.var_names_make_unique()
    sc.pp.normalize_per_cell(pbmc, counts_per_cell_after=1e4)
    sc.pp.log1p(pbmc)
    sc.pp.highly_variable_genes(
        pbmc, flavor='seurat', min_mean=0.0125, max_mean=3, min_disp=0.5)
    np.testing.assert_array_equal(
        seurat_hvg_info['highly_variable'], pbmc.var['highly_variable'])
    # rtol/atol of 2e-05 is tight enough that e.g. 4 vs 3.9999 still fails,
    # yet the statistics below agree to that precision.
    for column in ('means', 'dispersions', 'dispersions_norm'):
        np.testing.assert_allclose(seurat_hvg_info[column],
                                   pbmc.var[column],
                                   rtol=2e-05,
                                   atol=2e-05)
| [
"pandas.read_csv",
"pathlib.Path",
"numpy.testing.assert_allclose",
"scanpy.api.pp.highly_variable_genes",
"scanpy.api.datasets.pbmc68k_reduced",
"scanpy.api.pp.normalize_per_cell",
"scanpy.api.pp.log1p",
"numpy.testing.assert_array_equal"
] | [((120, 151), 'pathlib.Path', 'Path', (['"""_scripts/seurat_hvg.csv"""'], {}), "('_scripts/seurat_hvg.csv')\n", (124, 151), False, 'from pathlib import Path\n'), ((226, 252), 'pandas.read_csv', 'pd.read_csv', (['FILE'], {'sep': '""" """'}), "(FILE, sep=' ')\n", (237, 252), True, 'import pandas as pd\n'), ((265, 294), 'scanpy.api.datasets.pbmc68k_reduced', 'sc.datasets.pbmc68k_reduced', ([], {}), '()\n', (292, 294), True, 'import scanpy.api as sc\n'), ((357, 418), 'scanpy.api.pp.normalize_per_cell', 'sc.pp.normalize_per_cell', (['pbmc'], {'counts_per_cell_after': '(10000.0)'}), '(pbmc, counts_per_cell_after=10000.0)\n', (381, 418), True, 'import scanpy.api as sc\n'), ((419, 436), 'scanpy.api.pp.log1p', 'sc.pp.log1p', (['pbmc'], {}), '(pbmc)\n', (430, 436), True, 'import scanpy.api as sc\n'), ((441, 538), 'scanpy.api.pp.highly_variable_genes', 'sc.pp.highly_variable_genes', (['pbmc'], {'flavor': '"""seurat"""', 'min_mean': '(0.0125)', 'max_mean': '(3)', 'min_disp': '(0.5)'}), "(pbmc, flavor='seurat', min_mean=0.0125,\n max_mean=3, min_disp=0.5)\n", (468, 538), True, 'import scanpy.api as sc\n'), ((540, 639), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (["seurat_hvg_info['highly_variable']", "pbmc.var['highly_variable']"], {}), "(seurat_hvg_info['highly_variable'], pbmc.var[\n 'highly_variable'])\n", (569, 639), True, 'import numpy as np\n'), ((763, 862), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (["seurat_hvg_info['means']", "pbmc.var['means']"], {'rtol': '(2e-05)', 'atol': '(2e-05)'}), "(seurat_hvg_info['means'], pbmc.var['means'],\n rtol=2e-05, atol=2e-05)\n", (789, 862), True, 'import numpy as np\n'), ((956, 1068), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (["seurat_hvg_info['dispersions']", "pbmc.var['dispersions']"], {'rtol': '(2e-05)', 'atol': '(2e-05)'}), "(seurat_hvg_info['dispersions'], pbmc.var[\n 'dispersions'], rtol=2e-05, atol=2e-05)\n", (982, 1068), True, 'import numpy as np\n'), 
((1161, 1283), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (["seurat_hvg_info['dispersions_norm']", "pbmc.var['dispersions_norm']"], {'rtol': '(2e-05)', 'atol': '(2e-05)'}), "(seurat_hvg_info['dispersions_norm'], pbmc.var[\n 'dispersions_norm'], rtol=2e-05, atol=2e-05)\n", (1187, 1283), True, 'import numpy as np\n'), ((96, 110), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (100, 110), False, 'from pathlib import Path\n')] |
from matplotlib import pyplot as plt
import seaborn as sns
import numpy as np
def plot_hist(df, sample=True, n_cols=3, **histplot_kwargs):
    """Plot a histogram for every numeric column of *df* in a grid of subplots.

    Parameters
    ----------
    df : pandas.DataFrame
        Input data; only numeric columns are plotted.
    sample : bool, default True
        If True, work on a random sample of at most 100000 rows to keep plotting fast.
    n_cols : int, default 3
        Number of subplot columns in the grid.
    **histplot_kwargs
        Forwarded to seaborn.histplot.
    """
    sample_threshold = 100000
    if sample:
        sample_number = min(sample_threshold, df.shape[0])
        _df = df.sample(sample_number)
    else:
        # bug fix: _df was previously undefined when sample=False (NameError)
        _df = df
    _df = _df.select_dtypes(include="number")
    n_rows = int(np.ceil(_df.shape[1] / n_cols))
    # squeeze=False keeps `axes` 2-D even for a single row, so the
    # axes[i // n_cols, i % n_cols] indexing below is always valid
    fig, axes = plt.subplots(nrows=n_rows, ncols=n_cols, figsize=(20, 15), squeeze=False)
    for i, column in enumerate(_df.columns):
        sns.histplot(
            _df[column], bins=50, ax=axes[i // n_cols, i % n_cols], **histplot_kwargs
        )
    plt.tight_layout()
| [
"numpy.ceil",
"seaborn.histplot",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.tight_layout"
] | [((377, 392), 'numpy.ceil', 'np.ceil', (['n_rows'], {}), '(n_rows)\n', (384, 392), True, 'import numpy as np\n'), ((435, 493), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'nrows': 'n_rows', 'ncols': 'n_cols', 'figsize': '(20, 15)'}), '(nrows=n_rows, ncols=n_cols, figsize=(20, 15))\n', (447, 493), True, 'from matplotlib import pyplot as plt\n'), ((662, 680), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (678, 680), True, 'from matplotlib import pyplot as plt\n'), ((548, 640), 'seaborn.histplot', 'sns.histplot', (['_df[column]'], {'bins': '(50)', 'ax': 'axes[i // n_cols, i % n_cols]'}), '(_df[column], bins=50, ax=axes[i // n_cols, i % n_cols], **\n histplot_kwargs)\n', (560, 640), True, 'import seaborn as sns\n')] |
import numpy as np
import cv2
# Color per segmentation class index (0 = background/black); OpenCV BGR order assumed.
segmentation_colors = np.array([[0, 0, 0],
                       [255, 191, 0],
                       [192, 67, 251]], dtype=np.uint8)
# Color used for detection rectangles and label backgrounds.
detection_color = (191, 255, 0)
# Single class name stamped on every detection.
label = "car"
# Default image-plane horizon points used by the bird's-eye-view warp.
ORIGINAL_HORIZON_POINTS = np.float32([[571, 337], [652, 337]])
# Mutable module-level state shared with the mouse callback in get_horizon_points().
num_horizon_points = 0
new_horizon_points = []
def util_draw_seg(seg_map, image, alpha=0.5):
    """Overlay a colored segmentation map onto an image.

    With alpha == 0 the original image and the colored map are placed
    side by side instead of being blended.
    """
    # paint each labeled pixel with its class color (label 0 stays untouched)
    color_segmap = cv2.resize(image, (seg_map.shape[1], seg_map.shape[0]))
    labeled = seg_map > 0
    color_segmap[labeled] = segmentation_colors[seg_map[labeled]]
    # bring the colored map back to the original image resolution
    color_segmap = cv2.resize(color_segmap, (image.shape[1], image.shape[0]))
    if alpha == 0:
        return np.hstack((image, color_segmap))
    return cv2.addWeighted(image, alpha, color_segmap, 1 - alpha, 0)
# Ref: https://github.com/datvuthanh/HybridNets/blob/d43b0aa8de2a1d3280084270d29cf4c7abf640ae/utils/plot.py#L52
def util_draw_detections(boxes, scores, image, text=True):
    """Draw detection rectangles (and optional "car: NN%" labels) on *image* in place."""
    tl = int(round(0.0015 * max(image.shape[0:2])))  # line thickness, scaled to image size
    tf = max(tl, 1)  # font thickness
    for box, score in zip(boxes, scores):
        # box is (x1, y1, x2, y2); c1/c2 are the top-left / bottom-right corners
        c1, c2 = (int(box[0]), int(box[1])), (int(box[2]), int(box[3]))
        cv2.rectangle(image, c1, c2, detection_color, thickness=tl)
        if text:
            # size the filled background box to fit "<label>: <score>%"
            s_size = cv2.getTextSize(str('{:.0%}'.format(score)), 0, fontScale=float(tl) / 3, thickness=tf)[0]
            t_size = cv2.getTextSize(label, 0, fontScale=float(tl) / 3, thickness=tf)[0]
            c2 = c1[0] + t_size[0] + s_size[0] + 15, c1[1] - t_size[1] - 3
            cv2.rectangle(image, c1, c2, detection_color, -1)  # filled
            # NOTE(review): cv2.FONT_HERSHEY_SIMPLEX is passed as *lineType* here;
            # a line-type constant (e.g. cv2.LINE_AA) was likely intended — confirm.
            cv2.putText(image, '{}: {:.0%}'.format(label, score), (c1[0], c1[1] - 2), 0, float(tl) / 3, [0, 0, 0],
                        thickness=tf, lineType=cv2.FONT_HERSHEY_SIMPLEX)
    return image
def util_draw_bird_eye_view(seg_map, hoizon_points=ORIGINAL_HORIZON_POINTS):
    """Warp a segmentation map into a square bird's-eye view.

    The perspective transform maps the two bottom image corners and the two
    given horizon points onto a vertical trapezoid in the output square.
    """
    img_h, img_w = seg_map.shape[:2]
    bev_w, bev_h = img_h, img_h
    offset = bev_w / 2.5
    dst_points = np.float32([[offset, bev_h], [bev_w - offset, bev_h],
                             [offset, 0], [bev_w - offset, 0]])
    src_points = np.vstack((np.float32([[0, img_h], [img_w, img_h]]), hoizon_points))
    M = cv2.getPerspectiveTransform(src_points, dst_points)
    return cv2.warpPerspective(seg_map, M, (bev_w, bev_h))
# Ref: https://github.com/datvuthanh/HybridNets/blob/d43b0aa8de2a1d3280084270d29cf4c7abf640ae/utils/utils.py#L615
def transform_boxes(boxes, anchors):
    """Decode regression offsets into absolute boxes.

    anchors are (y1, x1, y2, x2); boxes hold (dy, dx, dh, dw) offsets.
    Returns an (N, 4) array of (xmin, ymin, xmax, ymax).
    """
    anchor_h = anchors[:, 2] - anchors[:, 0]
    anchor_w = anchors[:, 3] - anchors[:, 1]
    anchor_cy = (anchors[:, 0] + anchors[:, 2]) / 2
    anchor_cx = (anchors[:, 1] + anchors[:, 3]) / 2
    # sizes are predicted in log-space relative to the anchor
    box_h = np.exp(boxes[:, 2]) * anchor_h
    box_w = np.exp(boxes[:, 3]) * anchor_w
    # centers are predicted as fractions of the anchor size
    cy = boxes[:, 0] * anchor_h + anchor_cy
    cx = boxes[:, 1] * anchor_w + anchor_cx
    half_h = box_h / 2.
    half_w = box_w / 2.
    return np.vstack((cx - half_w, cy - half_h, cx + half_w, cy + half_h)).T
# Ref: https://python-ai-learn.com/2021/02/14/nmsfast/
def iou_np(box, boxes, area, areas):
    """IoU of one (x1, y1, x2, y2) box against many.

    Coordinates are treated as inclusive pixel indices (hence the +1 when
    computing side lengths); `area`/`areas` are the precomputed box areas.
    """
    inter_x1 = np.maximum(box[0], boxes[:, 0])
    inter_y1 = np.maximum(box[1], boxes[:, 1])
    inter_x2 = np.minimum(box[2], boxes[:, 2])
    inter_y2 = np.minimum(box[3], boxes[:, 3])
    # clamp at 0 so disjoint boxes contribute no intersection
    inter_w = np.maximum(0, inter_x2 - inter_x1 + 1)
    inter_h = np.maximum(0, inter_y2 - inter_y1 + 1)
    intersection = inter_w * inter_h
    return intersection / (area + areas - intersection)
# Ref: https://python-ai-learn.com/2021/02/14/nmsfast/
def nms_fast(bboxes, scores, iou_threshold=0.5):
    """Non-maximum suppression over (x1, y1, x2, y2) boxes.

    Walks candidates from highest to lowest score; each kept box suppresses every
    lower-scored box whose IoU with it is >= iou_threshold. Returns the surviving
    boxes and their scores, ordered by ascending score.
    """
    # inclusive pixel coordinates, hence the +1 on each side length
    areas = (bboxes[:,2] - bboxes[:,0] + 1) \
            * (bboxes[:,3] - bboxes[:,1] + 1)
    # ascending by score; the negative index i walks from the best box downwards
    sort_index = np.argsort(scores)
    i = -1
    # continue while at least one lower-scored candidate remains before position i
    while(len(sort_index) >= 1 - i):
        max_scr_ind = sort_index[i]
        ind_list = sort_index[:i]
        iou = iou_np(bboxes[max_scr_ind], bboxes[ind_list], \
                     areas[max_scr_ind], areas[ind_list])
        # drop all lower-scored boxes overlapping the current keeper too much
        del_index = np.where(iou >= iou_threshold)
        sort_index = np.delete(sort_index, del_index)
        i -= 1
    bboxes = bboxes[sort_index]
    scores = scores[sort_index]
    return bboxes, scores
def get_horizon_points(image):
    """Interactively pick two horizon points by double-clicking on the image.

    Opens an OpenCV window whose mouse callback (get_horizon_point) appends each
    double-clicked point to the module-level new_horizon_points list. Guide lines
    are drawn for visual feedback; the function blocks until two points have been
    picked, then returns them as a float32 array of shape (2, 2).
    """
    cv2.namedWindow("Get horizon points", cv2.WINDOW_NORMAL)
    cv2.setMouseCallback("Get horizon points", get_horizon_point)
    # Draw horizontal line
    image = cv2.line(image, (0,image.shape[0]//2),
                     (image.shape[1],image.shape[0]//2),
                     (0, 0, 251), 1)
    cv2.imshow("Get horizon points", image)
    num_lines = 0
    while True:
        # first click: line from the bottom-left corner to the picked point
        if (num_lines == 0) and (num_horizon_points == 1):
            image = cv2.line(image, (0,image.shape[0]),
                             (new_horizon_points[0][0], new_horizon_points[0][1]),
                             (192, 67, 251), 3)
            image = cv2.circle(image, (new_horizon_points[0][0], new_horizon_points[0][1]),
                               5, (251, 191, 67), -1)
            cv2.imshow("Get horizon points", image)
            num_lines += 1
        # second click: line from the bottom-right corner, then stop collecting
        elif(num_lines == 1) and (num_horizon_points == 2):
            image = cv2.line(image, (image.shape[1],image.shape[0]),
                             (new_horizon_points[1][0], new_horizon_points[1][1]),
                             (192, 67, 251), 3)
            image = cv2.circle(image, (new_horizon_points[1][0], new_horizon_points[1][1]),
                               5, (251, 191, 67), -1)
            cv2.imshow("Get horizon points", image)
            num_lines += 1
            break
        cv2.waitKey(100)
    cv2.waitKey(1000)
    cv2.destroyWindow("Get horizon points")
    horizon_points = np.float32(new_horizon_points)
    print(f"horizon_points = np.{repr(horizon_points)}")
    return horizon_points
def get_horizon_point(event,x,y,flags,param):
    """OpenCV mouse callback: record a horizon point on left-button double-click.

    Appends the clicked (x, y) to the module-level new_horizon_points list and
    increments num_horizon_points, which get_horizon_points polls.
    """
    global num_horizon_points, new_horizon_points
    if event == cv2.EVENT_LBUTTONDBLCLK:
        new_horizon_points.append([x,y])
        num_horizon_points += 1
| [
"cv2.rectangle",
"numpy.hstack",
"cv2.imshow",
"numpy.argsort",
"numpy.array",
"cv2.warpPerspective",
"cv2.setMouseCallback",
"numpy.where",
"numpy.delete",
"cv2.line",
"numpy.exp",
"cv2.addWeighted",
"numpy.vstack",
"numpy.maximum",
"cv2.waitKey",
"cv2.getPerspectiveTransform",
"cv2... | [((53, 121), 'numpy.array', 'np.array', (['[[0, 0, 0], [255, 191, 0], [192, 67, 251]]'], {'dtype': 'np.uint8'}), '([[0, 0, 0], [255, 191, 0], [192, 67, 251]], dtype=np.uint8)\n', (61, 121), True, 'import numpy as np\n'), ((225, 261), 'numpy.float32', 'np.float32', (['[[571, 337], [652, 337]]'], {}), '([[571, 337], [652, 337]])\n', (235, 261), True, 'import numpy as np\n'), ((420, 475), 'cv2.resize', 'cv2.resize', (['image', '(seg_map.shape[1], seg_map.shape[0])'], {}), '(image, (seg_map.shape[1], seg_map.shape[0]))\n', (430, 475), False, 'import cv2\n'), ((595, 653), 'cv2.resize', 'cv2.resize', (['color_segmap', '(image.shape[1], image.shape[0])'], {}), '(color_segmap, (image.shape[1], image.shape[0]))\n', (605, 653), False, 'import cv2\n'), ((2002, 2134), 'numpy.float32', 'np.float32', (['[[offset, bird_eye_view_h], [bird_eye_view_w - offset, bird_eye_view_h], [\n offset, 0], [bird_eye_view_w - offset, 0]]'], {}), '([[offset, bird_eye_view_h], [bird_eye_view_w - offset,\n bird_eye_view_h], [offset, 0], [bird_eye_view_w - offset, 0]])\n', (2012, 2134), True, 'import numpy as np\n'), ((2233, 2296), 'cv2.getPerspectiveTransform', 'cv2.getPerspectiveTransform', (['image_points', 'bird_eye_view_points'], {}), '(image_points, bird_eye_view_points)\n', (2260, 2296), False, 'import cv2\n'), ((2317, 2384), 'cv2.warpPerspective', 'cv2.warpPerspective', (['seg_map', 'M', '(bird_eye_view_w, bird_eye_view_h)'], {}), '(seg_map, M, (bird_eye_view_w, bird_eye_view_h))\n', (2336, 2384), False, 'import cv2\n'), ((3147, 3178), 'numpy.maximum', 'np.maximum', (['box[0]', 'boxes[:, 0]'], {}), '(box[0], boxes[:, 0])\n', (3157, 3178), True, 'import numpy as np\n'), ((3187, 3218), 'numpy.maximum', 'np.maximum', (['box[1]', 'boxes[:, 1]'], {}), '(box[1], boxes[:, 1])\n', (3197, 3218), True, 'import numpy as np\n'), ((3227, 3258), 'numpy.minimum', 'np.minimum', (['box[2]', 'boxes[:, 2]'], {}), '(box[2], boxes[:, 2])\n', (3237, 3258), True, 'import numpy as np\n'), ((3267, 3298), 
'numpy.minimum', 'np.minimum', (['box[3]', 'boxes[:, 3]'], {}), '(box[3], boxes[:, 3])\n', (3277, 3298), True, 'import numpy as np\n'), ((3304, 3336), 'numpy.maximum', 'np.maximum', (['(0)', '(x_max - x_min + 1)'], {}), '(0, x_max - x_min + 1)\n', (3314, 3336), True, 'import numpy as np\n'), ((3342, 3374), 'numpy.maximum', 'np.maximum', (['(0)', '(y_max - y_min + 1)'], {}), '(0, y_max - y_min + 1)\n', (3352, 3374), True, 'import numpy as np\n'), ((3663, 3681), 'numpy.argsort', 'np.argsort', (['scores'], {}), '(scores)\n', (3673, 3681), True, 'import numpy as np\n'), ((4109, 4165), 'cv2.namedWindow', 'cv2.namedWindow', (['"""Get horizon points"""', 'cv2.WINDOW_NORMAL'], {}), "('Get horizon points', cv2.WINDOW_NORMAL)\n", (4124, 4165), False, 'import cv2\n'), ((4167, 4228), 'cv2.setMouseCallback', 'cv2.setMouseCallback', (['"""Get horizon points"""', 'get_horizon_point'], {}), "('Get horizon points', get_horizon_point)\n", (4187, 4228), False, 'import cv2\n'), ((4263, 4363), 'cv2.line', 'cv2.line', (['image', '(0, image.shape[0] // 2)', '(image.shape[1], image.shape[0] // 2)', '(0, 0, 251)', '(1)'], {}), '(image, (0, image.shape[0] // 2), (image.shape[1], image.shape[0] //\n 2), (0, 0, 251), 1)\n', (4271, 4363), False, 'import cv2\n'), ((4375, 4414), 'cv2.imshow', 'cv2.imshow', (['"""Get horizon points"""', 'image'], {}), "('Get horizon points', image)\n", (4385, 4414), False, 'import cv2\n'), ((5238, 5255), 'cv2.waitKey', 'cv2.waitKey', (['(1000)'], {}), '(1000)\n', (5249, 5255), False, 'import cv2\n'), ((5257, 5296), 'cv2.destroyWindow', 'cv2.destroyWindow', (['"""Get horizon points"""'], {}), "('Get horizon points')\n", (5274, 5296), False, 'import cv2\n'), ((5316, 5346), 'numpy.float32', 'np.float32', (['new_horizon_points'], {}), '(new_horizon_points)\n', (5326, 5346), True, 'import numpy as np\n'), ((708, 740), 'numpy.hstack', 'np.hstack', (['(image, color_segmap)'], {}), '((image, color_segmap))\n', (717, 740), True, 'import numpy as np\n'), ((765, 822), 
'cv2.addWeighted', 'cv2.addWeighted', (['image', 'alpha', 'color_segmap', '(1 - alpha)', '(0)'], {}), '(image, alpha, color_segmap, 1 - alpha, 0)\n', (780, 822), False, 'import cv2\n'), ((1226, 1285), 'cv2.rectangle', 'cv2.rectangle', (['image', 'c1', 'c2', 'detection_color'], {'thickness': 'tl'}), '(image, c1, c2, detection_color, thickness=tl)\n', (1239, 1285), False, 'import cv2\n'), ((2743, 2762), 'numpy.exp', 'np.exp', (['boxes[:, 3]'], {}), '(boxes[:, 3])\n', (2749, 2762), True, 'import numpy as np\n'), ((2773, 2792), 'numpy.exp', 'np.exp', (['boxes[:, 2]'], {}), '(boxes[:, 2])\n', (2779, 2792), True, 'import numpy as np\n'), ((3005, 3040), 'numpy.vstack', 'np.vstack', (['(xmin, ymin, xmax, ymax)'], {}), '((xmin, ymin, xmax, ymax))\n', (3014, 3040), True, 'import numpy as np\n'), ((3902, 3932), 'numpy.where', 'np.where', (['(iou >= iou_threshold)'], {}), '(iou >= iou_threshold)\n', (3910, 3932), True, 'import numpy as np\n'), ((3948, 3980), 'numpy.delete', 'np.delete', (['sort_index', 'del_index'], {}), '(sort_index, del_index)\n', (3957, 3980), True, 'import numpy as np\n'), ((5219, 5235), 'cv2.waitKey', 'cv2.waitKey', (['(100)'], {}), '(100)\n', (5230, 5235), False, 'import cv2\n'), ((1548, 1597), 'cv2.rectangle', 'cv2.rectangle', (['image', 'c1', 'c2', 'detection_color', '(-1)'], {}), '(image, c1, c2, detection_color, -1)\n', (1561, 1597), False, 'import cv2\n'), ((2170, 2210), 'numpy.float32', 'np.float32', (['[[0, img_h], [img_w, img_h]]'], {}), '([[0, img_h], [img_w, img_h]])\n', (2180, 2210), True, 'import numpy as np\n'), ((4510, 4623), 'cv2.line', 'cv2.line', (['image', '(0, image.shape[0])', '(new_horizon_points[0][0], new_horizon_points[0][1])', '(192, 67, 251)', '(3)'], {}), '(image, (0, image.shape[0]), (new_horizon_points[0][0],\n new_horizon_points[0][1]), (192, 67, 251), 3)\n', (4518, 4623), False, 'import cv2\n'), ((4650, 4748), 'cv2.circle', 'cv2.circle', (['image', '(new_horizon_points[0][0], new_horizon_points[0][1])', '(5)', '(251, 191, 
67)', '(-1)'], {}), '(image, (new_horizon_points[0][0], new_horizon_points[0][1]), 5,\n (251, 191, 67), -1)\n', (4660, 4748), False, 'import cv2\n'), ((4763, 4802), 'cv2.imshow', 'cv2.imshow', (['"""Get horizon points"""', 'image'], {}), "('Get horizon points', image)\n", (4773, 4802), False, 'import cv2\n'), ((4888, 5014), 'cv2.line', 'cv2.line', (['image', '(image.shape[1], image.shape[0])', '(new_horizon_points[1][0], new_horizon_points[1][1])', '(192, 67, 251)', '(3)'], {}), '(image, (image.shape[1], image.shape[0]), (new_horizon_points[1][0],\n new_horizon_points[1][1]), (192, 67, 251), 3)\n', (4896, 5014), False, 'import cv2\n'), ((5035, 5133), 'cv2.circle', 'cv2.circle', (['image', '(new_horizon_points[1][0], new_horizon_points[1][1])', '(5)', '(251, 191, 67)', '(-1)'], {}), '(image, (new_horizon_points[1][0], new_horizon_points[1][1]), 5,\n (251, 191, 67), -1)\n', (5045, 5133), False, 'import cv2\n'), ((5149, 5188), 'cv2.imshow', 'cv2.imshow', (['"""Get horizon points"""', 'image'], {}), "('Get horizon points', image)\n", (5159, 5188), False, 'import cv2\n')] |
'''
Useful functions - specifically will be used for feed training images
and model inference.
'''
import numpy as np
from os import listdir, mkdir, sep, path, walk
from os.path import join, exists, splitext
from scipy.misc import imread, imsave, imresize
def list_images(directory):
images = []
for file in listdir(directory):
name = file.lower()
if name.endswith('.png'):
images.append(join(directory, file))
elif name.endswith('.jpg'):
images.append(join(directory, file))
elif name.endswith('.jpeg'):
images.append(join(directory, file))
return images
def get_train_images(paths, resize_len=512, crop_height=256, crop_width=256):
images = []
for path in paths:
image = imread(path, mode='RGB')
height, width, _ = image.shape
if height < width:
new_height = resize_len
new_width = int(width * new_height / height)
else:
new_width = resize_len
new_height = int(height * new_width / width)
image = imresize(image, [new_height, new_width], interp='nearest')
# crop the image
start_h = np.random.choice(new_height - crop_height + 1)
start_w = np.random.choice(new_width - crop_width + 1)
image = image[start_h:(start_h + crop_height), start_w:(start_w + crop_width), :]
images.append(image)
images = np.stack(images, axis=0)
return images
def get_images(paths, height=None, width=None):
if isinstance(paths, str):
paths = [paths]
images = []
for path in paths:
image = imread(path, mode='RGB')
if height is not None and width is not None:
image = imresize(image, [height, width], interp='nearest')
# Escape image with odd shapes (for training)
height = int(image.shape[0] / 2) * 2
width = int(image.shape[1] / 2) * 2
image = imresize(image, [height, width], interp='nearest')
images.append(image)
images = np.stack(images, axis=0)
return images
def save_images(datas, contents_path, styles_path, save_dir, suffix=None):
assert(len(datas) == len(contents_path) * len(styles_path))
if not exists(save_dir):
mkdir(save_dir)
if suffix is None:
suffix = ''
data_idx = 0
for content_path in contents_path:
for style_path in styles_path:
data = datas[data_idx]
data_idx += 1
content_path_name, content_ext = splitext(content_path)
style_path_name, style_ext = splitext(style_path)
content_name = content_path_name.split(sep)[-1]
style_name = style_path_name.split(sep)[-1]
save_path = join(save_dir, '%s-%s%s%s' %
(content_name, style_name, suffix, content_ext))
imsave(save_path, data) | [
"os.path.exists",
"os.listdir",
"numpy.random.choice",
"scipy.misc.imsave",
"os.path.splitext",
"os.path.join",
"numpy.stack",
"scipy.misc.imread",
"os.mkdir",
"scipy.misc.imresize"
] | [((319, 337), 'os.listdir', 'listdir', (['directory'], {}), '(directory)\n', (326, 337), False, 'from os import listdir, mkdir, sep, path, walk\n'), ((1431, 1455), 'numpy.stack', 'np.stack', (['images'], {'axis': '(0)'}), '(images, axis=0)\n', (1439, 1455), True, 'import numpy as np\n'), ((2051, 2075), 'numpy.stack', 'np.stack', (['images'], {'axis': '(0)'}), '(images, axis=0)\n', (2059, 2075), True, 'import numpy as np\n'), ((774, 798), 'scipy.misc.imread', 'imread', (['path'], {'mode': '"""RGB"""'}), "(path, mode='RGB')\n", (780, 798), False, 'from scipy.misc import imread, imsave, imresize\n'), ((1084, 1142), 'scipy.misc.imresize', 'imresize', (['image', '[new_height, new_width]'], {'interp': '"""nearest"""'}), "(image, [new_height, new_width], interp='nearest')\n", (1092, 1142), False, 'from scipy.misc import imread, imsave, imresize\n'), ((1187, 1233), 'numpy.random.choice', 'np.random.choice', (['(new_height - crop_height + 1)'], {}), '(new_height - crop_height + 1)\n', (1203, 1233), True, 'import numpy as np\n'), ((1252, 1296), 'numpy.random.choice', 'np.random.choice', (['(new_width - crop_width + 1)'], {}), '(new_width - crop_width + 1)\n', (1268, 1296), True, 'import numpy as np\n'), ((1636, 1660), 'scipy.misc.imread', 'imread', (['path'], {'mode': '"""RGB"""'}), "(path, mode='RGB')\n", (1642, 1660), False, 'from scipy.misc import imread, imsave, imresize\n'), ((1956, 2006), 'scipy.misc.imresize', 'imresize', (['image', '[height, width]'], {'interp': '"""nearest"""'}), "(image, [height, width], interp='nearest')\n", (1964, 2006), False, 'from scipy.misc import imread, imsave, imresize\n'), ((2249, 2265), 'os.path.exists', 'exists', (['save_dir'], {}), '(save_dir)\n', (2255, 2265), False, 'from os.path import join, exists, splitext\n'), ((2275, 2290), 'os.mkdir', 'mkdir', (['save_dir'], {}), '(save_dir)\n', (2280, 2290), False, 'from os import listdir, mkdir, sep, path, walk\n'), ((1735, 1785), 'scipy.misc.imresize', 'imresize', (['image', '[height, 
width]'], {'interp': '"""nearest"""'}), "(image, [height, width], interp='nearest')\n", (1743, 1785), False, 'from scipy.misc import imread, imsave, imresize\n'), ((2538, 2560), 'os.path.splitext', 'splitext', (['content_path'], {}), '(content_path)\n', (2546, 2560), False, 'from os.path import join, exists, splitext\n'), ((2602, 2622), 'os.path.splitext', 'splitext', (['style_path'], {}), '(style_path)\n', (2610, 2622), False, 'from os.path import join, exists, splitext\n'), ((2777, 2854), 'os.path.join', 'join', (['save_dir', "('%s-%s%s%s' % (content_name, style_name, suffix, content_ext))"], {}), "(save_dir, '%s-%s%s%s' % (content_name, style_name, suffix, content_ext))\n", (2781, 2854), False, 'from os.path import join, exists, splitext\n'), ((2885, 2908), 'scipy.misc.imsave', 'imsave', (['save_path', 'data'], {}), '(save_path, data)\n', (2891, 2908), False, 'from scipy.misc import imread, imsave, imresize\n'), ((427, 448), 'os.path.join', 'join', (['directory', 'file'], {}), '(directory, file)\n', (431, 448), False, 'from os.path import join, exists, splitext\n'), ((512, 533), 'os.path.join', 'join', (['directory', 'file'], {}), '(directory, file)\n', (516, 533), False, 'from os.path import join, exists, splitext\n'), ((598, 619), 'os.path.join', 'join', (['directory', 'file'], {}), '(directory, file)\n', (602, 619), False, 'from os.path import join, exists, splitext\n')] |
import numpy as np
class Windowing:
def __init__(self, Ncp, Ncs, alpha):
self.alpha = alpha
self.Ncp = Ncp
self.Ncs = Ncs
raise_window_len = int(self.Ncp * self.alpha)
fall_window_len = int(self.Ncs * self.alpha)
self.raise_window = np.blackman(
raise_window_len * 2)[:raise_window_len]
self.fall_window = np.blackman(fall_window_len * 2)[-fall_window_len:]
def apply_window(self, samples):
window = np.concatenate(
[self.raise_window, np.ones(len(samples) - len(self.raise_window) - len(self.fall_window)), self.fall_window])
return samples * window
| [
"numpy.blackman"
] | [((297, 330), 'numpy.blackman', 'np.blackman', (['(raise_window_len * 2)'], {}), '(raise_window_len * 2)\n', (308, 330), True, 'import numpy as np\n'), ((392, 424), 'numpy.blackman', 'np.blackman', (['(fall_window_len * 2)'], {}), '(fall_window_len * 2)\n', (403, 424), True, 'import numpy as np\n')] |
def main_fig3(mc_tide,mcs,mode):
import pandas as pd
import scipy.stats
from tqdm import trange
from random import random
import math
from numpy import linspace, zeros, histogram, round, shape, array
import pickle
from tools_main import read_eq_data, create_tsunami_mgsep_tide
from tools_main import read_sea_level_pd, calculate_flooding
from tools_main import adjust_tsunami_dicts1
from numpy import ones_like, savetxt, append
rcp_scenario = ['RCP85NA', 'RCP85WA','RCP26NA', 'RCP26WA']
if mode == False:
print('\t Flood-height comparison for 2000, 2050,2070, and 2100 for all SLR scenarios')
print()
print('Sea-level subsample size:',mcs)
print('Tide subsample size:',mc_tide)
print()
tsunami_mode = 'tsunami_tide'
if tsunami_mode == 'tsunami_tide':
data_df=pd.read_csv('LA_tide_MSL.dat', delimiter = ' ')
data_arr = []
data_arr_d = data_df['value'].values
for ii in range(len(data_arr_d)):
if math.isnan(data_arr_d[ii]) == False:
data_arr.append(data_arr_d[ii])
sample_pdf = scipy.stats.gaussian_kde(data_arr)
newtide_data = sample_pdf.resample(mc_tide).T[:,0]
tsu_data = read_eq_data(-0.2,1)
tsu_data1=create_tsunami_mgsep_tide(tsu_data,newtide_data,0.0)
years = [2000,2050,2070,2100]
cols = []
for i in range(len(years)):
cols.append(str(years[i]))
d_f={}
seal85NA=read_sea_level_pd(rcp_scenario[0],years,mcs)
nx1,nt = shape(seal85NA)
nx,ny = shape(tsu_data1)
bins = linspace(-1.6,7.11,101)
for i in range(len(rcp_scenario)):
if mode == False:
print('Sea-level Scenario: {t1}'.format(t1=rcp_scenario[i]))
seal85NA=read_sea_level_pd(rcp_scenario[i],years,mcs)
df_years = {}
for j in range(len(years)):
tt = adjust_tsunami_dicts1(tsu_data1,seal85NA,str(years[j]),mode)
dummy = tt.values
nx, ny = shape(dummy)
dummy = dummy.reshape(nx*ny)
weights = ones_like(dummy)/float(len(dummy))
x1_1,y1_1 = histogram(dummy,bins,weights=weights)
fname1_1 = 'file1_{t1}_{t2}.dat'.format(t1=rcp_scenario[i],t2=years[j])
savetxt(fname1_1,list(zip(x1_1,y1_1)))
# TODO:
# - multithreading vs serial computing
# - How should data and file for tides should be handled?
# - How should the data and files for sea-level rise should be handled?
# - Add file that contain the table with the statistics
def main_floodheight_t(rcp_scenario,tsunami_mode,mc_tide,mcs,flood_height,mode):
import pandas as pd
import scipy.stats
from tqdm import trange, tqdm
from random import random
import math
from numpy import linspace,zeros,histogram,round
from numpy import shape,argwhere,interp,savetxt,round
import pickle
import pandas as pd
from tools_main import read_eq_data,create_tsunami_mgsep_tide
from tools_main import read_sea_level_pd, calculate_flooding
from tools_main import calc_floodheigth_exceedance,smooth
from tools_main import adjust_tsunami_dicts1
#rcp_scenario = ['RCP85NA', 'RCP85WA','RCP26NA', 'RCP26WA']
if mode == False:
# print('\t Flood-height comparison for 2000, 2050,2070, and 2100 for all SLR scenarios')
# print()
print('Sea-level subsample size:',mcs)
print('Tide subsample size:',mc_tide)
tsunami_mode = 'tsunami_tide'
flood_height = round(flood_height,2)
if mode ==False:
print('Flood Heights:', flood_height)
print()
if tsunami_mode == 'tsunami_tide':
data_df=pd.read_csv('LA_tide_MSL.dat', delimiter = ' ')
data_arr = []
data_arr_d = data_df['value'].values
for ii in range(len(data_arr_d)):
if math.isnan(data_arr_d[ii]) == False:
data_arr.append(data_arr_d[ii])
sample_pdf = scipy.stats.gaussian_kde(data_arr)
newtide_data = sample_pdf.resample(mc_tide).T[:,0]
tsu_data = read_eq_data(-0.2,1)
tsu_data1=create_tsunami_mgsep_tide(tsu_data,newtide_data,0.0)
years = linspace(2000,2100,11,dtype=int)
cols = []
for i in range(len(years)):
cols.append(str(years[i]))
seal85NA=read_sea_level_pd(rcp_scenario,years,mcs)
nx1,nt = shape(seal85NA)
nx,ny = shape(tsu_data1)
if mode == False:
print('Sea-level Scenario: {t1}'.format(t1=rcp_scenario))
seal85NA=read_sea_level_pd(rcp_scenario,years,mcs)
df_years = {}
value_501 = []
fl_data = zeros([len(years),len(flood_height)+1])
fl_data[:,0]=years[:]
for j in range(len(years)):
tt = adjust_tsunami_dicts1(tsu_data1,seal85NA,str(years[j]),mode)
dummy = tt.values
nx, ny = shape(dummy)
eq = linspace(8.0,9.4,15)
eq1=linspace(8.0,9.4,1000)
jjj=0
value_dd = []
for jj in trange(ny*len(flood_height),disable=mode):
i = jj % ny
# print(j,jj,i,jjj,flood_height[jjj])
if jj%ny==0 and jj>0:
for i_d in range(1,len(value_dd)):
if value_dd[i_d-1] > value_dd[i_d]:
value_dd[i_d-1] = value_dd[i_d]
y1=smooth(interp(eq1, eq, value_dd),200)
index_c = -1
for i_y1 in range(len(y1)):
if y1[i_y1]>0.5:
index_c = i_y1
break
if index_c>-1:
ff = eq1[index_c]
else:
ff = float(9.4)
fl_data[j,jjj+1]=ff
# print(fl_data[j,jjj])
value_dd = []
jjj = jjj+1
# print(jj,len(value_dd),i,jjj,jjj+1)
value_dd.append(float(len(argwhere(dummy[:,i]>=flood_height[jjj])))/float(len(dummy[:,i])))
# for i_d in range(1,len(value_dd)):
if value_dd[i_d-1] > value_dd[i_d]:
value_dd[i_d-1] = value_dd[i_d]
y1=smooth(interp(eq1, eq, value_dd),200)
index_c = -1
for i_y1 in range(len(y1)):
if y1[i_y1]>0.5:
index_c = i_y1
break
if index_c>-1:
ff = eq1[index_c]
else:
ff = float(9.4)
fl_data[j,jjj+1]=ff
if mode==False:
print()
fname1_1 = 'exe1_{t1}.dat'.format(t1=rcp_scenario)
print(fname1_1)
savetxt(fname1_1,fl_data,fmt='%3.2f')
# TODO:
# - multithreading vs serial computing
# - How should data and file for tides should be handled?
# - How should the data and files for sea-level rise should be handled?
if __name__ == '__main__':
import argparse
from numpy import linspace
parser = argparse.ArgumentParser(description='Sea-level rise, Tsunami and Tides')
parser.add_argument('-run', '--runmode', help='flood_height,or distri', required=True)
parser.add_argument('-s','--scenario',help='RCP Scenario',required=False)
parser.add_argument('-m','--mode',help='Mode (tsunami, tsunami_tide)',required=False)
parser.add_argument('-sti','--subs_tide',help='Subsample size of tide',required=False)
parser.add_argument('-sse','--subs_seal',help='Subsample size of sea level',required=False)
parser.add_argument('-fh','--flood_h',help='Flood Heights',required=False)
parser.add_argument('-p','--production', action='store_true')
parser.set_defaults(production=False)
args = parser.parse_args()
main_mcs=50
main_mc_tide = 50
flood_height_main = linspace(0.5,1.5,3)
if str(args.runmode) != 'None':
run_mode = str(args.runmode)
if str(args.scenario) != 'None':
main_rcp_scenario = str(args.scenario)
if str(args.mode) != 'None':
main_mode = str(args.mode)
if str(args.subs_tide) != 'None':
main_mc_tide = int(args.subs_tide)
if str(args.subs_seal) != 'None':
main_mcs = int(args.subs_seal)
if str(args.flood_h) != 'None':
my_list = [float(item) for item in args.flood_h.split(',')]
flood_height_main = linspace(my_list[0],my_list[1],int(my_list[2]))
main_prod_mode =args.production
if main_prod_mode == False:
print("\t\t \033[1m Sea-level rise, Tsunami and Tides\033[0m")
print()
if run_mode == 'flood_height':
if run_mode != 'None' and str(args.scenario) != 'None':
if main_prod_mode == False:
print("\t\t \033[1m Flood-Height Calculation\033[0m")
print()
main_floodheight_t(main_rcp_scenario,main_mode,main_mc_tide,main_mcs,flood_height_main,main_prod_mode)
else:
print("\t\t \033[1m Flood-Height Exceedance Calculation\033[0m")
print()
print('Please choose -s option (rcp scenario) and -m option (tsunami, tsunami_tide)')
exit()
if run_mode == 'distribution':
if main_prod_mode==False:
print("\t \033[1m Flood Height Distributions\033[0m")
print()
main_fig3(main_mc_tide,main_mcs,main_prod_mode)
if run_mode != 'flood_height' and run_mode != 'distribution':
print('Not a valid option!')
| [
"numpy.ones_like",
"numpy.histogram",
"argparse.ArgumentParser",
"pandas.read_csv",
"tools_main.read_sea_level_pd",
"numpy.linspace",
"numpy.argwhere",
"tools_main.create_tsunami_mgsep_tide",
"tools_main.read_eq_data",
"numpy.savetxt",
"numpy.interp",
"numpy.shape",
"numpy.round",
"math.is... | [((1506, 1552), 'tools_main.read_sea_level_pd', 'read_sea_level_pd', (['rcp_scenario[0]', 'years', 'mcs'], {}), '(rcp_scenario[0], years, mcs)\n', (1523, 1552), False, 'from tools_main import read_sea_level_pd, calculate_flooding\n'), ((1564, 1579), 'numpy.shape', 'shape', (['seal85NA'], {}), '(seal85NA)\n', (1569, 1579), False, 'from numpy import shape, argwhere, interp, savetxt, round\n'), ((1592, 1608), 'numpy.shape', 'shape', (['tsu_data1'], {}), '(tsu_data1)\n', (1597, 1608), False, 'from numpy import shape, argwhere, interp, savetxt, round\n'), ((1620, 1645), 'numpy.linspace', 'linspace', (['(-1.6)', '(7.11)', '(101)'], {}), '(-1.6, 7.11, 101)\n', (1628, 1645), False, 'from numpy import linspace\n'), ((3551, 3573), 'numpy.round', 'round', (['flood_height', '(2)'], {}), '(flood_height, 2)\n', (3556, 3573), False, 'from numpy import shape, argwhere, interp, savetxt, round\n'), ((4206, 4241), 'numpy.linspace', 'linspace', (['(2000)', '(2100)', '(11)'], {'dtype': 'int'}), '(2000, 2100, 11, dtype=int)\n', (4214, 4241), False, 'from numpy import linspace\n'), ((4333, 4376), 'tools_main.read_sea_level_pd', 'read_sea_level_pd', (['rcp_scenario', 'years', 'mcs'], {}), '(rcp_scenario, years, mcs)\n', (4350, 4376), False, 'from tools_main import read_sea_level_pd, calculate_flooding\n'), ((4388, 4403), 'numpy.shape', 'shape', (['seal85NA'], {}), '(seal85NA)\n', (4393, 4403), False, 'from numpy import shape, argwhere, interp, savetxt, round\n'), ((4416, 4432), 'numpy.shape', 'shape', (['tsu_data1'], {}), '(tsu_data1)\n', (4421, 4432), False, 'from numpy import shape, argwhere, interp, savetxt, round\n'), ((4534, 4577), 'tools_main.read_sea_level_pd', 'read_sea_level_pd', (['rcp_scenario', 'years', 'mcs'], {}), '(rcp_scenario, years, mcs)\n', (4551, 4577), False, 'from tools_main import read_sea_level_pd, calculate_flooding\n'), ((6515, 6554), 'numpy.savetxt', 'savetxt', (['fname1_1', 'fl_data'], {'fmt': '"""%3.2f"""'}), "(fname1_1, fl_data, fmt='%3.2f')\n", 
(6522, 6554), False, 'from numpy import shape, argwhere, interp, savetxt, round\n'), ((6844, 6916), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Sea-level rise, Tsunami and Tides"""'}), "(description='Sea-level rise, Tsunami and Tides')\n", (6867, 6916), False, 'import argparse\n'), ((7648, 7669), 'numpy.linspace', 'linspace', (['(0.5)', '(1.5)', '(3)'], {}), '(0.5, 1.5, 3)\n', (7656, 7669), False, 'from numpy import linspace\n'), ((876, 921), 'pandas.read_csv', 'pd.read_csv', (['"""LA_tide_MSL.dat"""'], {'delimiter': '""" """'}), "('LA_tide_MSL.dat', delimiter=' ')\n", (887, 921), True, 'import pandas as pd\n'), ((1267, 1288), 'tools_main.read_eq_data', 'read_eq_data', (['(-0.2)', '(1)'], {}), '(-0.2, 1)\n', (1279, 1288), False, 'from tools_main import read_eq_data, create_tsunami_mgsep_tide\n'), ((1306, 1360), 'tools_main.create_tsunami_mgsep_tide', 'create_tsunami_mgsep_tide', (['tsu_data', 'newtide_data', '(0.0)'], {}), '(tsu_data, newtide_data, 0.0)\n', (1331, 1360), False, 'from tools_main import read_eq_data, create_tsunami_mgsep_tide\n'), ((1799, 1845), 'tools_main.read_sea_level_pd', 'read_sea_level_pd', (['rcp_scenario[i]', 'years', 'mcs'], {}), '(rcp_scenario[i], years, mcs)\n', (1816, 1845), False, 'from tools_main import read_sea_level_pd, calculate_flooding\n'), ((3711, 3756), 'pandas.read_csv', 'pd.read_csv', (['"""LA_tide_MSL.dat"""'], {'delimiter': '""" """'}), "('LA_tide_MSL.dat', delimiter=' ')\n", (3722, 3756), True, 'import pandas as pd\n'), ((4102, 4123), 'tools_main.read_eq_data', 'read_eq_data', (['(-0.2)', '(1)'], {}), '(-0.2, 1)\n', (4114, 4123), False, 'from tools_main import read_eq_data, create_tsunami_mgsep_tide\n'), ((4141, 4195), 'tools_main.create_tsunami_mgsep_tide', 'create_tsunami_mgsep_tide', (['tsu_data', 'newtide_data', '(0.0)'], {}), '(tsu_data, newtide_data, 0.0)\n', (4166, 4195), False, 'from tools_main import read_eq_data, create_tsunami_mgsep_tide\n'), ((4842, 4854), 'numpy.shape', 
'shape', (['dummy'], {}), '(dummy)\n', (4847, 4854), False, 'from numpy import shape, argwhere, interp, savetxt, round\n'), ((4868, 4890), 'numpy.linspace', 'linspace', (['(8.0)', '(9.4)', '(15)'], {}), '(8.0, 9.4, 15)\n', (4876, 4890), False, 'from numpy import linspace\n'), ((4901, 4925), 'numpy.linspace', 'linspace', (['(8.0)', '(9.4)', '(1000)'], {}), '(8.0, 9.4, 1000)\n', (4909, 4925), False, 'from numpy import linspace\n'), ((2031, 2043), 'numpy.shape', 'shape', (['dummy'], {}), '(dummy)\n', (2036, 2043), False, 'from numpy import shape, argwhere, interp, savetxt, round\n'), ((2166, 2205), 'numpy.histogram', 'histogram', (['dummy', 'bins'], {'weights': 'weights'}), '(dummy, bins, weights=weights)\n', (2175, 2205), False, 'from numpy import linspace, zeros, histogram, round\n'), ((6096, 6121), 'numpy.interp', 'interp', (['eq1', 'eq', 'value_dd'], {}), '(eq1, eq, value_dd)\n', (6102, 6121), False, 'from numpy import shape, argwhere, interp, savetxt, round\n'), ((1048, 1074), 'math.isnan', 'math.isnan', (['data_arr_d[ii]'], {}), '(data_arr_d[ii])\n', (1058, 1074), False, 'import math\n'), ((2107, 2123), 'numpy.ones_like', 'ones_like', (['dummy'], {}), '(dummy)\n', (2116, 2123), False, 'from numpy import ones_like, savetxt, append\n'), ((3883, 3909), 'math.isnan', 'math.isnan', (['data_arr_d[ii]'], {}), '(data_arr_d[ii])\n', (3893, 3909), False, 'import math\n'), ((5318, 5343), 'numpy.interp', 'interp', (['eq1', 'eq', 'value_dd'], {}), '(eq1, eq, value_dd)\n', (5324, 5343), False, 'from numpy import shape, argwhere, interp, savetxt, round\n'), ((5876, 5918), 'numpy.argwhere', 'argwhere', (['(dummy[:, i] >= flood_height[jjj])'], {}), '(dummy[:, i] >= flood_height[jjj])\n', (5884, 5918), False, 'from numpy import shape, argwhere, interp, savetxt, round\n')] |
# coding: utf8
# Copyright (c) 2014, 2015 <NAME>.
#
# This file is distributed under the new BSD License, see the LICENSE file or
# checkout the license terms at http://opensource.org/licenses/BSD-3-Clause).
from __future__ import absolute_import, division, print_function
from skfftw.enums import Direction, Flag, Normalization
from skfftw.wrappers import libfftw, libfftwf, libfftwl
import numpy as np
__all__ = ('Plan',)
class Plan(object):
"""
The FFTW plan class.
"""
__planner_funcs = {np.dtype('cdouble'): libfftw.plan_dft,
np.dtype('csingle'): libfftwf.plan_dft,
np.dtype('clongdouble'): libfftwl.plan_dft}
__execute_funcs = {np.dtype('cdouble'): libfftw.execute_dft,
np.dtype('csingle'): libfftwf.execute_dft,
np.dtype('clongdouble'): libfftwl.execute_dft}
__destroy_funcs = {np.dtype('cdouble'): libfftw.destroy_plan,
np.dtype('csingle'): libfftwf.destroy_plan,
np.dtype('clongdouble'): libfftwl.destroy_plan}
def __init__(self, input_array, output_array,
direction=Direction.forward, flags=(Flag.estimate,),
*args, **kwargs):
"""
Instantiate a DFT plan.
"""
self._handle = None
dt = np.dtype(input_array.dtype)
try:
self._planner = self.__planner_funcs[dt]
self._execute = self.__execute_funcs[dt]
self._destroy = self.__destroy_funcs[dt]
except:
raise ValueError("Unsupported data type: {}".format(dt))
self._input_array = input_array
self._output_array = output_array
self._direction = direction
sign_int = int(self._direction)
self._flags = flags
flag_int = 0
for flag in self._flags:
flag_int |= int(flag)
self._handle = self._planner(self._input_array, self._output_array,
sign_int, flag_int)
def __del__(self):
if self._handle is not None:
self._destroy(self._handle)
def __call__(self, input_array=None, output_array=None,
normalization=Normalization.none, *args, **kwargs):
"""
Execute DFT from plan.
Returns the result of the DFT as a Numpy array.
The input and output arrays used for DFT computation may be updated
using the input_array and output_array parameters. If the supplied
array(s) is (are) not compatible with the original one(s) supplied
at construct time, a RuntimeError is raised.
"""
self.execute_dft(input_array, output_array)
if normalization is not Normalization.none:
if normalization is Normalization.sqrt:
self._output_array /= np.sqrt(self.N)
elif normalization is Normalization.full:
self._output_array /= self.N
else:
raise ValueError("Incompatible normalization")
return self._output_array
def execute(self):
"""
Execute DFT from plan.
For more options, please use the __call__ method of this plan.
"""
self._execute(self._handle, self._input_array, self._output_array)
def execute_dft(self, input_array=None, output_array=None):
"""
Execute DFT from plan with optional update of the internal arrays.
For more options, please use the __call__ method of this plan.
"""
self._update_arrays(input_array, output_array)
self.execute()
def _update_arrays(self, input_array, output_array):
"""
Private method used for safe update of the internal arrays.
"""
# check input array
if input_array is not None:
if (input_array.flags.c_contiguous and
input_array.shape == self.input_array.shape and
input_array.dtype == self.input_array.dtype):
self._input_array = input_array
else:
raise RuntimeError('Incompatible input array')
# check output array
if output_array is not None:
if (output_array.flags.c_contiguous and
output_array.shape == self.output_array.shape and
output_array.dtype == self.output_array.dtype):
self._output_array = output_array
else:
raise RuntimeError('Incompatible output array')
@property
def direction(self):
"""
Direction of the transform.
"""
return self._direction
@property
def flags(self):
"""
Planner flags.
"""
return self._flags
@property
def input_array(self):
"""
Input array used internally by the Plan instance.
"""
return self._input_array
@property
def output_array(self):
"""
Output array used internally by the Plan instance.
"""
return self._output_array
@property
def N(self):
"""
Total number of samples. Useful for scaling purposes.
"""
return self._output_array.size | [
"numpy.dtype",
"numpy.sqrt"
] | [((526, 545), 'numpy.dtype', 'np.dtype', (['"""cdouble"""'], {}), "('cdouble')\n", (534, 545), True, 'import numpy as np\n'), ((588, 607), 'numpy.dtype', 'np.dtype', (['"""csingle"""'], {}), "('csingle')\n", (596, 607), True, 'import numpy as np\n'), ((651, 674), 'numpy.dtype', 'np.dtype', (['"""clongdouble"""'], {}), "('clongdouble')\n", (659, 674), True, 'import numpy as np\n'), ((718, 737), 'numpy.dtype', 'np.dtype', (['"""cdouble"""'], {}), "('cdouble')\n", (726, 737), True, 'import numpy as np\n'), ((783, 802), 'numpy.dtype', 'np.dtype', (['"""csingle"""'], {}), "('csingle')\n", (791, 802), True, 'import numpy as np\n'), ((849, 872), 'numpy.dtype', 'np.dtype', (['"""clongdouble"""'], {}), "('clongdouble')\n", (857, 872), True, 'import numpy as np\n'), ((919, 938), 'numpy.dtype', 'np.dtype', (['"""cdouble"""'], {}), "('cdouble')\n", (927, 938), True, 'import numpy as np\n'), ((985, 1004), 'numpy.dtype', 'np.dtype', (['"""csingle"""'], {}), "('csingle')\n", (993, 1004), True, 'import numpy as np\n'), ((1052, 1075), 'numpy.dtype', 'np.dtype', (['"""clongdouble"""'], {}), "('clongdouble')\n", (1060, 1075), True, 'import numpy as np\n'), ((1357, 1384), 'numpy.dtype', 'np.dtype', (['input_array.dtype'], {}), '(input_array.dtype)\n', (1365, 1384), True, 'import numpy as np\n'), ((2891, 2906), 'numpy.sqrt', 'np.sqrt', (['self.N'], {}), '(self.N)\n', (2898, 2906), True, 'import numpy as np\n')] |
import tensorflow as tf
import cv2
from tensorflow.keras.applications.imagenet_utils import preprocess_input
import numpy as np
from collections import namedtuple
from typing import List
import itertools
import collections
import tflite_runtime.interpreter as tflite
# The 20 Pascal-VOC object classes, in canonical order. Prediction labels in
# this file are 1-based (0 = background), so they are shifted by -1 before
# indexing into this list (see draw_bounding / post_process).
CLASSES = ['aeroplane', 'bicycle', 'bird', 'boat', 'bottle', 'bus', 'car', 'cat', 'chair', 'cow', 'diningtable', 'dog',
           'horse', 'motorbike', 'person', 'pottedplant', 'sheep', 'sofa', 'train', 'tvmonitor']

# Container for one image's detection results.
Predictions = namedtuple('Prediction', ('boxes', 'scores', 'labels'))
# Min/max prior-box scale for one feature-map level.
BoxSizes = collections.namedtuple('Boxsizes', ['min', 'max'])
# SSD prior-box ("anchor") configuration for one feature-map level.
Spec = collections.namedtuple('Spec', ['feature_map_size', 'shrinkage', 'box_sizes', 'aspect_ratios'])

iou_threshold = 0.5  # IoU threshold used by NMS
center_variance = 0.1  # scale applied to box-center offsets when decoding
size_variance = 0.2  # scale applied to box width/height offsets when decoding
def rgb2bgr(tpl):
    """Swap an (R, G, B) tuple into OpenCV's (B, G, R) channel order."""
    return (tpl[2], tpl[1], tpl[0])


Label = namedtuple('Label', ['name', 'color'])

# Drawing colors (BGR) for the 20 Pascal-VOC classes. The original table
# repeated these exact 20 entries four times; tiling the base list below
# preserves that 80-slot indexing behavior without the copy-paste.
_BASE_LABELS = [
    Label('aeroplane', rgb2bgr((0, 0, 0))),
    Label('bicycle', rgb2bgr((111, 74, 0))),
    Label('bird', rgb2bgr((81, 0, 81))),
    Label('boat', rgb2bgr((128, 64, 128))),
    Label('bottle', rgb2bgr((244, 35, 232))),
    Label('bus', rgb2bgr((230, 150, 140))),
    Label('car', rgb2bgr((70, 70, 70))),
    Label('cat', rgb2bgr((102, 102, 156))),
    Label('chair', rgb2bgr((190, 153, 153))),
    Label('cow', rgb2bgr((150, 120, 90))),
    Label('diningtable', rgb2bgr((153, 153, 153))),
    Label('dog', rgb2bgr((250, 170, 30))),
    Label('horse', rgb2bgr((220, 220, 0))),
    Label('motorbike', rgb2bgr((107, 142, 35))),
    Label('person', rgb2bgr((52, 151, 52))),
    Label('pottedplant', rgb2bgr((70, 130, 180))),
    Label('sheep', rgb2bgr((220, 20, 60))),
    Label('sofa', rgb2bgr((0, 0, 142))),
    Label('train', rgb2bgr((0, 0, 230))),
    Label('tvmonitor', rgb2bgr((119, 11, 32))),
]

_LABEL_DEFS = _BASE_LABELS * 4  # 80 slots, built once instead of on every call


def coco_color_map(index):
    """Return the Label (name, BGR color) for a 0-based class index.

    Indexing matches the original 80-entry table: indices 0-79 map onto the
    20 VOC classes cyclically; anything out of range raises IndexError,
    exactly as before.
    """
    return _LABEL_DEFS[index]
def draw_bounding(img, bboxes, labels, img_size):
    """Draw labeled, alpha-blended bounding boxes onto ``img`` in place.

    If the box coordinates look normalized (max value below 10) they are
    first scaled to pixels using ``img_size`` (height, width). Each label is
    shifted by -1 before indexing CLASSES / the color map.
    """
    # Scale relative coordinates up to absolute pixel positions when needed.
    if np.max(bboxes) < 10:
        bboxes[:, [0, 2]] = bboxes[:, [0, 2]] * img_size[1]
        bboxes[:, [1, 3]] = bboxes[:, [1, 3]] * img_size[0]

    for idx, box in enumerate(bboxes):
        left, top, right, bottom = (int(box[k]) for k in range(4))
        cls_id = int(labels[idx] - 1)
        color = coco_color_map(cls_id).color
        overlay = np.copy(img)
        cv2.rectangle(overlay, (left, top), (right, bottom), color, 2)
        # Filled strip above the box serves as background for the class name.
        cv2.rectangle(overlay, (left - 1, top), (right + 1, top - 20), color, cv2.FILLED)
        cv2.putText(overlay, CLASSES[cls_id], (left + 5, top - 5),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 1, cv2.LINE_AA)
        # Alpha-blend the drawn overlay back into the source image (in place).
        alpha = 0.8
        cv2.addWeighted(overlay, alpha, img, 1. - alpha, 0, img)
@tf.function
def convert_locations_to_boxes(locations, priors, center_variance,
                               size_variance):
    """Decode the network's regression output into center-form boxes.

    Inverts the SSD box encoding:
        $$ predicted\_center * center\_variance = \frac{real\_center - prior\_center}{prior\_hw} $$
        $$ exp(predicted\_hw * size\_variance) = \frac{real\_hw}{prior\_hw} $$

    Args:
        locations (batch_size, num_priors, 4): regression output of the network.
        priors (num_priors, 4) or (batch_size/1, num_priors, 4): prior boxes
            in center form.
        center_variance: constant rescaling the center offsets.
        size_variance: constant rescaling the size offsets.

    Returns:
        Boxes in prior/center form [[center_x, center_y, h, w]], relative to
        the image size.
    """
    # Give priors a leading batch dimension when locations has one more rank,
    # so the arithmetic below broadcasts over the batch.
    if tf.rank(priors) + 1 == tf.rank(locations):
        priors = tf.expand_dims(priors, 0)
    return tf.concat([
        locations[..., :2] * center_variance * priors[..., 2:] + priors[..., :2],
        tf.math.exp(locations[..., 2:] * size_variance) * priors[..., 2:]
    ], axis=tf.rank(locations) - 1)
@tf.function
def center_form_to_corner_form(locations):
    """Convert boxes on the last axis from center form (cx, cy, w, h)
    to corner form (xmin, ymin, xmax, ymax)."""
    centers = locations[..., :2]
    half_extents = locations[..., 2:] / 2
    last_axis = tf.rank(locations) - 1
    return tf.concat([centers - half_extents, centers + half_extents], last_axis)
def batched_nms(boxes, scores, idxs, iou_threshold, top_k=100):
"""
:Args(bbox, scores, idxs, iou_threshold)
NMS
각 인덱스는 각 category에 매핑
boxes : Tensor[N, 4]
NMS가 적용될 bbox list
shape = (x1,y1, x2, y2)
scores : Tensor[N]
각 박스별 confidence score
idxs : Tensor[N]
category 인덱스
iou_threshold : float
임계값
:return Tensor
"""
if tf.size(boxes) == 0:
return tf.convert_to_tensor([], dtype=tf.int32)
max_coordinate = tf.reduce_max(boxes)
offsets = idxs * (max_coordinate + 1)
boxes_for_nms = boxes + offsets[:, None]
keep = tf.image.non_max_suppression(boxes_for_nms, scores, top_k, iou_threshold) # 기존
# keep, selected_scores = tf.image.non_max_suppression_with_scores(boxes_for_nms, scores, top_k, iou_threshold, soft_nms_sigma=0.5) # 기존
# soft nms일 경우 selected_socres 추가
return keep
def post_process(detections, target_transform, confidence_threshold=0.01, top_k=100, iou_threshold=0.5, classes=21):
batch_boxes = detections[:, :, classes:]
if not tf.is_tensor(batch_boxes):
batch_boxes = tf.convert_to_tensor(batch_boxes)
batch_scores = tf.nn.softmax(detections[:, :, :classes], axis=2)
batch_boxes = convert_locations_to_boxes(batch_boxes, target_transform.center_form_priors,
target_transform.center_variance, target_transform.size_variance)
batch_boxes = center_form_to_corner_form(batch_boxes)
batch_size = tf.shape(batch_scores)[0]
results = []
for image_id in range(batch_size):
scores, boxes = batch_scores[image_id], batch_boxes[image_id] # (N, #CLS) (N, 4)
num_boxes = tf.shape(scores)[0]
num_classes = tf.shape(scores)[1]
boxes = tf.reshape(boxes, [num_boxes, 1, 4])
boxes = tf.broadcast_to(boxes, [num_boxes, num_classes, 4])
labels = tf.range(num_classes, dtype=tf.float32)
labels = tf.reshape(labels, [1, num_classes])
labels = tf.broadcast_to(labels, tf.shape(scores))
# 배경 라벨이 있는 예측값 제거
boxes = boxes[:, 1:]
scores = scores[:, 1:]
labels = labels[:, 1:]
# 모든 클래스 예측을 별도의 인스턴스로 만들어 모든 것을 일괄 처리 과정
boxes = tf.reshape(boxes, [-1, 4])
scores = tf.reshape(scores, [-1])
labels = tf.reshape(labels, [-1])
# confidence 점수가 낮은 predict bbox 제거
low_scoring_mask = scores > confidence_threshold
boxes, scores, labels = tf.boolean_mask(boxes, low_scoring_mask), tf.boolean_mask(scores, low_scoring_mask), tf.boolean_mask(labels, low_scoring_mask)
keep = batched_nms(boxes, scores, labels, iou_threshold, top_k)
boxes, scores, labels = tf.gather(boxes, keep), tf.gather(scores, keep), tf.gather(labels, keep)
# test soft-nms
# keep, selected_scores = batched_nms(boxes, scores, labels, iou_threshold, top_k)
# scores = selected_scores
# boxes, labels = tf.gather(boxes, keep), tf.gather(labels, keep)
results.append(Predictions(boxes.numpy(), scores.numpy(), labels.numpy()))
return results
@tf.function(experimental_relax_shapes=True)
def area_of(left_top, right_bottom):
"""bbox 좌표값 (좌상단, 우하단)으로 사각형 넓이 계산.
Args:
left_top (N, 2): left 좌상단 좌표값.
right_bottom (N, 2): 우하단 좌표값.
Returns:
area (N): 사각형 넓이.
"""
hw = tf.clip_by_value(right_bottom - left_top, 0.0, 10000)
return hw[..., 0] * hw[..., 1]
@tf.function(experimental_relax_shapes=True)
def iou_of(boxes0, boxes1, eps=1e-5):
"""두 bbox간 iou 계산.
Args:
boxes0 (N, 4): ground truth boxes 좌표값.
boxes1 (N or 1, 4): predicted boxes 좌표값.
eps: 0으로 치환되는 것을 막기위한 엡실론 상수값 .
Returns:
iou (N): IoU 값.
"""
overlap_left_top = tf.maximum(boxes0[..., :2], boxes1[..., :2])
overlap_right_bottom = tf.minimum(boxes0[..., 2:], boxes1[..., 2:])
overlap_area = area_of(overlap_left_top, overlap_right_bottom)
area0 = area_of(boxes0[..., :2], boxes0[..., 2:])
area1 = area_of(boxes1[..., :2], boxes1[..., 2:])
return overlap_area / (area0 + area1 - overlap_area + eps)
@tf.function
def assign_gt2_priors(gt_boxes, gt_labels, corner_form_priors,
iou_threshold=0.45):
"""Ground truth <-> priors(default box) 할당
Args:
gt_boxes (num_targets, 4): ground truth boxes
gt_labels (num_targets): ground truth class labels
priors (num_priors, 4): priors
Returns:
boxes (num_priors, 4): gt 박스
labels (num_priors): gt 라벨
"""
# size: num_priors x num_targets
ious = iou_of(tf.expand_dims(gt_boxes, axis=0), tf.expand_dims(corner_form_priors, axis=1))
# size: num_priors
best_target_per_prior = tf.math.reduce_max(ious, axis=1)
best_target_per_prior_index = tf.math.argmax(ious, axis=1)
# size: num_targets
best_prior_per_target = tf.math.reduce_max(ious, axis=0)
best_prior_per_target_index = tf.math.argmax(ious, axis=0)
targets = tf.range(tf.shape(best_prior_per_target_index)[0], dtype='int64')
best_target_per_prior_index = tf.tensor_scatter_nd_update(best_target_per_prior_index,
tf.expand_dims(best_prior_per_target_index, 1), targets)
# 2.0 is used to make sure every target has a prior assigned
best_target_per_prior = tf.tensor_scatter_nd_update(best_target_per_prior,
tf.expand_dims(best_prior_per_target_index, 1),
tf.ones_like(best_prior_per_target_index,
dtype=tf.float32) * 2.0)
# size: num_priors
labels = tf.gather(gt_labels, best_target_per_prior_index)
labels = tf.where(tf.less(best_target_per_prior, iou_threshold), tf.constant(0, dtype='int64'), labels)
# 라벨이 임계값을 넘기 않는 경우 background(배경) 처리
boxes = tf.gather(gt_boxes, best_target_per_prior_index)
return boxes, labels
@tf.function
def corner_form_to_center_form(boxes):
return tf.concat([
(boxes[..., :2] + boxes[..., 2:]) / 2,
boxes[..., 2:] - boxes[..., :2]
], tf.rank(boxes) - 1)
@tf.function
def convert_boxes_to_locations(center_form_boxes, center_form_priors, center_variance, size_variance):
if tf.rank(center_form_priors) + 1 == tf.rank(center_form_boxes):
center_form_priors = tf.expand_dims(center_form_priors, 0)
return tf.concat([
(center_form_boxes[..., :2] - center_form_priors[..., :2]) / center_form_priors[..., 2:] / center_variance,
tf.math.log(center_form_boxes[..., 2:] / center_form_priors[..., 2:]) / size_variance
], axis=tf.rank(center_form_boxes) - 1)
class MatchingPriors(object):
def __init__(self, center_form_priors, center_variance, size_variance, iou_threshold):
self.center_form_priors = center_form_priors
self.corner_form_priors = center_form_to_corner_form(center_form_priors)
self.center_variance = center_variance
self.size_variance = size_variance
self.iou_threshold = iou_threshold
def __call__(self, gt_boxes, gt_labels):
if type(gt_boxes) is np.ndarray:
gt_boxes = tf.convert_to_tensor(gt_boxes)
if type(gt_labels) is np.ndarray:
gt_labels = tf.convert_to_tensor(gt_labels)
boxes, labels = assign_gt2_priors(gt_boxes, gt_labels, self.corner_form_priors, self.iou_threshold)
boxes = corner_form_to_center_form(boxes)
locations = convert_boxes_to_locations(boxes, self.center_form_priors, self.center_variance, self.size_variance)
return locations, labels
def create_priors_boxes(specs: List[Spec], image_size, clamp=True):
priors = []
for spec in specs:
# specs
# index 0 >> size-(48,438) shrinkage-8 CSNet
scale = image_size / spec.shrinkage
for j, i in itertools.product(range(spec.feature_map_size), repeat=2):
x_center = (i + 0.5) / scale
y_center = (j + 0.5) / scale
# 작은 bbox
size = spec.box_sizes.min
h = w = size / image_size
priors.append([
x_center,
y_center,
w,
h
])
# # 큰 bbox
# size = np.sqrt(spec.box_sizes.max * spec.box_sizes.min)
# h = w = size / image_size
# priors.append([
# x_center,
# y_center,
# w,
# h
# ])
# 작은 bbox 높이, 너비 비율 변경
#size = spec.box_sizes.min 기존
size = np.sqrt(spec.box_sizes.max * spec.box_sizes.min)
h = w = size / image_size
if spec.aspect_ratios :
for ratio in spec.aspect_ratios:
ratio = np.sqrt(ratio)
priors.append([
x_center,
y_center,
w * ratio,
h / ratio
])
priors.append([
x_center,
y_center,
w / ratio,
h * ratio
])
# priors > shape(Batch, 13792)
# 2차원 배열이고 각 배열마다 4개씩 존재(x_center, y_center, w, h) * 13792
priors = np.array(priors, dtype=np.float32)
if clamp:
np.clip(priors, 0.0, 1.0, out=priors)
return tf.convert_to_tensor(priors)
specs = [
Spec(28, 8, BoxSizes(11, 22), [2]), # 0.05 / 0.1
Spec(14, 16, BoxSizes(23, 45), [2]), # 0.1 / 0.2
Spec(7, 32, BoxSizes(56, 90), [2]), # 0.25 / 0.4
Spec(4, 64, BoxSizes(90, 134), [2]), # 0.4 / 0.6
Spec(2, 112, BoxSizes(134, 168), [2]), # 0.6 / 0.75
Spec(1, 224, BoxSizes(179, 235), [2]) # 0.8 / 1.05
]
priors = create_priors_boxes(specs, 224)
target_transform = MatchingPriors(priors, center_variance, size_variance, iou_threshold)
TFLITE_FILE_PATH = 'new_tflite_model.tflite'
interpreter = tflite.Interpreter(model_path=TFLITE_FILE_PATH)
interpreter.allocate_tensors()
# Get input and output tensors.
input_details = interpreter.get_input_details()
output_details = interpreter.get_output_details()
capture = cv2.VideoCapture(0)
capture.set(cv2.CAP_PROP_FRAME_WIDTH, 640)
capture.set(cv2.CAP_PROP_FRAME_HEIGHT, 480)
ret, frame = capture.read()
input = tf.convert_to_tensor(frame, dtype=tf.float32)
# 이미지 리사이징
input = tf.image.resize(input, [224, 224])
input = preprocess_input(input, mode='torch')
input = tf.expand_dims(input, axis=0)
interpreter.set_tensor(input_details[0]['index'], input)
import time
while True:
ret, frame = capture.read()
start = time.perf_counter_ns()
input = tf.convert_to_tensor(frame, dtype=tf.float32)
# 이미지 리사이징
input = tf.image.resize(input, [224, 224])
input = preprocess_input(input, mode='torch')
input = tf.expand_dims(input, axis=0)
duration = (time.perf_counter_ns() - start)
print(f"전처리 과정 : {duration // 1000000}ms.")
start = time.perf_counter_ns()
""" !- 추론 과정 """
interpreter.invoke()
output_data = interpreter.get_tensor(output_details[0]['index'])
duration = (time.perf_counter_ns() - start)
print(f"추론 과정 : {duration // 1000000}ms.")
start = time.perf_counter_ns()
predictions = post_process(output_data, target_transform, classes=21, confidence_threshold=0.4)
pred_boxes, pred_scores, pred_labels = predictions[0]
if pred_boxes.size > 0:
draw_bounding(frame, pred_boxes, labels=pred_labels, img_size=frame.shape[:2])
duration = (time.perf_counter_ns() - start)
print(f"포스트 프로세싱 과정 : {duration // 1000000}ms.")
cv2.imshow("VideoFrame", frame)
if cv2.waitKey(1) > 0:
break
capture.release()
cv2.destroyAllWindows() | [
"cv2.rectangle",
"numpy.clip",
"tensorflow.shape",
"numpy.sqrt",
"tensorflow.boolean_mask",
"tensorflow.math.log",
"cv2.imshow",
"numpy.array",
"cv2.destroyAllWindows",
"tensorflow.nn.softmax",
"tensorflow.ones_like",
"tensorflow.math.exp",
"time.perf_counter_ns",
"tensorflow.is_tensor",
... | [((499, 554), 'collections.namedtuple', 'namedtuple', (['"""Prediction"""', "('boxes', 'scores', 'labels')"], {}), "('Prediction', ('boxes', 'scores', 'labels'))\n", (509, 554), False, 'from collections import namedtuple\n'), ((567, 617), 'collections.namedtuple', 'collections.namedtuple', (['"""Boxsizes"""', "['min', 'max']"], {}), "('Boxsizes', ['min', 'max'])\n", (589, 617), False, 'import collections\n'), ((625, 724), 'collections.namedtuple', 'collections.namedtuple', (['"""Spec"""', "['feature_map_size', 'shrinkage', 'box_sizes', 'aspect_ratios']"], {}), "('Spec', ['feature_map_size', 'shrinkage',\n 'box_sizes', 'aspect_ratios'])\n", (647, 724), False, 'import collections\n'), ((866, 904), 'collections.namedtuple', 'namedtuple', (['"""Label"""', "['name', 'color']"], {}), "('Label', ['name', 'color'])\n", (876, 904), False, 'from collections import namedtuple\n'), ((10164, 10207), 'tensorflow.function', 'tf.function', ([], {'experimental_relax_shapes': '(True)'}), '(experimental_relax_shapes=True)\n', (10175, 10207), True, 'import tensorflow as tf\n'), ((10520, 10563), 'tensorflow.function', 'tf.function', ([], {'experimental_relax_shapes': '(True)'}), '(experimental_relax_shapes=True)\n', (10531, 10563), True, 'import tensorflow as tf\n'), ((17202, 17249), 'tflite_runtime.interpreter.Interpreter', 'tflite.Interpreter', ([], {'model_path': 'TFLITE_FILE_PATH'}), '(model_path=TFLITE_FILE_PATH)\n', (17220, 17249), True, 'import tflite_runtime.interpreter as tflite\n'), ((17425, 17444), 'cv2.VideoCapture', 'cv2.VideoCapture', (['(0)'], {}), '(0)\n', (17441, 17444), False, 'import cv2\n'), ((17569, 17614), 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['frame'], {'dtype': 'tf.float32'}), '(frame, dtype=tf.float32)\n', (17589, 17614), True, 'import tensorflow as tf\n'), ((17635, 17669), 'tensorflow.image.resize', 'tf.image.resize', (['input', '[224, 224]'], {}), '(input, [224, 224])\n', (17650, 17669), True, 'import tensorflow as tf\n'), ((17678, 
17715), 'tensorflow.keras.applications.imagenet_utils.preprocess_input', 'preprocess_input', (['input'], {'mode': '"""torch"""'}), "(input, mode='torch')\n", (17694, 17715), False, 'from tensorflow.keras.applications.imagenet_utils import preprocess_input\n'), ((17724, 17753), 'tensorflow.expand_dims', 'tf.expand_dims', (['input'], {'axis': '(0)'}), '(input, axis=0)\n', (17738, 17753), True, 'import tensorflow as tf\n'), ((18970, 18993), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (18991, 18993), False, 'import cv2\n'), ((7543, 7563), 'tensorflow.reduce_max', 'tf.reduce_max', (['boxes'], {}), '(boxes)\n', (7556, 7563), True, 'import tensorflow as tf\n'), ((7662, 7735), 'tensorflow.image.non_max_suppression', 'tf.image.non_max_suppression', (['boxes_for_nms', 'scores', 'top_k', 'iou_threshold'], {}), '(boxes_for_nms, scores, top_k, iou_threshold)\n', (7690, 7735), True, 'import tensorflow as tf\n'), ((8215, 8264), 'tensorflow.nn.softmax', 'tf.nn.softmax', (['detections[:, :, :classes]'], {'axis': '(2)'}), '(detections[:, :, :classes], axis=2)\n', (8228, 8264), True, 'import tensorflow as tf\n'), ((10429, 10482), 'tensorflow.clip_by_value', 'tf.clip_by_value', (['(right_bottom - left_top)', '(0.0)', '(10000)'], {}), '(right_bottom - left_top, 0.0, 10000)\n', (10445, 10482), True, 'import tensorflow as tf\n'), ((10839, 10883), 'tensorflow.maximum', 'tf.maximum', (['boxes0[..., :2]', 'boxes1[..., :2]'], {}), '(boxes0[..., :2], boxes1[..., :2])\n', (10849, 10883), True, 'import tensorflow as tf\n'), ((10911, 10955), 'tensorflow.minimum', 'tf.minimum', (['boxes0[..., 2:]', 'boxes1[..., 2:]'], {}), '(boxes0[..., 2:], boxes1[..., 2:])\n', (10921, 10955), True, 'import tensorflow as tf\n'), ((11802, 11834), 'tensorflow.math.reduce_max', 'tf.math.reduce_max', (['ious'], {'axis': '(1)'}), '(ious, axis=1)\n', (11820, 11834), True, 'import tensorflow as tf\n'), ((11869, 11897), 'tensorflow.math.argmax', 'tf.math.argmax', (['ious'], {'axis': '(1)'}), 
'(ious, axis=1)\n', (11883, 11897), True, 'import tensorflow as tf\n'), ((11950, 11982), 'tensorflow.math.reduce_max', 'tf.math.reduce_max', (['ious'], {'axis': '(0)'}), '(ious, axis=0)\n', (11968, 11982), True, 'import tensorflow as tf\n'), ((12017, 12045), 'tensorflow.math.argmax', 'tf.math.argmax', (['ious'], {'axis': '(0)'}), '(ious, axis=0)\n', (12031, 12045), True, 'import tensorflow as tf\n'), ((12814, 12863), 'tensorflow.gather', 'tf.gather', (['gt_labels', 'best_target_per_prior_index'], {}), '(gt_labels, best_target_per_prior_index)\n', (12823, 12863), True, 'import tensorflow as tf\n'), ((13028, 13076), 'tensorflow.gather', 'tf.gather', (['gt_boxes', 'best_target_per_prior_index'], {}), '(gt_boxes, best_target_per_prior_index)\n', (13037, 13076), True, 'import tensorflow as tf\n'), ((16480, 16514), 'numpy.array', 'np.array', (['priors'], {'dtype': 'np.float32'}), '(priors, dtype=np.float32)\n', (16488, 16514), True, 'import numpy as np\n'), ((16587, 16615), 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['priors'], {}), '(priors)\n', (16607, 16615), True, 'import tensorflow as tf\n'), ((17881, 17903), 'time.perf_counter_ns', 'time.perf_counter_ns', ([], {}), '()\n', (17901, 17903), False, 'import time\n'), ((17916, 17961), 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['frame'], {'dtype': 'tf.float32'}), '(frame, dtype=tf.float32)\n', (17936, 17961), True, 'import tensorflow as tf\n'), ((17990, 18024), 'tensorflow.image.resize', 'tf.image.resize', (['input', '[224, 224]'], {}), '(input, [224, 224])\n', (18005, 18024), True, 'import tensorflow as tf\n'), ((18037, 18074), 'tensorflow.keras.applications.imagenet_utils.preprocess_input', 'preprocess_input', (['input'], {'mode': '"""torch"""'}), "(input, mode='torch')\n", (18053, 18074), False, 'from tensorflow.keras.applications.imagenet_utils import preprocess_input\n'), ((18087, 18116), 'tensorflow.expand_dims', 'tf.expand_dims', (['input'], {'axis': '(0)'}), '(input, axis=0)\n', 
(18101, 18116), True, 'import tensorflow as tf\n'), ((18228, 18250), 'time.perf_counter_ns', 'time.perf_counter_ns', ([], {}), '()\n', (18248, 18250), False, 'import time\n'), ((18475, 18497), 'time.perf_counter_ns', 'time.perf_counter_ns', ([], {}), '()\n', (18495, 18497), False, 'import time\n'), ((18878, 18909), 'cv2.imshow', 'cv2.imshow', (['"""VideoFrame"""', 'frame'], {}), "('VideoFrame', frame)\n", (18888, 18909), False, 'import cv2\n'), ((4971, 4985), 'numpy.max', 'np.max', (['bboxes'], {}), '(bboxes)\n', (4977, 4985), True, 'import numpy as np\n'), ((5274, 5286), 'numpy.copy', 'np.copy', (['img'], {}), '(img)\n', (5281, 5286), True, 'import numpy as np\n'), ((5349, 5409), 'cv2.rectangle', 'cv2.rectangle', (['img_box', '(xmin, ymin)', '(xmax, ymax)', 'color', '(2)'], {}), '(img_box, (xmin, ymin), (xmax, ymax), color, 2)\n', (5362, 5409), False, 'import cv2\n'), ((5418, 5505), 'cv2.rectangle', 'cv2.rectangle', (['img_box', '(xmin - 1, ymin)', '(xmax + 1, ymin - 20)', 'color', 'cv2.FILLED'], {}), '(img_box, (xmin - 1, ymin), (xmax + 1, ymin - 20), color, cv2.\n FILLED)\n', (5431, 5505), False, 'import cv2\n'), ((5711, 5768), 'cv2.addWeighted', 'cv2.addWeighted', (['img_box', 'alpha', 'img', '(1.0 - alpha)', '(0)', 'img'], {}), '(img_box, alpha, img, 1.0 - alpha, 0, img)\n', (5726, 5768), False, 'import cv2\n'), ((6519, 6537), 'tensorflow.rank', 'tf.rank', (['locations'], {}), '(locations)\n', (6526, 6537), True, 'import tensorflow as tf\n'), ((6556, 6581), 'tensorflow.expand_dims', 'tf.expand_dims', (['priors', '(0)'], {}), '(priors, 0)\n', (6570, 6581), True, 'import tensorflow as tf\n'), ((7444, 7458), 'tensorflow.size', 'tf.size', (['boxes'], {}), '(boxes)\n', (7451, 7458), True, 'import tensorflow as tf\n'), ((7480, 7520), 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['[]'], {'dtype': 'tf.int32'}), '([], dtype=tf.int32)\n', (7500, 7520), True, 'import tensorflow as tf\n'), ((8113, 8138), 'tensorflow.is_tensor', 'tf.is_tensor', 
(['batch_boxes'], {}), '(batch_boxes)\n', (8125, 8138), True, 'import tensorflow as tf\n'), ((8162, 8195), 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['batch_boxes'], {}), '(batch_boxes)\n', (8182, 8195), True, 'import tensorflow as tf\n'), ((8548, 8570), 'tensorflow.shape', 'tf.shape', (['batch_scores'], {}), '(batch_scores)\n', (8556, 8570), True, 'import tensorflow as tf\n'), ((8819, 8855), 'tensorflow.reshape', 'tf.reshape', (['boxes', '[num_boxes, 1, 4]'], {}), '(boxes, [num_boxes, 1, 4])\n', (8829, 8855), True, 'import tensorflow as tf\n'), ((8872, 8923), 'tensorflow.broadcast_to', 'tf.broadcast_to', (['boxes', '[num_boxes, num_classes, 4]'], {}), '(boxes, [num_boxes, num_classes, 4])\n', (8887, 8923), True, 'import tensorflow as tf\n'), ((8941, 8980), 'tensorflow.range', 'tf.range', (['num_classes'], {'dtype': 'tf.float32'}), '(num_classes, dtype=tf.float32)\n', (8949, 8980), True, 'import tensorflow as tf\n'), ((8998, 9034), 'tensorflow.reshape', 'tf.reshape', (['labels', '[1, num_classes]'], {}), '(labels, [1, num_classes])\n', (9008, 9034), True, 'import tensorflow as tf\n'), ((9280, 9306), 'tensorflow.reshape', 'tf.reshape', (['boxes', '[-1, 4]'], {}), '(boxes, [-1, 4])\n', (9290, 9306), True, 'import tensorflow as tf\n'), ((9324, 9348), 'tensorflow.reshape', 'tf.reshape', (['scores', '[-1]'], {}), '(scores, [-1])\n', (9334, 9348), True, 'import tensorflow as tf\n'), ((9366, 9390), 'tensorflow.reshape', 'tf.reshape', (['labels', '[-1]'], {}), '(labels, [-1])\n', (9376, 9390), True, 'import tensorflow as tf\n'), ((11672, 11704), 'tensorflow.expand_dims', 'tf.expand_dims', (['gt_boxes'], {'axis': '(0)'}), '(gt_boxes, axis=0)\n', (11686, 11704), True, 'import tensorflow as tf\n'), ((11706, 11748), 'tensorflow.expand_dims', 'tf.expand_dims', (['corner_form_priors'], {'axis': '(1)'}), '(corner_form_priors, axis=1)\n', (11720, 11748), True, 'import tensorflow as tf\n'), ((12281, 12327), 'tensorflow.expand_dims', 'tf.expand_dims', 
(['best_prior_per_target_index', '(1)'], {}), '(best_prior_per_target_index, 1)\n', (12295, 12327), True, 'import tensorflow as tf\n'), ((12538, 12584), 'tensorflow.expand_dims', 'tf.expand_dims', (['best_prior_per_target_index', '(1)'], {}), '(best_prior_per_target_index, 1)\n', (12552, 12584), True, 'import tensorflow as tf\n'), ((12887, 12932), 'tensorflow.less', 'tf.less', (['best_target_per_prior', 'iou_threshold'], {}), '(best_target_per_prior, iou_threshold)\n', (12894, 12932), True, 'import tensorflow as tf\n'), ((12934, 12963), 'tensorflow.constant', 'tf.constant', (['(0)'], {'dtype': '"""int64"""'}), "(0, dtype='int64')\n", (12945, 12963), True, 'import tensorflow as tf\n'), ((13452, 13478), 'tensorflow.rank', 'tf.rank', (['center_form_boxes'], {}), '(center_form_boxes)\n', (13459, 13478), True, 'import tensorflow as tf\n'), ((13509, 13546), 'tensorflow.expand_dims', 'tf.expand_dims', (['center_form_priors', '(0)'], {}), '(center_form_priors, 0)\n', (13523, 13546), True, 'import tensorflow as tf\n'), ((16538, 16575), 'numpy.clip', 'np.clip', (['priors', '(0.0)', '(1.0)'], {'out': 'priors'}), '(priors, 0.0, 1.0, out=priors)\n', (16545, 16575), True, 'import numpy as np\n'), ((18134, 18156), 'time.perf_counter_ns', 'time.perf_counter_ns', ([], {}), '()\n', (18154, 18156), False, 'import time\n'), ((18382, 18404), 'time.perf_counter_ns', 'time.perf_counter_ns', ([], {}), '()\n', (18402, 18404), False, 'import time\n'), ((18788, 18810), 'time.perf_counter_ns', 'time.perf_counter_ns', ([], {}), '()\n', (18808, 18810), False, 'import time\n'), ((18917, 18931), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (18928, 18931), False, 'import cv2\n'), ((6496, 6511), 'tensorflow.rank', 'tf.rank', (['priors'], {}), '(priors)\n', (6503, 6511), True, 'import tensorflow as tf\n'), ((6993, 7011), 'tensorflow.rank', 'tf.rank', (['locations'], {}), '(locations)\n', (7000, 7011), True, 'import tensorflow as tf\n'), ((8741, 8757), 'tensorflow.shape', 'tf.shape', 
(['scores'], {}), '(scores)\n', (8749, 8757), True, 'import tensorflow as tf\n'), ((8783, 8799), 'tensorflow.shape', 'tf.shape', (['scores'], {}), '(scores)\n', (8791, 8799), True, 'import tensorflow as tf\n'), ((9076, 9092), 'tensorflow.shape', 'tf.shape', (['scores'], {}), '(scores)\n', (9084, 9092), True, 'import tensorflow as tf\n'), ((9526, 9566), 'tensorflow.boolean_mask', 'tf.boolean_mask', (['boxes', 'low_scoring_mask'], {}), '(boxes, low_scoring_mask)\n', (9541, 9566), True, 'import tensorflow as tf\n'), ((9568, 9609), 'tensorflow.boolean_mask', 'tf.boolean_mask', (['scores', 'low_scoring_mask'], {}), '(scores, low_scoring_mask)\n', (9583, 9609), True, 'import tensorflow as tf\n'), ((9611, 9652), 'tensorflow.boolean_mask', 'tf.boolean_mask', (['labels', 'low_scoring_mask'], {}), '(labels, low_scoring_mask)\n', (9626, 9652), True, 'import tensorflow as tf\n'), ((9760, 9782), 'tensorflow.gather', 'tf.gather', (['boxes', 'keep'], {}), '(boxes, keep)\n', (9769, 9782), True, 'import tensorflow as tf\n'), ((9784, 9807), 'tensorflow.gather', 'tf.gather', (['scores', 'keep'], {}), '(scores, keep)\n', (9793, 9807), True, 'import tensorflow as tf\n'), ((9809, 9832), 'tensorflow.gather', 'tf.gather', (['labels', 'keep'], {}), '(labels, keep)\n', (9818, 9832), True, 'import tensorflow as tf\n'), ((12070, 12107), 'tensorflow.shape', 'tf.shape', (['best_prior_per_target_index'], {}), '(best_prior_per_target_index)\n', (12078, 12107), True, 'import tensorflow as tf\n'), ((12642, 12701), 'tensorflow.ones_like', 'tf.ones_like', (['best_prior_per_target_index'], {'dtype': 'tf.float32'}), '(best_prior_per_target_index, dtype=tf.float32)\n', (12654, 12701), True, 'import tensorflow as tf\n'), ((13273, 13287), 'tensorflow.rank', 'tf.rank', (['boxes'], {}), '(boxes)\n', (13280, 13287), True, 'import tensorflow as tf\n'), ((13417, 13444), 'tensorflow.rank', 'tf.rank', (['center_form_priors'], {}), '(center_form_priors)\n', (13424, 13444), True, 'import tensorflow as tf\n'), 
((14325, 14355), 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['gt_boxes'], {}), '(gt_boxes)\n', (14345, 14355), True, 'import tensorflow as tf\n'), ((14422, 14453), 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['gt_labels'], {}), '(gt_labels)\n', (14442, 14453), True, 'import tensorflow as tf\n'), ((15758, 15806), 'numpy.sqrt', 'np.sqrt', (['(spec.box_sizes.max * spec.box_sizes.min)'], {}), '(spec.box_sizes.max * spec.box_sizes.min)\n', (15765, 15806), True, 'import numpy as np\n'), ((6695, 6742), 'tensorflow.math.exp', 'tf.math.exp', (['(locations[..., 2:] * size_variance)'], {}), '(locations[..., 2:] * size_variance)\n', (6706, 6742), True, 'import tensorflow as tf\n'), ((6773, 6791), 'tensorflow.rank', 'tf.rank', (['locations'], {}), '(locations)\n', (6780, 6791), True, 'import tensorflow as tf\n'), ((13695, 13764), 'tensorflow.math.log', 'tf.math.log', (['(center_form_boxes[..., 2:] / center_form_priors[..., 2:])'], {}), '(center_form_boxes[..., 2:] / center_form_priors[..., 2:])\n', (13706, 13764), True, 'import tensorflow as tf\n'), ((13793, 13819), 'tensorflow.rank', 'tf.rank', (['center_form_boxes'], {}), '(center_form_boxes)\n', (13800, 13819), True, 'import tensorflow as tf\n'), ((15958, 15972), 'numpy.sqrt', 'np.sqrt', (['ratio'], {}), '(ratio)\n', (15965, 15972), True, 'import numpy as np\n')] |
import numpy as np
import pandas as pd
df = pd.read_csv("inputs/puzzle_01_input.csv", header=None) # path from root
arr = df.T.to_numpy()[0]
sum_3 = np.convolve(arr, np.ones(3, dtype=int), mode="valid")
# ref: https://stackoverflow.com/questions/42472104/finding-the-sum-of-3-consecutive-numbers-in-an-array/42472226
print("Number of sums that are larger than the previous sum is:")
print(np.sum(np.diff(sum_3) > 0))
| [
"numpy.diff",
"numpy.ones",
"pandas.read_csv"
] | [((45, 99), 'pandas.read_csv', 'pd.read_csv', (['"""inputs/puzzle_01_input.csv"""'], {'header': 'None'}), "('inputs/puzzle_01_input.csv', header=None)\n", (56, 99), True, 'import pandas as pd\n'), ((170, 191), 'numpy.ones', 'np.ones', (['(3)'], {'dtype': 'int'}), '(3, dtype=int)\n', (177, 191), True, 'import numpy as np\n'), ((401, 415), 'numpy.diff', 'np.diff', (['sum_3'], {}), '(sum_3)\n', (408, 415), True, 'import numpy as np\n')] |
"""Contains the Monte Carlo simulation tests."""
import numpy as np
import copy
from trempy.estimate.estimate_auxiliary import estimate_cleanup
from trempy.shared.shared_auxiliary import print_init_dict
from trempy.config_trempy import PREFERENCE_PARAMETERS
from trempy.tests.test_auxiliary import random_dict
from trempy.custom_exceptions import TrempyError
from trempy.config_trempy import DEFAULT_BOUNDS
from trempy.read.read import ESTIMATION_GROUP
from trempy.estimate.estimate import estimate
from trempy.simulate.simulate import simulate
from trempy.config_trempy import SMALL_FLOAT
from trempy.read.read import read
def basic_dict(version, fname, optimizer, maxfun, num_agents, std=None,
               eps=None, ftol=None, gtol=None):
    """Generate basic dictionary for Monte Carlo Simulations.

    Args:
        version: model version, e.g. 'nonstationary'.
        fname: stem of the initialization file.
        optimizer: name of the optimizer section, e.g. 'SCIPY-L-BFGS-B'.
        maxfun: maximum number of criterion evaluations.
        num_agents: number of simulated agents.
        std: optional mapping from question id to its standard deviation.
        eps: optional optimizer tolerance, defaults to 1e-05.
        ftol: optional optimizer tolerance, defaults to 1e-08.
        gtol: optional optimizer tolerance, defaults to 1e-08.

    Returns:
        A random initialization dictionary honoring the constraints.
    """
    init_dict = random_dict({
        'version': version, 'fname': fname, 'num_agents': num_agents,
        'maxfun': maxfun, 'optimizer': optimizer, 'all_questions': True,
    })

    # Overwrite the standard deviation of the requested questions.
    if std is not None:
        for question, deviation in std.items():
            init_dict['QUESTIONS'][question][0] = deviation

    # Fill in default tolerances and forward each one to the optimizer
    # section whenever that section actually carries the option.
    tolerances = {
        'eps': 1e-05 if eps is None else eps,
        'ftol': 1e-08 if ftol is None else ftol,
        'gtol': 1e-08 if gtol is None else gtol,
    }
    for option, setting in tolerances.items():
        if option in init_dict[optimizer]:
            init_dict[optimizer][option] = setting

    return init_dict
def set_questions(init_dict, is_fixed, std=None):
    """Update the fixed/free status and standard deviations of all questions.

    The dictionary is modified in-place.

    Args:
        init_dict: initialization dictionary with a 'QUESTIONS' section.
        is_fixed: either the string 'fix_all' (fix every question) or a
            sequence of booleans, one per question.
        std: optional sequence of standard deviations, one per question.
    """
    questions = init_dict['QUESTIONS']
    num_questions = len(questions)

    # Update the free/fixed status of each question parameter.
    if is_fixed == 'fix_all':
        for label in questions:
            questions[label][1] = True
    else:
        np.testing.assert_equal(len(is_fixed), num_questions)
        for idx, flag in enumerate(is_fixed):
            questions[idx][1] = flag

    # Optionally overwrite the standard deviations.
    if std is not None:
        np.testing.assert_equal(len(std), num_questions)
        for idx, deviation in enumerate(std):
            questions[idx][0] = deviation
def remove_cutoffs(init_dict):
    """Remove all cutoffs from the initialization dictionary.

    Args:
        init_dict: initialization dictionary; its 'CUTOFFS' section is
            replaced by an empty dict in-place.

    Returns:
        The modified initialization dictionary.
    """
    init_dict['CUTOFFS'] = dict()
    # Bug fix: the original returned the builtin ``dict`` type instead of
    # the modified dictionary.
    return init_dict
def estimate_at_truth(fix_question_paras):
    """Stability of the likelihood at the truth."""
    estimate_cleanup()

    init_dict = basic_dict(
        version='nonstationary', optimizer='SCIPY-L-BFGS-B', fname='truth',
        num_agents=2000, maxfun=1000)
    set_questions(init_dict, is_fixed=fix_question_paras, std=None)

    sim_seed = init_dict['SIMULATION']['seed']
    model_version = init_dict['VERSION']['version']

    print_init_dict(init_dict, fname='truth.trempy.ini')

    # Simulate a sample at the truth and run the estimation on it.
    _, fval = simulate('truth.trempy.ini')
    est_output = estimate('truth.trempy.ini')

    # Collect and report the estimated against the true parameters.
    estimated_dict = read('stop/stop.trempy.ini')

    records = []
    for group in ESTIMATION_GROUP[model_version]:
        for label in init_dict[group]:
            truth_value, is_fixed, _ = init_dict[group][label]
            fitted_value = estimated_dict[group][label][0]
            # Skip unused or fixed parameters.
            if truth_value is None or is_fixed is True:
                continue
            records.append([sim_seed, fval, est_output[0], label, truth_value, fitted_value])
            print('{0:<25} {1:<15}'.format('Parameter:', label))
            print('-------------------------')
            print('{0:<25} {1:5.4f}'.format('Truth:', truth_value))
            print('{0:<25} {1:5.4f}'.format('Estimated value:', fitted_value))
            print(' ------------------------- ')

    print('sim seed: {:>25}'.format(sim_seed))
    print('fval at truth: {:>25}'.format(fval))
    print(' ------------------------- ')

    return records
def perturbate_econ(init_dict, no_temporal_choices=True, max_dist=None):
    """Perturbate all economic parameters and set bounds to default bounds.

    Args:
        init_dict: initialization dictionary, perturbed in-place.
        no_temporal_choices: if True, parameters in the DISCOUNTING group
            keep their current value and are fixed for estimation.
        max_dist: optional maximal distance of the new value from the
            current one; if None, new values are drawn from the full
            default bounds.

    Returns:
        Tuple of (reference dictionary with refreshed bounds, perturbed
        dictionary).
    """
    old_dict = copy.deepcopy(init_dict)
    version = init_dict['VERSION']['version']

    for group in ESTIMATION_GROUP[version]:
        for label in PREFERENCE_PARAMETERS[version]:
            if label not in init_dict[group]:
                continue

            value, is_fixed, _ = init_dict[group][label]
            # Skip optional or unused arguments.
            if value is None:
                continue

            lower, upper = DEFAULT_BOUNDS[label]

            if max_dist is None:
                # Draw freely from the default bounds.
                new_value = np.random.uniform(lower, upper)
            else:
                # Move the parameter by at most max_dist, clipped to bounds.
                draw = np.random.uniform(value - max_dist, value + max_dist)
                new_value = max(lower, min(upper, draw))

            if group == 'DISCOUNTING' and no_temporal_choices is True:
                is_fixed, new_value = True, value
            else:
                is_fixed = False

            old_dict[group][label] = [value, is_fixed, [lower, upper]]
            init_dict[group][label] = [new_value, is_fixed, [lower, upper]]

    return old_dict, init_dict
def pertubation_robustness_all(version, no_temporal_choices=True,
                               max_dist=None, set_std_to=None):
    """Test pertubation of all parameters."""
    # Start from a clean directory and a random initialization file.
    estimate_cleanup()

    init_dict = basic_dict(
        version=version, optimizer='SCIPY-L-BFGS-B', fname='truth',
        num_agents=2000, maxfun=1000)

    # Optionally pin all question standard deviations to a common value
    # with (numerically) tight bounds.
    if set_std_to is not None:
        for label in init_dict['QUESTIONS']:
            init_dict['QUESTIONS'][label][0] = set_std_to
            init_dict['QUESTIONS'][label][2] = [set_std_to - SMALL_FLOAT, set_std_to + SMALL_FLOAT]

    set_questions(init_dict, is_fixed='fix_all', std=None)

    seed = init_dict['SIMULATION']['seed']
    version = init_dict['VERSION']['version']

    print_init_dict(init_dict, fname='truth.trempy.ini')

    # Perturb the economic parameters and write both specifications.
    truth_dict, perturbed_dict = perturbate_econ(
        init_dict, no_temporal_choices=no_temporal_choices, max_dist=max_dist)
    print_init_dict(perturbed_dict, fname='perturbed.trempy.ini')

    # Simulate data at the truth and report the criterion value there.
    _, fval = simulate('truth.trempy.ini')
    print('fval at truth: {:>25}'.format(fval))

    # Estimate starting from the perturbed values.
    estimate('perturbed.trempy.ini')
    estimated_dict = read('stop/stop.trempy.ini')

    for group in ESTIMATION_GROUP[version]:
        for label in init_dict[group]:
            start_value, is_fixed, _ = truth_dict[group][label]
            perturbed_value = perturbed_dict[group][label][0]
            estimated_value = estimated_dict[group][label][0]
            # Skip unused or fixed parameters.
            if start_value is None or is_fixed is True:
                continue
            print('{0:<25} {1:<15}'.format('Parameter:', label))
            print('-------------------------')
            print('{0:<25} {1:5.4f}'.format('Start:', start_value))
            print('{0:<25} {1:5.4f}'.format('Perturbated value:', perturbed_value))
            print('{0:<25} {1:5.4f}'.format('Estimated value:', estimated_value))

    print('Seed: {:>25}'.format(seed))
    print('fval_truth: {:>25}'.format(fval))
def perturbate_single(init_dict, label, value=None):
"""Perturbate a single parameter and fix all other parameters for estimation.
We also set the bounds for the perturbed parameter to its default bounds.
This increases the scope for perturbations.
"""
old_dict = copy.deepcopy(init_dict)
version = init_dict['VERSION']['version']
if label not in PREFERENCE_PARAMETERS[version]:
raise TrempyError('Version {0} has no parameters {1}'.format(version, label))
# Fix variance for each question.
for q in init_dict['QUESTIONS'].keys():
init_dict['QUESTIONS'][q][1] = True
# Handle optional parameters
if label.startswith('unrestricted_weights'):
not_used = (None in init_dict['TEMPORAL'].values())
if not_used:
raise TrempyError('Cannot set value for unused argument: {}.'.format(label))
# Fix every parameter except for perturbed one. The perturbed one is "un-fixed".
for group in ESTIMATION_GROUP[version]:
for key in init_dict[group].keys():
current_value, _, bounds = init_dict[group][key]
if key == label:
# Reset bounds to default
lower, upper = DEFAULT_BOUNDS[label]
# If no value is specified, draw a random value.
if value is None:
value = np.random.uniform(lower + SMALL_FLOAT, upper - SMALL_FLOAT)
init_dict[group][key] = [value, False, [lower, upper]]
# Also, override old bounds in old dict.
old_dict[group][key] = [current_value, False, [lower, upper]]
# Fix all other parameters.
else:
init_dict[group][key] = [current_value, True, bounds]
return old_dict, init_dict
def pertubation_robustness_single(version, label=None, value=None, num_agents=None, maxfun=None,
optimizer='SCIPY-BFGS'):
"""Check robustness against single perturbations."""
if label is None:
label = np.random.choice(PREFERENCE_PARAMETERS[version])
# Get random init file
constr = {'version': version, 'fname': 'perturb.start'}
if num_agents is not None:
constr['num_agents'] = num_agents
if maxfun is None:
constr['maxfun'] = 50
else:
constr['maxfun'] = maxfun
init_dict = random_dict(constr)
init_dict['ESTIMATION']['optimizer'] = optimizer
init_dict['SCIPY-POWELL']['ftol'] = 0.1
init_dict['SCIPY-POWELL']['xtol'] = 0.01
init_dict['SCIPY-BFGS']['eps'] = 1.4901161193847656e-08
init_dict['SCIPY-BFGS']['gtol'] = 1e-05
init_dict['SCIPY-L-BFGS-B']['eps'] = 1.4901161193847656e-08
init_dict['SCIPY-L-BFGS-B']['gtol'] = 1.5e-08
init_dict['SCIPY-L-BFGS-B']['ftol'] = 1.5e-08
# Perturb parameters
old_dict, perturbated = perturbate_single(init_dict, label=label, value=value)
# Save dicts
print_init_dict(old_dict, 'perturb.start')
print_init_dict(perturbated, 'perturb.end')
# Simulate data from init file
simulate('perturb.start')
# Estimate starting from perturbed values
estimate('perturb.end')
# os.chdir('stop')
estimated_dict = read('stop/stop.trempy.ini')
# os.chdir('../')
for group in ESTIMATION_GROUP[version]:
for key in init_dict[group].keys():
if key == label:
start_value = old_dict[group][key][0]
perturbed_value = perturbated[group][key][0]
estimated_value = estimated_dict[group][key][0]
print('{0:<25} {1:<15}'.format('Parameter:', label))
print('-------------------------')
print('{0:<25} {1:5.4f}'.format('Start:', start_value))
print('{0:<25} {1:5.4f}'.format('Perturbated value:', perturbed_value))
print('{0:<25} {1:5.4f}'.format('Estimated value:', estimated_value))
| [
"trempy.simulate.simulate.simulate",
"numpy.random.choice",
"trempy.tests.test_auxiliary.random_dict",
"trempy.estimate.estimate_auxiliary.estimate_cleanup",
"numpy.random.uniform",
"trempy.read.read.read",
"copy.deepcopy",
"trempy.estimate.estimate.estimate",
"trempy.shared.shared_auxiliary.print_i... | [((993, 1012), 'trempy.tests.test_auxiliary.random_dict', 'random_dict', (['constr'], {}), '(constr)\n', (1004, 1012), False, 'from trempy.tests.test_auxiliary import random_dict\n'), ((2423, 2441), 'trempy.estimate.estimate_auxiliary.estimate_cleanup', 'estimate_cleanup', ([], {}), '()\n', (2439, 2441), False, 'from trempy.estimate.estimate_auxiliary import estimate_cleanup\n'), ((2759, 2811), 'trempy.shared.shared_auxiliary.print_init_dict', 'print_init_dict', (['init_dict'], {'fname': '"""truth.trempy.ini"""'}), "(init_dict, fname='truth.trempy.ini')\n", (2774, 2811), False, 'from trempy.shared.shared_auxiliary import print_init_dict\n'), ((2827, 2855), 'trempy.simulate.simulate.simulate', 'simulate', (['"""truth.trempy.ini"""'], {}), "('truth.trempy.ini')\n", (2835, 2855), False, 'from trempy.simulate.simulate import simulate\n'), ((2873, 2901), 'trempy.estimate.estimate.estimate', 'estimate', (['"""truth.trempy.ini"""'], {}), "('truth.trempy.ini')\n", (2881, 2901), False, 'from trempy.estimate.estimate import estimate\n'), ((2943, 2971), 'trempy.read.read.read', 'read', (['"""stop/stop.trempy.ini"""'], {}), "('stop/stop.trempy.ini')\n", (2947, 2971), False, 'from trempy.read.read import read\n'), ((4002, 4026), 'copy.deepcopy', 'copy.deepcopy', (['init_dict'], {}), '(init_dict)\n', (4015, 4026), False, 'import copy\n'), ((5549, 5567), 'trempy.estimate.estimate_auxiliary.estimate_cleanup', 'estimate_cleanup', ([], {}), '()\n', (5565, 5567), False, 'from trempy.estimate.estimate_auxiliary import estimate_cleanup\n'), ((6131, 6183), 'trempy.shared.shared_auxiliary.print_init_dict', 'print_init_dict', (['init_dict'], {'fname': '"""truth.trempy.ini"""'}), "(init_dict, fname='truth.trempy.ini')\n", (6146, 6183), False, 'from trempy.shared.shared_auxiliary import print_init_dict\n'), ((6344, 6405), 'trempy.shared.shared_auxiliary.print_init_dict', 'print_init_dict', (['perturbed_dict'], {'fname': 
'"""perturbed.trempy.ini"""'}), "(perturbed_dict, fname='perturbed.trempy.ini')\n", (6359, 6405), False, 'from trempy.shared.shared_auxiliary import print_init_dict\n'), ((6487, 6515), 'trempy.simulate.simulate.simulate', 'simulate', (['"""truth.trempy.ini"""'], {}), "('truth.trempy.ini')\n", (6495, 6515), False, 'from trempy.simulate.simulate import simulate\n'), ((6615, 6647), 'trempy.estimate.estimate.estimate', 'estimate', (['"""perturbed.trempy.ini"""'], {}), "('perturbed.trempy.ini')\n", (6623, 6647), False, 'from trempy.estimate.estimate import estimate\n'), ((6669, 6697), 'trempy.read.read.read', 'read', (['"""stop/stop.trempy.ini"""'], {}), "('stop/stop.trempy.ini')\n", (6673, 6697), False, 'from trempy.read.read import read\n'), ((7773, 7797), 'copy.deepcopy', 'copy.deepcopy', (['init_dict'], {}), '(init_dict)\n', (7786, 7797), False, 'import copy\n'), ((9851, 9870), 'trempy.tests.test_auxiliary.random_dict', 'random_dict', (['constr'], {}), '(constr)\n', (9862, 9870), False, 'from trempy.tests.test_auxiliary import random_dict\n'), ((10415, 10457), 'trempy.shared.shared_auxiliary.print_init_dict', 'print_init_dict', (['old_dict', '"""perturb.start"""'], {}), "(old_dict, 'perturb.start')\n", (10430, 10457), False, 'from trempy.shared.shared_auxiliary import print_init_dict\n'), ((10462, 10505), 'trempy.shared.shared_auxiliary.print_init_dict', 'print_init_dict', (['perturbated', '"""perturb.end"""'], {}), "(perturbated, 'perturb.end')\n", (10477, 10505), False, 'from trempy.shared.shared_auxiliary import print_init_dict\n'), ((10546, 10571), 'trempy.simulate.simulate.simulate', 'simulate', (['"""perturb.start"""'], {}), "('perturb.start')\n", (10554, 10571), False, 'from trempy.simulate.simulate import simulate\n'), ((10623, 10646), 'trempy.estimate.estimate.estimate', 'estimate', (['"""perturb.end"""'], {}), "('perturb.end')\n", (10631, 10646), False, 'from trempy.estimate.estimate import estimate\n'), ((10692, 10720), 'trempy.read.read.read', 'read', 
(['"""stop/stop.trempy.ini"""'], {}), "('stop/stop.trempy.ini')\n", (10696, 10720), False, 'from trempy.read.read import read\n'), ((9528, 9576), 'numpy.random.choice', 'np.random.choice', (['PREFERENCE_PARAMETERS[version]'], {}), '(PREFERENCE_PARAMETERS[version])\n', (9544, 9576), True, 'import numpy as np\n'), ((4633, 4686), 'numpy.random.uniform', 'np.random.uniform', (['(value - max_dist)', '(value + max_dist)'], {}), '(value - max_dist, value + max_dist)\n', (4650, 4686), True, 'import numpy as np\n'), ((4885, 4916), 'numpy.random.uniform', 'np.random.uniform', (['lower', 'upper'], {}), '(lower, upper)\n', (4902, 4916), True, 'import numpy as np\n'), ((8849, 8908), 'numpy.random.uniform', 'np.random.uniform', (['(lower + SMALL_FLOAT)', '(upper - SMALL_FLOAT)'], {}), '(lower + SMALL_FLOAT, upper - SMALL_FLOAT)\n', (8866, 8908), True, 'import numpy as np\n')] |
import scipy.ndimage as scnd
import scipy.optimize as sio
import numpy as np
import stemtool as st
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import matplotlib_scalebar.scalebar as mpss
import matplotlib.offsetbox as mploff
import matplotlib.gridspec as mpgs
import matplotlib as mpl
class atomic_dpc(object):
"""
Atomic Resolution DPC estimation
Parameters
----------
Data_4D: ndarray
Four-dimensional dataset where the first two
dimensions are real space scanning dimensions,
while the last two dimenions are the Fourier
space electron diffraction patterns
Data_ADF: ndarray
Simultaneously collected two-dimensional ADF-STEM
image
calib_pm: float
Real space pixel calibration in picometers
voltage: float
Microscope accelerating voltage in kV
aperture: float
The probe forming condenser aperture in milliradians
Notes
-----
This class function takes in a 4D-STEM image, and a simultaneously
collected atomic resolution ADF-STEM image. Based on the accelerating
voltage and the condenser aperture this calculates the center of mass
(C.O.M.) shifts in the central undiffracted beam. Using the idea that
the curl of the beam shift vectors, should be minimized at the correct
Fourier rotation angles, this class also corrects for rotation of the
collceted 4D-STEM data with respect to the optic axis. Using these, a
correct potential accumulation and charge accumulation maps could be
built. To prevent errors, we convert everything to SI units first.
Examples
--------
Run as:
>>> DPC = st.dpc.atomic_dpc(Data_4D, DataADF, calibration, voltage, aper)
Once the data is loaded, the ADF-STEM and the BF-STEM images could be
visualized as:
>>> DPC.show_ADF_BF()
Then the following call generates the mean CBED image, and if the show_image
call is True, shows the mean image.
>>> DPC.get_cbed(show_image = True)
The initial uncorrected DPC shifts are generated as:
>>> DPC.initial_dpc()
The corrected DPC shifts are generated:
>>> DPC.correct_dpc()
The charge map is generated through:
>>> DPC.show_charge()
While the potential map is generated though:
>>> DPC.show_potential()
If a section of the image needs to be observed, to visualize the beam shifts,
call the following:
>>> DPC.plot_color_dpc()
References
----------
.. [1] <NAME>. et al. "Atomic electric fields revealed by a quantum mechanical
approach to electron picodiffraction". Nat. Commun. 5:565303 doi: 10.1038/ncomms6653 (2014)
.. [2] Savitzky, <NAME>., <NAME>, <NAME>, <NAME>,
<NAME>, <NAME>, <NAME> et al. "py4DSTEM: a software package for
multimodal analysis of four-dimensional scanning transmission electron microscopy datasets."
arXiv preprint arXiv:2003.09523 (2020).
.. [3] Ishizuka, Akimitsu, <NAME>, <NAME>, <NAME>,
and <NAME>. "Boundary-artifact-free determination of
potential distribution from differential phase contrast signals."
Microscopy 66, no. 6 (2017): 397-405.
"""
def __init__(self, Data_4D, Data_ADF, calib_pm, voltage, aperture):
"""
Load the user defined values.
It also calculates the wavelength based on the accelerating voltage
This also loads several SI constants as the following attributes
`planck`: The Planck's constant
`epsilon0`: The dielectric permittivity of free space
`e_charge`: The charge of an electron in Coulombs
"""
self.data_adf = Data_ADF
self.data_4D = Data_4D
self.calib = calib_pm
self.voltage = voltage * 1000 # convert to volts
self.wavelength = st.sim.wavelength_ang(voltage) * (
10 ** (-10)
) # convert to meters
self.aperture = aperture / 1000 # convert to radians
self.planck = 6.62607004 * (10 ** (-34))
self.epsilon0 = 8.85418782 * (10 ** (-12))
self.e_charge = (-1) * 1.60217662 * (10 ** (-19))
e_mass = 9.109383 * (10 ** (-31))
c = 299792458
self.sigma = (
(2 * np.pi / (self.wavelength * self.voltage))
* ((e_mass * (c ** 2)) + (self.e_charge * self.voltage))
) / ((2 * e_mass * (c ** 2)) + (self.e_charge * self.voltage))
def show_ADF_BF(self, imsize=(20, 10)):
"""
The ADF-STEM image is already loaded, while the `data_bf`
attribute is obtained by summing up the 4D-STEM dataset along it's
Fourier dimensions. This is also a great checkpoint to see whether
the ADF-STEM and the BF-STEM images are the inverse of each other.
"""
self.data_bf = np.sum(self.data_4D, axis=(-1, -2))
fontsize = int(np.amax(np.asarray(imsize)))
plt.figure(figsize=imsize)
plt.subplot(1, 2, 1)
plt.imshow(self.data_adf, cmap="inferno")
scalebar = mpss.ScaleBar(self.calib / 1000, "nm")
scalebar.location = "lower right"
scalebar.box_alpha = 0
scalebar.color = "w"
plt.gca().add_artist(scalebar)
plt.axis("off")
at = mploff.AnchoredText(
"ADF-STEM", prop=dict(size=fontsize), frameon=True, loc="lower left"
)
at.patch.set_boxstyle("round, pad=0., rounding_size=0.2")
plt.gca().add_artist(at)
plt.subplot(1, 2, 2)
plt.imshow(self.data_bf, cmap="inferno")
scalebar = mpss.ScaleBar(self.calib / 1000, "nm")
scalebar.location = "lower right"
scalebar.box_alpha = 0
scalebar.color = "w"
plt.gca().add_artist(scalebar)
plt.axis("off")
at = mploff.AnchoredText(
"Summed 4D-STEM", prop=dict(size=fontsize), frameon=True, loc="lower left"
)
at.patch.set_boxstyle("round, pad=0., rounding_size=0.2")
plt.gca().add_artist(at)
plt.tight_layout()
def get_cbed(self, imsize=(15, 15), show_image=False):
"""
We calculate the mean CBED pattern by averaging the Fourier data, to
get the object attribute `cbed`. We fit this with a circle function to
obtain the object attributes:
`beam_x`: x-coordinates of the circle
`beam_y`: y-coordinates of the circle
`beam_r`: radius of the circle
We use the calculated radius and the known aperture size to get the Fourier
space calibration, which is stored as the `inverse` attribute
"""
self.cbed = np.mean(self.data_4D, axis=(0, 1))
self.beam_x, self.beam_y, self.beam_r = st.util.sobel_circle(self.cbed)
self.inverse = self.aperture / (self.beam_r * self.wavelength)
if show_image:
plt.figure(figsize=imsize)
plt.imshow(self.cbed, cmap="inferno")
scalebar = mpss.ScaleBar(self.inverse, "1/m", mpss.SI_LENGTH_RECIPROCAL)
scalebar.location = "lower right"
scalebar.box_alpha = 1
scalebar.color = "k"
plt.gca().add_artist(scalebar)
plt.axis("off")
def initial_dpc(self, imsize=(30, 17), normalize=True):
"""
This calculates the initial DPC center of mass shifts by measuring
the center of mass of each image in the 4D-STEM dataset, and then
comparing that center of mass with the average disk center of the
entire dataset.
"""
qq, pp = np.mgrid[0 : self.data_4D.shape[-1], 0 : self.data_4D.shape[-2]]
yy, xx = np.mgrid[0 : self.data_4D.shape[0], 0 : self.data_4D.shape[1]]
yy = np.ravel(yy)
xx = np.ravel(xx)
self.YCom = np.empty(self.data_4D.shape[0:2], dtype=np.float)
self.XCom = np.empty(self.data_4D.shape[0:2], dtype=np.float)
for ii in range(len(yy)):
pattern = self.data_4D[yy[ii], xx[ii], :, :]
self.YCom[yy[ii], xx[ii]] = self.inverse * (
(np.sum(np.multiply(qq, pattern)) / np.sum(pattern)) - self.beam_y
)
self.XCom[yy[ii], xx[ii]] = self.inverse * (
(np.sum(np.multiply(pp, pattern)) / np.sum(pattern)) - self.beam_x
)
if normalize:
self.YCom = self.YCom - np.mean(self.YCom)
self.XCom = self.XCom - np.mean(self.XCom)
vm = (np.amax(np.abs(np.concatenate((self.XCom, self.YCom), axis=1)))) / (
10 ** 9
)
fontsize = int(0.9 * np.amax(np.asarray(imsize)))
sc_font = {"weight": "bold", "size": fontsize}
plt.figure(figsize=imsize)
gs = mpgs.GridSpec(imsize[1], imsize[0])
ax1 = plt.subplot(gs[0:15, 0:15])
ax2 = plt.subplot(gs[0:15, 15:30])
ax3 = plt.subplot(gs[15:17, :])
ax1.imshow(self.XCom / (10 ** 9), vmin=-vm, vmax=vm, cmap="RdBu_r")
scalebar = mpss.ScaleBar(self.calib / 1000, "nm")
scalebar.location = "lower right"
scalebar.box_alpha = 1
scalebar.color = "k"
ax1.add_artist(scalebar)
at = mploff.AnchoredText(
"Shift in X direction",
prop=dict(size=fontsize),
frameon=True,
loc="upper left",
)
at.patch.set_boxstyle("round, pad= 0., rounding_size= 0.2")
ax1.add_artist(at)
ax1.axis("off")
ax2.imshow(self.YCom / (10 ** 9), vmin=-vm, vmax=vm, cmap="RdBu_r")
scalebar = mpss.ScaleBar(self.calib / 1000, "nm")
scalebar.location = "lower right"
scalebar.box_alpha = 1
scalebar.color = "k"
ax2.add_artist(scalebar)
at = mploff.AnchoredText(
"Shift in Y direction",
prop=dict(size=fontsize),
frameon=True,
loc="upper left",
)
at.patch.set_boxstyle("round, pad= 0., rounding_size= 0.2")
ax2.add_artist(at)
ax2.axis("off")
sb = np.zeros((10, 1000), dtype=np.float)
for ii in range(10):
sb[ii, :] = np.linspace(-vm, vm, 1000)
ax3.imshow(sb, cmap="RdBu_r")
ax3.yaxis.set_visible(False)
x1 = np.linspace(0, 1000, 8)
ax3.set_xticks(x1)
ax3.set_xticklabels(np.round(np.linspace(-vm, vm, 8), 2))
for axis in ["top", "bottom", "left", "right"]:
ax3.spines[axis].set_linewidth(2)
ax3.spines[axis].set_color("black")
ax3.xaxis.set_tick_params(width=2, length=6, direction="out", pad=10)
ax3.set_title(r"$\mathrm{Beam\: Shift\: \left(nm^{-1}\right)}$", **sc_font)
plt.tight_layout()
def correct_dpc(self, imsize=(30, 17)):
"""
This corrects for the rotation angle of the pixellated detector
with respect to the optic axis. Some pixellated detectors flip
the image, and if there is an image flip, it corrects it too.
The mechanism of this, we compare the gradient of both the flipped
and the unflipped DPC data at multiple rotation angles, and the value
that has the highest relative contrast with the ADF-STEM image is taken
as 90 degrees from the correct angle.
"""
flips = np.zeros(4, dtype=bool)
flips[2:4] = True
chg_sums = np.zeros(4, dtype=self.XCom.dtype)
angles = np.zeros(4, dtype=self.YCom.dtype)
x0 = 90
for ii in range(2):
to_flip = flips[2 * ii]
if to_flip:
xdpcf = np.flip(self.XCom)
else:
xdpcf = self.XCom
rho_dpc, phi_dpc = st.dpc.cart2pol(self.XCom, self.YCom)
x = sio.minimize(st.dpc.angle_fun, x0, args=(rho_dpc, phi_dpc))
min_x = x.x
sol1 = min_x - 90
sol2 = min_x + 90
chg_sums[int(2 * ii)] = np.sum(
st.dpc.charge_dpc(xdpcf, self.YCom, sol1) * self.data_adf
)
chg_sums[int(2 * ii + 1)] = np.sum(
st.dpc.charge_dpc(xdpcf, self.YCom, sol2) * self.data_adf
)
angles[int(2 * ii)] = sol1
angles[int(2 * ii + 1)] = sol2
self.angle = (-1) * angles[chg_sums == np.amin(chg_sums)][0]
self.final_flip = flips[chg_sums == np.amin(chg_sums)][0]
if self.final_flip:
xdpcf = np.fliplr(self.XCom)
else:
xdpcf = np.copy(self.XCom)
rho_dpc, phi_dpc = st.dpc.cart2pol(xdpcf, self.YCom)
self.XComC, self.YComC = st.dpc.pol2cart(
rho_dpc, (phi_dpc - (self.angle * ((np.pi) / 180)))
)
vm = (np.amax(np.abs(np.concatenate((self.XComC, self.YComC), axis=1)))) / (
10 ** 9
)
fontsize = int(0.9 * np.max(imsize))
sc_font = {"weight": "bold", "size": fontsize}
plt.figure(figsize=imsize)
gs = mpgs.GridSpec(imsize[1], imsize[0])
ax1 = plt.subplot(gs[0:15, 0:15])
ax2 = plt.subplot(gs[0:15, 15:30])
ax3 = plt.subplot(gs[15:17, :])
ax1.imshow(self.XComC / (10 ** 9), vmin=-vm, vmax=vm, cmap="RdBu_r")
scalebar = mpss.ScaleBar(self.calib / 1000, "nm")
scalebar.location = "lower right"
scalebar.box_alpha = 1
scalebar.color = "k"
ax1.add_artist(scalebar)
at = mploff.AnchoredText(
"Corrected shift in X direction",
prop=dict(size=fontsize),
frameon=True,
loc="upper left",
)
at.patch.set_boxstyle("round, pad= 0., rounding_size= 0.2")
ax1.add_artist(at)
ax1.axis("off")
ax2.imshow(self.YComC / (10 ** 9), vmin=-vm, vmax=vm, cmap="RdBu_r")
scalebar = mpss.ScaleBar(self.calib / 1000, "nm")
scalebar.location = "lower right"
scalebar.box_alpha = 1
scalebar.color = "k"
ax2.add_artist(scalebar)
at = mploff.AnchoredText(
"Corrected shift in Y direction",
prop=dict(size=fontsize),
frameon=True,
loc="upper left",
)
at.patch.set_boxstyle("round, pad= 0., rounding_size= 0.2")
ax2.add_artist(at)
ax2.axis("off")
sb = np.zeros((10, 1000), dtype=np.float)
for ii in range(10):
sb[ii, :] = np.linspace(-vm, vm, 1000)
ax3.imshow(sb, cmap="RdBu_r")
ax3.yaxis.set_visible(False)
x1 = np.linspace(0, 1000, 8)
ax3.set_xticks(x1)
ax3.set_xticklabels(np.round(np.linspace(-vm, vm, 8), 2))
for axis in ["top", "bottom", "left", "right"]:
ax3.spines[axis].set_linewidth(2)
ax3.spines[axis].set_color("black")
ax3.xaxis.set_tick_params(width=2, length=6, direction="out", pad=10)
ax3.set_title(r"$\mathrm{Beam\: Shift\: \left(nm^{-1}\right)}$", **sc_font)
plt.tight_layout()
self.MomentumX = self.planck * self.XComC
self.MomentumY = self.planck * self.YComC
# assuming infinitely thin sample
self.e_fieldX = self.MomentumX / self.e_charge
self.e_fieldY = self.MomentumY / self.e_charge
def show_charge(self, imsize=(15, 17)):
"""
We calculate the charge from the corrected DPC
center of mass datasets. This is done through
Poisson's equation.
"""
fontsize = int(np.amax(np.asarray(imsize)))
# Use Poisson's equation
self.charge = (
(
(np.gradient(self.e_fieldX)[1] + np.gradient(self.e_fieldY)[0])
* (self.calib * (10 ** (-12)))
)
* self.epsilon0
* 4
* np.pi
)
cm = np.amax(np.abs(self.charge))
plt.figure(figsize=imsize)
fontsize = int(0.9 * np.max(imsize))
sc_font = {"weight": "bold", "size": fontsize}
gs = mpgs.GridSpec(imsize[1], imsize[0])
ax1 = plt.subplot(gs[0:15, 0:15])
ax2 = plt.subplot(gs[15:17, :])
ax1.imshow(self.charge, vmin=-cm, vmax=cm, cmap="RdBu_r")
scalebar = mpss.ScaleBar(self.calib / 1000, "nm")
scalebar.location = "lower right"
scalebar.box_alpha = 1
scalebar.color = "k"
ax1.add_artist(scalebar)
ax1.axis("off")
at = mploff.AnchoredText(
"Charge from DPC", prop=dict(size=fontsize), frameon=True, loc="lower left"
)
at.patch.set_boxstyle("round,pad=0.,rounding_size=0.2")
ax1.add_artist(at)
sb = np.zeros((10, 1000), dtype=np.float)
for ii in range(10):
sb[ii, :] = np.linspace(cm / self.e_charge, -(cm / self.e_charge), 1000)
ax2.imshow(sb, cmap="RdBu_r")
ax2.yaxis.set_visible(False)
no_labels = 7
x1 = np.linspace(0, 1000, no_labels)
ax2.set_xticks(x1)
ax2.set_xticklabels(
np.round(
np.linspace(cm / self.e_charge, -(cm / self.e_charge), no_labels), 6
)
)
for axis in ["top", "bottom", "left", "right"]:
ax2.spines[axis].set_linewidth(2)
ax2.spines[axis].set_color("black")
ax2.xaxis.set_tick_params(width=2, length=6, direction="out", pad=10)
ax2.set_title(r"$\mathrm{Charge\: Density\: \left(e^{-} \right)}$", **sc_font)
plt.tight_layout()
def show_potential(self, imsize=(15, 17)):
"""
Calculate the projected potential from the DPC measurements.
This is accomplished by calculating the phase shift iteratively
from the normalized center of mass shifts. Normalization means
calculating COM shifts in inverse length units and then multiplying
them with the electron wavelength to get an electron independent
mrad shift, which is used to generate the phase. This phase is
proportional to the projected potential for weak phase object
materials (with *lots* of assumptions)
"""
fontsize = int(np.amax(np.asarray(imsize)))
self.phase = st.dpc.integrate_dpc(
self.XComC * self.wavelength, self.YComC * self.wavelength
)
self.potential = self.phase / self.sigma
pm = np.amax(np.abs(self.potential)) * (10 ** 10)
plt.figure(figsize=imsize)
fontsize = int(0.9 * np.max(imsize))
sc_font = {"weight": "bold", "size": fontsize}
gs = mpgs.GridSpec(imsize[1], imsize[0])
ax1 = plt.subplot(gs[0:15, 0:15])
ax2 = plt.subplot(gs[15:17, :])
ax1.imshow(self.potential * (10 ** 10), vmin=-pm, vmax=pm, cmap="RdBu_r")
scalebar = mpss.ScaleBar(self.calib / 1000, "nm")
scalebar.location = "lower right"
scalebar.box_alpha = 1
scalebar.color = "k"
ax1.add_artist(scalebar)
ax1.axis("off")
at = mploff.AnchoredText(
"Calculated projected potential from DPC phase",
prop=dict(size=fontsize),
frameon=True,
loc="lower left",
)
at.patch.set_boxstyle("round,pad=0.,rounding_size=0.2")
ax1.add_artist(at)
sb = np.zeros((10, 1000), dtype=np.float)
for ii in range(10):
sb[ii, :] = np.linspace(-pm, pm, 1000)
ax2.imshow(sb, cmap="RdBu_r")
ax2.yaxis.set_visible(False)
no_labels = 7
x1 = np.linspace(0, 1000, no_labels)
ax2.set_xticks(x1)
ax2.set_xticklabels(np.round(np.linspace(-pm, pm, no_labels), 6))
for axis in ["top", "bottom", "left", "right"]:
ax2.spines[axis].set_linewidth(2)
ax2.spines[axis].set_color("black")
ax2.xaxis.set_tick_params(width=2, length=6, direction="out", pad=10)
ax2.set_title(r"Projected Potential (VÅ)", **sc_font)
plt.tight_layout()
def plot_color_dpc(self, start_frac=0, size_frac=1, skip=2, imsize=(20, 10)):
"""
Use this to plot the corrected DPC center of mass shifts. If no variables
are passed, the arrows are overlaid on the entire image.
Parameters
----------
start_frac: float, optional
The starting fraction of the image, where you will cut from
to show the overlaid arrows. Default is 0
stop_frac: float, optional
The ending fraction of the image, where you will cut from
to show the overlaid arrows. Default is 1
"""
fontsize = int(np.amax(np.asarray(imsize)))
sc_font = {"weight": "bold", "size": fontsize}
mpl.rc("font", **sc_font)
cc = self.XComC + ((1j) * self.YComC)
cc_color = st.util.cp_image_val(cc)
cutstart = (np.asarray(self.XComC.shape) * start_frac).astype(int)
cut_stop = (np.asarray(self.XComC.shape) * (start_frac + size_frac)).astype(int)
ypos, xpos = np.mgrid[0 : self.YComC.shape[0], 0 : self.XComC.shape[1]]
ypos = ypos
xcut = xpos[cutstart[0] : cut_stop[0], cutstart[1] : cut_stop[1]]
ycut = np.flipud(ypos[cutstart[0] : cut_stop[0], cutstart[1] : cut_stop[1]])
dx = self.XComC[cutstart[0] : cut_stop[0], cutstart[1] : cut_stop[1]]
dy = self.YComC[cutstart[0] : cut_stop[0], cutstart[1] : cut_stop[1]]
cc_cut = cc_color[cutstart[0] : cut_stop[0], cutstart[1] : cut_stop[1]]
overlay = mpl.patches.Rectangle(
cutstart[0:2],
cut_stop[0] - cutstart[0],
cut_stop[1] - cutstart[1],
linewidth=1.5,
edgecolor="w",
facecolor="none",
)
plt.figure(figsize=imsize)
plt.subplot(1, 2, 1)
plt.imshow(cc_color)
scalebar = mpss.ScaleBar(self.calib, "pm")
scalebar.location = "lower right"
scalebar.box_alpha = 0
scalebar.color = "w"
plt.gca().add_artist(scalebar)
plt.axis("off")
at = mploff.AnchoredText(
"Center of Mass Shift",
prop=dict(size=fontsize),
frameon=True,
loc="lower left",
)
at.patch.set_boxstyle("round, pad=0., rounding_size=0.2")
plt.gca().add_artist(at)
plt.gca().add_patch(overlay)
plt.subplot(1, 2, 2)
plt.imshow(cc_cut)
plt.quiver(
xcut[::skip, ::skip] - cutstart[1],
ycut[::skip, ::skip] - cutstart[0],
dx[::skip, ::skip],
dy[::skip, ::skip],
pivot="mid",
color="w",
)
scalebar = mpss.ScaleBar(self.calib, "pm")
scalebar.location = "lower right"
scalebar.box_alpha = 0
scalebar.color = "w"
plt.gca().add_artist(scalebar)
plt.axis("off")
plt.tight_layout()
| [
"stemtool.util.cp_image_val",
"matplotlib.rc",
"numpy.gradient",
"matplotlib.pyplot.imshow",
"numpy.mean",
"numpy.flip",
"numpy.multiply",
"stemtool.dpc.charge_dpc",
"stemtool.sim.wavelength_ang",
"numpy.asarray",
"numpy.max",
"matplotlib.gridspec.GridSpec",
"numpy.linspace",
"numpy.empty"... | [((4868, 4903), 'numpy.sum', 'np.sum', (['self.data_4D'], {'axis': '(-1, -2)'}), '(self.data_4D, axis=(-1, -2))\n', (4874, 4903), True, 'import numpy as np\n'), ((4964, 4990), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': 'imsize'}), '(figsize=imsize)\n', (4974, 4990), True, 'import matplotlib.pyplot as plt\n'), ((4999, 5019), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', '(2)', '(1)'], {}), '(1, 2, 1)\n', (5010, 5019), True, 'import matplotlib.pyplot as plt\n'), ((5028, 5069), 'matplotlib.pyplot.imshow', 'plt.imshow', (['self.data_adf'], {'cmap': '"""inferno"""'}), "(self.data_adf, cmap='inferno')\n", (5038, 5069), True, 'import matplotlib.pyplot as plt\n'), ((5089, 5127), 'matplotlib_scalebar.scalebar.ScaleBar', 'mpss.ScaleBar', (['(self.calib / 1000)', '"""nm"""'], {}), "(self.calib / 1000, 'nm')\n", (5102, 5127), True, 'import matplotlib_scalebar.scalebar as mpss\n'), ((5277, 5292), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (5285, 5292), True, 'import matplotlib.pyplot as plt\n'), ((5526, 5546), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', '(2)', '(2)'], {}), '(1, 2, 2)\n', (5537, 5546), True, 'import matplotlib.pyplot as plt\n'), ((5555, 5595), 'matplotlib.pyplot.imshow', 'plt.imshow', (['self.data_bf'], {'cmap': '"""inferno"""'}), "(self.data_bf, cmap='inferno')\n", (5565, 5595), True, 'import matplotlib.pyplot as plt\n'), ((5615, 5653), 'matplotlib_scalebar.scalebar.ScaleBar', 'mpss.ScaleBar', (['(self.calib / 1000)', '"""nm"""'], {}), "(self.calib / 1000, 'nm')\n", (5628, 5653), True, 'import matplotlib_scalebar.scalebar as mpss\n'), ((5803, 5818), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (5811, 5818), True, 'import matplotlib.pyplot as plt\n'), ((6057, 6075), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (6073, 6075), True, 'import matplotlib.pyplot as plt\n'), ((6663, 6697), 'numpy.mean', 'np.mean', (['self.data_4D'], 
{'axis': '(0, 1)'}), '(self.data_4D, axis=(0, 1))\n', (6670, 6697), True, 'import numpy as np\n'), ((6746, 6777), 'stemtool.util.sobel_circle', 'st.util.sobel_circle', (['self.cbed'], {}), '(self.cbed)\n', (6766, 6777), True, 'import stemtool as st\n'), ((7738, 7750), 'numpy.ravel', 'np.ravel', (['yy'], {}), '(yy)\n', (7746, 7750), True, 'import numpy as np\n'), ((7764, 7776), 'numpy.ravel', 'np.ravel', (['xx'], {}), '(xx)\n', (7772, 7776), True, 'import numpy as np\n'), ((7797, 7846), 'numpy.empty', 'np.empty', (['self.data_4D.shape[0:2]'], {'dtype': 'np.float'}), '(self.data_4D.shape[0:2], dtype=np.float)\n', (7805, 7846), True, 'import numpy as np\n'), ((7867, 7916), 'numpy.empty', 'np.empty', (['self.data_4D.shape[0:2]'], {'dtype': 'np.float'}), '(self.data_4D.shape[0:2], dtype=np.float)\n', (7875, 7916), True, 'import numpy as np\n'), ((8684, 8710), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': 'imsize'}), '(figsize=imsize)\n', (8694, 8710), True, 'import matplotlib.pyplot as plt\n'), ((8724, 8759), 'matplotlib.gridspec.GridSpec', 'mpgs.GridSpec', (['imsize[1]', 'imsize[0]'], {}), '(imsize[1], imsize[0])\n', (8737, 8759), True, 'import matplotlib.gridspec as mpgs\n'), ((8774, 8801), 'matplotlib.pyplot.subplot', 'plt.subplot', (['gs[0:15, 0:15]'], {}), '(gs[0:15, 0:15])\n', (8785, 8801), True, 'import matplotlib.pyplot as plt\n'), ((8816, 8844), 'matplotlib.pyplot.subplot', 'plt.subplot', (['gs[0:15, 15:30]'], {}), '(gs[0:15, 15:30])\n', (8827, 8844), True, 'import matplotlib.pyplot as plt\n'), ((8859, 8884), 'matplotlib.pyplot.subplot', 'plt.subplot', (['gs[15:17, :]'], {}), '(gs[15:17, :])\n', (8870, 8884), True, 'import matplotlib.pyplot as plt\n'), ((8981, 9019), 'matplotlib_scalebar.scalebar.ScaleBar', 'mpss.ScaleBar', (['(self.calib / 1000)', '"""nm"""'], {}), "(self.calib / 1000, 'nm')\n", (8994, 9019), True, 'import matplotlib_scalebar.scalebar as mpss\n'), ((9544, 9582), 'matplotlib_scalebar.scalebar.ScaleBar', 'mpss.ScaleBar', 
(['(self.calib / 1000)', '"""nm"""'], {}), "(self.calib / 1000, 'nm')\n", (9557, 9582), True, 'import matplotlib_scalebar.scalebar as mpss\n'), ((10025, 10061), 'numpy.zeros', 'np.zeros', (['(10, 1000)'], {'dtype': 'np.float'}), '((10, 1000), dtype=np.float)\n', (10033, 10061), True, 'import numpy as np\n'), ((10230, 10253), 'numpy.linspace', 'np.linspace', (['(0)', '(1000)', '(8)'], {}), '(0, 1000, 8)\n', (10241, 10253), True, 'import numpy as np\n'), ((10667, 10685), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (10683, 10685), True, 'import matplotlib.pyplot as plt\n'), ((11263, 11286), 'numpy.zeros', 'np.zeros', (['(4)'], {'dtype': 'bool'}), '(4, dtype=bool)\n', (11271, 11286), True, 'import numpy as np\n'), ((11332, 11366), 'numpy.zeros', 'np.zeros', (['(4)'], {'dtype': 'self.XCom.dtype'}), '(4, dtype=self.XCom.dtype)\n', (11340, 11366), True, 'import numpy as np\n'), ((11384, 11418), 'numpy.zeros', 'np.zeros', (['(4)'], {'dtype': 'self.YCom.dtype'}), '(4, dtype=self.YCom.dtype)\n', (11392, 11418), True, 'import numpy as np\n'), ((12482, 12515), 'stemtool.dpc.cart2pol', 'st.dpc.cart2pol', (['xdpcf', 'self.YCom'], {}), '(xdpcf, self.YCom)\n', (12497, 12515), True, 'import stemtool as st\n'), ((12549, 12611), 'stemtool.dpc.pol2cart', 'st.dpc.pol2cart', (['rho_dpc', '(phi_dpc - self.angle * (np.pi / 180))'], {}), '(rho_dpc, phi_dpc - self.angle * (np.pi / 180))\n', (12564, 12611), True, 'import stemtool as st\n'), ((12865, 12891), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': 'imsize'}), '(figsize=imsize)\n', (12875, 12891), True, 'import matplotlib.pyplot as plt\n'), ((12906, 12941), 'matplotlib.gridspec.GridSpec', 'mpgs.GridSpec', (['imsize[1]', 'imsize[0]'], {}), '(imsize[1], imsize[0])\n', (12919, 12941), True, 'import matplotlib.gridspec as mpgs\n'), ((12956, 12983), 'matplotlib.pyplot.subplot', 'plt.subplot', (['gs[0:15, 0:15]'], {}), '(gs[0:15, 0:15])\n', (12967, 12983), True, 'import matplotlib.pyplot as plt\n'), 
((12998, 13026), 'matplotlib.pyplot.subplot', 'plt.subplot', (['gs[0:15, 15:30]'], {}), '(gs[0:15, 15:30])\n', (13009, 13026), True, 'import matplotlib.pyplot as plt\n'), ((13041, 13066), 'matplotlib.pyplot.subplot', 'plt.subplot', (['gs[15:17, :]'], {}), '(gs[15:17, :])\n', (13052, 13066), True, 'import matplotlib.pyplot as plt\n'), ((13164, 13202), 'matplotlib_scalebar.scalebar.ScaleBar', 'mpss.ScaleBar', (['(self.calib / 1000)', '"""nm"""'], {}), "(self.calib / 1000, 'nm')\n", (13177, 13202), True, 'import matplotlib_scalebar.scalebar as mpss\n'), ((13738, 13776), 'matplotlib_scalebar.scalebar.ScaleBar', 'mpss.ScaleBar', (['(self.calib / 1000)', '"""nm"""'], {}), "(self.calib / 1000, 'nm')\n", (13751, 13776), True, 'import matplotlib_scalebar.scalebar as mpss\n'), ((14229, 14265), 'numpy.zeros', 'np.zeros', (['(10, 1000)'], {'dtype': 'np.float'}), '((10, 1000), dtype=np.float)\n', (14237, 14265), True, 'import numpy as np\n'), ((14434, 14457), 'numpy.linspace', 'np.linspace', (['(0)', '(1000)', '(8)'], {}), '(0, 1000, 8)\n', (14445, 14457), True, 'import numpy as np\n'), ((14871, 14889), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (14887, 14889), True, 'import matplotlib.pyplot as plt\n'), ((15738, 15764), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': 'imsize'}), '(figsize=imsize)\n', (15748, 15764), True, 'import matplotlib.pyplot as plt\n'), ((15879, 15914), 'matplotlib.gridspec.GridSpec', 'mpgs.GridSpec', (['imsize[1]', 'imsize[0]'], {}), '(imsize[1], imsize[0])\n', (15892, 15914), True, 'import matplotlib.gridspec as mpgs\n'), ((15929, 15956), 'matplotlib.pyplot.subplot', 'plt.subplot', (['gs[0:15, 0:15]'], {}), '(gs[0:15, 0:15])\n', (15940, 15956), True, 'import matplotlib.pyplot as plt\n'), ((15971, 15996), 'matplotlib.pyplot.subplot', 'plt.subplot', (['gs[15:17, :]'], {}), '(gs[15:17, :])\n', (15982, 15996), True, 'import matplotlib.pyplot as plt\n'), ((16083, 16121), 'matplotlib_scalebar.scalebar.ScaleBar', 
'mpss.ScaleBar', (['(self.calib / 1000)', '"""nm"""'], {}), "(self.calib / 1000, 'nm')\n", (16096, 16121), True, 'import matplotlib_scalebar.scalebar as mpss\n'), ((16518, 16554), 'numpy.zeros', 'np.zeros', (['(10, 1000)'], {'dtype': 'np.float'}), '((10, 1000), dtype=np.float)\n', (16526, 16554), True, 'import numpy as np\n'), ((16779, 16810), 'numpy.linspace', 'np.linspace', (['(0)', '(1000)', 'no_labels'], {}), '(0, 1000, no_labels)\n', (16790, 16810), True, 'import numpy as np\n'), ((17322, 17340), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (17338, 17340), True, 'import matplotlib.pyplot as plt\n'), ((18035, 18120), 'stemtool.dpc.integrate_dpc', 'st.dpc.integrate_dpc', (['(self.XComC * self.wavelength)', '(self.YComC * self.wavelength)'], {}), '(self.XComC * self.wavelength, self.YComC * self.wavelength\n )\n', (18055, 18120), True, 'import stemtool as st\n'), ((18254, 18280), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': 'imsize'}), '(figsize=imsize)\n', (18264, 18280), True, 'import matplotlib.pyplot as plt\n'), ((18395, 18430), 'matplotlib.gridspec.GridSpec', 'mpgs.GridSpec', (['imsize[1]', 'imsize[0]'], {}), '(imsize[1], imsize[0])\n', (18408, 18430), True, 'import matplotlib.gridspec as mpgs\n'), ((18445, 18472), 'matplotlib.pyplot.subplot', 'plt.subplot', (['gs[0:15, 0:15]'], {}), '(gs[0:15, 0:15])\n', (18456, 18472), True, 'import matplotlib.pyplot as plt\n'), ((18487, 18512), 'matplotlib.pyplot.subplot', 'plt.subplot', (['gs[15:17, :]'], {}), '(gs[15:17, :])\n', (18498, 18512), True, 'import matplotlib.pyplot as plt\n'), ((18615, 18653), 'matplotlib_scalebar.scalebar.ScaleBar', 'mpss.ScaleBar', (['(self.calib / 1000)', '"""nm"""'], {}), "(self.calib / 1000, 'nm')\n", (18628, 18653), True, 'import matplotlib_scalebar.scalebar as mpss\n'), ((19117, 19153), 'numpy.zeros', 'np.zeros', (['(10, 1000)'], {'dtype': 'np.float'}), '((10, 1000), dtype=np.float)\n', (19125, 19153), True, 'import numpy as np\n'), ((19344, 
19375), 'numpy.linspace', 'np.linspace', (['(0)', '(1000)', 'no_labels'], {}), '(0, 1000, no_labels)\n', (19355, 19375), True, 'import numpy as np\n'), ((19776, 19794), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (19792, 19794), True, 'import matplotlib.pyplot as plt\n'), ((20557, 20582), 'matplotlib.rc', 'mpl.rc', (['"""font"""'], {}), "('font', **sc_font)\n", (20563, 20582), True, 'import matplotlib as mpl\n'), ((20648, 20672), 'stemtool.util.cp_image_val', 'st.util.cp_image_val', (['cc'], {}), '(cc)\n', (20668, 20672), True, 'import stemtool as st\n'), ((21026, 21091), 'numpy.flipud', 'np.flipud', (['ypos[cutstart[0]:cut_stop[0], cutstart[1]:cut_stop[1]]'], {}), '(ypos[cutstart[0]:cut_stop[0], cutstart[1]:cut_stop[1]])\n', (21035, 21091), True, 'import numpy as np\n'), ((21351, 21493), 'matplotlib.patches.Rectangle', 'mpl.patches.Rectangle', (['cutstart[0:2]', '(cut_stop[0] - cutstart[0])', '(cut_stop[1] - cutstart[1])'], {'linewidth': '(1.5)', 'edgecolor': '"""w"""', 'facecolor': '"""none"""'}), "(cutstart[0:2], cut_stop[0] - cutstart[0], cut_stop[1] -\n cutstart[1], linewidth=1.5, edgecolor='w', facecolor='none')\n", (21372, 21493), True, 'import matplotlib as mpl\n'), ((21582, 21608), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': 'imsize'}), '(figsize=imsize)\n', (21592, 21608), True, 'import matplotlib.pyplot as plt\n'), ((21617, 21637), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', '(2)', '(1)'], {}), '(1, 2, 1)\n', (21628, 21637), True, 'import matplotlib.pyplot as plt\n'), ((21646, 21666), 'matplotlib.pyplot.imshow', 'plt.imshow', (['cc_color'], {}), '(cc_color)\n', (21656, 21666), True, 'import matplotlib.pyplot as plt\n'), ((21686, 21717), 'matplotlib_scalebar.scalebar.ScaleBar', 'mpss.ScaleBar', (['self.calib', '"""pm"""'], {}), "(self.calib, 'pm')\n", (21699, 21717), True, 'import matplotlib_scalebar.scalebar as mpss\n'), ((21867, 21882), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), 
"('off')\n", (21875, 21882), True, 'import matplotlib.pyplot as plt\n'), ((22202, 22222), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', '(2)', '(2)'], {}), '(1, 2, 2)\n', (22213, 22222), True, 'import matplotlib.pyplot as plt\n'), ((22231, 22249), 'matplotlib.pyplot.imshow', 'plt.imshow', (['cc_cut'], {}), '(cc_cut)\n', (22241, 22249), True, 'import matplotlib.pyplot as plt\n'), ((22258, 22413), 'matplotlib.pyplot.quiver', 'plt.quiver', (['(xcut[::skip, ::skip] - cutstart[1])', '(ycut[::skip, ::skip] - cutstart[0])', 'dx[::skip, ::skip]', 'dy[::skip, ::skip]'], {'pivot': '"""mid"""', 'color': '"""w"""'}), "(xcut[::skip, ::skip] - cutstart[1], ycut[::skip, ::skip] -\n cutstart[0], dx[::skip, ::skip], dy[::skip, ::skip], pivot='mid', color='w'\n )\n", (22268, 22413), True, 'import matplotlib.pyplot as plt\n'), ((22507, 22538), 'matplotlib_scalebar.scalebar.ScaleBar', 'mpss.ScaleBar', (['self.calib', '"""pm"""'], {}), "(self.calib, 'pm')\n", (22520, 22538), True, 'import matplotlib_scalebar.scalebar as mpss\n'), ((22688, 22703), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (22696, 22703), True, 'import matplotlib.pyplot as plt\n'), ((22712, 22730), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (22728, 22730), True, 'import matplotlib.pyplot as plt\n'), ((3889, 3919), 'stemtool.sim.wavelength_ang', 'st.sim.wavelength_ang', (['voltage'], {}), '(voltage)\n', (3910, 3919), True, 'import stemtool as st\n'), ((6884, 6910), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': 'imsize'}), '(figsize=imsize)\n', (6894, 6910), True, 'import matplotlib.pyplot as plt\n'), ((6923, 6960), 'matplotlib.pyplot.imshow', 'plt.imshow', (['self.cbed'], {'cmap': '"""inferno"""'}), "(self.cbed, cmap='inferno')\n", (6933, 6960), True, 'import matplotlib.pyplot as plt\n'), ((6984, 7045), 'matplotlib_scalebar.scalebar.ScaleBar', 'mpss.ScaleBar', (['self.inverse', '"""1/m"""', 'mpss.SI_LENGTH_RECIPROCAL'], {}), "(self.inverse, 
'1/m', mpss.SI_LENGTH_RECIPROCAL)\n", (6997, 7045), True, 'import matplotlib_scalebar.scalebar as mpss\n'), ((7215, 7230), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (7223, 7230), True, 'import matplotlib.pyplot as plt\n'), ((10115, 10141), 'numpy.linspace', 'np.linspace', (['(-vm)', 'vm', '(1000)'], {}), '(-vm, vm, 1000)\n', (10126, 10141), True, 'import numpy as np\n'), ((11649, 11686), 'stemtool.dpc.cart2pol', 'st.dpc.cart2pol', (['self.XCom', 'self.YCom'], {}), '(self.XCom, self.YCom)\n', (11664, 11686), True, 'import stemtool as st\n'), ((11703, 11762), 'scipy.optimize.minimize', 'sio.minimize', (['st.dpc.angle_fun', 'x0'], {'args': '(rho_dpc, phi_dpc)'}), '(st.dpc.angle_fun, x0, args=(rho_dpc, phi_dpc))\n', (11715, 11762), True, 'import scipy.optimize as sio\n'), ((12381, 12401), 'numpy.fliplr', 'np.fliplr', (['self.XCom'], {}), '(self.XCom)\n', (12390, 12401), True, 'import numpy as np\n'), ((12436, 12454), 'numpy.copy', 'np.copy', (['self.XCom'], {}), '(self.XCom)\n', (12443, 12454), True, 'import numpy as np\n'), ((14319, 14345), 'numpy.linspace', 'np.linspace', (['(-vm)', 'vm', '(1000)'], {}), '(-vm, vm, 1000)\n', (14330, 14345), True, 'import numpy as np\n'), ((15709, 15728), 'numpy.abs', 'np.abs', (['self.charge'], {}), '(self.charge)\n', (15715, 15728), True, 'import numpy as np\n'), ((16608, 16668), 'numpy.linspace', 'np.linspace', (['(cm / self.e_charge)', '(-(cm / self.e_charge))', '(1000)'], {}), '(cm / self.e_charge, -(cm / self.e_charge), 1000)\n', (16619, 16668), True, 'import numpy as np\n'), ((19207, 19233), 'numpy.linspace', 'np.linspace', (['(-pm)', 'pm', '(1000)'], {}), '(-pm, pm, 1000)\n', (19218, 19233), True, 'import numpy as np\n'), ((4935, 4953), 'numpy.asarray', 'np.asarray', (['imsize'], {}), '(imsize)\n', (4945, 4953), True, 'import numpy as np\n'), ((5238, 5247), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (5245, 5247), True, 'import matplotlib.pyplot as plt\n'), ((5492, 5501), 
'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (5499, 5501), True, 'import matplotlib.pyplot as plt\n'), ((5764, 5773), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (5771, 5773), True, 'import matplotlib.pyplot as plt\n'), ((6024, 6033), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (6031, 6033), True, 'import matplotlib.pyplot as plt\n'), ((8374, 8392), 'numpy.mean', 'np.mean', (['self.YCom'], {}), '(self.YCom)\n', (8381, 8392), True, 'import numpy as np\n'), ((8429, 8447), 'numpy.mean', 'np.mean', (['self.XCom'], {}), '(self.XCom)\n', (8436, 8447), True, 'import numpy as np\n'), ((10318, 10341), 'numpy.linspace', 'np.linspace', (['(-vm)', 'vm', '(8)'], {}), '(-vm, vm, 8)\n', (10329, 10341), True, 'import numpy as np\n'), ((11547, 11565), 'numpy.flip', 'np.flip', (['self.XCom'], {}), '(self.XCom)\n', (11554, 11565), True, 'import numpy as np\n'), ((12785, 12799), 'numpy.max', 'np.max', (['imsize'], {}), '(imsize)\n', (12791, 12799), True, 'import numpy as np\n'), ((14522, 14545), 'numpy.linspace', 'np.linspace', (['(-vm)', 'vm', '(8)'], {}), '(-vm, vm, 8)\n', (14533, 14545), True, 'import numpy as np\n'), ((15380, 15398), 'numpy.asarray', 'np.asarray', (['imsize'], {}), '(imsize)\n', (15390, 15398), True, 'import numpy as np\n'), ((15794, 15808), 'numpy.max', 'np.max', (['imsize'], {}), '(imsize)\n', (15800, 15808), True, 'import numpy as np\n'), ((16905, 16970), 'numpy.linspace', 'np.linspace', (['(cm / self.e_charge)', '(-(cm / self.e_charge))', 'no_labels'], {}), '(cm / self.e_charge, -(cm / self.e_charge), no_labels)\n', (16916, 16970), True, 'import numpy as np\n'), ((17993, 18011), 'numpy.asarray', 'np.asarray', (['imsize'], {}), '(imsize)\n', (18003, 18011), True, 'import numpy as np\n'), ((18209, 18231), 'numpy.abs', 'np.abs', (['self.potential'], {}), '(self.potential)\n', (18215, 18231), True, 'import numpy as np\n'), ((18310, 18324), 'numpy.max', 'np.max', (['imsize'], {}), '(imsize)\n', (18316, 18324), True, 'import numpy as 
np\n'), ((19440, 19471), 'numpy.linspace', 'np.linspace', (['(-pm)', 'pm', 'no_labels'], {}), '(-pm, pm, no_labels)\n', (19451, 19471), True, 'import numpy as np\n'), ((20473, 20491), 'numpy.asarray', 'np.asarray', (['imsize'], {}), '(imsize)\n', (20483, 20491), True, 'import numpy as np\n'), ((21828, 21837), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (21835, 21837), True, 'import matplotlib.pyplot as plt\n'), ((22131, 22140), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (22138, 22140), True, 'import matplotlib.pyplot as plt\n'), ((22164, 22173), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (22171, 22173), True, 'import matplotlib.pyplot as plt\n'), ((22649, 22658), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (22656, 22658), True, 'import matplotlib.pyplot as plt\n'), ((7172, 7181), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (7179, 7181), True, 'import matplotlib.pyplot as plt\n'), ((8478, 8524), 'numpy.concatenate', 'np.concatenate', (['(self.XCom, self.YCom)'], {'axis': '(1)'}), '((self.XCom, self.YCom), axis=1)\n', (8492, 8524), True, 'import numpy as np\n'), ((8599, 8617), 'numpy.asarray', 'np.asarray', (['imsize'], {}), '(imsize)\n', (8609, 8617), True, 'import numpy as np\n'), ((11907, 11948), 'stemtool.dpc.charge_dpc', 'st.dpc.charge_dpc', (['xdpcf', 'self.YCom', 'sol1'], {}), '(xdpcf, self.YCom, sol1)\n', (11924, 11948), True, 'import stemtool as st\n'), ((12043, 12084), 'stemtool.dpc.charge_dpc', 'st.dpc.charge_dpc', (['xdpcf', 'self.YCom', 'sol2'], {}), '(xdpcf, self.YCom, sol2)\n', (12060, 12084), True, 'import stemtool as st\n'), ((12310, 12327), 'numpy.amin', 'np.amin', (['chg_sums'], {}), '(chg_sums)\n', (12317, 12327), True, 'import numpy as np\n'), ((12670, 12718), 'numpy.concatenate', 'np.concatenate', (['(self.XComC, self.YComC)'], {'axis': '(1)'}), '((self.XComC, self.YComC), axis=1)\n', (12684, 12718), True, 'import numpy as np\n'), ((20693, 20721), 'numpy.asarray', 'np.asarray', 
(['self.XComC.shape'], {}), '(self.XComC.shape)\n', (20703, 20721), True, 'import numpy as np\n'), ((20768, 20796), 'numpy.asarray', 'np.asarray', (['self.XComC.shape'], {}), '(self.XComC.shape)\n', (20778, 20796), True, 'import numpy as np\n'), ((8117, 8132), 'numpy.sum', 'np.sum', (['pattern'], {}), '(pattern)\n', (8123, 8132), True, 'import numpy as np\n'), ((8271, 8286), 'numpy.sum', 'np.sum', (['pattern'], {}), '(pattern)\n', (8277, 8286), True, 'import numpy as np\n'), ((12244, 12261), 'numpy.amin', 'np.amin', (['chg_sums'], {}), '(chg_sums)\n', (12251, 12261), True, 'import numpy as np\n'), ((8089, 8113), 'numpy.multiply', 'np.multiply', (['qq', 'pattern'], {}), '(qq, pattern)\n', (8100, 8113), True, 'import numpy as np\n'), ((8243, 8267), 'numpy.multiply', 'np.multiply', (['pp', 'pattern'], {}), '(pp, pattern)\n', (8254, 8267), True, 'import numpy as np\n'), ((15490, 15516), 'numpy.gradient', 'np.gradient', (['self.e_fieldX'], {}), '(self.e_fieldX)\n', (15501, 15516), True, 'import numpy as np\n'), ((15522, 15548), 'numpy.gradient', 'np.gradient', (['self.e_fieldY'], {}), '(self.e_fieldY)\n', (15533, 15548), True, 'import numpy as np\n')] |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import cv2
import numpy as np
from progress.bar import Bar
import time
import torch
from models.model import create_model, load_model
from utils.image import get_affine_transform
from utils.debugger import Debugger
def py_cpu_nms(dets, thresh):
    """Pure-Python non-maximum suppression baseline.

    Args:
        dets: (N, 5) array of boxes as [x1, y1, x2, y2, score].
        thresh: IoU threshold; any box overlapping an already-kept box
            by more than this fraction is suppressed.

    Returns:
        List of indices into ``dets`` for the surviving boxes, ordered
        by descending score.
    """
    left, top = dets[:, 0], dets[:, 1]
    right, bottom = dets[:, 2], dets[:, 3]
    confidences = dets[:, 4]

    # Area of every box (+1 because coordinates are inclusive pixels).
    box_areas = (right - left + 1) * (bottom - top + 1)

    # Candidate indices sorted by confidence, highest first.
    candidates = confidences.argsort()[::-1]

    kept = []
    while candidates.size > 0:
        best = candidates[0]
        kept.append(best)  # highest-scoring remaining box always survives

        # Intersection rectangle between the winner and every other candidate.
        ix1 = np.maximum(left[best], left[candidates[1:]])
        iy1 = np.maximum(top[best], top[candidates[1:]])
        ix2 = np.minimum(right[best], right[candidates[1:]])
        iy2 = np.minimum(bottom[best], bottom[candidates[1:]])

        # Clamp to zero when the boxes do not overlap at all.
        inter_w = np.maximum(0.0, ix2 - ix1 + 1)
        inter_h = np.maximum(0.0, iy2 - iy1 + 1)
        intersection = inter_w * inter_h

        # IoU = intersection / union.
        iou = intersection / (box_areas[best] + box_areas[candidates[1:]] - intersection)

        # Keep only candidates whose overlap with the winner is small enough.
        # The +1 re-aligns indices because ``iou`` excludes the winner itself.
        survivors = np.where(iou <= thresh)[0]
        candidates = candidates[survivors + 1]
    return kept
class BaseDetector(object):
  """Base class for single-image detectors.

  Handles model creation/loading, input pre-processing and the timed
  end-to-end ``run`` pipeline.  Subclasses implement ``process``,
  ``post_process``, ``merge_outputs``, ``debug`` and ``show_results``.
  """

  def __init__(self, opt):
    # Select the compute device from the first entry of opt.gpus.
    if opt.gpus[0] >= 0:
      opt.device = torch.device('cuda')
    else:
      opt.device = torch.device('cpu')

    print('Creating model...')
    self.model = create_model(opt.arch, opt.heads, opt.head_conv)
    self.model = load_model(self.model, opt.load_model)
    self.model = self.model.to(opt.device)
    self.model.eval()

    # Per-channel normalisation constants, broadcastable over an HxWx3 image.
    self.mean = np.array(opt.mean, dtype=np.float32).reshape(1, 1, 3)
    self.std = np.array(opt.std, dtype=np.float32).reshape(1, 1, 3)
    self.max_per_image = 100
    self.num_classes = opt.num_classes
    self.scales = opt.test_scales
    self.opt = opt
    self.pause = True

  def pre_process(self, image, scale, meta=None):
    """Resize, pad and normalise ``image`` at the given test ``scale``.

    Returns:
      images: 1x3xHxW float tensor (2x3xHxW when ``opt.flip_test`` adds
        the horizontally flipped copy for test-time augmentation).
      meta: dict with the affine centre/scale and the output map size,
        used later to project detections back to image coordinates.
    """
    height, width = image.shape[0:2]
    new_height = int(height * scale)
    new_width = int(width * scale)
    if self.opt.fix_res:
      # Fixed network resolution; preserve aspect via centre/scale transform.
      inp_height, inp_width = self.opt.input_h, self.opt.input_w
      c = np.array([new_width / 2., new_height / 2.], dtype=np.float32)
      s = max(height, width) * 1.0
    else:
      # Bit-OR rounds each side up to the next multiple of (opt.pad + 1);
      # assumes opt.pad is of the form 2^k - 1 -- TODO confirm.
      inp_height = (new_height | self.opt.pad) + 1
      inp_width = (new_width | self.opt.pad) + 1
      c = np.array([new_width // 2, new_height // 2], dtype=np.float32)
      s = np.array([inp_width, inp_height], dtype=np.float32)

    trans_input = get_affine_transform(c, s, 0, [inp_width, inp_height])
    resized_image = cv2.resize(image, (new_width, new_height))
    inp_image = cv2.warpAffine(
      resized_image, trans_input, (inp_width, inp_height),
      flags=cv2.INTER_LINEAR)
    inp_image = ((inp_image / 255. - self.mean) / self.std).astype(np.float32)

    images = inp_image.transpose(2, 0, 1).reshape(1, 3, inp_height, inp_width)
    if self.opt.flip_test:
      # Append the horizontally flipped image along the batch axis.
      images = np.concatenate((images, images[:, :, :, ::-1]), axis=0)
    images = torch.from_numpy(images)
    meta = {'c': c, 's': s,
            'out_height': inp_height // self.opt.down_ratio,
            'out_width': inp_width // self.opt.down_ratio}
    return images, meta

  def process(self, images, return_time=False):
    """Forward pass and decoding; implemented by subclasses."""
    raise NotImplementedError

  def post_process(self, dets, meta, scale=1):
    """Map raw detections back to image coordinates; subclass hook."""
    raise NotImplementedError

  def merge_outputs(self, detections):
    """Merge per-scale detection lists; subclass hook."""
    raise NotImplementedError

  def debug(self, debugger, images, dets, output, scale=1):
    """Visualise intermediate outputs; subclass hook."""
    raise NotImplementedError

  def show_results(self, debugger, image, results):
    """Render the final results; subclass hook."""
    raise NotImplementedError

  def run(self, image_or_path_or_tensor, meta=None):
    """Run the full detection pipeline on one input.

    Args:
      image_or_path_or_tensor: an ndarray image, a path string, or a
        dict of pre-processed batches keyed by scale.
      meta: optional pre-processing metadata, forwarded to pre_process.

    Returns:
      (img, stats, faces_boxes, person_boxes) where ``stats`` holds
      per-stage timings.  The visualisation image and the box lists are
      only populated when ``opt.debug >= 1``; otherwise ``img`` is None
      and both lists are empty.
    """
    load_time, pre_time, net_time, dec_time, post_time = 0, 0, 0, 0, 0
    merge_time, tot_time = 0, 0
    debugger = Debugger(dataset=self.opt.dataset, ipynb=(self.opt.debug==3),
                        theme=self.opt.debugger_theme)
    start_time = time.time()
    pre_processed = False
    if isinstance(image_or_path_or_tensor, np.ndarray):
      image = image_or_path_or_tensor
    elif type(image_or_path_or_tensor) == type (''):
      image = cv2.imread(image_or_path_or_tensor)
    else:
      image = image_or_path_or_tensor['image'][0].numpy()
      pre_processed_images = image_or_path_or_tensor
      pre_processed = True

    loaded_time = time.time()
    load_time += (loaded_time - start_time)

    detections = []
    # NOTE(review): assumes self.scales is non-empty; with no scales,
    # post_process_time below would be unbound.
    for scale in self.scales:
      scale_start_time = time.time()
      if not pre_processed:
        images, meta = self.pre_process(image, scale, meta)
      else:
        images = pre_processed_images['images'][scale][0]
        meta = pre_processed_images['meta'][scale]
        meta = {k: v.numpy()[0] for k, v in meta.items()}
      images = images.to(self.opt.device)
      torch.cuda.synchronize()
      pre_process_time = time.time()
      pre_time += pre_process_time - scale_start_time

      output, dets, forward_time = self.process(images, return_time=True)

      torch.cuda.synchronize()
      net_time += forward_time - pre_process_time
      decode_time = time.time()
      dec_time += decode_time - forward_time

      if self.opt.debug >= 2:
        self.debug(debugger, images, dets, output, scale)

      dets = self.post_process(dets, meta, scale)
      torch.cuda.synchronize()
      post_process_time = time.time()
      post_time += post_process_time - decode_time
      detections.append(dets)

    results = self.merge_outputs(detections)
    torch.cuda.synchronize()
    end_time = time.time()
    merge_time += end_time - post_process_time
    tot_time += end_time - start_time

    # FIX: these were previously only assigned inside the debug branch,
    # raising NameError at the return below whenever opt.debug < 1.
    img_ = None
    faces_boxes = []
    person_boxes = []

    if self.opt.debug >= 1:
      debugger.add_img(image, img_id='multi_pose')

      # ---- NMS over person boxes above the visualisation threshold.
      nms_dets_ = []
      for bbox in results[1]:
        if bbox[4] > self.opt.vis_thresh:
          nms_dets_.append((bbox[0], bbox[1], bbox[2], bbox[3], bbox[4]))
      keep_ = []
      if len(nms_dets_) > 0:
        keep_ = py_cpu_nms(np.array(nms_dets_), thresh=0.35)

      idx = 0
      for bbox in results[1]:
        if bbox[4] > self.opt.vis_thresh:
          idx += 1
          if (idx - 1) not in keep_:
            continue  # suppressed by NMS
          # Draw the detection box and its keypoints.
          debugger.add_coco_bbox(bbox[:4], 0, bbox[4], img_id='multi_pose')
          face_pts = debugger.add_coco_hp(bbox[5:39], img_id='multi_pose')

          if len(face_pts) == 5:
            person_boxes.append([int(bbox[0]), int(bbox[1]),
                                 int(bbox[2]), int(bbox[3]), bbox[4]])
            # Derive a face box from the five facial keypoints, expanded
            # a little sideways and more above/below to cover forehead
            # and chin, clamped to the image bounds.
            x_min = min([face_pts[i][0] for i in range(len(face_pts))])
            y_min = min([face_pts[i][1] for i in range(len(face_pts))])
            x_max = max([face_pts[i][0] for i in range(len(face_pts))])
            y_max = max([face_pts[i][1] for i in range(len(face_pts))])
            edge = abs(x_max - x_min)

            bbox_x1 = int(max(0, (x_min - edge * 0.05)))
            bbox_x2 = int(min(image.shape[1] - 1, (x_max + edge * 0.05)))
            bbox_y1 = int(max(0, (y_min - edge * 0.32)))
            bbox_y2 = int(min(image.shape[0] - 1, (y_max + edge * 0.55)))

            faces_boxes.append([bbox_x1, bbox_y1, bbox_x2, bbox_y2, 1.])

      img_ = debugger.show_all_imgs(pause=self.pause)

    return img_, {'results': results, 'tot': tot_time, 'load': load_time,
                  'pre': pre_time, 'net': net_time, 'dec': dec_time,
                  'post': post_time, 'merge': merge_time}, faces_boxes, person_boxes
| [
"cv2.warpAffine",
"cv2.imread",
"numpy.minimum",
"cv2.resize",
"torch.device",
"numpy.where",
"models.model.create_model",
"torch.from_numpy",
"torch.cuda.synchronize",
"models.model.load_model",
"numpy.array",
"numpy.concatenate",
"utils.image.get_affine_transform",
"numpy.maximum",
"ti... | [((787, 819), 'numpy.maximum', 'np.maximum', (['x1[i]', 'x1[order[1:]]'], {}), '(x1[i], x1[order[1:]])\n', (797, 819), True, 'import numpy as np\n'), ((834, 866), 'numpy.maximum', 'np.maximum', (['y1[i]', 'y1[order[1:]]'], {}), '(y1[i], y1[order[1:]])\n', (844, 866), True, 'import numpy as np\n'), ((881, 913), 'numpy.minimum', 'np.minimum', (['x2[i]', 'x2[order[1:]]'], {}), '(x2[i], x2[order[1:]])\n', (891, 913), True, 'import numpy as np\n'), ((928, 960), 'numpy.minimum', 'np.minimum', (['y2[i]', 'y2[order[1:]]'], {}), '(y2[i], y2[order[1:]])\n', (938, 960), True, 'import numpy as np\n'), ((1000, 1030), 'numpy.maximum', 'np.maximum', (['(0.0)', '(xx2 - xx1 + 1)'], {}), '(0.0, xx2 - xx1 + 1)\n', (1010, 1030), True, 'import numpy as np\n'), ((1043, 1073), 'numpy.maximum', 'np.maximum', (['(0.0)', '(yy2 - yy1 + 1)'], {}), '(0.0, yy2 - yy1 + 1)\n', (1053, 1073), True, 'import numpy as np\n'), ((1562, 1610), 'models.model.create_model', 'create_model', (['opt.arch', 'opt.heads', 'opt.head_conv'], {}), '(opt.arch, opt.heads, opt.head_conv)\n', (1574, 1610), False, 'from models.model import create_model, load_model\n'), ((1628, 1666), 'models.model.load_model', 'load_model', (['self.model', 'opt.load_model'], {}), '(self.model, opt.load_model)\n', (1638, 1666), False, 'from models.model import create_model, load_model\n'), ((2635, 2689), 'utils.image.get_affine_transform', 'get_affine_transform', (['c', 's', '(0)', '[inp_width, inp_height]'], {}), '(c, s, 0, [inp_width, inp_height])\n', (2655, 2689), False, 'from utils.image import get_affine_transform\n'), ((2710, 2752), 'cv2.resize', 'cv2.resize', (['image', '(new_width, new_height)'], {}), '(image, (new_width, new_height))\n', (2720, 2752), False, 'import cv2\n'), ((2769, 2865), 'cv2.warpAffine', 'cv2.warpAffine', (['resized_image', 'trans_input', '(inp_width, inp_height)'], {'flags': 'cv2.INTER_LINEAR'}), '(resized_image, trans_input, (inp_width, inp_height), flags=\n cv2.INTER_LINEAR)\n', (2783, 2865), 
False, 'import cv2\n'), ((3144, 3168), 'torch.from_numpy', 'torch.from_numpy', (['images'], {}), '(images)\n', (3160, 3168), False, 'import torch\n'), ((3913, 4010), 'utils.debugger.Debugger', 'Debugger', ([], {'dataset': 'self.opt.dataset', 'ipynb': '(self.opt.debug == 3)', 'theme': 'self.opt.debugger_theme'}), '(dataset=self.opt.dataset, ipynb=self.opt.debug == 3, theme=self.\n opt.debugger_theme)\n', (3921, 4010), False, 'from utils.debugger import Debugger\n'), ((4047, 4058), 'time.time', 'time.time', ([], {}), '()\n', (4056, 4058), False, 'import time\n'), ((4449, 4460), 'time.time', 'time.time', ([], {}), '()\n', (4458, 4460), False, 'import time\n'), ((5637, 5661), 'torch.cuda.synchronize', 'torch.cuda.synchronize', ([], {}), '()\n', (5659, 5661), False, 'import torch\n'), ((5677, 5688), 'time.time', 'time.time', ([], {}), '()\n', (5686, 5688), False, 'import time\n'), ((1230, 1253), 'numpy.where', 'np.where', (['(ovr <= thresh)'], {}), '(ovr <= thresh)\n', (1238, 1253), True, 'import numpy as np\n'), ((1443, 1463), 'torch.device', 'torch.device', (['"""cuda"""'], {}), "('cuda')\n", (1455, 1463), False, 'import torch\n'), ((1493, 1512), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (1505, 1512), False, 'import torch\n'), ((2275, 2338), 'numpy.array', 'np.array', (['[new_width / 2.0, new_height / 2.0]'], {'dtype': 'np.float32'}), '([new_width / 2.0, new_height / 2.0], dtype=np.float32)\n', (2283, 2338), True, 'import numpy as np\n'), ((2492, 2553), 'numpy.array', 'np.array', (['[new_width // 2, new_height // 2]'], {'dtype': 'np.float32'}), '([new_width // 2, new_height // 2], dtype=np.float32)\n', (2500, 2553), True, 'import numpy as np\n'), ((2564, 2615), 'numpy.array', 'np.array', (['[inp_width, inp_height]'], {'dtype': 'np.float32'}), '([inp_width, inp_height], dtype=np.float32)\n', (2572, 2615), True, 'import numpy as np\n'), ((3075, 3130), 'numpy.concatenate', 'np.concatenate', (['(images, images[:, :, :, ::-1])'], {'axis': '(0)'}), 
'((images, images[:, :, :, ::-1]), axis=0)\n', (3089, 3130), True, 'import numpy as np\n'), ((4581, 4592), 'time.time', 'time.time', ([], {}), '()\n', (4590, 4592), False, 'import time\n'), ((4946, 4970), 'torch.cuda.synchronize', 'torch.cuda.synchronize', ([], {}), '()\n', (4968, 4970), False, 'import torch\n'), ((4996, 5007), 'time.time', 'time.time', ([], {}), '()\n', (5005, 5007), False, 'import time\n'), ((5144, 5168), 'torch.cuda.synchronize', 'torch.cuda.synchronize', ([], {}), '()\n', (5166, 5168), False, 'import torch\n'), ((5239, 5250), 'time.time', 'time.time', ([], {}), '()\n', (5248, 5250), False, 'import time\n'), ((5442, 5466), 'torch.cuda.synchronize', 'torch.cuda.synchronize', ([], {}), '()\n', (5464, 5466), False, 'import torch\n'), ((5493, 5504), 'time.time', 'time.time', ([], {}), '()\n', (5502, 5504), False, 'import time\n'), ((1749, 1785), 'numpy.array', 'np.array', (['opt.mean'], {'dtype': 'np.float32'}), '(opt.mean, dtype=np.float32)\n', (1757, 1785), True, 'import numpy as np\n'), ((1818, 1853), 'numpy.array', 'np.array', (['opt.std'], {'dtype': 'np.float32'}), '(opt.std, dtype=np.float32)\n', (1826, 1853), True, 'import numpy as np\n'), ((4246, 4281), 'cv2.imread', 'cv2.imread', (['image_or_path_or_tensor'], {}), '(image_or_path_or_tensor)\n', (4256, 4281), False, 'import cv2\n'), ((6268, 6287), 'numpy.array', 'np.array', (['nms_dets_'], {}), '(nms_dets_)\n', (6276, 6287), True, 'import numpy as np\n')] |
import os
import unittest
from itertools import chain
import casex
import numpy as np
import scipy.stats as ss
from seedpod_ground_risk.core.plot_server import PlotServer
from seedpod_ground_risk.core.utils import make_bounds_polygon, remove_raster_nans
from seedpod_ground_risk.layers.strike_risk_layer import wrap_pipeline_cuda, wrap_all_pipeline
from seedpod_ground_risk.layers.temporal_population_estimate_layer import TemporalPopulationEstimateLayer
from seedpod_ground_risk.path_analysis.descent_models.ballistic_model import BallisticModel
from seedpod_ground_risk.path_analysis.descent_models.glide_model import GlideDescentModel
from seedpod_ground_risk.path_analysis.harm_models.fatality_model import FatalityModel
from seedpod_ground_risk.path_analysis.harm_models.strike_model import StrikeModel
from seedpod_ground_risk.path_analysis.utils import velocity_to_kinetic_energy, bearing_to_angle
def offset_window_row(arr, shape, offset):
    """Lazily yield sliding windows of ``arr`` of size ``shape``.

    For every cell (j, i) of a ``shape``-sized grid (row-major order),
    yields the window of ``arr`` whose top edge starts at
    ``offset[0] - j`` and whose left edge starts at ``offset[1] - i``.
    Windows are produced one at a time so the full stack is never
    materialised in (contiguous) memory.
    """
    rows, cols = shape
    anchor_y, anchor_x = offset
    for row_shift in range(rows):
        top = anchor_y - row_shift
        bottom = top + rows
        for col_shift in range(cols):
            left = anchor_x - col_shift
            yield arr[top:bottom, left:left + cols]
class FullRiskMapTestCase(unittest.TestCase):
###
# This can take upwards of 10mins to run
###
def setUp(self) -> None:
super().setUp()
self.hour = 17
self.serialise = False
self.test_bound_coords = [-1.5, 50.87, -1.3, 51]
# self.test_bound_coords = [-1.55, 50.745, -1.3, 51]
self.resolution = 30
self.test_bounds = make_bounds_polygon((self.test_bound_coords[0], self.test_bound_coords[2]),
(self.test_bound_coords[1], self.test_bound_coords[3]))
self._setup_aircraft()
os.chdir(
os.sep.join((
os.path.dirname(os.path.realpath(__file__)),
'..', '..'))
)
ps = PlotServer()
ps.set_time(self.hour)
self.raster_shape = ps._get_raster_dimensions(self.test_bounds, self.resolution)
ps.data_layers = [TemporalPopulationEstimateLayer('tpe')]
[layer.preload_data() for layer in chain(ps.data_layers, ps.annotation_layers)]
ps.generate_layers(self.test_bounds, self.raster_shape)
self.raster_grid = np.flipud(np.sum(
[remove_raster_nans(res[1]) for res in ps._generated_data_layers.values() if
res[1] is not None],
axis=0))
self.raster_shape = self.raster_grid.shape
del ps
# self.path_coords = list(gpd.read_file('path.geojson').iloc[0].geometry.coords)
def test_full_risk_map(self):
bm = BallisticModel(self.aircraft)
gm = GlideDescentModel(self.aircraft)
fm = FatalityModel(0.3, 1e6, 34)
ac_mass = self.aircraft.mass
x, y = np.mgrid[0:self.raster_shape[0], 0:self.raster_shape[1]]
eval_grid = np.vstack((x.ravel(), y.ravel())).T
samples = 5000
# Conjure up our distributions for various things
alt = ss.norm(self.alt, 5).rvs(samples)
vel = ss.norm(self.vel, 2.5).rvs(samples)
wind_vels = ss.norm(self.wind_vel, 1).rvs(samples)
wind_dirs = bearing_to_angle(ss.norm(self.wind_dir, np.deg2rad(5)).rvs(samples))
wind_vel_y = wind_vels * np.sin(wind_dirs)
wind_vel_x = wind_vels * np.cos(wind_dirs)
(bm_mean, bm_cov), v_ib, a_ib = bm.transform(alt, vel,
ss.uniform(0, 360).rvs(samples),
wind_vel_y, wind_vel_x,
0, 0)
(gm_mean, gm_cov), v_ig, a_ig = gm.transform(alt, vel,
ss.uniform(0, 360).rvs(samples),
wind_vel_y, wind_vel_x,
0, 0)
sm_b = StrikeModel(self.raster_grid, self.resolution ** 2, self.aircraft.width, a_ib)
sm_g = StrikeModel(self.raster_grid, self.resolution ** 2, self.aircraft.width, a_ig)
premult = sm_b.premult_mat + sm_g.premult_mat
offset_y, offset_x = self.raster_shape[0] // 2, self.raster_shape[1] // 2
bm_pdf = ss.multivariate_normal(bm_mean + np.array([offset_y, offset_x]), bm_cov).pdf(eval_grid)
gm_pdf = ss.multivariate_normal(gm_mean + np.array([offset_y, offset_x]), gm_cov).pdf(eval_grid)
pdf = bm_pdf + gm_pdf
pdf = pdf.reshape(self.raster_shape)
padded_pdf = np.zeros(((self.raster_shape[0] * 3) + 1, (self.raster_shape[1] * 3) + 1))
padded_pdf[self.raster_shape[0]:self.raster_shape[0] * 2, self.raster_shape[1]:self.raster_shape[1] * 2] = pdf
padded_pdf = padded_pdf * self.event_prob
padded_centre_y, padded_centre_x = self.raster_shape[0] + offset_y, self.raster_shape[1] + offset_x
impact_ke_b = velocity_to_kinetic_energy(ac_mass, v_ib)
impact_ke_g = velocity_to_kinetic_energy(ac_mass, v_ig)
# Check if CUDA toolkit available through env var otherwise fallback to CPU bound numba version
if not os.getenv('CUDA_HOME'):
print('CUDA NOT found, falling back to Numba JITed CPU code')
# Leaving parallelisation to Numba seems to be faster
res = wrap_all_pipeline(self.raster_shape, padded_pdf, padded_centre_y, padded_centre_x, premult)
else:
res = np.zeros(self.raster_shape, dtype=float)
threads_per_block = (32, 32) # 1024 max per block
blocks_per_grid = (
int(np.ceil(self.raster_shape[1] / threads_per_block[1])),
int(np.ceil(self.raster_shape[0] / threads_per_block[0]))
)
print('CUDA found, using config <<<' + str(blocks_per_grid) + ',' + str(threads_per_block) + '>>>')
wrap_pipeline_cuda[blocks_per_grid, threads_per_block](self.raster_shape, padded_pdf, padded_centre_y,
padded_centre_x, premult, res)
# Alternative joblib parallelisation
# res = jl.Parallel(n_jobs=-1, prefer='threads', verbose=1)(
# jl.delayed(wrap_row_pipeline)(c, self.raster_shape, padded_pdf, (padded_centre_y, padded_centre_x), sm)
# for c in range(self.raster_shape[0]))
strike_pdf = res
# snapped_points = [snap_coords_to_grid(self.raster_indices, *coords) for coords in self.path_coords]
import matplotlib.pyplot as mpl
import matplotlib.colors as mc
fig1, ax1 = mpl.subplots(1, 1)
m1 = ax1.matshow(self.raster_grid, norm=mc.LogNorm())
fig1.colorbar(m1, label='Population Density [people/km$^2$]')
ax1.set_title(f'Population Density at t={self.hour}')
ax1.set_xticks([0, self.raster_shape[1] - 1])
ax1.set_yticks([0, self.raster_shape[0] - 1])
ax1.set_xticklabels([self.test_bound_coords[0], self.test_bound_coords[2]], )
ax1.set_yticklabels([self.test_bound_coords[3], self.test_bound_coords[1]], )
fig1.tight_layout()
fig1.savefig(f'figs/tpe_t{self.hour}.png', bbox_inches='tight')
fig1.show()
if self.serialise:
np.savetxt(f'strike_map_t{self.hour}', strike_pdf, delimiter=',')
fig2, ax2 = mpl.subplots(1, 1)
m2 = ax2.matshow(strike_pdf)
fig2.colorbar(m2, label='Strike Risk [h$^{-1}$]')
ax2.set_title(f'Strike Risk Map at t={self.hour}')
ax2.set_xticks([0, self.raster_shape[1] - 1])
ax2.set_yticks([0, self.raster_shape[0] - 1])
ax2.set_xticklabels([self.test_bound_coords[0], self.test_bound_coords[2]], )
ax2.set_yticklabels([self.test_bound_coords[3], self.test_bound_coords[1]], )
fig2.tight_layout()
fig2.savefig(f'figs/risk_strike_t{self.hour}.png', bbox_inches='tight')
fig2.show()
fatality_pdf = fm.transform(strike_pdf, impact_ke=impact_ke_g) + fm.transform(strike_pdf, impact_ke=impact_ke_b)
if self.serialise:
np.savetxt(f'fatality_map_t{self.hour}', fatality_pdf, delimiter=',')
fig3, ax3 = mpl.subplots(1, 1)
m3 = ax3.matshow(fatality_pdf)
fig3.colorbar(m3, label='Fatality Risk [h$^{-1}$]')
ax3.set_title(f'Fatality Risk Map at t={self.hour}')
ax3.set_xticks([0, self.raster_shape[1] - 1])
ax3.set_yticks([0, self.raster_shape[0] - 1])
ax3.set_xticklabels([self.test_bound_coords[0], self.test_bound_coords[2]], )
ax3.set_yticklabels([self.test_bound_coords[3], self.test_bound_coords[1]], )
fig3.tight_layout()
fig3.savefig(f'figs/risk_fatality_t{self.hour}.png', bbox_inches='tight')
fig3.show()
import rasterio
from rasterio import transform
trans = transform.from_bounds(*self.test_bound_coords, *self.raster_shape)
rds = rasterio.open(f'tiffs/fatality_risk_h{self.hour}.tif', 'w', driver='GTiff', count=1,
dtype=rasterio.float64,
crs='EPSG:4326', transform=trans, compress='lzw',
width=self.raster_shape[0], height=self.raster_shape[1])
rds.write(fatality_pdf, 1)
rds.close()
def _setup_aircraft(self, ac_width: float = 2.22, ac_length: float = 1.63,
ac_mass: float = 17, ac_glide_ratio: float = 11, ac_glide_speed: float = 21,
ac_glide_drag_coeff: float = 0.1, ac_ballistic_drag_coeff: float = 0.8,
ac_ballistic_frontal_area: float = 0.5, ac_failure_prob: float = 5e-3, alt: float = 100,
vel: float = 31,
wind_vel: float = 5, wind_dir: float = 45):
self.aircraft = casex.AircraftSpecs(casex.enums.AircraftType.FIXED_WING, ac_width, ac_length, ac_mass)
self.aircraft.set_ballistic_drag_coefficient(ac_ballistic_drag_coeff)
self.aircraft.set_ballistic_frontal_area(ac_ballistic_frontal_area)
self.aircraft.set_glide_speed_ratio(ac_glide_speed, ac_glide_ratio)
self.aircraft.set_glide_drag_coefficient(ac_glide_drag_coeff)
self.alt = alt
self.vel = vel
self.wind_vel = wind_vel
self.wind_dir = np.deg2rad((wind_dir - 90) % 360)
self.event_prob = ac_failure_prob
def plot_path_risk(hour):
import matplotlib.pyplot as mpl
import shapely.geometry as sg
import numpy as np
import geopandas as gpd
# import os
# os.chdir(
# os.sep.join((
# os.path.dirname(os.path.realpath(__file__)),
# '..', '..'))
# )
path = np.genfromtxt('fr_map_path.csv', delimiter=',').astype(int)
raster_indices = dict(Longitude=np.genfromtxt('raster_indices_lon.csv', delimiter=','),
Latitude=np.genfromtxt('raster_indices_lat.csv', delimiter=','))
lat = raster_indices['Latitude'][path[:, 1]]
lon = raster_indices['Longitude'][path[:, 0]]
ls = sg.LineString([sg.Point(lon, lat) for lon, lat in zip(lon, lat)])
df = gpd.GeoDataFrame(geometry=[ls]).set_crs('EPSG:4326')
fatality_pdf = np.genfromtxt(f'fatality_map_t{hour}', delimiter=',')
strike_pdf = np.genfromtxt(f'strike_map_t{hour}', delimiter=',')
fig3, ax3 = mpl.subplots(1, 1)
ax3.tick_params(left=False, right=False,
bottom=False, top=False,
labelleft=False, labelbottom=False)
m3 = ax3.matshow(fatality_pdf)
ax3.plot(path[:, 0], path[:, 1], 'r')
fig3.colorbar(m3, label='Fatality Risk [h$^{-1}$]')
ax3.set_title(f'Fatality Risk Map at t={hour}')
fig3.show()
pathwise_strike_maxs = strike_pdf[path[:, 1], path[:, 0]]
pathwise_fatality_maxs = fatality_pdf[path[:, 1], path[:, 0]]
fig, ax = mpl.subplots(1, 1)
path_dist = df.to_crs('EPSG:27700').iloc[0].geometry.length
ax.set_yscale('log')
x = np.linspace(0, path_dist, len(pathwise_fatality_maxs))
ax.axhline(y=np.mean(pathwise_fatality_maxs), c='y',
label='Fatality Mean') # This seems to be as stable as fsum
ax.plot(x, pathwise_fatality_maxs, c='r', label='Fatality Risk')
ax.axhline(y=np.mean(pathwise_strike_maxs), c='g',
label='Strike Mean') # This seems to be as stable as fsum
ax.plot(x, pathwise_strike_maxs, c='b', label='Strike Risk')
ax.legend()
ax.set_ylabel('Risk [$h^{-1}$]')
ax.set_xlabel('Path Distance [m]')
ax.set_title(f'Casualty Risk along path at t={hour}')
fig.show()
if __name__ == '__main__':
unittest.main()
| [
"itertools.chain",
"seedpod_ground_risk.path_analysis.descent_models.ballistic_model.BallisticModel",
"seedpod_ground_risk.path_analysis.harm_models.strike_model.StrikeModel",
"rasterio.transform.from_bounds",
"shapely.geometry.Point",
"seedpod_ground_risk.path_analysis.descent_models.glide_model.GlideDes... | [((11736, 11789), 'numpy.genfromtxt', 'np.genfromtxt', (['f"""fatality_map_t{hour}"""'], {'delimiter': '""","""'}), "(f'fatality_map_t{hour}', delimiter=',')\n", (11749, 11789), True, 'import numpy as np\n'), ((11808, 11859), 'numpy.genfromtxt', 'np.genfromtxt', (['f"""strike_map_t{hour}"""'], {'delimiter': '""","""'}), "(f'strike_map_t{hour}', delimiter=',')\n", (11821, 11859), True, 'import numpy as np\n'), ((11877, 11895), 'matplotlib.pyplot.subplots', 'mpl.subplots', (['(1)', '(1)'], {}), '(1, 1)\n', (11889, 11895), True, 'import matplotlib.pyplot as mpl\n'), ((12396, 12414), 'matplotlib.pyplot.subplots', 'mpl.subplots', (['(1)', '(1)'], {}), '(1, 1)\n', (12408, 12414), True, 'import matplotlib.pyplot as mpl\n'), ((13179, 13194), 'unittest.main', 'unittest.main', ([], {}), '()\n', (13192, 13194), False, 'import unittest\n'), ((1878, 2013), 'seedpod_ground_risk.core.utils.make_bounds_polygon', 'make_bounds_polygon', (['(self.test_bound_coords[0], self.test_bound_coords[2])', '(self.test_bound_coords[1], self.test_bound_coords[3])'], {}), '((self.test_bound_coords[0], self.test_bound_coords[2]),\n (self.test_bound_coords[1], self.test_bound_coords[3]))\n', (1897, 2013), False, 'from seedpod_ground_risk.core.utils import make_bounds_polygon, remove_raster_nans\n'), ((2259, 2271), 'seedpod_ground_risk.core.plot_server.PlotServer', 'PlotServer', ([], {}), '()\n', (2269, 2271), False, 'from seedpod_ground_risk.core.plot_server import PlotServer\n'), ((3023, 3052), 'seedpod_ground_risk.path_analysis.descent_models.ballistic_model.BallisticModel', 'BallisticModel', (['self.aircraft'], {}), '(self.aircraft)\n', (3037, 3052), False, 'from seedpod_ground_risk.path_analysis.descent_models.ballistic_model import BallisticModel\n'), ((3067, 3099), 'seedpod_ground_risk.path_analysis.descent_models.glide_model.GlideDescentModel', 'GlideDescentModel', (['self.aircraft'], {}), '(self.aircraft)\n', (3084, 
3099), False, 'from seedpod_ground_risk.path_analysis.descent_models.glide_model import GlideDescentModel\n'), ((3114, 3147), 'seedpod_ground_risk.path_analysis.harm_models.fatality_model.FatalityModel', 'FatalityModel', (['(0.3)', '(1000000.0)', '(34)'], {}), '(0.3, 1000000.0, 34)\n', (3127, 3147), False, 'from seedpod_ground_risk.path_analysis.harm_models.fatality_model import FatalityModel\n'), ((4347, 4425), 'seedpod_ground_risk.path_analysis.harm_models.strike_model.StrikeModel', 'StrikeModel', (['self.raster_grid', '(self.resolution ** 2)', 'self.aircraft.width', 'a_ib'], {}), '(self.raster_grid, self.resolution ** 2, self.aircraft.width, a_ib)\n', (4358, 4425), False, 'from seedpod_ground_risk.path_analysis.harm_models.strike_model import StrikeModel\n'), ((4442, 4520), 'seedpod_ground_risk.path_analysis.harm_models.strike_model.StrikeModel', 'StrikeModel', (['self.raster_grid', '(self.resolution ** 2)', 'self.aircraft.width', 'a_ig'], {}), '(self.raster_grid, self.resolution ** 2, self.aircraft.width, a_ig)\n', (4453, 4520), False, 'from seedpod_ground_risk.path_analysis.harm_models.strike_model import StrikeModel\n'), ((4974, 5044), 'numpy.zeros', 'np.zeros', (['(self.raster_shape[0] * 3 + 1, self.raster_shape[1] * 3 + 1)'], {}), '((self.raster_shape[0] * 3 + 1, self.raster_shape[1] * 3 + 1))\n', (4982, 5044), True, 'import numpy as np\n'), ((5352, 5393), 'seedpod_ground_risk.path_analysis.utils.velocity_to_kinetic_energy', 'velocity_to_kinetic_energy', (['ac_mass', 'v_ib'], {}), '(ac_mass, v_ib)\n', (5378, 5393), False, 'from seedpod_ground_risk.path_analysis.utils import velocity_to_kinetic_energy, bearing_to_angle\n'), ((5417, 5458), 'seedpod_ground_risk.path_analysis.utils.velocity_to_kinetic_energy', 'velocity_to_kinetic_energy', (['ac_mass', 'v_ig'], {}), '(ac_mass, v_ig)\n', (5443, 5458), False, 'from seedpod_ground_risk.path_analysis.utils import velocity_to_kinetic_energy, bearing_to_angle\n'), ((7062, 7080), 'matplotlib.pyplot.subplots', 
'mpl.subplots', (['(1)', '(1)'], {}), '(1, 1)\n', (7074, 7080), True, 'import matplotlib.pyplot as mpl\n'), ((7817, 7835), 'matplotlib.pyplot.subplots', 'mpl.subplots', (['(1)', '(1)'], {}), '(1, 1)\n', (7829, 7835), True, 'import matplotlib.pyplot as mpl\n'), ((8666, 8684), 'matplotlib.pyplot.subplots', 'mpl.subplots', (['(1)', '(1)'], {}), '(1, 1)\n', (8678, 8684), True, 'import matplotlib.pyplot as mpl\n'), ((9349, 9415), 'rasterio.transform.from_bounds', 'transform.from_bounds', (['*self.test_bound_coords', '*self.raster_shape'], {}), '(*self.test_bound_coords, *self.raster_shape)\n', (9370, 9415), False, 'from rasterio import transform\n'), ((9431, 9654), 'rasterio.open', 'rasterio.open', (['f"""tiffs/fatality_risk_h{self.hour}.tif"""', '"""w"""'], {'driver': '"""GTiff"""', 'count': '(1)', 'dtype': 'rasterio.float64', 'crs': '"""EPSG:4326"""', 'transform': 'trans', 'compress': '"""lzw"""', 'width': 'self.raster_shape[0]', 'height': 'self.raster_shape[1]'}), "(f'tiffs/fatality_risk_h{self.hour}.tif', 'w', driver='GTiff',\n count=1, dtype=rasterio.float64, crs='EPSG:4326', transform=trans,\n compress='lzw', width=self.raster_shape[0], height=self.raster_shape[1])\n", (9444, 9654), False, 'import rasterio\n'), ((10322, 10412), 'casex.AircraftSpecs', 'casex.AircraftSpecs', (['casex.enums.AircraftType.FIXED_WING', 'ac_width', 'ac_length', 'ac_mass'], {}), '(casex.enums.AircraftType.FIXED_WING, ac_width,\n ac_length, ac_mass)\n', (10341, 10412), False, 'import casex\n'), ((10822, 10855), 'numpy.deg2rad', 'np.deg2rad', (['((wind_dir - 90) % 360)'], {}), '((wind_dir - 90) % 360)\n', (10832, 10855), True, 'import numpy as np\n'), ((2421, 2459), 'seedpod_ground_risk.layers.temporal_population_estimate_layer.TemporalPopulationEstimateLayer', 'TemporalPopulationEstimateLayer', (['"""tpe"""'], {}), "('tpe')\n", (2452, 2459), False, 'from seedpod_ground_risk.layers.temporal_population_estimate_layer import TemporalPopulationEstimateLayer\n'), ((3681, 3698), 'numpy.sin', 
'np.sin', (['wind_dirs'], {}), '(wind_dirs)\n', (3687, 3698), True, 'import numpy as np\n'), ((3733, 3750), 'numpy.cos', 'np.cos', (['wind_dirs'], {}), '(wind_dirs)\n', (3739, 3750), True, 'import numpy as np\n'), ((5582, 5604), 'os.getenv', 'os.getenv', (['"""CUDA_HOME"""'], {}), "('CUDA_HOME')\n", (5591, 5604), False, 'import os\n'), ((5767, 5862), 'seedpod_ground_risk.layers.strike_risk_layer.wrap_all_pipeline', 'wrap_all_pipeline', (['self.raster_shape', 'padded_pdf', 'padded_centre_y', 'padded_centre_x', 'premult'], {}), '(self.raster_shape, padded_pdf, padded_centre_y,\n padded_centre_x, premult)\n', (5784, 5862), False, 'from seedpod_ground_risk.layers.strike_risk_layer import wrap_pipeline_cuda, wrap_all_pipeline\n'), ((5897, 5937), 'numpy.zeros', 'np.zeros', (['self.raster_shape'], {'dtype': 'float'}), '(self.raster_shape, dtype=float)\n', (5905, 5937), True, 'import numpy as np\n'), ((7728, 7793), 'numpy.savetxt', 'np.savetxt', (['f"""strike_map_t{self.hour}"""', 'strike_pdf'], {'delimiter': '""","""'}), "(f'strike_map_t{self.hour}', strike_pdf, delimiter=',')\n", (7738, 7793), True, 'import numpy as np\n'), ((8573, 8642), 'numpy.savetxt', 'np.savetxt', (['f"""fatality_map_t{self.hour}"""', 'fatality_pdf'], {'delimiter': '""","""'}), "(f'fatality_map_t{self.hour}', fatality_pdf, delimiter=',')\n", (8583, 8642), True, 'import numpy as np\n'), ((11229, 11276), 'numpy.genfromtxt', 'np.genfromtxt', (['"""fr_map_path.csv"""'], {'delimiter': '""","""'}), "('fr_map_path.csv', delimiter=',')\n", (11242, 11276), True, 'import numpy as np\n'), ((11326, 11380), 'numpy.genfromtxt', 'np.genfromtxt', (['"""raster_indices_lon.csv"""'], {'delimiter': '""","""'}), "('raster_indices_lon.csv', delimiter=',')\n", (11339, 11380), True, 'import numpy as np\n'), ((11418, 11472), 'numpy.genfromtxt', 'np.genfromtxt', (['"""raster_indices_lat.csv"""'], {'delimiter': '""","""'}), "('raster_indices_lat.csv', delimiter=',')\n", (11431, 11472), True, 'import numpy as np\n'), ((11600, 
11618), 'shapely.geometry.Point', 'sg.Point', (['lon', 'lat'], {}), '(lon, lat)\n', (11608, 11618), True, 'import shapely.geometry as sg\n'), ((11661, 11692), 'geopandas.GeoDataFrame', 'gpd.GeoDataFrame', ([], {'geometry': '[ls]'}), '(geometry=[ls])\n', (11677, 11692), True, 'import geopandas as gpd\n'), ((12588, 12619), 'numpy.mean', 'np.mean', (['pathwise_fatality_maxs'], {}), '(pathwise_fatality_maxs)\n', (12595, 12619), True, 'import numpy as np\n'), ((12793, 12822), 'numpy.mean', 'np.mean', (['pathwise_strike_maxs'], {}), '(pathwise_strike_maxs)\n', (12800, 12822), True, 'import numpy as np\n'), ((2507, 2550), 'itertools.chain', 'chain', (['ps.data_layers', 'ps.annotation_layers'], {}), '(ps.data_layers, ps.annotation_layers)\n', (2512, 2550), False, 'from itertools import chain\n'), ((3412, 3432), 'scipy.stats.norm', 'ss.norm', (['self.alt', '(5)'], {}), '(self.alt, 5)\n', (3419, 3432), True, 'import scipy.stats as ss\n'), ((3461, 3483), 'scipy.stats.norm', 'ss.norm', (['self.vel', '(2.5)'], {}), '(self.vel, 2.5)\n', (3468, 3483), True, 'import scipy.stats as ss\n'), ((3518, 3543), 'scipy.stats.norm', 'ss.norm', (['self.wind_vel', '(1)'], {}), '(self.wind_vel, 1)\n', (3525, 3543), True, 'import scipy.stats as ss\n'), ((7130, 7142), 'matplotlib.colors.LogNorm', 'mc.LogNorm', ([], {}), '()\n', (7140, 7142), True, 'import matplotlib.colors as mc\n'), ((2677, 2703), 'seedpod_ground_risk.core.utils.remove_raster_nans', 'remove_raster_nans', (['res[1]'], {}), '(res[1])\n', (2695, 2703), False, 'from seedpod_ground_risk.core.utils import make_bounds_polygon, remove_raster_nans\n'), ((3871, 3889), 'scipy.stats.uniform', 'ss.uniform', (['(0)', '(360)'], {}), '(0, 360)\n', (3881, 3889), True, 'import scipy.stats as ss\n'), ((4160, 4178), 'scipy.stats.uniform', 'ss.uniform', (['(0)', '(360)'], {}), '(0, 360)\n', (4170, 4178), True, 'import scipy.stats as ss\n'), ((6056, 6108), 'numpy.ceil', 'np.ceil', (['(self.raster_shape[1] / threads_per_block[1])'], {}), 
'(self.raster_shape[1] / threads_per_block[1])\n', (6063, 6108), True, 'import numpy as np\n'), ((6132, 6184), 'numpy.ceil', 'np.ceil', (['(self.raster_shape[0] / threads_per_block[0])'], {}), '(self.raster_shape[0] / threads_per_block[0])\n', (6139, 6184), True, 'import numpy as np\n'), ((2173, 2199), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (2189, 2199), False, 'import os\n'), ((3618, 3631), 'numpy.deg2rad', 'np.deg2rad', (['(5)'], {}), '(5)\n', (3628, 3631), True, 'import numpy as np\n'), ((4712, 4742), 'numpy.array', 'np.array', (['[offset_y, offset_x]'], {}), '([offset_y, offset_x])\n', (4720, 4742), True, 'import numpy as np\n'), ((4818, 4848), 'numpy.array', 'np.array', (['[offset_y, offset_x]'], {}), '([offset_y, offset_x])\n', (4826, 4848), True, 'import numpy as np\n')] |
import matplotlib
matplotlib.use('Agg')
import os, sys
import yaml
from argparse import ArgumentParser
from tqdm import tqdm
import imageio
import numpy as np
from skimage.transform import resize
from skimage import img_as_ubyte
import torch
# self
_curr_path = os.path.abspath(__file__) # /home/..../face
_cur_dir = os.path.dirname(_curr_path) # ./
_tf_dir = os.path.dirname(_cur_dir) # ./
print(_tf_dir)
sys.path.append(_tf_dir) # /home/..../pytorch3d
_dl_dir = os.path.dirname(_tf_dir) # ./
_deep_learning_dir = os.path.dirname(_dl_dir) # ../
print(_deep_learning_dir)
sys.path.append(_deep_learning_dir) # /home/..../pytorch3d
from first_order_model.sync_batchnorm import DataParallelWithCallback
from first_order_model.modules.generator import OcclusionAwareGenerator
from first_order_model.modules.keypoint_detector import KPDetector
from first_order_model.animate import normalize_kp
from scipy.spatial import ConvexHull
# save result
from base.io import *
if sys.version_info[0] < 3:
raise Exception("You must use Python 3 or higher. Recommended version is Python 3.7")
def load_checkpoints(config_path, checkpoint_path, cpu=False):
with open(config_path) as f:
config = yaml.load(f)
generator = OcclusionAwareGenerator(**config['model_params']['generator_params'],
**config['model_params']['common_params'])
if not cpu:
generator.cuda()
kp_detector = KPDetector(**config['model_params']['kp_detector_params'],
**config['model_params']['common_params'])
if not cpu:
kp_detector.cuda()
if cpu:
checkpoint = torch.load(checkpoint_path, map_location=torch.device('cpu'))
else:
checkpoint = torch.load(checkpoint_path)
generator.load_state_dict(checkpoint['generator'])
kp_detector.load_state_dict(checkpoint['kp_detector'])
if not cpu:
generator = DataParallelWithCallback(generator)
kp_detector = DataParallelWithCallback(kp_detector)
generator.eval()
kp_detector.eval()
return generator, kp_detector
def make_animation(source_image, driving_video, generator, kp_detector, relative=True, adapt_movement_scale=True, cpu=False):
with torch.no_grad():
predictions = []
source = torch.tensor(source_image[np.newaxis].astype(np.float32)).permute(0, 3, 1, 2)
if not cpu:
source = source.cuda()
driving = torch.tensor(np.array(driving_video)[np.newaxis].astype(np.float32)).permute(0, 4, 1, 2, 3)
kp_source = kp_detector(source)
kp_driving_initial = kp_detector(driving[:, :, 0])
for frame_idx in tqdm(range(driving.shape[2])):
driving_frame = driving[:, :, frame_idx]
if not cpu:
driving_frame = driving_frame.cuda()
kp_driving = kp_detector(driving_frame)
kp_norm = normalize_kp(kp_source=kp_source, kp_driving=kp_driving,
kp_driving_initial=kp_driving_initial, use_relative_movement=relative,
use_relative_jacobian=relative, adapt_movement_scale=adapt_movement_scale)
out = generator(source, kp_source=kp_source, kp_driving=kp_norm)
predictions.append(np.transpose(out['prediction'].data.cpu().numpy(), [0, 2, 3, 1])[0])
return predictions
def find_best_frame(source, driving, cpu=False):
import face_alignment
def normalize_kp(kp):
kp = kp - kp.mean(axis=0, keepdims=True)
area = ConvexHull(kp[:, :2]).volume
area = np.sqrt(area)
kp[:, :2] = kp[:, :2] / area
return kp
fa = face_alignment.FaceAlignment(face_alignment.LandmarksType._2D, flip_input=True,
device='cpu' if cpu else 'cuda')
kp_source = fa.get_landmarks(255 * source)[0]
kp_source = normalize_kp(kp_source)
norm = float('inf')
frame_num = 0
for i, image in tqdm(enumerate(driving)):
kp_driving = fa.get_landmarks(255 * image)[0]
kp_driving = normalize_kp(kp_driving)
new_norm = (np.abs(kp_source - kp_driving) ** 2).sum()
if new_norm < norm:
norm = new_norm
frame_num = i
return frame_num
"""
python demo.py --config config/vox-256.yaml \
--dic_dataset /media/jiaxiangshang/My\ Passport/0_SHANG_DATA/1_Face_2D/7_voxel_celeb2_val_GL_unique --name_global_list train_video_10 \
--dic_save /media/jiaxiangshang/My\ Passport/1_SHANG_EXP/2_frrnet \
--checkpoint /data0/2_Project/python/deeplearning_python/dl_model_reen/vox-cpk.pth.tar \
--relative --adapt_scale
python ./first_order_model/0_test_gl_img2img.py \
--config config/vox-256.yaml \
--dic_dataset /apdcephfs/private_alexinwang/jxshang/data/0_3DFace_Train/2_mono/7_voxel_celeb2_val_GL_unique_5 \
--name_global_list train_video_5 \
--dic_save /apdcephfs/share_782420/jxshang/exp/5_reen_results/first_order_model \
--checkpoint /apdcephfs/private_alexinwang/jxshang/project/deeplearning_python/dl_model_reen/vox-cpk.pth.tar \
--relative \
--adapt_scale
python ./first_order_model/0_test_gl_img2img.py \
--config config/vox-256.yaml \
--dic_dataset /apdcephfs/private_alexinwang/jxshang/data/0_3DFace_Train/2_mono/7_voxel_celeb2_val_GL_unique_5 \
--name_global_list train_video_5 \
--dic_save /apdcephfs/share_782420/jxshang/exp/6_reen_quati/first_order_model \
--checkpoint /apdcephfs/private_alexinwang/jxshang/project/deeplearning_python/dl_model_reen/vox-cpk.pth.tar \
--relative \
--adapt_scale \
--flag_quati 1
"""
from first_order_model.crop_video import *
def test_video(opt, path_src, list_path_tar):
path_src_pure, _ = os.path.splitext(path_src)
path_src_bbox = path_src_pure + '_bbox.txt'
src_bbox = parse_self_facebbox(path_src_bbox)[:-1]
source_image_ori = imageio.imread(path_src)
source_image, _, bbox_src = crop_bbox(source_image_ori, src_bbox)
driving_video_ori = []
driving_video = []
list_m_inv = []
list_bbox = []
for i in range(len(list_path_tar)):
path_tar = list_path_tar[i]
path_tar_pure, _ = os.path.splitext(path_tar)
path_tar_bbox = path_tar_pure + '_bbox.txt'
tar_bbox = parse_self_facebbox(path_tar_bbox)[:-1]
tar_image_ori = imageio.imread(path_tar)
tar_image, m_inv, bbox = crop_bbox(tar_image_ori, tar_bbox)
driving_video_ori.append(tar_image_ori)
driving_video.append(tar_image)
list_m_inv.append(m_inv)
list_bbox.append(bbox)
#source_image_ori = resize(source_image_ori, (256, 256))[..., :3]
source_image = resize(source_image, (256, 256))[..., :3]
#driving_video_ori = [resize(frame, (256, 256))[..., :3] for frame in driving_video_ori]
driving_video = [resize(frame, (256, 256))[..., :3] for frame in driving_video]
opt.config = os.path.join(_cur_dir, opt.config)
generator, kp_detector = load_checkpoints(config_path=opt.config, checkpoint_path=opt.checkpoint, cpu=opt.cpu)
if opt.find_best_frame or opt.best_frame is not None:
i = opt.best_frame if opt.best_frame is not None else find_best_frame(source_image, driving_video, cpu=opt.cpu)
print("Best frame: " + str(i))
driving_forward = driving_video[i:]
driving_backward = driving_video[:(i + 1)][::-1]
predictions_forward = make_animation(source_image, driving_forward, generator, kp_detector,
relative=opt.relative, adapt_movement_scale=opt.adapt_scale, cpu=opt.cpu)
predictions_backward = make_animation(source_image, driving_backward, generator, kp_detector,
relative=opt.relative, adapt_movement_scale=opt.adapt_scale, cpu=opt.cpu)
predictions = predictions_backward[::-1] + predictions_forward[1:]
else:
predictions = make_animation(source_image, driving_video, generator, kp_detector, relative=opt.relative,
adapt_movement_scale=opt.adapt_scale, cpu=opt.cpu)
list_result = [img_as_ubyte(frame) for frame in predictions]
return source_image_ori, driving_video_ori, list_result, list_m_inv, bbox_src, list_bbox
#imageio.mimsave(opt.result_video, [img_as_ubyte(frame) for frame in predictions], fps=fps)
import ast
if __name__ == "__main__":
parser = ArgumentParser()
parser.add_argument("--config", default='config/vox-256.yaml', help="path to config")
parser.add_argument("--checkpoint", default='/data0/2_Project/python/deeplearning_python/dl_model_reen/vox-cpk.pth.tar', help="path to checkpoint to restore")
parser.add_argument("--source_image", default='sup-mat/source.png', help="path to source image")
parser.add_argument("--driving_video", default='sup-mat/source.png', help="path to driving video")
parser.add_argument("--result_video", default='result.mp4', help="path to output")
parser.add_argument("--relative", dest="relative", action="store_true", help="use relative or absolute keypoint coordinates")
parser.add_argument("--adapt_scale", dest="adapt_scale", action="store_true", help="adapt movement scale based on convex hull of keypoints")
parser.add_argument("--find_best_frame", dest="find_best_frame", action="store_true",
help="Generate from the frame that is the most alligned with source. (Only for faces, requires face_aligment lib)")
parser.add_argument("--best_frame", dest="best_frame", type=int, default=None,
help="Set frame to start from.")
parser.add_argument("--cpu", dest="cpu", action="store_true", help="cpu mode.")
# jiaxiang
parser.add_argument('--dic_dataset', default='/media/jiaxiangshang/My Passport/0_SHANG_DATA/1_Face_2D/7_voxel_celeb2_val_GL_unique', type=str, help='')
parser.add_argument('--dic_save', default='/media/jiaxiangshang/My Passport/1_SHANG_EXP/2_frrnet/1_free_vc', type=str, help='')
parser.add_argument('--name_global_list', default='train_video_10', type=str, help='')
parser.add_argument('--num_src_k', default=1, type=int, help='')
parser.add_argument('--num_tar_k', default=10, type=int, help='')
parser.add_argument('--flag_quati', default=0, type=ast.literal_eval, help='')
parser.set_defaults(relative=False)
parser.set_defaults(adapt_scale=False)
opt = parser.parse_args()
# read global list
emotion_list, dic_folderLeaf_list, dict_video_2_frames = parse_video_global_list(opt.dic_dataset, opt.name_global_list, True)
# save global list
if os.path.isdir(opt.dic_save) == False:
os.makedirs(opt.dic_save)
path_train_list = os.path.join(opt.dic_save, "eval.txt")
f_train_global = open(path_train_list, 'w')
list_name_videoKey = list(dict_video_2_frames.keys())
for i in range(len(list_name_videoKey)):
print('Sample', i)
name_vk = list_name_videoKey[i]
list_frames = dict_video_2_frames[name_vk]
step = int(len(list_frames)/opt.num_src_k)
for j in range(0, len(list_frames), step):
main_frame = list_frames[j]
for i_v in range(len(list_name_videoKey)):
if opt.flag_quati:
if i_v != i:
continue
else:
if i_v % opt.num_tar_k != 0 and i_v != i:
continue
name_vk_SEAR = list_name_videoKey[i_v]
list_frames_SEAR = dict_video_2_frames[name_vk_SEAR]
list_path_SEAR = [lf+'.jpg' for lf in list_frames_SEAR]
source_image, list_driving_video, list_result, list_m_inv, bbox_src, list_bbox_tar = test_video(opt, main_frame + '.jpg', list_path_SEAR)
name_subfolder_save_0 = 'reen_%d' % (i)
name_subfolder_save = 'numf_%d_on_%d' % (j, i_v)
dic_subf_save = os.path.join(opt.dic_save, name_subfolder_save_0+'/'+name_subfolder_save)
print('save subdic', dic_subf_save)
if os.path.isdir(dic_subf_save) == False:
os.makedirs(dic_subf_save)
for f in range(len(list_frames_SEAR)):
path_frame_pure = list_frames_SEAR[f]
_, name_frame = os.path.split(path_frame_pure)
path_save_src = os.path.join(dic_subf_save, name_frame + '_src.jpg')
path_save = os.path.join(dic_subf_save, name_frame + '.jpg')
path_all_save = os.path.join(dic_subf_save, name_frame + '_concat.jpg')
src_img = source_image
tar_img = list_driving_video[f]
result_img = list_result[f]
M_inv = list_m_inv[f]
bbox_tar = list_bbox_tar[f]
if 1:
from base.io import inverse_affine_warp_overlay
result_img_replace = inverse_affine_warp_overlay(M_inv, tar_img, result_img * 1.0, np.ones_like(result_img), flag_cv=True)
# # visual
# cv2.imshow("Image Debug", result_img)
# k = cv2.waitKey(0) & 0xFF
# if k == 27:
# cv2.destroyAllWindows()
# cv2.imshow("Image Debug", cv2.cvtColor(img_replace, cv2.COLOR_RGB2BGR))
# k = cv2.waitKey(0) & 0xFF
# if k == 27:
# cv2.destroyAllWindows()
result_concat = np.concatenate([src_img, tar_img, result_img_replace], axis=1)
result_concat = result_concat.astype(np.uint8)
# save
src_img = cv2.cvtColor(src_img, cv2.COLOR_RGB2BGR)
result_img_replace = cv2.cvtColor(result_img_replace, cv2.COLOR_RGB2BGR)
result_concat = cv2.cvtColor(result_concat, cv2.COLOR_RGB2BGR)
cv2.imwrite(path_save_src, src_img)
cv2.imwrite(path_save, result_img_replace)
cv2.imwrite(path_all_save, result_concat)
path_save_bbox = os.path.join(dic_subf_save, name_frame + '_bbox_fom_src.txt')
write_self_facebbox(path_save_bbox, bbox_src)
path_save_bbox = os.path.join(dic_subf_save, name_frame + '_bbox_fom.txt')
write_self_facebbox(path_save_bbox, bbox_tar)
f_train_global.write("%s %s\n" % (name_subfolder_save_0 + '/' + name_subfolder_save, name_frame))
| [
"numpy.sqrt",
"yaml.load",
"numpy.array",
"sys.path.append",
"first_order_model.modules.generator.OcclusionAwareGenerator",
"argparse.ArgumentParser",
"os.path.split",
"os.path.isdir",
"first_order_model.modules.keypoint_detector.KPDetector",
"numpy.concatenate",
"skimage.img_as_ubyte",
"numpy... | [((18, 39), 'matplotlib.use', 'matplotlib.use', (['"""Agg"""'], {}), "('Agg')\n", (32, 39), False, 'import matplotlib\n'), ((264, 289), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (279, 289), False, 'import os, sys\n'), ((320, 347), 'os.path.dirname', 'os.path.dirname', (['_curr_path'], {}), '(_curr_path)\n', (335, 347), False, 'import os, sys\n'), ((364, 389), 'os.path.dirname', 'os.path.dirname', (['_cur_dir'], {}), '(_cur_dir)\n', (379, 389), False, 'import os, sys\n'), ((411, 435), 'sys.path.append', 'sys.path.append', (['_tf_dir'], {}), '(_tf_dir)\n', (426, 435), False, 'import os, sys\n'), ((471, 495), 'os.path.dirname', 'os.path.dirname', (['_tf_dir'], {}), '(_tf_dir)\n', (486, 495), False, 'import os, sys\n'), ((523, 547), 'os.path.dirname', 'os.path.dirname', (['_dl_dir'], {}), '(_dl_dir)\n', (538, 547), False, 'import os, sys\n'), ((581, 616), 'sys.path.append', 'sys.path.append', (['_deep_learning_dir'], {}), '(_deep_learning_dir)\n', (596, 616), False, 'import os, sys\n'), ((1243, 1360), 'first_order_model.modules.generator.OcclusionAwareGenerator', 'OcclusionAwareGenerator', ([], {}), "(**config['model_params']['generator_params'], **\n config['model_params']['common_params'])\n", (1266, 1360), False, 'from first_order_model.modules.generator import OcclusionAwareGenerator\n'), ((1456, 1562), 'first_order_model.modules.keypoint_detector.KPDetector', 'KPDetector', ([], {}), "(**config['model_params']['kp_detector_params'], **config[\n 'model_params']['common_params'])\n", (1466, 1562), False, 'from first_order_model.modules.keypoint_detector import KPDetector\n'), ((3689, 3806), 'face_alignment.FaceAlignment', 'face_alignment.FaceAlignment', (['face_alignment.LandmarksType._2D'], {'flip_input': '(True)', 'device': "('cpu' if cpu else 'cuda')"}), "(face_alignment.LandmarksType._2D, flip_input=\n True, device='cpu' if cpu else 'cuda')\n", (3717, 3806), False, 'import face_alignment\n'), ((3906, 3929), 
'first_order_model.animate.normalize_kp', 'normalize_kp', (['kp_source'], {}), '(kp_source)\n', (3918, 3929), False, 'from first_order_model.animate import normalize_kp\n'), ((5685, 5711), 'os.path.splitext', 'os.path.splitext', (['path_src'], {}), '(path_src)\n', (5701, 5711), False, 'import os, sys\n'), ((5839, 5863), 'imageio.imread', 'imageio.imread', (['path_src'], {}), '(path_src)\n', (5853, 5863), False, 'import imageio\n'), ((6865, 6899), 'os.path.join', 'os.path.join', (['_cur_dir', 'opt.config'], {}), '(_cur_dir, opt.config)\n', (6877, 6899), False, 'import os, sys\n'), ((8367, 8383), 'argparse.ArgumentParser', 'ArgumentParser', ([], {}), '()\n', (8381, 8383), False, 'from argparse import ArgumentParser\n'), ((10682, 10720), 'os.path.join', 'os.path.join', (['opt.dic_save', '"""eval.txt"""'], {}), "(opt.dic_save, 'eval.txt')\n", (10694, 10720), False, 'import os, sys\n'), ((1213, 1225), 'yaml.load', 'yaml.load', (['f'], {}), '(f)\n', (1222, 1225), False, 'import yaml\n'), ((1761, 1788), 'torch.load', 'torch.load', (['checkpoint_path'], {}), '(checkpoint_path)\n', (1771, 1788), False, 'import torch\n'), ((1946, 1981), 'first_order_model.sync_batchnorm.DataParallelWithCallback', 'DataParallelWithCallback', (['generator'], {}), '(generator)\n', (1970, 1981), False, 'from first_order_model.sync_batchnorm import DataParallelWithCallback\n'), ((2004, 2041), 'first_order_model.sync_batchnorm.DataParallelWithCallback', 'DataParallelWithCallback', (['kp_detector'], {}), '(kp_detector)\n', (2028, 2041), False, 'from first_order_model.sync_batchnorm import DataParallelWithCallback\n'), ((2263, 2278), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (2276, 2278), False, 'import torch\n'), ((3610, 3623), 'numpy.sqrt', 'np.sqrt', (['area'], {}), '(area)\n', (3617, 3623), True, 'import numpy as np\n'), ((4094, 4118), 'first_order_model.animate.normalize_kp', 'normalize_kp', (['kp_driving'], {}), '(kp_driving)\n', (4106, 4118), False, 'from first_order_model.animate 
import normalize_kp\n'), ((6129, 6155), 'os.path.splitext', 'os.path.splitext', (['path_tar'], {}), '(path_tar)\n', (6145, 6155), False, 'import os, sys\n'), ((6292, 6316), 'imageio.imread', 'imageio.imread', (['path_tar'], {}), '(path_tar)\n', (6306, 6316), False, 'import imageio\n'), ((6628, 6660), 'skimage.transform.resize', 'resize', (['source_image', '(256, 256)'], {}), '(source_image, (256, 256))\n', (6634, 6660), False, 'from skimage.transform import resize\n'), ((8080, 8099), 'skimage.img_as_ubyte', 'img_as_ubyte', (['frame'], {}), '(frame)\n', (8092, 8099), False, 'from skimage import img_as_ubyte\n'), ((10587, 10614), 'os.path.isdir', 'os.path.isdir', (['opt.dic_save'], {}), '(opt.dic_save)\n', (10600, 10614), False, 'import os, sys\n'), ((10633, 10658), 'os.makedirs', 'os.makedirs', (['opt.dic_save'], {}), '(opt.dic_save)\n', (10644, 10658), False, 'import os, sys\n'), ((2925, 3136), 'first_order_model.animate.normalize_kp', 'normalize_kp', ([], {'kp_source': 'kp_source', 'kp_driving': 'kp_driving', 'kp_driving_initial': 'kp_driving_initial', 'use_relative_movement': 'relative', 'use_relative_jacobian': 'relative', 'adapt_movement_scale': 'adapt_movement_scale'}), '(kp_source=kp_source, kp_driving=kp_driving, kp_driving_initial\n =kp_driving_initial, use_relative_movement=relative,\n use_relative_jacobian=relative, adapt_movement_scale=adapt_movement_scale)\n', (2937, 3136), False, 'from first_order_model.animate import normalize_kp\n'), ((3566, 3587), 'scipy.spatial.ConvexHull', 'ConvexHull', (['kp[:, :2]'], {}), '(kp[:, :2])\n', (3576, 3587), False, 'from scipy.spatial import ConvexHull\n'), ((6784, 6809), 'skimage.transform.resize', 'resize', (['frame', '(256, 256)'], {}), '(frame, (256, 256))\n', (6790, 6809), False, 'from skimage.transform import resize\n'), ((1709, 1728), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (1721, 1728), False, 'import torch\n'), ((11912, 11989), 'os.path.join', 'os.path.join', (['opt.dic_save', 
"(name_subfolder_save_0 + '/' + name_subfolder_save)"], {}), "(opt.dic_save, name_subfolder_save_0 + '/' + name_subfolder_save)\n", (11924, 11989), False, 'import os, sys\n'), ((4139, 4169), 'numpy.abs', 'np.abs', (['(kp_source - kp_driving)'], {}), '(kp_source - kp_driving)\n', (4145, 4169), True, 'import numpy as np\n'), ((12057, 12085), 'os.path.isdir', 'os.path.isdir', (['dic_subf_save'], {}), '(dic_subf_save)\n', (12070, 12085), False, 'import os, sys\n'), ((12116, 12142), 'os.makedirs', 'os.makedirs', (['dic_subf_save'], {}), '(dic_subf_save)\n', (12127, 12142), False, 'import os, sys\n'), ((12293, 12323), 'os.path.split', 'os.path.split', (['path_frame_pure'], {}), '(path_frame_pure)\n', (12306, 12323), False, 'import os, sys\n'), ((12361, 12413), 'os.path.join', 'os.path.join', (['dic_subf_save', "(name_frame + '_src.jpg')"], {}), "(dic_subf_save, name_frame + '_src.jpg')\n", (12373, 12413), False, 'import os, sys\n'), ((12446, 12494), 'os.path.join', 'os.path.join', (['dic_subf_save', "(name_frame + '.jpg')"], {}), "(dic_subf_save, name_frame + '.jpg')\n", (12458, 12494), False, 'import os, sys\n'), ((12531, 12586), 'os.path.join', 'os.path.join', (['dic_subf_save', "(name_frame + '_concat.jpg')"], {}), "(dic_subf_save, name_frame + '_concat.jpg')\n", (12543, 12586), False, 'import os, sys\n'), ((13587, 13649), 'numpy.concatenate', 'np.concatenate', (['[src_img, tar_img, result_img_replace]'], {'axis': '(1)'}), '([src_img, tar_img, result_img_replace], axis=1)\n', (13601, 13649), True, 'import numpy as np\n'), ((14211, 14272), 'os.path.join', 'os.path.join', (['dic_subf_save', "(name_frame + '_bbox_fom_src.txt')"], {}), "(dic_subf_save, name_frame + '_bbox_fom_src.txt')\n", (14223, 14272), False, 'import os, sys\n'), ((14376, 14433), 'os.path.join', 'os.path.join', (['dic_subf_save', "(name_frame + '_bbox_fom.txt')"], {}), "(dic_subf_save, name_frame + '_bbox_fom.txt')\n", (14388, 14433), False, 'import os, sys\n'), ((13026, 13050), 'numpy.ones_like', 
'np.ones_like', (['result_img'], {}), '(result_img)\n', (13038, 13050), True, 'import numpy as np\n'), ((2486, 2509), 'numpy.array', 'np.array', (['driving_video'], {}), '(driving_video)\n', (2494, 2509), True, 'import numpy as np\n')] |
import itertools
import numpy as np
from numpy.testing import assert_array_equal
import pytest
from .. import transform
@pytest.mark.parametrize("volume_shape", [(64, 64, 64), (64, 64, 64, 3)])
def test_get_affine_smoke(volume_shape):
affine = transform.get_affine(volume_shape)
assert_array_equal(affine, np.eye(4))
def test_get_affine_errors():
    """Invalid shape / rotation / translation arguments raise ValueError."""
    bad_calls = [
        dict(volume_shape=(64, 64)),
        dict(volume_shape=(64, 64, 64), rotation=[0, 0]),
        dict(volume_shape=(64, 64, 64), translation=[0, 0]),
    ]
    for kwargs in bad_calls:
        with pytest.raises(ValueError):
            transform.get_affine(**kwargs)
@pytest.mark.parametrize("volume_shape", [(2, 2, 2), (2, 2, 2, 3)])
def test_get_coordinates(volume_shape):
coords = transform._get_coordinates(volume_shape=volume_shape)
coords_ref = [
list(element) for element in list(itertools.product([0, 1], repeat=3))
]
assert_array_equal(coords, coords_ref)
def test_get_coordinates_errors():
    """A two-dimensional shape is rejected with ValueError."""
    with pytest.raises(ValueError):
        transform._get_coordinates(volume_shape=(64, 64))
@pytest.mark.parametrize("volume_shape", [(8, 8, 8), (8, 8, 8, 3)])
def test_trilinear_interpolation_smoke(volume_shape):
volume = np.arange(np.prod(volume_shape)).reshape(volume_shape)
coords = transform._get_coordinates(volume_shape=volume_shape)
x = transform._trilinear_interpolation(volume=volume, coords=coords)
assert_array_equal(x, volume)
@pytest.mark.parametrize("volume_shape", [(8, 8, 8), (8, 8, 8, 3)])
def test_get_voxels(volume_shape):
volume = np.arange(np.prod(volume_shape)).reshape(volume_shape)
coords = transform._get_coordinates(volume_shape=volume_shape)
voxels = transform._get_voxels(volume=volume, coords=coords)
if len(volume_shape) == 3:
assert_array_equal(voxels, np.arange(np.prod(volume_shape)))
else:
assert_array_equal(
voxels,
np.arange(np.prod(volume_shape)).reshape((np.prod(volume_shape[:3]), -1)),
)
def test_get_voxels_errors():
    """Invalid volume or coords shapes raise ValueError."""
    good_coords = transform._get_coordinates(volume_shape=(8, 8, 8))
    # A 2D "volume" is invalid.
    with pytest.raises(ValueError):
        transform._get_voxels(volume=np.zeros((8, 8)), coords=good_coords)
    volume = np.zeros((8, 8, 8))
    # Invalid coords shapes are rejected as well.
    for bad_coords in (np.zeros((8, 8, 8)), np.zeros((8, 2))):
        with pytest.raises(ValueError):
            transform._get_voxels(volume=volume, coords=bad_coords)
| [
"numpy.prod",
"numpy.eye",
"itertools.product",
"pytest.mark.parametrize",
"numpy.zeros",
"pytest.raises",
"numpy.testing.assert_array_equal"
] | [((125, 197), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""volume_shape"""', '[(64, 64, 64), (64, 64, 64, 3)]'], {}), "('volume_shape', [(64, 64, 64), (64, 64, 64, 3)])\n", (148, 197), False, 'import pytest\n'), ((676, 742), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""volume_shape"""', '[(2, 2, 2), (2, 2, 2, 3)]'], {}), "('volume_shape', [(2, 2, 2), (2, 2, 2, 3)])\n", (699, 742), False, 'import pytest\n'), ((1131, 1197), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""volume_shape"""', '[(8, 8, 8), (8, 8, 8, 3)]'], {}), "('volume_shape', [(8, 8, 8), (8, 8, 8, 3)])\n", (1154, 1197), False, 'import pytest\n'), ((1497, 1563), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""volume_shape"""', '[(8, 8, 8), (8, 8, 8, 3)]'], {}), "('volume_shape', [(8, 8, 8), (8, 8, 8, 3)])\n", (1520, 1563), False, 'import pytest\n'), ((958, 996), 'numpy.testing.assert_array_equal', 'assert_array_equal', (['coords', 'coords_ref'], {}), '(coords, coords_ref)\n', (976, 996), False, 'from numpy.testing import assert_array_equal\n'), ((1464, 1493), 'numpy.testing.assert_array_equal', 'assert_array_equal', (['x', 'volume'], {}), '(x, volume)\n', (1482, 1493), False, 'from numpy.testing import assert_array_equal\n'), ((2100, 2116), 'numpy.zeros', 'np.zeros', (['(8, 8)'], {}), '((8, 8))\n', (2108, 2116), True, 'import numpy as np\n'), ((2291, 2310), 'numpy.zeros', 'np.zeros', (['(8, 8, 8)'], {}), '((8, 8, 8))\n', (2299, 2310), True, 'import numpy as np\n'), ((2324, 2343), 'numpy.zeros', 'np.zeros', (['(8, 8, 8)'], {}), '((8, 8, 8))\n', (2332, 2343), True, 'import numpy as np\n'), ((2454, 2470), 'numpy.zeros', 'np.zeros', (['(8, 2)'], {}), '((8, 2))\n', (2462, 2470), True, 'import numpy as np\n'), ((319, 328), 'numpy.eye', 'np.eye', (['(4)'], {}), '(4)\n', (325, 328), True, 'import numpy as np\n'), ((371, 396), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (384, 396), False, 'import pytest\n'), ((460, 485), 
'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (473, 485), False, 'import pytest\n'), ((570, 595), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (583, 595), False, 'import pytest\n'), ((1043, 1068), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (1056, 1068), False, 'import pytest\n'), ((2190, 2215), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (2203, 2215), False, 'import pytest\n'), ((2353, 2378), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (2366, 2378), False, 'import pytest\n'), ((2480, 2505), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (2493, 2505), False, 'import pytest\n'), ((911, 946), 'itertools.product', 'itertools.product', (['[0, 1]'], {'repeat': '(3)'}), '([0, 1], repeat=3)\n', (928, 946), False, 'import itertools\n'), ((1275, 1296), 'numpy.prod', 'np.prod', (['volume_shape'], {}), '(volume_shape)\n', (1282, 1296), True, 'import numpy as np\n'), ((1622, 1643), 'numpy.prod', 'np.prod', (['volume_shape'], {}), '(volume_shape)\n', (1629, 1643), True, 'import numpy as np\n'), ((1876, 1897), 'numpy.prod', 'np.prod', (['volume_shape'], {}), '(volume_shape)\n', (1883, 1897), True, 'import numpy as np\n'), ((2012, 2037), 'numpy.prod', 'np.prod', (['volume_shape[:3]'], {}), '(volume_shape[:3])\n', (2019, 2037), True, 'import numpy as np\n'), ((1980, 2001), 'numpy.prod', 'np.prod', (['volume_shape'], {}), '(volume_shape)\n', (1987, 2001), True, 'import numpy as np\n')] |
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
# PNA Aggregators ------------------------------------------------------------------------------
# Stability constant: added under the sqrt in aggregate_std and to |m| before
# the n-th root in aggregate_moment.
EPS = 1e-5
def aggregate_mean(h):
    """Mean of neighbour features along the neighbourhood axis (dim 1)."""
    return h.mean(dim=1)
def aggregate_max(h):
    """Element-wise maximum over the neighbourhood axis (dim 1)."""
    return torch.max(h, dim=1).values
def aggregate_min(h):
    """Element-wise minimum over the neighbourhood axis (dim 1)."""
    return torch.min(h, dim=1).values
def aggregate_std(h):
    """Standard deviation over the neighbourhood axis, stabilised by EPS."""
    return (aggregate_var(h) + EPS).sqrt()
def aggregate_var(h):
    """Variance over the neighbourhood axis (dim -2), clamped non-negative."""
    mean_of_squares = torch.mean(h * h, dim=-2)
    mean_value = torch.mean(h, dim=-2)
    # relu guards against tiny negative values from floating-point cancellation.
    return torch.relu(mean_of_squares - mean_value * mean_value)
def aggregate_moment(h, n=3):
    """n-th root of the n-th central moment per node: sign(m) * |m|^(1/n).

    m = E[(X - E[X])^n] is taken over the neighbourhood axis (dim 1); EPS is
    added to |m| before the n-th root for numerical stability.
    """
    h_mean = torch.mean(h, dim=1, keepdim=True)
    # BUGFIX: reduce over the neighbourhood axis only (dim=1). The previous
    # torch.mean(...) with no dim collapsed the whole batch to a single
    # scalar, unlike every other aggregator, which returns one vector per node.
    h_n = torch.mean(torch.pow(h - h_mean, n), dim=1)
    rooted_h_n = torch.sign(h_n) * torch.pow(torch.abs(h_n) + EPS, 1.0 / n)
    return rooted_h_n
def aggregate_moment_3(h):
    """Third-moment aggregator (see aggregate_moment)."""
    return aggregate_moment(h, 3)
def aggregate_moment_4(h):
    """Fourth-moment aggregator (see aggregate_moment)."""
    return aggregate_moment(h, 4)
def aggregate_moment_5(h):
    """Fifth-moment aggregator (see aggregate_moment)."""
    return aggregate_moment(h, 5)
def aggregate_sum(h):
    """Sum of neighbour features along the neighbourhood axis (dim 1)."""
    return h.sum(dim=1)
# Name -> function registry for the neighbourhood aggregators defined above.
AGGREGATORS = {
    "mean": aggregate_mean,
    "sum": aggregate_sum,
    "max": aggregate_max,
    "min": aggregate_min,
    "std": aggregate_std,
    "var": aggregate_var,
    "moment3": aggregate_moment_3,
    "moment4": aggregate_moment_4,
    "moment5": aggregate_moment_5,
}
# PNA Scalers ---------------------------------------------------------------------------------
# each scaler is a function that takes as input X (B x N x Din), adj (B x N x N) and
# avg_d (dictionary containing averages over training set) and returns X_scaled (B x N x Din) as output
def scale_identity(h, D=None, avg_d=None):
    """No-op scaler: the degree is ignored and features pass through unchanged."""
    return h
def scale_amplification(h, D, avg_d):
    """Scale features by log(D + 1) / d, where d = avg_d["log"] is the
    training-set average of log(D + 1) (amplifies high-degree nodes)."""
    scale = np.log(D + 1) / avg_d["log"]
    return h * scale
def scale_attenuation(h, D, avg_d):
    """Scale features by avg_d["log"] / log(D + 1), the inverse of the
    amplification scaler (attenuates high-degree nodes)."""
    scale = avg_d["log"] / np.log(D + 1)
    return h * scale
# Name -> function registry for the degree scalers defined above.
SCALERS = {
    "identity": scale_identity,
    "amplification": scale_amplification,
    "attenuation": scale_attenuation,
}
# Activation class names resolvable by get_activation. Names are matched
# case-insensitively against torch.nn.modules.activation; "None" means no
# activation.
SUPPORTED_ACTIVATION_MAP = {
    "ReLU",
    "Sigmoid",
    "Tanh",
    "ELU",
    "SELU",
    "GLU",
    "LeakyReLU",
    "Softplus",
    "None",
}
def get_activation(activation):
    """Resolve *activation* to a torch.nn activation instance.

    Accepts a callable (returned unchanged), the string "none" (returns None),
    or any name from SUPPORTED_ACTIVATION_MAP matched case-insensitively.
    """
    if activation and callable(activation):
        # Already a function/module: use it as-is.
        return activation
    matches = [
        name for name in SUPPORTED_ACTIVATION_MAP if activation.lower() == name.lower()
    ]
    assert len(matches) == 1 and isinstance(
        matches[0], str
    ), "Unhandled activation function"
    resolved = matches[0]
    if resolved.lower() == "none":
        return None
    return vars(torch.nn.modules.activation)[resolved]()
class Set2Set(torch.nn.Module):
    r"""Set2Set global pooling operator from the `"Order Matters: Sequence to
    sequence for sets" <https://arxiv.org/abs/1511.06391>`_ paper.

    The layer repeatedly attends over the set with an LSTM-driven query:

    .. math::
        \mathbf{q}_t &= \mathrm{LSTM}(\mathbf{q}^{*}_{t-1})

        \alpha_{i,t} &= \mathrm{softmax}(\mathbf{x}_i \cdot \mathbf{q}_t)

        \mathbf{r}_t &= \sum_{i=1}^N \alpha_{i,t} \mathbf{x}_i

        \mathbf{q}^{*}_t &= \mathbf{q}_t \, \Vert \, \mathbf{r}_t,

    and returns :math:`\mathbf{q}^{*}_T`, of dimension ``nhid`` (twice the
    input dimension by default).

    Arguments
    ---------
    nin: int
        Size of each input sample.
    nhid: int, optional
        Dimension of the set representation; must be strictly larger than
        ``nin``. Defaults to ``2 * nin``.
    steps: int, optional
        Number of iterations :math:`T`; defaults to the number of set elements.
    num_layers: int, optional
        Number of stacked LSTM layers (default 1).
    activation: unused; kept for interface compatibility.
    device: str, optional
        Device the LSTM is placed on.
    """

    def __init__(
        self, nin, nhid=None, steps=None, num_layers=1, activation=None, device="cpu"
    ):
        super(Set2Set, self).__init__()
        self.steps = steps
        self.nin = nin
        self.nhid = 2 * nin if nhid is None else nhid
        if self.nhid <= self.nin:
            raise ValueError("Set2Set hidden_dim should be larger than input_dim")
        # q_star is the concatenation [q ; r]; the LSTM consumes it and emits
        # a query of size nin, so the "extra" part has size nhid - nin.
        self.lstm_output_dim = self.nhid - self.nin
        self.num_layers = num_layers
        self.lstm = nn.LSTM(
            self.nhid, self.nin, num_layers=num_layers, batch_first=True
        ).to(device)
        self.softmax = nn.Softmax(dim=1)

    def forward(self, x):
        r"""Pool a set tensor of shape (B, N, D) into a (B, nhid) tensor."""
        batch = x.shape[0]
        n_steps = self.steps or x.shape[1]
        hidden = (
            x.new_zeros((self.num_layers, batch, self.nin)),
            x.new_zeros((self.num_layers, batch, self.nin)),
        )
        q_star = x.new_zeros(batch, 1, self.nhid)
        for _ in range(n_steps):
            query, hidden = self.lstm(q_star, hidden)  # (B, 1, nin)
            scores = torch.matmul(x, torch.transpose(query, 1, 2))  # (B, N, 1)
            attn = self.softmax(scores)
            readout = torch.sum(attn * x, dim=1, keepdim=True)  # (B, 1, nin)
            q_star = torch.cat([query, readout], dim=-1)
        return torch.squeeze(q_star, dim=1)
class FCLayer(nn.Module):
    r"""
    A simple fully connected and customizable layer, centered around a
    torch.nn.Linear module. Transformations are applied in this order:

    #. Dense Layer
    #. Activation
    #. Dropout (if applicable)
    #. Batch Normalization (if applicable)

    Arguments
    ----------
    in_size: int
        Input dimension of the layer (the torch.nn.Linear)
    out_size: int
        Output dimension of the layer.
    activation: str or callable, optional
        Activation function to use.
        (Default value = relu)
    dropout: float, optional
        The ratio of units to dropout. No dropout by default.
        (Default value = 0.)
    b_norm: bool, optional
        Whether to use batch normalization
        (Default value = False)
    bias: bool, optional
        Whether to enable bias in for the linear layer.
        (Default value = True)
    init_fn: callable, optional
        Initialization function for the linear weight, invoked as
        ``init_fn(weight, 1 / in_size)``. Defaults to
        :func:`torch.nn.init.xavier_uniform_`.
    device: str, optional
        Device the linear / batch-norm modules are placed on.
    """

    def __init__(
        self,
        in_size,
        out_size,
        activation="relu",
        dropout=0.0,
        b_norm=False,
        bias=True,
        init_fn=None,
        device="cpu",
    ):
        super(FCLayer, self).__init__()
        self.__params = locals()
        del self.__params["__class__"]
        del self.__params["self"]
        self.in_size = in_size
        self.out_size = out_size
        self.bias = bias
        self.linear = nn.Linear(in_size, out_size, bias=bias).to(device)
        self.dropout = None
        self.b_norm = None
        if dropout:
            self.dropout = nn.Dropout(p=dropout)
        if b_norm:
            self.b_norm = nn.BatchNorm1d(out_size).to(device)
        self.activation = get_activation(activation)
        # BUGFIX: honour a caller-supplied init_fn; previously the parameter
        # was documented but unconditionally overwritten with xavier_uniform_.
        self.init_fn = init_fn if init_fn is not None else nn.init.xavier_uniform_
        self.reset_parameters()

    def reset_parameters(self, init_fn=None):
        """Re-initialize the linear weight (and zero the bias, if enabled)."""
        init_fn = init_fn or self.init_fn
        if init_fn is not None:
            init_fn(self.linear.weight, 1 / self.in_size)
        if self.bias:
            self.linear.bias.data.zero_()

    def forward(self, x):
        h = self.linear(x)
        if self.activation is not None:
            h = self.activation(h)
        if self.dropout is not None:
            h = self.dropout(h)
        if self.b_norm is not None:
            # BatchNorm1d normalizes axis 1; for 3D inputs move the feature
            # axis into position first, then move it back.
            if h.shape[1] != self.out_size:
                h = self.b_norm(h.transpose(1, 2)).transpose(1, 2)
            else:
                h = self.b_norm(h)
        return h

    def __repr__(self):
        return (
            self.__class__.__name__
            + " ("
            + str(self.in_size)
            + " -> "
            + str(self.out_size)
            + ")"
        )
class MLP(nn.Module):
    """
    Simple multi-layer perceptron, built as a stack of FCLayers.

    ``layers`` counts the linear layers; hidden layers use ``mid_activation``
    / ``mid_b_norm`` while the final layer uses ``last_activation`` /
    ``last_b_norm``. With ``layers <= 1`` a single layer maps input straight
    to output.
    """

    def __init__(
        self,
        in_size,
        hidden_size,
        out_size,
        layers,
        mid_activation="relu",
        last_activation="none",
        dropout=0.0,
        mid_b_norm=False,
        last_b_norm=False,
        device="cpu",
    ):
        super(MLP, self).__init__()
        self.in_size = in_size
        self.hidden_size = hidden_size
        self.out_size = out_size
        self.fully_connected = nn.ModuleList()
        if layers <= 1:
            dims = [(in_size, out_size)]
        else:
            dims = [(in_size, hidden_size)]
            dims += [(hidden_size, hidden_size)] * (layers - 2)
            dims += [(hidden_size, out_size)]
        last_idx = len(dims) - 1
        for idx, (dim_in, dim_out) in enumerate(dims):
            is_last = idx == last_idx
            self.fully_connected.append(
                FCLayer(
                    dim_in,
                    dim_out,
                    activation=last_activation if is_last else mid_activation,
                    b_norm=last_b_norm if is_last else mid_b_norm,
                    device=device,
                    dropout=dropout,
                )
            )

    def forward(self, x):
        for layer in self.fully_connected:
            x = layer(x)
        return x

    def __repr__(self):
        return (
            self.__class__.__name__
            + " ("
            + str(self.in_size)
            + " -> "
            + str(self.out_size)
            + ")"
        )
class GRU(nn.Module):
    """
    Thin wrapper around nn.GRU used by the GNN framework; inputs narrower
    than the configured sizes are zero-padded on the feature axis.
    """

    def __init__(self, input_size, hidden_size, device):
        super(GRU, self).__init__()
        self.input_size = input_size
        self.hidden_size = hidden_size
        self.gru = nn.GRU(input_size=input_size, hidden_size=hidden_size).to(device)

    def forward(self, x, y):
        """
        :param x: shape: (B, N, Din) where Din <= input_size (difference is padded)
        :param y: shape: (B, N, Dh) where Dh <= hidden_size (difference is padded)
        :return: shape: (B, N, hidden_size)
        """
        assert x.shape[-1] <= self.input_size and y.shape[-1] <= self.hidden_size
        batch, n_nodes = x.shape[0], x.shape[1]
        # Flatten (B, N) into one long "batch" of length-1 sequences.
        x = x.reshape(1, batch * n_nodes, -1).contiguous()
        y = y.reshape(1, batch * n_nodes, -1).contiguous()
        pad_x = self.input_size - x.shape[-1]
        if pad_x > 0:
            x = F.pad(input=x, pad=[0, pad_x], mode="constant", value=0)
        pad_y = self.hidden_size - y.shape[-1]
        if pad_y > 0:
            y = F.pad(input=y, pad=[0, pad_y], mode="constant", value=0)
        # Keep the updated hidden state ([1]) and restore the (B, N, .) layout.
        out = self.gru(x, y)[1]
        return out.reshape(batch, n_nodes, -1)
class S2SReadout(nn.Module):
    """
    Graph readout: Set2Set aggregation over all node features followed by a
    fully connected MLP head.
    """

    def __init__(
        self,
        in_size,
        hidden_size,
        out_size,
        fc_layers=3,
        device="cpu",
        final_activation="relu",
    ):
        super(S2SReadout, self).__init__()
        # Set2Set doubles the feature dimension, hence the 2 * in_size input.
        self.set2set = Set2Set(in_size, device=device)
        self.mlp = MLP(
            in_size=2 * in_size,
            hidden_size=hidden_size,
            out_size=out_size,
            layers=fc_layers,
            mid_activation="relu",
            last_activation=final_activation,
            mid_b_norm=True,
            last_b_norm=False,
            device=device,
        )

    def forward(self, x):
        """Pool node features (B, N, D) and map them to (B, out_size)."""
        pooled = self.set2set(x)
        return self.mlp(pooled)
| [
"torch.nn.Dropout",
"torch.max",
"numpy.log",
"torch.pow",
"torch.min",
"torch.nn.BatchNorm1d",
"torch.sum",
"torch.squeeze",
"torch.nn.functional.pad",
"torch.nn.GRU",
"torch.mean",
"torch.nn.ModuleList",
"torch.nn.LSTM",
"torch.relu",
"torch.abs",
"torch.sign",
"torch.transpose",
... | [((232, 252), 'torch.mean', 'torch.mean', (['h'], {'dim': '(1)'}), '(h, dim=1)\n', (242, 252), False, 'import torch\n'), ((484, 509), 'torch.mean', 'torch.mean', (['(h * h)'], {'dim': '(-2)'}), '(h * h, dim=-2)\n', (494, 509), False, 'import torch\n'), ((523, 544), 'torch.mean', 'torch.mean', (['h'], {'dim': '(-2)'}), '(h, dim=-2)\n', (533, 544), False, 'import torch\n'), ((555, 599), 'torch.relu', 'torch.relu', (['(h_mean_squares - h_mean * h_mean)'], {}), '(h_mean_squares - h_mean * h_mean)\n', (565, 599), False, 'import torch\n'), ((799, 833), 'torch.mean', 'torch.mean', (['h'], {'dim': '(1)', 'keepdim': '(True)'}), '(h, dim=1, keepdim=True)\n', (809, 833), False, 'import torch\n'), ((1209, 1228), 'torch.sum', 'torch.sum', (['h'], {'dim': '(1)'}), '(h, dim=1)\n', (1218, 1228), False, 'import torch\n'), ((288, 307), 'torch.max', 'torch.max', (['h'], {'dim': '(1)'}), '(h, dim=1)\n', (297, 307), False, 'import torch\n'), ((346, 365), 'torch.min', 'torch.min', (['h'], {'dim': '(1)'}), '(h, dim=1)\n', (355, 365), False, 'import torch\n'), ((855, 879), 'torch.pow', 'torch.pow', (['(h - h_mean)', 'n'], {}), '(h - h_mean, n)\n', (864, 879), False, 'import torch\n'), ((898, 913), 'torch.sign', 'torch.sign', (['h_n'], {}), '(h_n)\n', (908, 913), False, 'import torch\n'), ((5154, 5171), 'torch.nn.Softmax', 'nn.Softmax', ([], {'dim': '(1)'}), '(dim=1)\n', (5164, 5171), True, 'import torch.nn as nn\n'), ((6164, 6192), 'torch.squeeze', 'torch.squeeze', (['q_star'], {'dim': '(1)'}), '(q_star, dim=1)\n', (6177, 6192), False, 'import torch\n'), ((10235, 10250), 'torch.nn.ModuleList', 'nn.ModuleList', ([], {}), '()\n', (10248, 10250), True, 'import torch.nn as nn\n'), ((2009, 2022), 'numpy.log', 'np.log', (['(D + 1)'], {}), '(D + 1)\n', (2015, 2022), True, 'import numpy as np\n'), ((2211, 2224), 'numpy.log', 'np.log', (['(D + 1)'], {}), '(D + 1)\n', (2217, 2224), True, 'import numpy as np\n'), ((6063, 6100), 'torch.sum', 'torch.sum', (['(a * x)'], {'dim': '(1)', 'keepdim': 
'(True)'}), '(a * x, dim=1, keepdim=True)\n', (6072, 6100), False, 'import torch\n'), ((6122, 6147), 'torch.cat', 'torch.cat', (['[q, r]'], {'dim': '(-1)'}), '([q, r], dim=-1)\n', (6131, 6147), False, 'import torch\n'), ((8582, 8603), 'torch.nn.Dropout', 'nn.Dropout', ([], {'p': 'dropout'}), '(p=dropout)\n', (8592, 8603), True, 'import torch.nn as nn\n'), ((12929, 13014), 'torch.nn.functional.pad', 'F.pad', ([], {'input': 'x', 'pad': '[0, self.input_size - x.shape[-1]]', 'mode': '"""constant"""', 'value': '(0)'}), "(input=x, pad=[0, self.input_size - x.shape[-1]], mode='constant', value=0\n )\n", (12934, 13014), True, 'import torch.nn.functional as F\n'), ((13148, 13233), 'torch.nn.functional.pad', 'F.pad', ([], {'input': 'y', 'pad': '[0, self.hidden_size - y.shape[-1]]', 'mode': '"""constant"""', 'value': '(0)'}), "(input=y, pad=[0, self.hidden_size - y.shape[-1]], mode='constant',\n value=0)\n", (13153, 13233), True, 'import torch.nn.functional as F\n'), ((926, 940), 'torch.abs', 'torch.abs', (['h_n'], {}), '(h_n)\n', (935, 940), False, 'import torch\n'), ((5028, 5097), 'torch.nn.LSTM', 'nn.LSTM', (['self.nhid', 'self.nin'], {'num_layers': 'num_layers', 'batch_first': '(True)'}), '(self.nhid, self.nin, num_layers=num_layers, batch_first=True)\n', (5035, 5097), True, 'import torch.nn as nn\n'), ((5989, 6013), 'torch.transpose', 'torch.transpose', (['q', '(1)', '(2)'], {}), '(q, 1, 2)\n', (6004, 6013), False, 'import torch\n'), ((8429, 8468), 'torch.nn.Linear', 'nn.Linear', (['in_size', 'out_size'], {'bias': 'bias'}), '(in_size, out_size, bias=bias)\n', (8438, 8468), True, 'import torch.nn as nn\n'), ((12301, 12355), 'torch.nn.GRU', 'nn.GRU', ([], {'input_size': 'input_size', 'hidden_size': 'hidden_size'}), '(input_size=input_size, hidden_size=hidden_size)\n', (12307, 12355), True, 'import torch.nn as nn\n'), ((8649, 8673), 'torch.nn.BatchNorm1d', 'nn.BatchNorm1d', (['out_size'], {}), '(out_size)\n', (8663, 8673), True, 'import torch.nn as nn\n')] |
import pandas as pd
import numpy as np
import math
import cmath
import pickle
# Keypoint column-name prefixes used throughout the pose CSVs. NOTE(review):
# the ordering looks like an OpenPose-style 18-keypoint layout -- confirm
# against the tool that exported the CSVs.
joints = ['Nose','Neck','Right_shoulder','Right_elbow','Right_wrist','Left_shoulder',
          'Left_elbow','Left_wrist','Right_hip','Right_knee','Right_ankle','Left_hip',
          'Left_knee','Left_ankle','Right_eye','Left_eye','Right_ear','Left_ear']
def calculateAngle2d(a, b, c):
    """Return the angle ABC (at vertex b) between 2D points, in degrees.

    Degenerate input where a or c coincides with b (a zero-length vector)
    returns 0.0 instead of raising ZeroDivisionError, matching the behaviour
    of calculateAngle3d.
    """
    x1, y1 = a
    x2, y2 = b  # vertex (midpoint of the three)
    x3, y3 = c
    ABx = x1 - x2
    ABy = y1 - y2
    BCx = x3 - x2
    BCy = y3 - y2
    dotProduct = ABx * BCx + ABy * BCy
    magnitudeAB = math.sqrt(ABx * ABx + ABy * ABy)
    magnitudeBC = math.sqrt(BCx * BCx + BCy * BCy)
    if magnitudeAB == 0 or magnitudeBC == 0:
        # Angle is undefined for a zero-length vector; report 0 like the 3D version.
        return 0.0
    # Clamp to [-1, 1] so floating-point round-off cannot push the cosine
    # outside math.acos's domain.
    cos_angle = max(-1.0, min(1.0, dotProduct / (magnitudeAB * magnitudeBC)))
    angle = math.acos(cos_angle)
    return (angle * 180) / math.pi
def calculateAngle3d(p1, p2, p3):
    """Return the angle at vertex p2 between 3D points, in degrees, rounded
    to 4 decimals.

    Zero-length vectors yield 0.0. cmath.acos keeps the computation defined
    even if round-off pushes the cosine slightly outside [-1, 1]; the
    magnitude of the (possibly complex) result is taken at the end.
    """
    x1, y1, z1 = p1
    x2, y2, z2 = p2
    x3, y3, z3 = p3
    vec_a = (x1 - x2, y1 - y2, z1 - z2)
    vec_b = (x3 - x2, y3 - y2, z3 - z2)
    dot = sum(u * v for u, v in zip(vec_a, vec_b))
    mag_a_sq = sum(u * u for u in vec_a)
    mag_b_sq = sum(v * v for v in vec_b)
    if mag_a_sq == 0 or mag_b_sq == 0:
        angle = 0.0
    else:
        angle = cmath.acos(dot / math.sqrt(mag_a_sq * mag_b_sq))
    angle = (angle * 180) / math.pi
    return round(abs(angle), 4)
def calculateDistance(p1, p2):
    """Euclidean distance between two points given as numpy arrays."""
    diff = p1 - p2
    return np.sqrt((diff ** 2).sum(axis=0))
def get_init_pos_from_pkl():
    """Load the pre-computed initial pose statistics from pickle files.

    Returns, in order: 2D joint distances (mean), 2D distances (median),
    2D positions (mean), 2D positions (median), 3D positions (mean),
    3D positions (median).
    """
    def _load(path):
        with open(path, 'rb') as fh:
            return pickle.load(fh)

    init_pos3d = _load('initial3d_by_mean.pkl')
    init_pos3d_median = _load('initial3d_by_median.pkl')
    init_pos2d = _load('initial2d_by_mean.pkl')
    init_pos2d_median = _load('initial2d_by_median.pkl')
    init_dis2d = _load('initial2d_dis_by_mean.pkl')
    init_dis2d_median = _load('initial2d_dis_by_median.pkl')
    return init_dis2d, init_dis2d_median, init_pos2d, init_pos2d_median, init_pos3d, init_pos3d_median
def get_init_pos_from_csv():
    """Compute initial pose statistics (mean/median) from the reference CSVs.

    The first 145 frames of the recorded session are used as the neutral
    baseline. Joints whose columns are missing from a CSV keep their zero
    defaults.

    Returns, in order: 2D joint distances (mean), 2D distances (median),
    2D positions (mean), 2D positions (median), 3D positions (mean),
    3D positions (median).
    """
    # TODO: the absolute Windows paths make this machine-specific; consider
    # taking them as parameters.
    df = pd.read_csv("C:\\Users\\Testing\\Downloads\\reachstepout_position3d_new.csv")
    df2 = pd.read_csv("C:\\Users\\Testing\\Downloads\\reachstepout_position2d_new.csv")
    df3 = pd.read_csv("C:\\Users\\Testing\\Downloads\\reachstepout_distance2d_new.csv")
    df = df.iloc[0:145]
    df2 = df2.iloc[0:145]
    df3 = df3.iloc[0:145]
    init_pos3d = {key: (0, 0, 0) for key in joints}
    init_pos3d_median = {key: (0, 0, 0) for key in joints}
    init_pos2d = {key: (0, 0) for key in joints}
    init_pos2d_median = {key: (0, 0) for key in joints}
    init_dis2d = {key: 0 for key in joints}
    init_dis2d_median = {key: 0 for key in joints}
    for i in joints:
        # FIX: bare "except:" also swallowed KeyboardInterrupt/SystemExit;
        # Exception is the widest net that is still safe here.
        try:
            init_pos3d[i] = (df['{}X'.format(i)].mean(), df['{}Y'.format(i)].mean(), df['{}Z'.format(i)].mean())
            init_pos3d_median[i] = (df['{}X'.format(i)].median(), df['{}Y'.format(i)].median(), df['{}Z'.format(i)].median())
        except Exception:
            init_pos3d[i] = (0, 0, 0)
            init_pos3d_median[i] = (0, 0, 0)
        try:
            init_pos2d[i] = (round(df2['{}X'.format(i)].mean()), round(df2['{}Y'.format(i)].mean()))
            init_pos2d_median[i] = (round(df2['{}X'.format(i)].median()), round(df2['{}Y'.format(i)].median()))
        except Exception:
            init_pos2d[i] = (0, 0)
            init_pos2d_median[i] = (0, 0)
        try:
            init_dis2d[i] = df3[i].mean()
            init_dis2d_median[i] = df3[i].median()
        except Exception:
            init_dis2d[i] = 0
            init_dis2d_median[i] = 0
    return init_dis2d, init_dis2d_median, init_pos2d, init_pos2d_median, init_pos3d, init_pos3d_median
# print(init_pos3d)
def find_position_angular_differences_right(init_dis2d, init_dis2d_median, init_pos2d, init_pos2d_median, init_pos3d, init_pos3d_median):
    """Per-frame deviation of the RIGHT-side joints from the initial pose.

    Reads the 3D and 2D position CSVs and, for every frame, records how far
    the right elbow / hip / shoulder moved from the supplied initial mean
    positions, plus the shoulder angle (elbow-shoulder-hip) in 3D and 2D.

    Only ``init_pos3d`` and ``init_pos2d`` are read; the median/distance
    parameters are accepted for interface symmetry but unused here.

    Returns a list with one tuple per frame:
    (elbow dX/dY/dZ, hip dX/dY/dZ, shoulder dX/dY/dZ, 3D shoulder angle,
     elbow dX/dY, hip dX/dY, shoulder dX/dY, 2D shoulder angle).
    """
    reachout3d_df = pd.read_csv("C:\\Users\\Testing\\Downloads\\reachstepout_position3d_new.csv")
    reachout2d_df = pd.read_csv("C:\\Users\\Testing\\Downloads\\reachstepout_position2d_new.csv")
    right_elbowX_diff = []
    right_elbowY_diff = []
    right_elbowZ_diff = []
    right_hipX_diff = []
    right_hipY_diff = []
    right_hipZ_diff = []
    right_shoulderX_diff = []
    right_shoulderY_diff = []
    right_shoulderZ_diff = []
    shoulder_angle3d = []
    ######################2d##########################
    right_elbowX_diff2d = []
    right_elbowY_diff2d = []
    right_hipX_diff2d = []
    right_hipY_diff2d = []
    right_shoulderX_diff2d = []
    right_shoulderY_diff2d = []
    shoulder_angle2d = []
    for i in reachout3d_df.index:
        # Elbow / shoulder / hip triple for the 3D shoulder angle.
        p1 = np.array(reachout3d_df.loc[i, ['Right_elbowX', 'Right_elbowY', 'Right_elbowZ']])
        p2 = np.array(reachout3d_df.loc[i, ['Right_shoulderX', 'Right_shoulderY', 'Right_shoulderZ']])
        p3 = np.array(reachout3d_df.loc[i, ['Right_hipX', 'Right_hipY', 'Right_hipZ']])
        right_elbowX_diff.append(reachout3d_df.loc[i, 'Right_elbowX'] - init_pos3d['Right_elbow'][0])
        right_elbowY_diff.append(reachout3d_df.loc[i, 'Right_elbowY'] - init_pos3d['Right_elbow'][1])
        right_elbowZ_diff.append(reachout3d_df.loc[i, 'Right_elbowZ'] - init_pos3d['Right_elbow'][2])
        right_hipX_diff.append(reachout3d_df.loc[i, 'Right_hipX'] - init_pos3d['Right_hip'][0])
        right_hipY_diff.append(reachout3d_df.loc[i, 'Right_hipY'] - init_pos3d['Right_hip'][1])
        right_hipZ_diff.append(reachout3d_df.loc[i, 'Right_hipZ'] - init_pos3d['Right_hip'][2])
        right_shoulderX_diff.append(reachout3d_df.loc[i, 'Right_shoulderX'] - init_pos3d['Right_shoulder'][0])
        right_shoulderY_diff.append(reachout3d_df.loc[i, 'Right_shoulderY'] - init_pos3d['Right_shoulder'][1])
        right_shoulderZ_diff.append(reachout3d_df.loc[i, 'Right_shoulderZ'] - init_pos3d['Right_shoulder'][2])
        shoulder_angle3d.append(calculateAngle3d(p1, p2, p3))
        # Same joints again for the 2D projection.
        p1 = np.array(reachout2d_df.loc[i, ['Right_elbowX', 'Right_elbowY']])
        p2 = np.array(reachout2d_df.loc[i, ['Right_shoulderX', 'Right_shoulderY']])
        p3 = np.array(reachout2d_df.loc[i, ['Right_hipX', 'Right_hipY']])
        right_elbowX_diff2d.append(reachout2d_df.loc[i, 'Right_elbowX'] - init_pos2d['Right_elbow'][0])
        right_elbowY_diff2d.append(reachout2d_df.loc[i, 'Right_elbowY'] - init_pos2d['Right_elbow'][1])
        right_hipX_diff2d.append(reachout2d_df.loc[i, 'Right_hipX'] - init_pos2d['Right_hip'][0])
        right_hipY_diff2d.append(reachout2d_df.loc[i, 'Right_hipY'] - init_pos2d['Right_hip'][1])
        right_shoulderX_diff2d.append(reachout2d_df.loc[i, 'Right_shoulderX'] - init_pos2d['Right_shoulder'][0])
        right_shoulderY_diff2d.append(reachout2d_df.loc[i, 'Right_shoulderY'] - init_pos2d['Right_shoulder'][1])
        shoulder_angle2d.append(calculateAngle2d(p1, p2, p3))
    # print(max(right_elbowX_diff))
    right_list = list(zip(right_elbowX_diff,right_elbowY_diff,right_elbowZ_diff,right_hipX_diff,right_hipY_diff,right_hipZ_diff,right_shoulderX_diff,right_shoulderY_diff,right_shoulderZ_diff, shoulder_angle3d,
                    right_elbowX_diff2d,right_elbowY_diff2d,right_hipX_diff2d,right_hipY_diff2d,right_shoulderX_diff2d, right_shoulderY_diff2d,shoulder_angle2d))
    return right_list
def find_position_angular_differences_left(init_dis2d, init_dis2d_median, init_pos2d, init_pos2d_median, init_pos3d, init_pos3d_median):
    """Compute per-frame left-side joint displacements and shoulder angles.

    Reads the recorded 3d and 2d reach-step-out position CSVs and, for every
    frame, computes the displacement of the left elbow/hip/shoulder relative
    to the initial pose plus the elbow-shoulder-hip angle in 3d and 2d.

    Only ``init_pos3d`` and ``init_pos2d`` are used; the remaining parameters
    are kept for signature parity with the right-side variant.

    Returns:
        list of 17-tuples per frame, ordered as:
        (elbow dX, dY, dZ, hip dX, dY, dZ, shoulder dX, dY, dZ, 3d angle,
         elbow dX, dY, hip dX, dY, shoulder dX, dY, 2d angle).
    """
    reachout3d_df = pd.read_csv("C:\\Users\\Testing\\Downloads\\reachstepout_position3d_new.csv")
    reachout2d_df = pd.read_csv("C:\\Users\\Testing\\Downloads\\reachstepout_position2d_new.csv")
    joints = ('Left_elbow', 'Left_hip', 'Left_shoulder')
    left_list = []
    for i in reachout3d_df.index:
        row = []
        # 3d displacements relative to the initial pose, joint by joint.
        for joint in joints:
            for axis_idx, axis in enumerate('XYZ'):
                row.append(reachout3d_df.loc[i, joint + axis] - init_pos3d[joint][axis_idx])
        # 3d shoulder angle (elbow - shoulder - hip) at this frame.
        elbow3d = np.array(reachout3d_df.loc[i, ['Left_elbowX', 'Left_elbowY', 'Left_elbowZ']])
        shoulder3d = np.array(reachout3d_df.loc[i, ['Left_shoulderX', 'Left_shoulderY', 'Left_shoulderZ']])
        hip3d = np.array(reachout3d_df.loc[i, ['Left_hipX', 'Left_hipY', 'Left_hipZ']])
        row.append(calculateAngle3d(elbow3d, shoulder3d, hip3d))
        # 2d displacements relative to the initial pose.
        for joint in joints:
            for axis_idx, axis in enumerate('XY'):
                row.append(reachout2d_df.loc[i, joint + axis] - init_pos2d[joint][axis_idx])
        # 2d shoulder angle at this frame.
        elbow2d = np.array(reachout2d_df.loc[i, ['Left_elbowX', 'Left_elbowY']])
        shoulder2d = np.array(reachout2d_df.loc[i, ['Left_shoulderX', 'Left_shoulderY']])
        hip2d = np.array(reachout2d_df.loc[i, ['Left_hipX', 'Left_hipY']])
        row.append(calculateAngle2d(elbow2d, shoulder2d, hip2d))
        left_list.append(tuple(row))
    return left_list
# --- Script entry: compute left-side per-frame differences and export to CSV. ---
# init_dis2d, init_dis2d_median, init_pos2d, init_pos2d_median, init_pos3d, init_pos3d_median = get_init_pos_from_pkl()
init_dis2d, init_dis2d_median, init_pos2d, init_pos2d_median, init_pos3d, init_pos3d_median = get_init_pos_from_csv()
# right_list = find_position_angular_differences_right(init_dis2d, init_dis2d_median, init_pos2d, init_pos2d_median, init_pos3d, init_pos3d_median)
left_list = find_position_angular_differences_left(init_dis2d, init_dis2d_median, init_pos2d, init_pos2d_median, init_pos3d, init_pos3d_median)
# Column names follow the 17-tuple order produced above (L = left, E/H/S = elbow/hip/shoulder).
# diff_df = pd.DataFrame(right_list, columns=['REX', 'REY', 'REZ', 'RHX', 'RHY', 'RHZ', 'RSX', 'RSY', 'RSZ', 'Angle','RE2dX', 'RE2dY', 'RH2dX', 'RH2dY', 'RS2dX', 'RS2dY', 'Angle2d'])
diff_df = pd.DataFrame(left_list, columns=['LEX', 'LEY', 'LEZ', 'LHX', 'LHY', 'LHZ', 'LSX', 'LSY', 'LSZ', 'Angle','LE2dX', 'LE2dY', 'LH2dX', 'LH2dY', 'LS2dX', 'LS2dY', 'Angle2d'])
diff_df.to_csv("diff_reachstepout_position3d_new.csv")
# #############################################
# p1 = np.array(init_pos3d['Right_elbow'])
# p2 = np.array(init_pos3d['Right_shoulder'])
# p3 = np.array(init_pos3d['Right_hip'])
# distance = calculateDistance(p1, p2)
# print(init_pos3d['Right_elbow'], p1, distance)
# angle3d = calculateAngle3d(p1, p2, p3)
# p1 = np.array(init_pos2d['Right_elbow'])
# p2 = np.array(init_pos2d['Right_shoulder'])
# p3 = np.array(init_pos2d['Right_hip'])
# angle2d = calculateAngle2d(p1, p2, p3)
# print(angle3d,angle2d) | [
"numpy.sqrt",
"pandas.read_csv",
"math.acos",
"math.sqrt",
"pickle.load",
"numpy.sum",
"numpy.array",
"pandas.DataFrame"
] | [((11698, 11876), 'pandas.DataFrame', 'pd.DataFrame', (['left_list'], {'columns': "['LEX', 'LEY', 'LEZ', 'LHX', 'LHY', 'LHZ', 'LSX', 'LSY', 'LSZ', 'Angle',\n 'LE2dX', 'LE2dY', 'LH2dX', 'LH2dY', 'LS2dX', 'LS2dY', 'Angle2d']"}), "(left_list, columns=['LEX', 'LEY', 'LEZ', 'LHX', 'LHY', 'LHZ',\n 'LSX', 'LSY', 'LSZ', 'Angle', 'LE2dX', 'LE2dY', 'LH2dX', 'LH2dY',\n 'LS2dX', 'LS2dY', 'Angle2d'])\n", (11710, 11876), True, 'import pandas as pd\n'), ((589, 621), 'math.sqrt', 'math.sqrt', (['(ABx * ABx + ABy * ABy)'], {}), '(ABx * ABx + ABy * ABy)\n', (598, 621), False, 'import math\n'), ((665, 697), 'math.sqrt', 'math.sqrt', (['(BCx * BCx + BCy * BCy)'], {}), '(BCx * BCx + BCy * BCy)\n', (674, 697), False, 'import math\n'), ((735, 786), 'math.acos', 'math.acos', (['(dotProduct / (magnitudeAB * magnitudeBC))'], {}), '(dotProduct / (magnitudeAB * magnitudeBC))\n', (744, 786), False, 'import math\n'), ((1532, 1562), 'numpy.sum', 'np.sum', (['((p1 - p2) ** 2)'], {'axis': '(0)'}), '((p1 - p2) ** 2, axis=0)\n', (1538, 1562), True, 'import numpy as np\n'), ((1570, 1591), 'numpy.sqrt', 'np.sqrt', (['squared_dist'], {}), '(squared_dist)\n', (1577, 1591), True, 'import numpy as np\n'), ((2403, 2480), 'pandas.read_csv', 'pd.read_csv', (['"""C:\\\\Users\\\\Testing\\\\Downloads\\\\reachstepout_position3d_new.csv"""'], {}), "('C:\\\\Users\\\\Testing\\\\Downloads\\\\reachstepout_position3d_new.csv')\n", (2414, 2480), True, 'import pandas as pd\n'), ((2491, 2568), 'pandas.read_csv', 'pd.read_csv', (['"""C:\\\\Users\\\\Testing\\\\Downloads\\\\reachstepout_position2d_new.csv"""'], {}), "('C:\\\\Users\\\\Testing\\\\Downloads\\\\reachstepout_position2d_new.csv')\n", (2502, 2568), True, 'import pandas as pd\n'), ((2579, 2656), 'pandas.read_csv', 'pd.read_csv', (['"""C:\\\\Users\\\\Testing\\\\Downloads\\\\reachstepout_distance2d_new.csv"""'], {}), "('C:\\\\Users\\\\Testing\\\\Downloads\\\\reachstepout_distance2d_new.csv')\n", (2590, 2656), True, 'import pandas as pd\n'), ((4194, 4271), 
'pandas.read_csv', 'pd.read_csv', (['"""C:\\\\Users\\\\Testing\\\\Downloads\\\\reachstepout_position3d_new.csv"""'], {}), "('C:\\\\Users\\\\Testing\\\\Downloads\\\\reachstepout_position3d_new.csv')\n", (4205, 4271), True, 'import pandas as pd\n'), ((4292, 4369), 'pandas.read_csv', 'pd.read_csv', (['"""C:\\\\Users\\\\Testing\\\\Downloads\\\\reachstepout_position2d_new.csv"""'], {}), "('C:\\\\Users\\\\Testing\\\\Downloads\\\\reachstepout_position2d_new.csv')\n", (4303, 4369), True, 'import pandas as pd\n'), ((7709, 7786), 'pandas.read_csv', 'pd.read_csv', (['"""C:\\\\Users\\\\Testing\\\\Downloads\\\\reachstepout_position3d_new.csv"""'], {}), "('C:\\\\Users\\\\Testing\\\\Downloads\\\\reachstepout_position3d_new.csv')\n", (7720, 7786), True, 'import pandas as pd\n'), ((7807, 7884), 'pandas.read_csv', 'pd.read_csv', (['"""C:\\\\Users\\\\Testing\\\\Downloads\\\\reachstepout_position2d_new.csv"""'], {}), "('C:\\\\Users\\\\Testing\\\\Downloads\\\\reachstepout_position2d_new.csv')\n", (7818, 7884), True, 'import pandas as pd\n'), ((1717, 1734), 'pickle.load', 'pickle.load', (['file'], {}), '(file)\n', (1728, 1734), False, 'import pickle\n'), ((1823, 1840), 'pickle.load', 'pickle.load', (['file'], {}), '(file)\n', (1834, 1840), False, 'import pickle\n'), ((1920, 1937), 'pickle.load', 'pickle.load', (['file'], {}), '(file)\n', (1931, 1937), False, 'import pickle\n'), ((2026, 2043), 'pickle.load', 'pickle.load', (['file'], {}), '(file)\n', (2037, 2043), False, 'import pickle\n'), ((2127, 2144), 'pickle.load', 'pickle.load', (['file'], {}), '(file)\n', (2138, 2144), False, 'import pickle\n'), ((2237, 2254), 'pickle.load', 'pickle.load', (['file'], {}), '(file)\n', (2248, 2254), False, 'import pickle\n'), ((4946, 5031), 'numpy.array', 'np.array', (["reachout3d_df.loc[i, ['Right_elbowX', 'Right_elbowY', 'Right_elbowZ']]"], {}), "(reachout3d_df.loc[i, ['Right_elbowX', 'Right_elbowY', 'Right_elbowZ']]\n )\n", (4954, 5031), True, 'import numpy as np\n'), ((5040, 5133), 
'numpy.array', 'np.array', (["reachout3d_df.loc[i, ['Right_shoulderX', 'Right_shoulderY', 'Right_shoulderZ']]"], {}), "(reachout3d_df.loc[i, ['Right_shoulderX', 'Right_shoulderY',\n 'Right_shoulderZ']])\n", (5048, 5133), True, 'import numpy as np\n'), ((5143, 5217), 'numpy.array', 'np.array', (["reachout3d_df.loc[i, ['Right_hipX', 'Right_hipY', 'Right_hipZ']]"], {}), "(reachout3d_df.loc[i, ['Right_hipX', 'Right_hipY', 'Right_hipZ']])\n", (5151, 5217), True, 'import numpy as np\n'), ((6221, 6285), 'numpy.array', 'np.array', (["reachout2d_df.loc[i, ['Right_elbowX', 'Right_elbowY']]"], {}), "(reachout2d_df.loc[i, ['Right_elbowX', 'Right_elbowY']])\n", (6229, 6285), True, 'import numpy as np\n'), ((6299, 6369), 'numpy.array', 'np.array', (["reachout2d_df.loc[i, ['Right_shoulderX', 'Right_shoulderY']]"], {}), "(reachout2d_df.loc[i, ['Right_shoulderX', 'Right_shoulderY']])\n", (6307, 6369), True, 'import numpy as np\n'), ((6383, 6443), 'numpy.array', 'np.array', (["reachout2d_df.loc[i, ['Right_hipX', 'Right_hipY']]"], {}), "(reachout2d_df.loc[i, ['Right_hipX', 'Right_hipY']])\n", (6391, 6443), True, 'import numpy as np\n'), ((8446, 8523), 'numpy.array', 'np.array', (["reachout3d_df.loc[i, ['Left_elbowX', 'Left_elbowY', 'Left_elbowZ']]"], {}), "(reachout3d_df.loc[i, ['Left_elbowX', 'Left_elbowY', 'Left_elbowZ']])\n", (8454, 8523), True, 'import numpy as np\n'), ((8537, 8627), 'numpy.array', 'np.array', (["reachout3d_df.loc[i, ['Left_shoulderX', 'Left_shoulderY', 'Left_shoulderZ']]"], {}), "(reachout3d_df.loc[i, ['Left_shoulderX', 'Left_shoulderY',\n 'Left_shoulderZ']])\n", (8545, 8627), True, 'import numpy as np\n'), ((8637, 8708), 'numpy.array', 'np.array', (["reachout3d_df.loc[i, ['Left_hipX', 'Left_hipY', 'Left_hipZ']]"], {}), "(reachout3d_df.loc[i, ['Left_hipX', 'Left_hipY', 'Left_hipZ']])\n", (8645, 8708), True, 'import numpy as np\n'), ((9685, 9747), 'numpy.array', 'np.array', (["reachout2d_df.loc[i, ['Left_elbowX', 'Left_elbowY']]"], {}), "(reachout2d_df.loc[i, 
['Left_elbowX', 'Left_elbowY']])\n", (9693, 9747), True, 'import numpy as np\n'), ((9761, 9829), 'numpy.array', 'np.array', (["reachout2d_df.loc[i, ['Left_shoulderX', 'Left_shoulderY']]"], {}), "(reachout2d_df.loc[i, ['Left_shoulderX', 'Left_shoulderY']])\n", (9769, 9829), True, 'import numpy as np\n'), ((9843, 9901), 'numpy.array', 'np.array', (["reachout2d_df.loc[i, ['Left_hipX', 'Left_hipY']]"], {}), "(reachout2d_df.loc[i, ['Left_hipX', 'Left_hipY']])\n", (9851, 9901), True, 'import numpy as np\n'), ((1375, 1411), 'math.sqrt', 'math.sqrt', (['(magnitudeAB * magnitudeBC)'], {}), '(magnitudeAB * magnitudeBC)\n', (1384, 1411), False, 'import math\n')] |
import itertools
import matplotlib.pyplot as plt
import numpy as np
import scipy
import scipy.linalg
import tqdm
import warnings
from mpl_toolkits.mplot3d import Axes3D
import graph
import optimization
import trait_matrix
# Computes V * exp_wt * U.
# By construction the exponential of our matrices are always real-valued.
def Expm(V, exp_wt, U):
    """Reassemble a matrix exponential from its eigendecomposition.

    Computes V @ diag(exp_wt) @ U and drops the imaginary part; by
    construction the exponentials of our matrices are always real-valued.
    """
    scaled = V.dot(np.diag(exp_wt))
    return np.real(scaled.dot(U))
def ReachabilityConstraint(parameters,
                           Y_desired,
                           A, X_init, Q,
                           specified_time=None,
                           mode=optimization.QUADRATIC_EXACT, margin=None):
    """Cost penalizing the distance between Y(t) = sum_s x_s(t) q_s and Y_desired.

    Args:
        parameters: flattened transition rates k_ij for every species,
            optionally followed by the time t when `specified_time` is None.
        Y_desired: (num_nodes x num_traits) target trait distribution.
        A: (num_nodes x num_nodes) adjacency matrix of the graph.
        X_init: (num_nodes x num_species) initial robot distribution.
        Q: (num_species x num_traits) species-trait matrix.
        specified_time: fixed evaluation time t; when None, t is the last
            entry of `parameters`.
        mode: one of the optimization.* cost modes.
        margin: slack used by the *_AT_LEAST modes (must be None otherwise).

    Returns:
        Scalar cost value; 0 means the target distribution is matched.
    """
    # Sanity checks: margin is only meaningful for the *_AT_LEAST modes.
    assert (mode in (optimization.QUADRATIC_EXACT, optimization.ABSOLUTE_EXACT)) == (margin is None)
    # Prepare variables depending on whether t is part of the parameters.
    num_nodes = A.shape[0]
    num_species = X_init.shape[1]
    num_traits = Q.shape[1]
    if specified_time is None:
        t = parameters[-1]
        # BUGFIX: use floor division. The result is used as a slice index into
        # `parameters` below; true division yields a float under Python 3 and
        # raises "slice indices must be integers".
        num_parameters_i = (np.size(parameters) - 1) // num_species
    else:
        t = specified_time
        num_parameters_i = np.size(parameters) // num_species
    # Reshape adjacency matrix to make sure.
    Adj = A.astype(float).reshape((num_nodes, num_nodes))
    Adj_flatten = Adj.flatten().astype(bool)  # Flatten boolean version.
    # Loop through the species to compute the cost value.
    # At the same time, prepare the different matrices. Only `inside_norm`
    # feeds the returned value; the stored matrices are kept for parity with
    # the companion gradient computation.
    Ks = []                    # K_s
    eigenvalues = []           # w
    eigenvectors = []          # V.T
    eigenvectors_inverse = []  # U.T
    exponential_wt = []        # exp(eigenvalues * t).
    x_matrix = []              # Pre-computed X matrices.
    x0s = []                   # Avoids reshaping.
    qs = []                    # Avoids reshaping.
    xts = []                   # Keeps x_s(t).
    inside_norm = np.zeros((num_nodes, num_traits))  # Will hold the value prior to using the norm.
    for s in range(num_species):
        x0 = X_init[:, s].reshape((num_nodes, 1))
        q = Q[s, :].reshape((1, num_traits))
        x0s.append(x0)
        qs.append(q)
        k_ij = parameters[s * num_parameters_i:(s + 1) * num_parameters_i]
        # Create K from individual k_{ij}; the diagonal makes columns sum to 0.
        K = np.zeros(Adj_flatten.shape)
        K[Adj_flatten] = k_ij
        K = K.reshape((num_nodes, num_nodes))
        np.fill_diagonal(K, -np.sum(K, axis=0))
        # Store K.
        Ks.append(K)
        # Perform eigen-decomposition to compute matrix exponential.
        w, V = scipy.linalg.eig(K, right=True)
        U = scipy.linalg.inv(V)
        wt = w * t
        exp_wt = np.exp(wt)
        xt = Expm(V, exp_wt, U).dot(x0)
        inside_norm += xt.dot(q)
        # Store the transpose of these matrices for later use.
        eigenvalues.append(w)
        eigenvectors.append(V.T)
        eigenvectors_inverse.append(U.T)
        exponential_wt.append(exp_wt)
        xts.append(xt)
        # Pre-build X matrix: X_ij = (e^{w_i t} - e^{w_j t}) / (w_i t - w_j t).
        with warnings.catch_warnings():
            warnings.simplefilter('ignore', RuntimeWarning)  # We don't care about 0/0 on the diagonal.
            X = np.subtract.outer(exp_wt, exp_wt) / (np.subtract.outer(wt, wt) + 1e-10)
        np.fill_diagonal(X, exp_wt)
        x_matrix.append(X)
    inside_norm -= Y_desired
    # Compute the final cost value depending on mode.
    # `derivative_outer_norm` holds the derivative of inside_norm (except the
    # multiplication by (x0 * q)^T); it is not used by the value itself.
    derivative_outer_norm = None
    if mode == optimization.ABSOLUTE_AT_LEAST:
        derivative_outer_norm = -inside_norm + margin
        value = np.sum(np.maximum(derivative_outer_norm, 0))
        derivative_outer_norm = -(derivative_outer_norm > 0).astype(float)  # Keep only 1s for when it's larger than margin.
    elif mode == optimization.ABSOLUTE_EXACT:
        abs_inside_norm = np.abs(inside_norm)
        index_zeros = abs_inside_norm < 1e-10
        value = np.sum(np.abs(inside_norm))
        with warnings.catch_warnings():
            warnings.simplefilter('ignore', RuntimeWarning)  # We don't care about 0/0.
            derivative_outer_norm = inside_norm / abs_inside_norm  # +/-1 sign pattern.
            derivative_outer_norm[index_zeros] = 0  # Make sure we set 0/0 to 0.
    elif mode == optimization.QUADRATIC_AT_LEAST:
        derivative_outer_norm = -inside_norm + margin
        value = np.sum(np.square(np.maximum(derivative_outer_norm, 0)))
        index_negatives = derivative_outer_norm < 0
        derivative_outer_norm *= -2.0
        derivative_outer_norm[index_negatives] = 0  # Don't propagate gradient on negative values.
    elif mode == optimization.QUADRATIC_EXACT:
        value = np.sum(np.square(inside_norm))
        derivative_outer_norm = 2.0 * inside_norm
    return value
def StabilityConstraint(parameters,
                        Y_desired,
                        A, X_init, Q,
                        specified_time=None,
                        nu=1.0):
    """Quadratic cost penalizing ||x_s(t) - x_s(t + nu)||^2 for every species.

    Pushing this cost to zero forces the distribution to be (approximately)
    in steady state at time t; the larger nu, the closer to steady state.
    `Y_desired` is unused here and kept for signature parity with
    ReachabilityConstraint.

    Args:
        parameters: flattened transition rates k_ij for every species,
            optionally followed by the time t when `specified_time` is None.
        Y_desired: unused (signature parity).
        A: (num_nodes x num_nodes) adjacency matrix of the graph.
        X_init: (num_nodes x num_species) initial robot distribution.
        Q: (num_species x num_traits) species-trait matrix.
        specified_time: fixed evaluation time t; when None, t is the last
            entry of `parameters`.
        nu: time offset used to measure deviation from steady state.

    Returns:
        Scalar cost value; 0 means X(t) == X(t + nu).
    """
    # Prepare variables depending on whether t is part of the parameters.
    num_nodes = A.shape[0]
    num_species = X_init.shape[1]
    num_traits = Q.shape[1]
    if specified_time is None:
        t = parameters[-1]
        # BUGFIX: use floor division. The result is used as a slice index into
        # `parameters` below; true division yields a float under Python 3 and
        # raises "slice indices must be integers".
        num_parameters_i = (np.size(parameters) - 1) // num_species
    else:
        t = specified_time
        num_parameters_i = np.size(parameters) // num_species
    # Reshape adjacency matrix to make sure.
    Adj = A.astype(float).reshape((num_nodes, num_nodes))
    Adj_flatten = Adj.flatten().astype(bool)  # Flatten boolean version.
    # Loop through the species to compute the cost value.
    # At the same time, prepare the different matrices.
    Ks = []                    # K_s
    eigenvalues = []           # w
    eigenvectors = []          # V.T
    eigenvectors_inverse = []  # U.T
    exponential_wt = []        # exp(eigenvalues * t).
    x_matrix = []              # Pre-computed X matrices.
    x0s = []                   # Avoids reshaping.
    qs = []                    # Avoids reshaping.
    xts = []                   # Keeps x_s(t).
    inside_norm = np.zeros((num_nodes, num_traits))  # Will hold the value prior to using the norm.
    for s in range(num_species):
        x0 = X_init[:, s].reshape((num_nodes, 1))
        q = Q[s, :].reshape((1, num_traits))
        x0s.append(x0)
        qs.append(q)
        k_ij = parameters[s * num_parameters_i:(s + 1) * num_parameters_i]
        # Create K from individual k_{ij}; the diagonal makes columns sum to 0.
        K = np.zeros(Adj_flatten.shape)
        K[Adj_flatten] = k_ij
        K = K.reshape((num_nodes, num_nodes))
        np.fill_diagonal(K, -np.sum(K, axis=0))
        # Store K.
        Ks.append(K)
        # Perform eigen-decomposition to compute matrix exponential.
        w, V = scipy.linalg.eig(K, right=True)
        U = scipy.linalg.inv(V)
        wt = w * t
        exp_wt = np.exp(wt)
        xt = Expm(V, exp_wt, U).dot(x0)
        # Store the transpose of these matrices for later use.
        eigenvalues.append(w)
        eigenvectors.append(V.T)
        eigenvectors_inverse.append(U.T)
        exponential_wt.append(exp_wt)
        xts.append(xt)
        # Pre-build X matrix: X_ij = (e^{w_i t} - e^{w_j t}) / (w_i t - w_j t).
        with warnings.catch_warnings():
            warnings.simplefilter('ignore', RuntimeWarning)  # We don't care about 0/0 on the diagonal.
            X = np.subtract.outer(exp_wt, exp_wt) / (np.subtract.outer(wt, wt) + 1e-10)
        np.fill_diagonal(X, exp_wt)
        x_matrix.append(X)
    # Forcing the steady state.
    # We add a cost for keeping X(t) and X(t + nu) the same. We use the
    # quadratic norm for this sub-cost.
    value = 0.
    for s in range(num_species):
        # Compute exp of the eigenvalues of K * (t + nu).
        wtdt = eigenvalues[s] * (t + nu)
        exp_wtdt = np.exp(wtdt)
        # Compute x_s(t) - x_s(t + nu) for that species.
        # Note that since we store V.T and U.T, we do (U.T * D * V.T).T == V * D * U
        inside_norm = xts[s] - Expm(eigenvectors_inverse[s], exp_wtdt, eigenvectors[s]).T.dot(x0s[s])
        # Increment value.
        value += np.sum(np.square(inside_norm))
    return value
def BuildParameters(k1, k2):
    """Pack the two transition rates into a flat parameter vector."""
    return np.array((k1, k2))
if __name__ == '__main__':
    # Toy problem: one species with one scalar trait on a fully connected
    # 2-node graph; sweep the two transition rates (k1, k2) over a grid and
    # plot both cost surfaces.
    num_nodes = 2 # DO NOT CHANGE.
    num_traits = 1 # DO NOT CHANGE.
    num_species = 1 # DO NOT CHANGE.
    robots_per_species = 200
    max_rate = 2.    # Upper bound of the (k1, k2) sweep.
    t = 2.           # Evaluation time for both constraints.
    num_points = 20  # Grid resolution of the sweep.
    g = graph.Graph(num_nodes, fully_connected=True)
    # Initial distribution: 2/3 of the robots on node 0.
    X_init = np.zeros((2, 1))
    X_init[0, 0] = int(robots_per_species / 3. * 2.)
    X_init[1, 0] = robots_per_species - X_init[0, 0]
    Q = np.ones((1, 1))
    # Target distribution: 1/3 of the robots on node 0.
    X_final = np.empty_like(X_init)
    X_final[0, 0] = int(robots_per_species / 3.)
    X_final[1, 0] = robots_per_species - X_final[0, 0]
    Y_desired = X_final.dot(Q)
    A = g.AdjacencyMatrix()
    # Evaluate both constraints on a num_points x num_points grid of rates.
    K1, K2 = np.meshgrid(np.linspace(0, max_rate, num_points), np.linspace(0, max_rate, num_points))
    Z1 = np.empty_like(K1)
    Z2 = np.empty_like(K1)
    for i, j in tqdm.tqdm(itertools.product(range(K1.shape[0]), range(K1.shape[1]))):
        Z1[i, j] = ReachabilityConstraint(BuildParameters(K1[i, j], K2[i, j]), Y_desired,
                                         A, X_init, Q, specified_time=t)
        Z2[i, j] = StabilityConstraint(BuildParameters(K1[i, j], K2[i, j]), Y_desired,
                                       A, X_init, Q, specified_time=t)
    # Draw expected k1 vs. k2 line (that reaches steady state).
    # Since we have 1 species with 1 trait, Y_desired is the expected steady state.
    # So we want: y1 * k2 = y2 * k1 => k2 = y2/y1 * k2.
    k1 = np.linspace(0, max_rate, num_points)
    k2 = Y_desired[1] / Y_desired[0] * k1
    index = np.logical_and(k1 < max_rate, k2 < max_rate)  # Keep the line inside the plotted range.
    k1 = k1[index]
    k2 = k2[index]
    z = np.ones_like(k1) * 0.1  # Constant height for the overlaid line.
    # Surface of the reachability cost, with the steady-state line overlaid.
    fig = plt.figure()
    ax = fig.add_subplot(111, projection='3d')
    ax.plot_surface(K1, K2, Z1, rstride=1, cstride=1, cmap='jet')
    ax.plot(k1, k2, z, lw=2, c='r')
    ax.set_title('Reach')
    ax.set_xlim([0, max_rate])
    ax.set_ylim([0, max_rate])
    # Surface of the stability cost.
    fig = plt.figure()
    ax = fig.add_subplot(111, projection='3d')
    ax.plot_surface(K1, K2, Z2, rstride=1, cstride=1, cmap='jet')
    ax.set_title('Stabilize')
    ax.set_xlim([0, max_rate])
    ax.set_ylim([0, max_rate])
    plt.show()
| [
"numpy.array",
"numpy.exp",
"numpy.linspace",
"warnings.simplefilter",
"numpy.maximum",
"numpy.abs",
"numpy.ones",
"numpy.size",
"numpy.fill_diagonal",
"numpy.square",
"scipy.linalg.inv",
"matplotlib.pyplot.show",
"numpy.ones_like",
"numpy.subtract.outer",
"numpy.logical_and",
"warning... | [((1815, 1848), 'numpy.zeros', 'np.zeros', (['(num_nodes, num_traits)'], {}), '((num_nodes, num_traits))\n', (1823, 1848), True, 'import numpy as np\n'), ((5772, 5805), 'numpy.zeros', 'np.zeros', (['(num_nodes, num_traits)'], {}), '((num_nodes, num_traits))\n', (5780, 5805), True, 'import numpy as np\n'), ((7729, 7747), 'numpy.array', 'np.array', (['[k1, k2]'], {}), '([k1, k2])\n', (7737, 7747), True, 'import numpy as np\n'), ((7960, 8004), 'graph.Graph', 'graph.Graph', (['num_nodes'], {'fully_connected': '(True)'}), '(num_nodes, fully_connected=True)\n', (7971, 8004), False, 'import graph\n'), ((8016, 8032), 'numpy.zeros', 'np.zeros', (['(2, 1)'], {}), '((2, 1))\n', (8024, 8032), True, 'import numpy as np\n'), ((8141, 8156), 'numpy.ones', 'np.ones', (['(1, 1)'], {}), '((1, 1))\n', (8148, 8156), True, 'import numpy as np\n'), ((8169, 8190), 'numpy.empty_like', 'np.empty_like', (['X_init'], {}), '(X_init)\n', (8182, 8190), True, 'import numpy as np\n'), ((8453, 8470), 'numpy.empty_like', 'np.empty_like', (['K1'], {}), '(K1)\n', (8466, 8470), True, 'import numpy as np\n'), ((8478, 8495), 'numpy.empty_like', 'np.empty_like', (['K1'], {}), '(K1)\n', (8491, 8495), True, 'import numpy as np\n'), ((9100, 9136), 'numpy.linspace', 'np.linspace', (['(0)', 'max_rate', 'num_points'], {}), '(0, max_rate, num_points)\n', (9111, 9136), True, 'import numpy as np\n'), ((9187, 9231), 'numpy.logical_and', 'np.logical_and', (['(k1 < max_rate)', '(k2 < max_rate)'], {}), '(k1 < max_rate, k2 < max_rate)\n', (9201, 9231), True, 'import numpy as np\n'), ((9304, 9316), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (9314, 9316), True, 'import matplotlib.pyplot as plt\n'), ((9551, 9563), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (9561, 9563), True, 'import matplotlib.pyplot as plt\n'), ((9762, 9772), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (9770, 9772), True, 'import matplotlib.pyplot as plt\n'), ((2169, 2196), 'numpy.zeros', 
'np.zeros', (['Adj_flatten.shape'], {}), '(Adj_flatten.shape)\n', (2177, 2196), True, 'import numpy as np\n'), ((2417, 2448), 'scipy.linalg.eig', 'scipy.linalg.eig', (['K'], {'right': '(True)'}), '(K, right=True)\n', (2433, 2448), False, 'import scipy\n'), ((2457, 2476), 'scipy.linalg.inv', 'scipy.linalg.inv', (['V'], {}), '(V)\n', (2473, 2476), False, 'import scipy\n'), ((2505, 2515), 'numpy.exp', 'np.exp', (['wt'], {}), '(wt)\n', (2511, 2515), True, 'import numpy as np\n'), ((3031, 3058), 'numpy.fill_diagonal', 'np.fill_diagonal', (['X', 'exp_wt'], {}), '(X, exp_wt)\n', (3047, 3058), True, 'import numpy as np\n'), ((6126, 6153), 'numpy.zeros', 'np.zeros', (['Adj_flatten.shape'], {}), '(Adj_flatten.shape)\n', (6134, 6153), True, 'import numpy as np\n'), ((6374, 6405), 'scipy.linalg.eig', 'scipy.linalg.eig', (['K'], {'right': '(True)'}), '(K, right=True)\n', (6390, 6405), False, 'import scipy\n'), ((6414, 6433), 'scipy.linalg.inv', 'scipy.linalg.inv', (['V'], {}), '(V)\n', (6430, 6433), False, 'import scipy\n'), ((6462, 6472), 'numpy.exp', 'np.exp', (['wt'], {}), '(wt)\n', (6468, 6472), True, 'import numpy as np\n'), ((6959, 6986), 'numpy.fill_diagonal', 'np.fill_diagonal', (['X', 'exp_wt'], {}), '(X, exp_wt)\n', (6975, 6986), True, 'import numpy as np\n'), ((7362, 7374), 'numpy.exp', 'np.exp', (['wtdt'], {}), '(wtdt)\n', (7368, 7374), True, 'import numpy as np\n'), ((8370, 8406), 'numpy.linspace', 'np.linspace', (['(0)', 'max_rate', 'num_points'], {}), '(0, max_rate, num_points)\n', (8381, 8406), True, 'import numpy as np\n'), ((8408, 8444), 'numpy.linspace', 'np.linspace', (['(0)', 'max_rate', 'num_points'], {}), '(0, max_rate, num_points)\n', (8419, 8444), True, 'import numpy as np\n'), ((9272, 9288), 'numpy.ones_like', 'np.ones_like', (['k1'], {}), '(k1)\n', (9284, 9288), True, 'import numpy as np\n'), ((1084, 1103), 'numpy.size', 'np.size', (['parameters'], {}), '(parameters)\n', (1091, 1103), True, 'import numpy as np\n'), ((2820, 2845), 
'warnings.catch_warnings', 'warnings.catch_warnings', ([], {}), '()\n', (2843, 2845), False, 'import warnings\n'), ((2853, 2900), 'warnings.simplefilter', 'warnings.simplefilter', (['"""ignore"""', 'RuntimeWarning'], {}), "('ignore', RuntimeWarning)\n", (2874, 2900), False, 'import warnings\n'), ((3389, 3425), 'numpy.maximum', 'np.maximum', (['derivative_outer_norm', '(0)'], {}), '(derivative_outer_norm, 0)\n', (3399, 3425), True, 'import numpy as np\n'), ((3614, 3633), 'numpy.abs', 'np.abs', (['inside_norm'], {}), '(inside_norm)\n', (3620, 3633), True, 'import numpy as np\n'), ((5041, 5060), 'numpy.size', 'np.size', (['parameters'], {}), '(parameters)\n', (5048, 5060), True, 'import numpy as np\n'), ((6748, 6773), 'warnings.catch_warnings', 'warnings.catch_warnings', ([], {}), '()\n', (6771, 6773), False, 'import warnings\n'), ((6781, 6828), 'warnings.simplefilter', 'warnings.simplefilter', (['"""ignore"""', 'RuntimeWarning'], {}), "('ignore', RuntimeWarning)\n", (6802, 6828), False, 'import warnings\n'), ((7650, 7672), 'numpy.square', 'np.square', (['inside_norm'], {}), '(inside_norm)\n', (7659, 7672), True, 'import numpy as np\n'), ((991, 1010), 'numpy.size', 'np.size', (['parameters'], {}), '(parameters)\n', (998, 1010), True, 'import numpy as np\n'), ((2290, 2307), 'numpy.sum', 'np.sum', (['K'], {'axis': '(0)'}), '(K, axis=0)\n', (2296, 2307), True, 'import numpy as np\n'), ((2955, 2988), 'numpy.subtract.outer', 'np.subtract.outer', (['exp_wt', 'exp_wt'], {}), '(exp_wt, exp_wt)\n', (2972, 2988), True, 'import numpy as np\n'), ((3695, 3714), 'numpy.abs', 'np.abs', (['inside_norm'], {}), '(inside_norm)\n', (3701, 3714), True, 'import numpy as np\n'), ((3725, 3750), 'warnings.catch_warnings', 'warnings.catch_warnings', ([], {}), '()\n', (3748, 3750), False, 'import warnings\n'), ((3758, 3805), 'warnings.simplefilter', 'warnings.simplefilter', (['"""ignore"""', 'RuntimeWarning'], {}), "('ignore', RuntimeWarning)\n", (3779, 3805), False, 'import warnings\n'), 
((4948, 4967), 'numpy.size', 'np.size', (['parameters'], {}), '(parameters)\n', (4955, 4967), True, 'import numpy as np\n'), ((6247, 6264), 'numpy.sum', 'np.sum', (['K'], {'axis': '(0)'}), '(K, axis=0)\n', (6253, 6264), True, 'import numpy as np\n'), ((6883, 6916), 'numpy.subtract.outer', 'np.subtract.outer', (['exp_wt', 'exp_wt'], {}), '(exp_wt, exp_wt)\n', (6900, 6916), True, 'import numpy as np\n'), ((376, 391), 'numpy.diag', 'np.diag', (['exp_wt'], {}), '(exp_wt)\n', (383, 391), True, 'import numpy as np\n'), ((2992, 3017), 'numpy.subtract.outer', 'np.subtract.outer', (['wt', 'wt'], {}), '(wt, wt)\n', (3009, 3017), True, 'import numpy as np\n'), ((6920, 6945), 'numpy.subtract.outer', 'np.subtract.outer', (['wt', 'wt'], {}), '(wt, wt)\n', (6937, 6945), True, 'import numpy as np\n'), ((4167, 4203), 'numpy.maximum', 'np.maximum', (['derivative_outer_norm', '(0)'], {}), '(derivative_outer_norm, 0)\n', (4177, 4203), True, 'import numpy as np\n'), ((4447, 4469), 'numpy.square', 'np.square', (['inside_norm'], {}), '(inside_norm)\n', (4456, 4469), True, 'import numpy as np\n')] |
import os
import numpy as np
import warnings
import scipy.constants as sc
import optical_models as om
class Metal:
    """Base class for metals whose permittivity follows an analytic model.

    Only the 'DrudeSommerfeld' model is supported; `modelparams` must then
    hold exactly two values: [plasma frequency, loss rate].
    """
    def __init__(self, model, modelparams):
        # Reject unknown models first (guard clause).
        if model != 'DrudeSommerfeld':
            raise Exception('Permittivity model %s is unavailable' % (model))
        if np.size(modelparams) != 2:
            raise Exception('modelparams must have exactly two values:\n np.array([plasma frequency, loss rate]')
        self.PlasmaFrequency = modelparams[0]
        self.Gamma = modelparams[1]
        # The lambda ignores kp: this permittivity has no spatial dispersion.
        self.Permittivity = lambda kp, w: om.DrudeSommerfeld(w, self.PlasmaFrequency,
                                                             self.Gamma)
class Dielectric:
    """Skeleton base class for dielectric materials.

    Concrete dielectrics override Permittivity(kpar, omega); the base
    implementation is a stub that returns None.
    """

    def __init__(self):
        # Nothing to initialize in the base class.
        pass

    def Permittivity(self, kpar, omega):
        """Stub permittivity; subclasses provide the actual model."""
        return None
class Gold(Metal):
    """Gold described by the Drude-Sommerfeld model."""
    def __init__(self):
        # [plasma frequency, loss rate] passed to the Metal base class.
        self.modelparams = np.array([13.8e15, 1.075e14])
        super().__init__('DrudeSommerfeld', self.modelparams)
class Aluminum(Metal):
    """Aluminum described by the Drude-Sommerfeld model.

    References
    ----------
    [1] Palik
    """
    def __init__(self):
        # [plasma frequency, loss rate] passed to the Metal base class.
        self.modelparams = np.array([1.747e16, 7.596e13])
        super().__init__('DrudeSommerfeld', self.modelparams)
class SiliconCarbide:
    """Silicon carbide with the single-phonon oscillator model of Spitzer et al.

    Oscillator parameters (converted to rad/s):
        omegaL: 969 cm-1 = 1.827e14 rad/s
        omegaT: 793 cm-1 = 1.495e14 rad/s
        Gamma:  4.76 cm-1 = 0.9e12 rad/s
    """

    def __init__(self):
        # High-frequency permittivity and [omegaL, omegaT, Gamma].
        self.epsinf = 6.7
        self.modelparams = np.array([1.827e14, 1.495e14, 0.9e12])
        self.wspp = 1.787e14 # Surface plasma frequency

    def Permittivity(self, q, omega):
        """Permittivity of SiC as given by Spitzer et al. (q is unused)."""
        wL, wT, gamma = self.modelparams
        oscillator = (wL**2 - wT**2) / (wT**2 - omega**2 - 1j * gamma * omega)
        return self.epsinf * (1 + oscillator)
class HexagonalBoronNitride(Dielectric):
    """Hexagonal boron nitride (hBN), an anisotropic (uniaxial) polar dielectric.

    The in-plane (xy) response is modeled with a single Lorentz oscillator.
    Out-of-plane (z) oscillator parameters are stored but the out-of-plane
    permittivity itself is not implemented yet.

    References
    ----------
    [1] Geick, Perry and Rupprecht, Phys. Rev. 146, 543 (1966).
        https://doi.org/10.1103/PhysRev.146.543
    [2] Woessner et al., Nature Materials 14, 421 (2015).
        https://doi.org/10.1038/nmat4169
    [3] Cai et al., Solid State Communications 141, 262 (2007).
        https://doi.org/10.1016/j.ssc.2006.10.040
    [4] Brar et al., Nano Letters 14, 3876 (2014).
        https://doi.org/10.1021/nl501096s
    """
    def __init__(self, model):
        """Configure oscillator parameters for the requested parameter set.

        Args:
            model: parameter-set name. Only 'Cai:clean' assigns parameters;
                'Cai' is accepted but currently configures nothing.
        """
        if model == 'Cai':
            pass
        if model == 'Cai:clean':
            # High-frequency permittivities (in-plane, out-of-plane).
            self.epsinf_xy = 4.87
            self.epsinf_z = 2.95
            # Oscillator strengths.
            self.s_xy = [1.83]
            self.s_z = [0.61]
            # Phonon frequencies (eV converted to rad/s).
            self.w_xy = [0.1701*sc.elementary_charge /sc.hbar]
            self.w_z = [0.0925*sc.elementary_charge /sc.hbar]
            # Loss rates (eV converted to rad/s).
            self.g_xy = [0.00087*sc.elementary_charge/sc.hbar]
            self.g_z = [0.00025*sc.elementary_charge/sc.hbar]

    def PermittivityInPlane(self, omega):
        """In-plane (xy) permittivity from a single Lorentz oscillator."""
        modes = [[self.w_xy[0], self.g_xy[0], self.s_xy[0]]]
        eps = om.Lorentzian(omega, self.epsinf_xy, modes)
        return eps

    def PermittivityOutOfPlane(self, omega):
        """Out-of-plane (z) permittivity; not implemented yet (returns None)."""
        pass

    def Permittivity(self, omega):
        """Full permittivity tensor diag(eps_x, eps_y, eps_z).

        NOTE: PermittivityOutOfPlane is unimplemented, so eps_z is currently
        None; the tensor is only meaningful once it is provided.
        """
        epsx = self.PermittivityInPlane(omega)
        epsy = epsx
        epsz = self.PermittivityOutOfPlane(omega)
        # BUGFIX: np.diag takes a single sequence (plus an optional offset);
        # np.diag(epsx, epsy, epsz) passed three positional arguments and
        # raised TypeError.
        return np.diag([epsx, epsy, epsz])
####################
# Useful Functions #
####################
def download_material_data(url, material, filename):
    """Fetch a data file (e.g. from refractiveindex.info) and store it under
    $DATA/materials/<material>/<filename>.
    """
    from urllib import request
    destination = os.path.join(os.environ['DATA'], 'materials', material, filename)
    request.urlretrieve(url, destination)
def get_material_data_files(material):
    """List the data files available for `material` under $DATA/materials."""
    return os.listdir(os.path.join(os.environ['DATA'], 'materials', material))
def load_material_data(material, filename):
    """Load a comma-separated data file from $DATA/materials/<material> into a
    numpy array, skipping the single header row.
    """
    path = os.path.join(os.environ['DATA'], 'materials', material, filename)
    with open(path) as f:
        return np.loadtxt(f, delimiter=',', skiprows=1)
| [
"os.listdir",
"urllib.request.urlretrieve",
"numpy.size",
"os.path.join",
"numpy.diag",
"numpy.array",
"numpy.loadtxt",
"optical_models.Lorentzian",
"optical_models.DrudeSommerfeld"
] | [((3720, 3785), 'os.path.join', 'os.path.join', (["os.environ['DATA']", '"""materials"""', 'material', 'filename'], {}), "(os.environ['DATA'], 'materials', material, filename)\n", (3732, 3785), False, 'import os\n'), ((3784, 3818), 'urllib.request.urlretrieve', 'request.urlretrieve', (['url', 'savepath'], {}), '(url, savepath)\n', (3803, 3818), False, 'from urllib import request\n'), ((3878, 3933), 'os.path.join', 'os.path.join', (["os.environ['DATA']", '"""materials"""', 'material'], {}), "(os.environ['DATA'], 'materials', material)\n", (3890, 3933), False, 'import os\n'), ((3941, 3957), 'os.listdir', 'os.listdir', (['path'], {}), '(path)\n', (3951, 3957), False, 'import os\n'), ((4064, 4129), 'os.path.join', 'os.path.join', (["os.environ['DATA']", '"""materials"""', 'material', 'filename'], {}), "(os.environ['DATA'], 'materials', material, filename)\n", (4076, 4129), False, 'import os\n'), ((797, 836), 'numpy.array', 'np.array', (['[1.38e+16, 107500000000000.0]'], {}), '([1.38e+16, 107500000000000.0])\n', (805, 836), True, 'import numpy as np\n'), ((1006, 1045), 'numpy.array', 'np.array', (['[1.747e+16, 75960000000000.0]'], {}), '([1.747e+16, 75960000000000.0])\n', (1014, 1045), True, 'import numpy as np\n'), ((1342, 1406), 'numpy.array', 'np.array', (['[182700000000000.0, 149500000000000.0, 900000000000.0]'], {}), '([182700000000000.0, 149500000000000.0, 900000000000.0])\n', (1350, 1406), True, 'import numpy as np\n'), ((3225, 3268), 'optical_models.Lorentzian', 'om.Lorentzian', (['omega', 'self.epsinf_xy', 'modes'], {}), '(omega, self.epsinf_xy, modes)\n', (3238, 3268), True, 'import optical_models as om\n'), ((3471, 3496), 'numpy.diag', 'np.diag', (['epsx', 'epsy', 'epsz'], {}), '(epsx, epsy, epsz)\n', (3478, 3496), True, 'import numpy as np\n'), ((4161, 4201), 'numpy.loadtxt', 'np.loadtxt', (['f'], {'delimiter': '""","""', 'skiprows': '(1)'}), "(f, delimiter=',', skiprows=1)\n", (4171, 4201), True, 'import numpy as np\n'), ((249, 269), 'numpy.size', 
'np.size', (['modelparams'], {}), '(modelparams)\n', (256, 269), True, 'import numpy as np\n'), ((490, 545), 'optical_models.DrudeSommerfeld', 'om.DrudeSommerfeld', (['w', 'self.PlasmaFrequency', 'self.Gamma'], {}), '(w, self.PlasmaFrequency, self.Gamma)\n', (508, 545), True, 'import optical_models as om\n')] |
import unittest
from test_case import TestCase
import pinocchio as pin
from pinocchio.utils import rand, zero
import numpy as np
# common quantities for all tests.
# They correspond to the default values of the arguments, and they need to stay this way
r_coeff = 0.0            # passed as the r_coeff argument of pin.forwardDynamics below
inv_damping = 0.0        # passed as the inv_damping argument of pin.impulseDynamics below
update_kinematics = True # passed as the update_kinematics flag of both dynamics calls
class TestDynamicsBindings(TestCase):

    def setUp(self):
        """Create a random humanoid model and the quantities shared by every test."""
        self.model = pin.buildSampleModelHumanoidRandom()
        self.data = self.model.createData()

        nv = self.model.nv
        qmax = np.matrix(np.full((nv, 1), np.pi))
        self.q = pin.randomConfiguration(self.model, -qmax, qmax)
        self.v = rand(nv)
        self.tau = rand(nv)

        # zero velocity / torque: with gravity removed the system must stay at rest
        self.v0 = zero(nv)
        self.tau0 = zero(nv)
        self.tolerance = 1e-9

        # constraint Jacobian of the left foot, computed on a scratch Data object
        self.J = pin.jointJacobian(self.model, self.model.createData(), self.q,
                              self.model.getJointId('lleg6_joint'),
                              pin.ReferenceFrame.LOCAL, True)

        self.gamma = zero(6)

    def test_forwardDynamics7(self):
        # 7-argument overload: no gravity and zero inputs give zero acceleration
        self.model.gravity = pin.Motion.Zero()
        ddq = pin.forwardDynamics(self.model, self.data, self.q,
                                  self.v0, self.tau0, self.J, self.gamma)
        self.assertLess(np.linalg.norm(ddq), self.tolerance)

    def test_forwardDynamics8(self):
        # 8-argument overload adds the regularization coefficient
        self.model.gravity = pin.Motion.Zero()
        ddq = pin.forwardDynamics(self.model, self.data, self.q,
                                  self.v0, self.tau0, self.J, self.gamma, r_coeff)
        self.assertLess(np.linalg.norm(ddq), self.tolerance)

    def test_forwardDynamics9(self):
        # 9-argument overload also passes the update_kinematics flag
        self.model.gravity = pin.Motion.Zero()
        ddq = pin.forwardDynamics(self.model, self.data, self.q,
                                  self.v0, self.tau0, self.J, self.gamma,
                                  r_coeff, update_kinematics)
        self.assertLess(np.linalg.norm(ddq), self.tolerance)

    def test_forwardDynamics789(self):
        # the three overloads must agree because the extra args hold default values
        data7 = self.data
        data8 = self.model.createData()
        data9 = self.model.createData()
        ddq7 = pin.forwardDynamics(self.model, data7, self.q, self.v, self.tau, self.J, self.gamma)
        ddq8 = pin.forwardDynamics(self.model, data8, self.q, self.v, self.tau, self.J, self.gamma, r_coeff)
        ddq9 = pin.forwardDynamics(self.model, data9, self.q, self.v, self.tau, self.J, self.gamma, r_coeff, update_kinematics)
        self.assertTrue((ddq7 == ddq8).all())
        self.assertTrue((ddq7 == ddq9).all())
        self.assertTrue((ddq8 == ddq9).all())

    def test_impulseDynamics5(self):
        # zero pre-impact velocity must stay zero after the impulse
        vnext = pin.impulseDynamics(self.model, self.data, self.q, self.v0, self.J)
        self.assertLess(np.linalg.norm(vnext), self.tolerance)

    def test_impulseDynamics6(self):
        vnext = pin.impulseDynamics(self.model, self.data, self.q, self.v0, self.J, inv_damping)
        self.assertLess(np.linalg.norm(vnext), self.tolerance)

    def test_impulseDynamics7(self):
        vnext = pin.impulseDynamics(self.model, self.data, self.q, self.v0, self.J,
                                    inv_damping, update_kinematics)
        self.assertLess(np.linalg.norm(vnext), self.tolerance)

    def test_impulseDynamics567(self):
        # the three overloads must agree because the extra args hold default values
        data5 = self.data
        data6 = self.model.createData()
        data7 = self.model.createData()
        vnext5 = pin.impulseDynamics(self.model, data5, self.q, self.v, self.J)
        vnext6 = pin.impulseDynamics(self.model, data6, self.q, self.v, self.J, inv_damping)
        vnext7 = pin.impulseDynamics(self.model, data7, self.q, self.v, self.J, inv_damping, update_kinematics)
        self.assertTrue((vnext5 == vnext6).all())
        self.assertTrue((vnext5 == vnext7).all())
        self.assertTrue((vnext6 == vnext7).all())
# Allow running this test file directly with `python <file>`.
if __name__ == '__main__':
    unittest.main()
| [
"pinocchio.utils.rand",
"pinocchio.Motion.Zero",
"pinocchio.buildSampleModelHumanoidRandom",
"pinocchio.impulseDynamics",
"pinocchio.randomConfiguration",
"pinocchio.utils.zero",
"numpy.linalg.norm",
"unittest.main",
"numpy.full",
"pinocchio.forwardDynamics"
] | [((3547, 3562), 'unittest.main', 'unittest.main', ([], {}), '()\n', (3560, 3562), False, 'import unittest\n'), ((394, 430), 'pinocchio.buildSampleModelHumanoidRandom', 'pin.buildSampleModelHumanoidRandom', ([], {}), '()\n', (428, 430), True, 'import pinocchio as pin\n'), ((552, 600), 'pinocchio.randomConfiguration', 'pin.randomConfiguration', (['self.model', '(-qmax)', 'qmax'], {}), '(self.model, -qmax, qmax)\n', (575, 600), True, 'import pinocchio as pin\n'), ((616, 635), 'pinocchio.utils.rand', 'rand', (['self.model.nv'], {}), '(self.model.nv)\n', (620, 635), False, 'from pinocchio.utils import rand, zero\n'), ((655, 674), 'pinocchio.utils.rand', 'rand', (['self.model.nv'], {}), '(self.model.nv)\n', (659, 674), False, 'from pinocchio.utils import rand, zero\n'), ((694, 713), 'pinocchio.utils.zero', 'zero', (['self.model.nv'], {}), '(self.model.nv)\n', (698, 713), False, 'from pinocchio.utils import rand, zero\n'), ((734, 753), 'pinocchio.utils.zero', 'zero', (['self.model.nv'], {}), '(self.model.nv)\n', (738, 753), False, 'from pinocchio.utils import rand, zero\n'), ((999, 1006), 'pinocchio.utils.zero', 'zero', (['(6)'], {}), '(6)\n', (1003, 1006), False, 'from pinocchio.utils import rand, zero\n'), ((1074, 1091), 'pinocchio.Motion.Zero', 'pin.Motion.Zero', ([], {}), '()\n', (1089, 1091), True, 'import pinocchio as pin\n'), ((1106, 1201), 'pinocchio.forwardDynamics', 'pin.forwardDynamics', (['self.model', 'self.data', 'self.q', 'self.v0', 'self.tau0', 'self.J', 'self.gamma'], {}), '(self.model, self.data, self.q, self.v0, self.tau0, self\n .J, self.gamma)\n', (1125, 1201), True, 'import pinocchio as pin\n'), ((1319, 1336), 'pinocchio.Motion.Zero', 'pin.Motion.Zero', ([], {}), '()\n', (1334, 1336), True, 'import pinocchio as pin\n'), ((1351, 1455), 'pinocchio.forwardDynamics', 'pin.forwardDynamics', (['self.model', 'self.data', 'self.q', 'self.v0', 'self.tau0', 'self.J', 'self.gamma', 'r_coeff'], {}), '(self.model, self.data, self.q, self.v0, self.tau0, self\n 
.J, self.gamma, r_coeff)\n', (1370, 1455), True, 'import pinocchio as pin\n'), ((1572, 1589), 'pinocchio.Motion.Zero', 'pin.Motion.Zero', ([], {}), '()\n', (1587, 1589), True, 'import pinocchio as pin\n'), ((1604, 1727), 'pinocchio.forwardDynamics', 'pin.forwardDynamics', (['self.model', 'self.data', 'self.q', 'self.v0', 'self.tau0', 'self.J', 'self.gamma', 'r_coeff', 'update_kinematics'], {}), '(self.model, self.data, self.q, self.v0, self.tau0, self\n .J, self.gamma, r_coeff, update_kinematics)\n', (1623, 1727), True, 'import pinocchio as pin\n'), ((1937, 2025), 'pinocchio.forwardDynamics', 'pin.forwardDynamics', (['self.model', 'data7', 'self.q', 'self.v', 'self.tau', 'self.J', 'self.gamma'], {}), '(self.model, data7, self.q, self.v, self.tau, self.J,\n self.gamma)\n', (1956, 2025), True, 'import pinocchio as pin\n'), ((2031, 2128), 'pinocchio.forwardDynamics', 'pin.forwardDynamics', (['self.model', 'data8', 'self.q', 'self.v', 'self.tau', 'self.J', 'self.gamma', 'r_coeff'], {}), '(self.model, data8, self.q, self.v, self.tau, self.J,\n self.gamma, r_coeff)\n', (2050, 2128), True, 'import pinocchio as pin\n'), ((2133, 2249), 'pinocchio.forwardDynamics', 'pin.forwardDynamics', (['self.model', 'data9', 'self.q', 'self.v', 'self.tau', 'self.J', 'self.gamma', 'r_coeff', 'update_kinematics'], {}), '(self.model, data9, self.q, self.v, self.tau, self.J,\n self.gamma, r_coeff, update_kinematics)\n', (2152, 2249), True, 'import pinocchio as pin\n'), ((2424, 2491), 'pinocchio.impulseDynamics', 'pin.impulseDynamics', (['self.model', 'self.data', 'self.q', 'self.v0', 'self.J'], {}), '(self.model, self.data, self.q, self.v0, self.J)\n', (2443, 2491), True, 'import pinocchio as pin\n'), ((2605, 2690), 'pinocchio.impulseDynamics', 'pin.impulseDynamics', (['self.model', 'self.data', 'self.q', 'self.v0', 'self.J', 'inv_damping'], {}), '(self.model, self.data, self.q, self.v0, self.J, inv_damping\n )\n', (2624, 2690), True, 'import pinocchio as pin\n'), ((2798, 2901), 
'pinocchio.impulseDynamics', 'pin.impulseDynamics', (['self.model', 'self.data', 'self.q', 'self.v0', 'self.J', 'inv_damping', 'update_kinematics'], {}), '(self.model, self.data, self.q, self.v0, self.J,\n inv_damping, update_kinematics)\n', (2817, 2901), True, 'import pinocchio as pin\n'), ((3118, 3180), 'pinocchio.impulseDynamics', 'pin.impulseDynamics', (['self.model', 'data5', 'self.q', 'self.v', 'self.J'], {}), '(self.model, data5, self.q, self.v, self.J)\n', (3137, 3180), True, 'import pinocchio as pin\n'), ((3194, 3269), 'pinocchio.impulseDynamics', 'pin.impulseDynamics', (['self.model', 'data6', 'self.q', 'self.v', 'self.J', 'inv_damping'], {}), '(self.model, data6, self.q, self.v, self.J, inv_damping)\n', (3213, 3269), True, 'import pinocchio as pin\n'), ((3282, 3380), 'pinocchio.impulseDynamics', 'pin.impulseDynamics', (['self.model', 'data7', 'self.q', 'self.v', 'self.J', 'inv_damping', 'update_kinematics'], {}), '(self.model, data7, self.q, self.v, self.J, inv_damping,\n update_kinematics)\n', (3301, 3380), True, 'import pinocchio as pin\n'), ((501, 535), 'numpy.full', 'np.full', (['(self.model.nv, 1)', 'np.pi'], {}), '((self.model.nv, 1), np.pi)\n', (508, 535), True, 'import numpy as np\n'), ((1215, 1234), 'numpy.linalg.norm', 'np.linalg.norm', (['ddq'], {}), '(ddq)\n', (1229, 1234), True, 'import numpy as np\n'), ((1468, 1487), 'numpy.linalg.norm', 'np.linalg.norm', (['ddq'], {}), '(ddq)\n', (1482, 1487), True, 'import numpy as np\n'), ((1739, 1758), 'numpy.linalg.norm', 'np.linalg.norm', (['ddq'], {}), '(ddq)\n', (1753, 1758), True, 'import numpy as np\n'), ((2512, 2533), 'numpy.linalg.norm', 'np.linalg.norm', (['vnext'], {}), '(vnext)\n', (2526, 2533), True, 'import numpy as np\n'), ((2705, 2726), 'numpy.linalg.norm', 'np.linalg.norm', (['vnext'], {}), '(vnext)\n', (2719, 2726), True, 'import numpy as np\n'), ((2916, 2937), 'numpy.linalg.norm', 'np.linalg.norm', (['vnext'], {}), '(vnext)\n', (2930, 2937), True, 'import numpy as np\n')] |
"""
Provide a general object detector interface for SMOT
"""
# pylint: disable=unused-wildcard-import,wildcard-import
import logging
import numpy as np
import mxnet as mx
from gluoncv.data import COCODetection
from .utils import mxnet_frame_preprocessing, timeit_context
from .utils import remap_bboxes as _remap_bboxes
from .presets import *
# Registry mapping an SSD model-name string to its preset constructor
# function (the constructors come from `from .presets import *` above).
ssd_base_models = {'ssd_300_vgg16_atrous_voc': ssd_300_vgg16_atrous_voc,
                   'ssd_300_vgg16_atrous_coco': ssd_300_vgg16_atrous_coco,
                   'ssd_300_vgg16_atrous_custom': ssd_300_vgg16_atrous_custom,
                   'ssd_512_vgg16_atrous_voc': ssd_512_vgg16_atrous_voc,
                   'ssd_512_vgg16_atrous_coco': ssd_512_vgg16_atrous_coco,
                   'ssd_512_vgg16_atrous_custom': ssd_512_vgg16_atrous_custom,
                   'ssd_512_resnet18_v1_voc': ssd_512_resnet18_v1_voc,
                   'ssd_512_resnet18_v1_coco': ssd_512_resnet18_v1_coco,
                   'ssd_512_resnet50_v1_voc': ssd_512_resnet50_v1_voc,
                   'ssd_512_resnet50_v1_coco': ssd_512_resnet50_v1_coco,
                   'ssd_512_resnet50_v1_custom': ssd_512_resnet50_v1_custom,
                   'ssd_512_resnet101_v2_voc': ssd_512_resnet101_v2_voc,
                   'ssd_512_resnet152_v2_voc': ssd_512_resnet152_v2_voc,
                   'ssd_512_mobilenet1.0_voc': ssd_512_mobilenet1_0_voc,
                   'ssd_512_mobilenet1.0_coco': ssd_512_mobilenet1_0_coco,
                   'ssd_300_mobilenet1.0_lite_coco': ssd_300_mobilenet1_0_lite_coco,
                   'ssd_512_mobilenet1.0_custom': ssd_512_mobilenet1_0_custom,
                   'ssd_300_mobilenet0.25_voc': ssd_300_mobilenet0_25_voc,
                   'ssd_300_mobilenet0.25_coco': ssd_300_mobilenet0_25_coco,
                   'ssd_300_mobilenet0.25_custom': ssd_300_mobilenet0_25_custom,
                   'ssd_300_resnet34_v1b_voc': ssd_300_resnet34_v1b_voc,
                   'ssd_300_resnet34_v1b_coco': ssd_300_resnet34_v1b_coco,
                   'ssd_300_resnet34_v1b_custom': ssd_300_resnet34_v1b_custom,}
# pylint: disable=line-too-long,missing-class-docstring,missing-module-docstring,missing-function-docstring,unused-argument
def get_net(classes, model_name="", use_pretrained=False, param_path="",
            ctx=None, **kwargs):
    """Build (and hybridize) an SSD network.

    Either loads GluonCV's off-the-shelf pretrained weights
    (use_pretrained=True) or constructs the architecture and loads custom
    weights from param_path. Extra kwargs are forwarded to the preset
    constructor.
    """
    assert model_name in ssd_base_models, "the model name is not supported, where the supported models are {}".format(ssd_base_models.keys())
    builder = ssd_base_models[model_name]
    if use_pretrained:
        # off-the-shelf GluonCV pretrained SSD weights
        net = builder(pretrained=use_pretrained, pretrained_base=False, ctx=ctx, **kwargs)
    else:
        # finetuned or custom-trained weights supplied by the caller
        net = builder(pretrained_base=False, ctx=ctx, **kwargs)
        assert param_path != '', "Please provide the pretrained model weights if you are not using GluonCV pretrained detectors."
        net.load_parameters(param_path, ctx=ctx)
    net.hybridize()
    return net
def _remap_keypoints(keypoints, padded_w, padded_h, expand, data_shape, ratio):
"""
Remap bboxes in (x0, y0, x1, y1) format into the input image space
Parameters
----------
bboxes
padded_w
padded_h
expand
Returns
-------
"""
keypoints[:, 0::2] *= padded_w / (data_shape * ratio)
keypoints[:, 1::2] *= padded_h / data_shape
keypoints[:, 0::2] -= expand[0]
keypoints[:, 1::2] -= expand[1]
return keypoints
class GeneralDetector:
    """Wrapper around a GluonCV SSD network that runs joint detection and
    tracking-box refinement on single frames for SMOT."""

    def __init__(self, gpu_id,
                 aspect_ratio=1.,
                 data_shape=512,
                 model_name="",
                 use_pretrained=False,
                 param_path=""):
        """
        Parameters
        ----------
        gpu_id: index of the GPU to place the network and tensors on
        aspect_ratio: width/height ratio used by the frame preprocessing
        data_shape: square network input size (pixels)
        model_name: key into ssd_base_models selecting the SSD variant
        use_pretrained: use GluonCV pretrained weights instead of param_path
        param_path: path to custom weights (needed when use_pretrained is False)
        """
        self.ctx = mx.gpu(gpu_id)
        self.net = get_net(classes=COCODetection.CLASSES,
                           ctx=self.ctx,
                           model_name=model_name,
                           use_pretrained=use_pretrained,
                           param_path=param_path)
        # anchor bookkeeping is initialized lazily on the first processed frame
        self.anchor_tensor = None
        self._anchor_image_shape = (1, 1)
        self._anchor_num = 1
        # per-channel normalization constants, kept on the device
        self.mean_mx = mx.nd.array(np.array([0.485, 0.456, 0.406])).as_in_context(self.ctx)
        self.std_mx = mx.nd.array(np.array([0.229, 0.224, 0.225])).as_in_context(self.ctx)
        self.ratio = aspect_ratio
        self.data_shape = data_shape

    def run_detection(self, image, tracking_box_indices, tracking_box_weights, tracking_box_classes):
        """
        Run the network on one frame and return detections plus tracked boxes.

        Parameters
        ----------
        image: RGB image; image.shape[:2] is used as (height, width)
        tracking_box_indices: anchor indices of the currently tracked boxes
        tracking_box_weights: weights forwarded to the network for those boxes
        tracking_box_classes: class ids of the tracked boxes

        Returns
        -------
        (ids, scores, detection_bboxes, tracking_bboxes, detection_anchor_indices)
        with both bbox sets remapped into the original image coordinates.
        """
        # pylint: disable=logging-format-interpolation
        with timeit_context("preprocess"):
            data_tensor, padded_w, padded_h, expand = mxnet_frame_preprocessing(image, self.data_shape, self.ratio,
                                                                               self.mean_mx, self.std_mx, self.ctx)
            logging.info("input tensor shape {}".format(data_tensor.shape))
            mx.nd.waitall()

        with timeit_context("network"):
            # offset each tracked anchor index by its class
            # (presumably anchors are replicated per class -- TODO confirm)
            real_tracking_indices = tracking_box_indices + tracking_box_classes * self._anchor_num

            ids, scores, detection_bboxes, detection_anchor_indices, tracking_results, anchors = self.net(
                data_tensor.as_in_context(self.ctx), real_tracking_indices, tracking_box_weights)

            # reorder tracking output columns; 2..5 appear to be the box
            # coordinates and 1 the score -- confirm against the network head
            tracking_bboxes = tracking_results[:, [2, 3, 4, 5, 1]]

            # map boxes from the padded network space back to image space
            detection_bboxes = _remap_bboxes(detection_bboxes[0, :, :],
                                          padded_w, padded_h, expand,
                                          self.data_shape, self.ratio)

            tracking_bboxes = _remap_bboxes(tracking_bboxes,
                                         padded_w, padded_h, expand,
                                         self.data_shape, self.ratio)
            mx.nd.waitall()

        # set anchors if needed (recomputed only when the input resolution changes)
        if self._anchor_image_shape != (image.shape[:2]):
            self._anchor_image_shape = image.shape[:2]
            # initialize the anchor tensor for assignment
            self.anchor_tensor = anchors[0, :, :]
            half_w = self.anchor_tensor[:, 2] / 2
            half_h = self.anchor_tensor[:, 3] / 2
            center_x = self.anchor_tensor[:, 0].copy()
            center_y = self.anchor_tensor[:, 1].copy()
            # anchors are in the original format of (center_x, center_y, w, h)
            # translate them to (x0, y0, x1, y1)
            self.anchor_tensor[:, 0] = center_x - half_w
            self.anchor_tensor[:, 1] = center_y - half_h
            self.anchor_tensor[:, 2] = center_x + half_w
            self.anchor_tensor[:, 3] = center_y + half_h
            self.anchor_tensor = _remap_bboxes(self.anchor_tensor, padded_w, padded_h, expand,
                                               self.data_shape, self.ratio)
            self._anchor_num = self.anchor_tensor.shape[0]

        return ids[0], scores[0], detection_bboxes, tracking_bboxes, detection_anchor_indices[0].asnumpy()
| [
"numpy.array",
"mxnet.gpu",
"mxnet.nd.waitall"
] | [((3795, 3809), 'mxnet.gpu', 'mx.gpu', (['gpu_id'], {}), '(gpu_id)\n', (3801, 3809), True, 'import mxnet as mx\n'), ((5075, 5090), 'mxnet.nd.waitall', 'mx.nd.waitall', ([], {}), '()\n', (5088, 5090), True, 'import mxnet as mx\n'), ((5942, 5957), 'mxnet.nd.waitall', 'mx.nd.waitall', ([], {}), '()\n', (5955, 5957), True, 'import mxnet as mx\n'), ((4210, 4241), 'numpy.array', 'np.array', (['[0.485, 0.456, 0.406]'], {}), '([0.485, 0.456, 0.406])\n', (4218, 4241), True, 'import numpy as np\n'), ((4301, 4332), 'numpy.array', 'np.array', (['[0.229, 0.224, 0.225]'], {}), '([0.229, 0.224, 0.225])\n', (4309, 4332), True, 'import numpy as np\n')] |
from winsound import PlaySound, SND_FILENAME as FILE_FLAG
import numpy as np
import scipy.io.wavfile as wav
# Input:  inputSignal: input signal samples
#         nDeleteFreq: number of frequency bins to delete per block
#         fftLength:   number of blocks the signal is split into
#                      (np.array_split section count, despite the name)
#
# Output: outputSignal: filtered signal (complex; take np.real before writing)
#
# Useful commands/collections/libs: numpy, fft, scipy.io.wavfile, ...
def m_fft(inputSignal, nDeleteFreq, fftLength):
    """Blockwise FFT filter: zero the nDeleteFreq weakest bins of each block.

    The signal is split into fftLength blocks; in each block's spectrum the
    nDeleteFreq bins with the smallest magnitude are set to zero before the
    inverse transform.
    """
    outputSignal = []
    for block in np.array_split(inputSignal, fftLength):
        spectrum = np.fft.fft(block)
        magnitudes = np.abs(spectrum)
        # Zero the nDeleteFreq smallest-magnitude bins in one pass instead of
        # repeatedly scanning for the minimum (was O(n * nDeleteFreq)).
        # The stable sort keeps the original first-occurrence tie-breaking.
        weakest = np.argsort(magnitudes, kind='stable')[:nDeleteFreq]
        spectrum[weakest] = 0
        outputSignal.append(np.fft.ifft(spectrum))
    return np.concatenate(outputSignal)
def play_sound(filename):
    """Play a .wav file synchronously via the Windows winsound API."""
    PlaySound(filename, FILE_FLAG)
def main():
    """Play the source clip, filter it in the frequency domain, play the result."""
    play_sound("ceremony.wav")
    sample_rate, samples = wav.read("ceremony.wav")
    print(sample_rate)
    print(samples)
    # drop the 20 weakest frequency bins in each of 500 blocks
    filtered = m_fft(samples, 20, 500)
    print(filtered)
    # the inverse FFT is complex; keep the real part as 16-bit PCM
    as_int16 = np.real(filtered).astype(np.int16)
    print(as_int16)
    wav.write("new_ceremony.wav", sample_rate, as_int16)
    play_sound('new_ceremony.wav')
if __name__ == "__main__":
main()
# from wave import open
# from winsound import PlaySound, SND_FILENAME as FILE_FLAG
# from struct import pack, unpack
# from numpy import fft, floor
# import matplotlib.pyplot as plt
#
# BLOCK_SIZE = 512
# params = ()
# frames = []
# filter_frames = []
#
#
# def read_file(filename):
# global params, frames
#
# wave_file = open(filename)
# params = wave_file.getparams()
# # print(params)
#
# for i in range(wave_file.getnframes()):
# frame = wave_file.readframes(1)
# frames.append(unpack('<h', frame)[0])
# # print(wave_file.readframes(0))
# # print(len(frames))
#
# wave_file.close()
#
#
# def fourier():
# global frames
#
# number_blocks = int((len(frames) / BLOCK_SIZE))
#
# for block in range(number_blocks):
# fourier = fft.fft(frames[block * BLOCK_SIZE: (block + 1) * BLOCK_SIZE])
#
# delete_minmum(fourier)
#
# ifourier(fourier)
#
#
# def delete_minmum(fourier):
# # for i in range(BLOCK_SIZE):
# # minimum = min(fourier)
# for k in range(len(fourier)):
# if fourier[k] == min(fourier):
# fourier[k] = 0
# break
#
# return fourier
#
#
# def ifourier(fourier):
# global filter_frames
#
# ifourier = fft.ifft(fourier)
# # print(len(ifourier))
#
# for i in range(len(ifourier)):
# tmp = pack('<i', int(floor(ifourier[i].real)))
# filter_frames.append(tmp)
#
#
# def write_file(filename):
# global params
# wave_file = open(filename, 'w')
# wave_file.setparams(params)
#
# for filter_frame in filter_frames:
# wave_file.writeframesraw(filter_frame)
#
# wave_file.close()
#
#
# def play_sound(filename):
# PlaySound(filename, FILE_FLAG)
#
#
# if __name__ == "__main__":
# # filename = 'ceremony.wav'
# # output = 'output.wav'
# filename = 'itu_male1.wav'
# output = 'output_male1.wav'
#
# print("Spiele Ursprungs .wav File ab")
# play_sound(filename)
#
# read_file(filename)
# print("Datei eingelesen")
#
# print("Fouriertransformation")
# fourier()
#
# print("Schreibe neues .wav File")
# write_file(output)
#
# print("Spiele neues .wav File ab")
# play_sound(output)
#
# # Datei einlesen, in Blöcke einteilen, (sortieren), Minmum suchen, auf 0 setzen, wieder zusammensetzen
| [
"numpy.abs",
"numpy.where",
"numpy.fft.fft",
"numpy.array_split",
"numpy.real",
"scipy.io.wavfile.read",
"scipy.io.wavfile.write",
"numpy.concatenate",
"numpy.min",
"winsound.PlaySound",
"numpy.fft.ifft"
] | [((484, 522), 'numpy.array_split', 'np.array_split', (['inputSignal', 'fftLength'], {}), '(inputSignal, fftLength)\n', (498, 522), True, 'import numpy as np\n'), ((921, 949), 'numpy.concatenate', 'np.concatenate', (['outputSignal'], {}), '(outputSignal)\n', (935, 949), True, 'import numpy as np\n'), ((982, 1012), 'winsound.PlaySound', 'PlaySound', (['filename', 'FILE_FLAG'], {}), '(filename, FILE_FLAG)\n', (991, 1012), False, 'from winsound import PlaySound, SND_FILENAME as FILE_FLAG\n'), ((1083, 1107), 'scipy.io.wavfile.read', 'wav.read', (['"""ceremony.wav"""'], {}), "('ceremony.wav')\n", (1091, 1107), True, 'import scipy.io.wavfile as wav\n'), ((1280, 1325), 'scipy.io.wavfile.write', 'wav.write', (['"""new_ceremony.wav"""', 'rate', 'n_output'], {}), "('new_ceremony.wav', rate, n_output)\n", (1289, 1325), True, 'import scipy.io.wavfile as wav\n'), ((571, 589), 'numpy.fft.fft', 'np.fft.fft', (['signal'], {}), '(signal)\n', (581, 589), True, 'import numpy as np\n'), ((608, 620), 'numpy.abs', 'np.abs', (['freq'], {}), '(freq)\n', (614, 620), True, 'import numpy as np\n'), ((693, 708), 'numpy.min', 'np.min', (['abs_arr'], {}), '(abs_arr)\n', (699, 708), True, 'import numpy as np\n'), ((890, 907), 'numpy.fft.ifft', 'np.fft.ifft', (['freq'], {}), '(freq)\n', (901, 907), True, 'import numpy as np\n'), ((1223, 1238), 'numpy.real', 'np.real', (['output'], {}), '(output)\n', (1230, 1238), True, 'import numpy as np\n'), ((739, 771), 'numpy.where', 'np.where', (['(abs_arr == current_min)'], {}), '(abs_arr == current_min)\n', (747, 771), True, 'import numpy as np\n')] |
from sklearn.datasets import load_boston
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
#from icecream import ic
import random
from functools import reduce
from collections import defaultdict
from nn import Placeholder  # import the Placeholder node type
# sns.heatmap(dataframe.corr())
# x, y ; x with 13 dimensions
# sns.heatmap(dataframe.corr())
# plt.subplots(1, 2, figsize=(20, 20))
# plt.scatter(dataframe['RM'], dataframe['price'])
# plt.scatter(dataframe['LSTAT'], dataframe['price'])
# plt.show()
# Introduce KNN (k-nearest neighbours)
def k_nearest_neighbors(train_rm, train_lstat, train_y, query_rm, query_lstat, topn=3):
    """
    KNN regression model.

    Input: the RM and LSTAT values of a prospective house.
    Return: the predicted house price (mean price of the topn nearest
    training points in (RM, LSTAT) space).
    """
    elements = [(r, ls, y) for r, ls, y in zip(train_rm, train_lstat, train_y)]

    def distance(e):
        # squared Euclidean distance in (RM, LSTAT) feature space
        return (e[0] - query_rm) ** 2 + (e[1] - query_lstat) ** 2

    # BUG FIX: sort ascending so the *closest* points are kept; the original
    # used reverse=True, which selected the farthest neighbours instead.
    neighbors = sorted(elements, key=distance)[:topn]
    return np.mean([y for r, ls, y in neighbors])
# => rm -> price
# Math helper functions used in the computation below
def random_linear(x):
    """Apply a linear map whose weight and bias are drawn at random."""
    params = np.random.normal(scale=10, size=(1, 2))[0]
    return linear(x, params[0], params[1])
def linear(x, w, b):
    """Affine map: w * x + b."""
    scaled = w * x
    return scaled + b
def loss(yhat, y):
    """Mean squared error between prediction yhat and target y."""
    squared_errors = (yhat - y) ** 2
    return np.mean(squared_errors)
def partial_w(y, yhat, x):
    """Gradient of the MSE loss with respect to the weight w."""
    residual = y - yhat
    return -2 * np.mean(residual * x)
def partial_b(y, yhat):
    """Gradient of the MSE loss with respect to the bias b."""
    residual = y - yhat
    return -2 * np.mean(residual)
def sigmoid(x):
    """Logistic function: 1 / (1 + e^-x)."""
    denominator = 1 + np.exp(-x)
    return 1 / denominator
def complexity_function_fitting():
    """Plot a random piecewise (linear -> sigmoid -> linear) function on [-5, 5]."""
    xs = np.linspace(-5, 5)
    # split the domain at a random sample index
    split_at = np.random.randint(0, len(xs))
    left_part = xs[:split_at]
    right_part = xs[split_at:]
    # each half goes through its own random two-layer composition
    left_out = random_linear(sigmoid(random_linear(left_part)))
    right_out = random_linear(sigmoid(random_linear(right_part)))
    plt.plot(xs, np.concatenate((left_out, right_out)))
def topological_sort(graph: dict):
    """
    Topologically sort a computing graph (generator).

    :param graph: {'node': [adjacent1, adjacent2, ... adjacentN], ...}
    :return: yields the nodes of the graph in topological order
    :raises TypeError: if the graph contains a cycle

    Note: the input dict is consumed (nodes are popped as they are emitted).

    BUG FIX: the original placed a bare triple-quoted string between the
    ``if`` suite and its ``else:``, which is a SyntaxError; the explanatory
    text now lives in comments instead.
    """
    while graph:
        # flatten every adjacency list: all nodes that appear as an output
        all_inputs = reduce(lambda a, b: a + b, map(list, graph.values()))
        # keys that are nobody's output have no unmet dependency left
        need_remove = set(graph.keys()) - set(all_inputs)
        if need_remove:
            node = random.choice(list(need_remove))
            exit_node = graph[node][0]
            graph.pop(node)
            # yield (unlike return) suspends the generator so iteration can resume here
            yield node
            if not graph:
                # the final sink never appears as a key, so emit it explicitly
                yield exit_node
        else:
            # every remaining key is some other node's output: there is a cycle
            raise TypeError('the graph contain a cycle, the computing graph need acyclic graph')
def convert_feed_dict_to_graph(feed_dict: dict):
    """Build an adjacency-list computing graph from a feed dict.

    Starting from the fed nodes, follow each node's ``.outputs`` edges,
    assigning Placeholder values from feed_dict along the way, and record
    every edge node -> output in a defaultdict-of-lists.
    """
    computing_graph = defaultdict(list)  # every missing key defaults to an empty list
    """
    from collections import defaultdict
    result = defaultdict(list)
    data = [("p", 1), ("p", 2), ("p", 3),
            ("h", 1), ("h", 2), ("h", 3)]
    for (key, value) in data:
        result[key].append(value)
    print(result)#defaultdict(<class 'list'>, {'p': [1, 2, 3], 'h': [1, 2, 3]})
    """
    nodes = list(feed_dict.keys())
    print(feed_dict.keys())
    print(feed_dict.values())
    while nodes: # keep linking nodes until the frontier queue is exhausted
        # `nodes` initially holds no computing nodes (f1, f2, ...); they are discovered below
        n = nodes.pop(0) # take the next node from the front of the queue
        print(n)
        if n in computing_graph: continue # already expanded -- skip (the author wondered whether eager, ordered initialization of f1..f4 would be an alternative)
        if isinstance(n, Placeholder): # isinstance is the recommended type check
            n.value = feed_dict[n]
        for m in n.outputs:
            computing_graph[n].append(m) # record the edge n -> m in the adjacency list
            # print(n.outputs)
            # print(computing_graph)
            nodes.append(m) # computing nodes join the queue here
        print(nodes)
    return computing_graph # the full graph, computing nodes included
def forward_and_backward(graph):
    """Run a forward pass over the sorted nodes, then a backward pass in reverse."""
    for node in graph:
        node.forward()
    for node in reversed(graph):
        node.backward()
def optimize(nodes, lr):
    """One step of vanilla gradient descent on every trainable node."""
    for node in nodes:
        if not node.trainable:
            continue
        node.value = node.value - node.loss_gradient[node] * lr
# remains
"""
[done] 1. topological sorting
2. using topological sorting implement auto-grade
3. create a neural network framework
4. convert single-dimension version to multiply version
5. distribute neural network framework to internet (pip)
"""
if __name__ == '__main__':
    # NOTE: load_boston was removed from scikit-learn 1.2; this demo needs an
    # older sklearn or a replacement dataset.
    data = load_boston()
    x_data = data['data']
    y = data['target']
    desc = data['DESCR']
    # x, y ; x with 13 dimensions
    # let the computer predict house prices from a few features automatically
    # (correlation analysis guided the RM/LSTAT choice)
    dataframe = pd.DataFrame(x_data)
    dataframe.columns = data['feature_names']
    dataframe['price'] = y

    rm = dataframe['RM']
    lstat = dataframe['LSTAT']
    y = dataframe['price']

    # adjacency list of the demo computing graph (node -> downstream nodes)
    complex_graph = {
        'x': ['f1', 'f2'],
        'b1': ['f1'],
        'w1': ['f1'],
        'f1': ['f3'],
        'f3': ['f4', 'f5'],
        'f2': ['f5'],
        'w2': ['f2'],
        'b2': ['f2'],
        'f5': ['loss'],
        'f4': ['loss'],
        'y': ['loss']
    }
    # BUG FIX: `ic` comes from the icecream import that is commented out at the
    # top of the file, so calling it raised NameError; print the order instead.
    print(list(topological_sort(complex_graph)))
| [
"numpy.random.normal",
"numpy.mean",
"matplotlib.pyplot.plot",
"sklearn.datasets.load_boston",
"numpy.exp",
"numpy.linspace",
"collections.defaultdict",
"pandas.DataFrame"
] | [((1010, 1048), 'numpy.mean', 'np.mean', (['[y for r, ls, y in neighbors]'], {}), '([y for r, ls, y in neighbors])\n', (1017, 1048), True, 'import numpy as np\n'), ((1274, 1298), 'numpy.mean', 'np.mean', (['((yhat - y) ** 2)'], {}), '((yhat - y) ** 2)\n', (1281, 1298), True, 'import numpy as np\n'), ((1543, 1561), 'numpy.linspace', 'np.linspace', (['(-5)', '(5)'], {}), '(-5, 5)\n', (1554, 1561), True, 'import numpy as np\n'), ((1821, 1844), 'matplotlib.pyplot.plot', 'plt.plot', (['sub_x', 'output'], {}), '(sub_x, output)\n', (1829, 1844), True, 'import matplotlib.pyplot as plt\n'), ((3387, 3404), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (3398, 3404), False, 'from collections import defaultdict\n'), ((5186, 5199), 'sklearn.datasets.load_boston', 'load_boston', ([], {}), '()\n', (5197, 5199), False, 'from sklearn.datasets import load_boston\n'), ((5445, 5465), 'pandas.DataFrame', 'pd.DataFrame', (['x_data'], {}), '(x_data)\n', (5457, 5465), True, 'import pandas as pd\n'), ((1119, 1158), 'numpy.random.normal', 'np.random.normal', ([], {'scale': '(10)', 'size': '(1, 2)'}), '(scale=10, size=(1, 2))\n', (1135, 1158), True, 'import numpy as np\n'), ((1348, 1371), 'numpy.mean', 'np.mean', (['((y - yhat) * x)'], {}), '((y - yhat) * x)\n', (1355, 1371), True, 'import numpy as np\n'), ((1418, 1435), 'numpy.mean', 'np.mean', (['(y - yhat)'], {}), '(y - yhat)\n', (1425, 1435), True, 'import numpy as np\n'), ((1478, 1488), 'numpy.exp', 'np.exp', (['(-x)'], {}), '(-x)\n', (1484, 1488), True, 'import numpy as np\n')] |
import numpy as np
import matplotlib.pyplot as plt
def drawMoons(names, xpositions, xlim=[-500,500], labels=True):
    '''
    Plot the 1-D positions of Jupiter's moons relative to the planet.

    Required arguments (two lists of equal length):
        names -- moon names
        xpositions -- moon positions in arcsec

    Keyword arguments:
        xlim -- [left, right] edges of the plotted x range (default [-500, 500])
        labels -- when True (default), print each moon's name above its marker

    For example:

        names = ['Io', 'Europa', 'Ganymede', 'Callisto']
        xpositions = [-20, 40, 80, -160]
        drawMoons(names, xpositions)

    (this should display a plot of the moon positions)
    '''
    # only the x position is physical, so every moon sits at y = 0
    ypositions = np.zeros_like(xpositions)

    # a fresh, wide-and-short canvas
    plt.figure(figsize=(10, 0.5))

    # the moons: small black dots without a connecting line
    plt.plot(xpositions, ypositions,
             color='black',
             linewidth=0,
             marker='.')

    # optionally print each moon's name just above its marker
    if labels:
        for moon_x, moon_y, moon_name in zip(xpositions, ypositions, names):
            plt.text(moon_x, moon_y + 0.5, moon_name, ha='center', va='bottom', size=9)

    # Jupiter itself: a hollow circle at the origin
    plt.plot(0, 0, marker='o', markersize=20, markerfacecolor='none', markeredgecolor='black')

    # fix the view, hide the axes, and show the figure
    plt.xlim(*xlim)
    plt.ylim(-1, 1)
    plt.axis('off')
    plt.show()
| [
"matplotlib.pyplot.text",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.axis",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.ylim",
"matplotlib.pyplot.xlim",
"numpy.zeros_like",
"matplotlib.pyplot.show"
] | [((1177, 1202), 'numpy.zeros_like', 'np.zeros_like', (['xpositions'], {}), '(xpositions)\n', (1190, 1202), True, 'import numpy as np\n'), ((1255, 1284), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 0.5)'}), '(figsize=(10, 0.5))\n', (1265, 1284), True, 'import matplotlib.pyplot as plt\n'), ((1332, 1404), 'matplotlib.pyplot.plot', 'plt.plot', (['xpositions', 'ypositions'], {'marker': '"""."""', 'linewidth': '(0)', 'color': '"""black"""'}), "(xpositions, ypositions, marker='.', linewidth=0, color='black')\n", (1340, 1404), True, 'import matplotlib.pyplot as plt\n'), ((1690, 1784), 'matplotlib.pyplot.plot', 'plt.plot', (['(0)', '(0)'], {'marker': '"""o"""', 'markersize': '(20)', 'markerfacecolor': '"""none"""', 'markeredgecolor': '"""black"""'}), "(0, 0, marker='o', markersize=20, markerfacecolor='none',\n markeredgecolor='black')\n", (1698, 1784), True, 'import matplotlib.pyplot as plt\n'), ((1826, 1841), 'matplotlib.pyplot.xlim', 'plt.xlim', (['*xlim'], {}), '(*xlim)\n', (1834, 1841), True, 'import matplotlib.pyplot as plt\n'), ((1846, 1861), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(-1)', '(1)'], {}), '(-1, 1)\n', (1854, 1861), True, 'import matplotlib.pyplot as plt\n'), ((1927, 1942), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (1935, 1942), True, 'import matplotlib.pyplot as plt\n'), ((1993, 2003), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2001, 2003), True, 'import matplotlib.pyplot as plt\n'), ((1596, 1653), 'matplotlib.pyplot.text', 'plt.text', (['x', '(y + 0.5)', 'n'], {'ha': '"""center"""', 'va': '"""bottom"""', 'size': '(9)'}), "(x, y + 0.5, n, ha='center', va='bottom', size=9)\n", (1604, 1653), True, 'import matplotlib.pyplot as plt\n')] |
import unittest
import numpy as np
import pandas as pd
import os
import sys
sys.path.insert(0, os.path.abspath('../../../'))
from mastml.plots import Scatter, Histogram
class TestPlots(unittest.TestCase):

    def test_scatter(self):
        """A parity plot of 50 random points must be written to disk."""
        y_true = pd.Series(np.random.uniform(low=0.0, high=100, size=(50,)))
        y_pred = pd.Series(np.random.uniform(low=0.0, high=100, size=(50,)))
        Scatter().plot_predicted_vs_true(y_true=y_true,
                                         y_pred=y_pred,
                                         savepath=os.getcwd(),
                                         x_label='TEST_scatter',
                                         data_type='test',)
        self.assertTrue(os.path.exists('parity_plot_test.png'))
        os.remove('parity_plot_test.png')
        return

    def test_histogram(self):
        """A histogram plus its two Excel exports must be written to disk."""
        values = pd.Series(np.random.uniform(low=0.0, high=100, size=(50,)))
        Histogram().plot_histogram(df=values,
                                      savepath=os.getcwd(),
                                      file_name='TEST_hist',
                                      x_label='TEST_hist')
        artifacts = ('TEST_hist.png', 'TEST_hist.xlsx', 'TEST_hist_statistics.xlsx')
        for artifact in artifacts:
            self.assertTrue(os.path.exists(artifact))
        for artifact in artifacts:
            os.remove(artifact)
        return

    def test_residual_histogram(self):
        """A residual histogram plus its Excel exports must be written to disk."""
        y_true = pd.Series(np.random.uniform(low=0.0, high=100, size=(50,)))
        y_pred = pd.Series(np.random.uniform(low=0.0, high=100, size=(50,)))
        Histogram().plot_residuals_histogram(y_true=y_true,
                                              y_pred=y_pred,
                                              savepath=os.getcwd())
        artifacts = ('residual_histogram.png', 'residual_histogram.xlsx',
                     'residual_histogram_statistics.xlsx')
        for artifact in artifacts:
            self.assertTrue(os.path.exists(artifact))
        for artifact in artifacts:
            os.remove(artifact)
        return
return
if __name__ == '__main__':
    # Run the unit tests when executed directly.
    unittest.main()
| [
"os.path.exists",
"unittest.main",
"os.getcwd",
"numpy.random.uniform",
"os.path.abspath",
"mastml.plots.Histogram",
"mastml.plots.Scatter",
"os.remove"
] | [((95, 123), 'os.path.abspath', 'os.path.abspath', (['"""../../../"""'], {}), "('../../../')\n", (110, 123), False, 'import os\n'), ((2191, 2206), 'unittest.main', 'unittest.main', ([], {}), '()\n', (2204, 2206), False, 'import unittest\n'), ((743, 776), 'os.remove', 'os.remove', (['"""parity_plot_test.png"""'], {}), "('parity_plot_test.png')\n", (752, 776), False, 'import os\n'), ((1300, 1326), 'os.remove', 'os.remove', (['"""TEST_hist.png"""'], {}), "('TEST_hist.png')\n", (1309, 1326), False, 'import os\n'), ((1335, 1362), 'os.remove', 'os.remove', (['"""TEST_hist.xlsx"""'], {}), "('TEST_hist.xlsx')\n", (1344, 1362), False, 'import os\n'), ((1371, 1409), 'os.remove', 'os.remove', (['"""TEST_hist_statistics.xlsx"""'], {}), "('TEST_hist_statistics.xlsx')\n", (1380, 1409), False, 'import os\n'), ((2006, 2041), 'os.remove', 'os.remove', (['"""residual_histogram.png"""'], {}), "('residual_histogram.png')\n", (2015, 2041), False, 'import os\n'), ((2050, 2086), 'os.remove', 'os.remove', (['"""residual_histogram.xlsx"""'], {}), "('residual_histogram.xlsx')\n", (2059, 2086), False, 'import os\n'), ((2095, 2142), 'os.remove', 'os.remove', (['"""residual_histogram_statistics.xlsx"""'], {}), "('residual_histogram_statistics.xlsx')\n", (2104, 2142), False, 'import os\n'), ((258, 306), 'numpy.random.uniform', 'np.random.uniform', ([], {'low': '(0.0)', 'high': '(100)', 'size': '(50,)'}), '(low=0.0, high=100, size=(50,))\n', (275, 306), True, 'import numpy as np\n'), ((330, 378), 'numpy.random.uniform', 'np.random.uniform', ([], {'low': '(0.0)', 'high': '(100)', 'size': '(50,)'}), '(low=0.0, high=100, size=(50,))\n', (347, 378), True, 'import numpy as np\n'), ((695, 733), 'os.path.exists', 'os.path.exists', (['"""parity_plot_test.png"""'], {}), "('parity_plot_test.png')\n", (709, 733), False, 'import os\n'), ((845, 893), 'numpy.random.uniform', 'np.random.uniform', ([], {'low': '(0.0)', 'high': '(100)', 'size': '(50,)'}), '(low=0.0, high=100, size=(50,))\n', (862, 893), 
True, 'import numpy as np\n'), ((1132, 1163), 'os.path.exists', 'os.path.exists', (['"""TEST_hist.png"""'], {}), "('TEST_hist.png')\n", (1146, 1163), False, 'import os\n'), ((1189, 1221), 'os.path.exists', 'os.path.exists', (['"""TEST_hist.xlsx"""'], {}), "('TEST_hist.xlsx')\n", (1203, 1221), False, 'import os\n'), ((1247, 1290), 'os.path.exists', 'os.path.exists', (['"""TEST_hist_statistics.xlsx"""'], {}), "('TEST_hist_statistics.xlsx')\n", (1261, 1290), False, 'import os\n'), ((1487, 1535), 'numpy.random.uniform', 'np.random.uniform', ([], {'low': '(0.0)', 'high': '(100)', 'size': '(50,)'}), '(low=0.0, high=100, size=(50,))\n', (1504, 1535), True, 'import numpy as np\n'), ((1559, 1607), 'numpy.random.uniform', 'np.random.uniform', ([], {'low': '(0.0)', 'high': '(100)', 'size': '(50,)'}), '(low=0.0, high=100, size=(50,))\n', (1576, 1607), True, 'import numpy as np\n'), ((1811, 1851), 'os.path.exists', 'os.path.exists', (['"""residual_histogram.png"""'], {}), "('residual_histogram.png')\n", (1825, 1851), False, 'import os\n'), ((1877, 1918), 'os.path.exists', 'os.path.exists', (['"""residual_histogram.xlsx"""'], {}), "('residual_histogram.xlsx')\n", (1891, 1918), False, 'import os\n'), ((1944, 1996), 'os.path.exists', 'os.path.exists', (['"""residual_histogram_statistics.xlsx"""'], {}), "('residual_histogram_statistics.xlsx')\n", (1958, 1996), False, 'import os\n'), ((389, 398), 'mastml.plots.Scatter', 'Scatter', ([], {}), '()\n', (396, 398), False, 'from mastml.plots import Scatter, Histogram\n'), ((533, 544), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (542, 544), False, 'import os\n'), ((904, 915), 'mastml.plots.Histogram', 'Histogram', ([], {}), '()\n', (913, 915), False, 'from mastml.plots import Scatter, Histogram\n'), ((981, 992), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (990, 992), False, 'import os\n'), ((1618, 1629), 'mastml.plots.Histogram', 'Histogram', ([], {}), '()\n', (1627, 1629), False, 'from mastml.plots import Scatter, Histogram\n'), ((1774, 
1785), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (1783, 1785), False, 'import os\n')] |
#!/usr/bin/env python
"""Pre-schedule DDF sequences
"""
# pylint: disable=no-member
# imports
import sys
import logging
from argparse import ArgumentParser
import yaml
import numpy as np
import pandas as pd
import astropy.coordinates
import astropy.units as u
import lsst.sims.utils
# constants
# exception classes
# interface functions
def schedule_all(mag_limit, location, config):
    """Schedule every configured DDF sequence.

    Parameters
    ----------
    mag_limit : `pandas.DataFrame`
        Has a multilevel index with the following levels:

        field_name : `str`
            the field name
        band : `str`
            the band

        Includes at least the following columns:

        mjd : `float`
            MJD of candidate time
        m5 : `float`
            5-sigma limiting magnitude of the field if observed at that time
        scheduled : `bool`
            whether the sample is already taken by a scheduled sequence

        NOTE: this frame is mutated in place — the ``scheduled`` column is
        updated as each sequence is placed, so later sequences avoid the
        times already claimed by earlier ones.
    location : `astropy.coordinates.EarthLocation`
        the location of the observatory
    config : `dict`
        Configuration parameters; ``config["sequences"]`` is a list of
        per-sequence dicts understood by `schedule_sequence`, each with a
        ``label`` naming the sequence.

    Return
    ------
    schedule : `pandas.DataFrame`
        indexed by mjd, with columns:

        mjd : `float`
            the best time at which to start the sequence of exposures
        why : `str`
            an indicator of why this sequence was scheduled
        night_mjd : `int`
            integer MJD of the night on which the sequence is scheduled
        sequence : `str`
            the label of the sequence this row belongs to
    """
    seq_schedules = []
    for seq_config in config["sequences"]:
        logger.info(f'Scheduling {seq_config["label"]}')
        seq_schedule = schedule_sequence(mag_limit, location, seq_config)
        seq_schedule["sequence"] = seq_config["label"]
        logger.info(f'Computing scheduled for {seq_config["label"]}')
        # Mark the time windows just claimed so subsequent sequences
        # cannot reuse them.
        mag_limit["scheduled"] = _compute_scheduled(
            mag_limit, seq_schedule, seq_config["sequence_duration"]
        )
        seq_schedules.append(seq_schedule)
    logger.info("Compiling full schedule")
    full_schedule = (
        pd.concat(seq_schedules).sort_values("mjd").set_index("mjd", drop=False)
    )
    return full_schedule
def schedule_sequence(mag_limit, location, config):
    """Schedule one set of sequences for a single field and band.

    Parameters
    ----------
    mag_limit : `pandas.DataFrame`
        Has a multilevel index with the following levels:

        field_name : `str`
            the field name
        band : `str`
            the band

        Includes the following columns:

        mjd : `float`
            MJD of candidate time
        m5 : `float`
            5-sigma limiting magnitude of the field if observed at that time
    location : `astropy.coordinates.EarthLocation`
        the location of the observatory
    config : `dict`
        Configuration parameters, with the following contents:

        field_name : `str`
            the name of the field to schedule
        mag_lim_band : `str`
            the name of the filter to schedule
        sequence_duration : `astropy.units.Quantity`
            the duration of a block of one sequence of exposures
        canonical_gap : `astropy.units.Quantity`
            the desired time between sequences of exposures
        min_gap : `astropy.units.Quantity`
            the minimum gap for which "bridge" exposures should be scheduled
        max_gap : `astropy.units.Quantity`
            the target maximum time between sequences of exposures
        season_gap : `astropy.units.Quantity`
            the gap time greater than which no bridges should be attempted
        bridge_gap : `bool`
            whether to schedule a "bridge" sequence inside long gaps
        maintain_cadence_in_gap : `bool`
            whether cadence-driven nights must be scheduled even when the
            depth is below ``mag_limit``
        mag_limit : `float`
            target limiting magnitude for ordinary (non-forced) nights
        gap_mag_limit : `float`
            hard limiting-magnitude floor; nights below it are never used

    Return
    ------
    schedule : `pandas.DataFrame`
        includes three columns:

        mjd : `float`
            the best time at which to start the sequence of exposures
        why : `str`
            why the sequence was scheduled
            (pregap / bridge / postgap / start / cadence)
        night_mjd : `int`
            integer MJD of the night on which the sequence is scheduled
    """
    # pylint: disable=too-many-locals
    these_m5 = (
        mag_limit.sort_index()
        .loc[(config["field_name"], config["mag_lim_band"])]
        .sort_index()
        .copy()
    )
    # Worst m5 over a sequence-length trailing window, keyed by start mjd.
    min_m5 = _compute_rolling_m5(these_m5, config["sequence_duration"]).set_index(
        "mjd", drop=False
    )
    min_m5["night_mjd"] = compute_night_mjd(min_m5["mjd"], location)
    bridge_nights = _find_bridge_nights(mag_limit, location, config)
    bridge_gap = config["bridge_gap"]
    maintain_cadence = config["maintain_cadence_in_gap"]
    scheduled_sequences = []
    for night_mjd in range(min_m5.night_mjd.min(), min_m5.night_mjd.max()):
        # Decide whether tonight gets a sequence, and whether the depth
        # requirement may be waived ("forced" nights around gaps).
        if night_mjd in bridge_nights["night_before_mjd"].values:
            why = "pregap"
            attempt_tonight = True
            force_tonight = True
        elif bridge_gap and (night_mjd in bridge_nights["bridge_night_mjd"].values):
            why = "bridge"
            attempt_tonight = True
            force_tonight = True
        elif night_mjd in bridge_nights["night_after_mjd"].values:
            why = "postgap"
            attempt_tonight = True
            force_tonight = True
        elif len(scheduled_sequences) == 0:
            # We are just starting
            why = "start"
            attempt_tonight = True
            force_tonight = False
        elif (night_mjd - scheduled_sequences[-1]["night_mjd"]) * u.day >= config[
            "canonical_gap"
        ]:
            why = "cadence"
            attempt_tonight = True
            force_tonight = maintain_cadence
        else:
            continue
        if not attempt_tonight:
            continue
        candidate_times = min_m5.query(f"night_mjd == {night_mjd}")
        if len(candidate_times) < 1:
            assert maintain_cadence or not force_tonight
            continue
        best_time = min_m5.loc[candidate_times["m5"].idxmax()]
        # Two rolling windows can share an mjd (duplicate index); prefer
        # the one with the most samples.
        if isinstance(best_time, pd.DataFrame):
            best_time = best_time.sort_values("count", ascending=True).iloc[-1]
        if (not force_tonight) and (best_time.m5 < config["mag_limit"]):
            continue
        if best_time.m5 < config["gap_mag_limit"]:
            continue
        scheduled_sequences.append({"mjd": best_time.mjd, "why": why})
        scheduled_sequences[-1]["night_mjd"] = compute_night_mjd(
            best_time.mjd, location
        )
    schedule = pd.DataFrame(scheduled_sequences)
    return schedule
def compute_night_mjd(mjd, location):
    """Map a floating-point MJD to the integer MJD labeling its local night.

    Parameters
    ----------
    mjd : `float`, `pandas.Series`, or `numpy.ndarray`
        time(s) to convert
    location : `astropy.coordinates.EarthLocation`
        observatory location; only the longitude is used

    Returns
    -------
    night_mjd : `int`, `pandas.Series`, or `numpy.ndarray`
        integer MJD of the night containing each input time
    """
    # Shifting by the longitude fraction of a day converts UTC into the
    # local timescale; rounding then lands on the nearest local midnight.
    local_shift = location.lon.deg / 360.0
    return np.round(mjd + local_shift).astype(int)
def read_config(fname):
    """Read the pre-scheduler configuration file.

    Parameters
    ----------
    fname : `str`
        The name of the YAML file to read configuration from.

    Return
    ------
    config : `dict`
        Dictionary of configuration values.  Each sequence's duration and
        gap entries are converted to `astropy.units.Quantity` values, and
        a ``location`` key is added holding the observatory
        `astropy.coordinates.EarthLocation`.
    """
    logger.debug("Reading configuration from %s", fname)
    with open(fname, "r") as config_file:
        config = yaml.load(config_file.read(), Loader=yaml.FullLoader)

    # Apply units: the sequence duration is used in seconds, the various
    # gap parameters in days.
    for seq_config in config["sequences"]:
        seq_config["sequence_duration"] = u.Quantity(
            seq_config["sequence_duration"]
        ).to(u.second)
        for gap_key in ("max_gap", "min_gap", "season_gap", "canonical_gap"):
            seq_config[gap_key] = u.Quantity(seq_config[gap_key]).to(u.day)

    # The original expression here ('"LSST" if site_name == "LSST" else
    # site_name') was a no-op, so the configured name is passed straight
    # through to the lsst site lookup.
    site = lsst.sims.utils.Site(config["site_name"])
    config["location"] = astropy.coordinates.EarthLocation(
        lat=site.latitude, lon=site.longitude, height=site.height
    )
    return config
# classes
# internal functions & classes
def _infer_time_sampling(mag_limit):
    """Infer the sampling cadence of the m5 table as the median gap
    between consecutive unique mjd values, returned in minutes."""
    unique_mjds = pd.Series(mag_limit["mjd"].unique()).sort_values()
    median_step_days = (unique_mjds - unique_mjds.shift(1)).median()
    return (median_step_days * u.day).to(u.minute)
def _compute_rolling_m5(mag_limit, roll_window):
    """Compute, for each candidate window, the worst (minimum) m5 over a
    trailing window of duration ``roll_window``.

    Only unscheduled samples are considered.  Windows that do not hold a
    full complement of samples (inferred from the median time step) are
    discarded.  Returns a frame indexed by window start time with the
    window minima of mjd/moon_angle/night/m5 plus the sample ``count``.
    """
    mag_limit = mag_limit.query("not scheduled").copy().sort_index()
    # MJD -> JD (+2400000.5) so pandas' julian origin yields datetimes.
    mag_limit["datetime"] = pd.to_datetime(
        mag_limit.mjd + 2400000.5, origin="julian", unit="D"
    )
    mag_limit["counter"] = 1
    mag_limit.set_index("datetime", inplace=True, drop=False)
    roll_seconds = roll_window.to("second").value
    mag_limit_roll = mag_limit.rolling(f"{int(roll_seconds)}s")
    min_mag_limit = mag_limit_roll[["mjd", "moon_angle", "night", "m5"]].min()
    # The rolling index labels the window END; recover the start from the
    # minimum mjd within each window.
    min_mag_limit["start_datetime"] = pd.to_datetime(
        min_mag_limit.mjd + 2400000.5, origin="julian", unit="D"
    )
    min_mag_limit["count"] = mag_limit_roll["counter"].sum().astype(int)
    min_mag_limit = (
        min_mag_limit.reset_index()
        .rename(columns={"datetime": "end_datetime"})
        .set_index("start_datetime", drop=False)
    )
    # Windows with no valid m5 are made arbitrarily bad rather than NaN.
    min_mag_limit["m5"] = min_mag_limit["m5"].fillna(-np.inf)
    # Infer which windows do not have a full set of samples, and toss them
    sample_dt = _infer_time_sampling(mag_limit)
    expected_samples = int(np.floor((roll_window.to(sample_dt.unit) / sample_dt).value))
    min_mag_limit.query(
        f"(count == {expected_samples}) or (count == {expected_samples+1})",
        inplace=True,
    )
    # NOTE(review): the result of this expression is discarded, so it has
    # no effect.  It looks like it was meant to deduplicate windows that
    # share a start time (i.e. ``min_mag_limit = ...first()``) — confirm
    # intent before changing; left as-is to preserve behavior.
    min_mag_limit.sort_values("count", ascending=False).groupby(
        level="start_datetime"
    ).first()
    return min_mag_limit
def _find_gaps(mjds, min_gap, season_gap, location, night_epoch_mjd=0):
    """Tabulate the gaps between consecutive times in ``mjds``.

    Only gaps spanning at least ``min_gap`` nights but fewer than
    ``season_gap`` nights are kept (longer ones are treated as
    between-season gaps).  Night numbers are reported relative to
    ``night_epoch_mjd``.  Returns a DataFrame indexed by the gap
    midpoint mjd.
    """
    ordered_mjds = np.unique(np.sort(mjds))
    gap_table = pd.DataFrame({"start": ordered_mjds})
    gap_table["end"] = gap_table.start.shift(-1)
    gap_table.dropna(inplace=True)
    gap_table["duration"] = gap_table["end"] - gap_table["start"]
    gap_table["mjd"] = 0.5 * (gap_table["end"] + gap_table["start"])
    gap_table["night_before"] = (
        compute_night_mjd(gap_table["start"], location) - night_epoch_mjd
    )
    gap_table["night_after"] = (
        compute_night_mjd(gap_table["end"], location) - night_epoch_mjd
    )
    gap_table["gap_nights"] = gap_table["night_after"] - gap_table["night_before"]
    keep = f"({min_gap} <= gap_nights) and ({season_gap} > gap_nights)"
    gap_table.query(keep, inplace=True)
    gap_table.set_index("mjd", inplace=True)
    gap_table.sort_index(inplace=True)
    return gap_table
def _find_bridge_nights(all_mag_limit, location, config):
    """Locate gaps in the good-depth coverage of one field/band and pick
    the best available "bridge" start time inside each gap.

    Returns the gap table from ``_find_gaps`` augmented with
    ``bridge_mjd`` (best start mjd inside the gap, NaN when none was
    found), ``has_bridge``, ``bridge_night_mjd``, and integer
    ``night_before_mjd``/``night_after_mjd`` columns.
    """
    oversampled_mag_limit = (
        all_mag_limit.sort_index()
        .loc[(config["field_name"], config["mag_lim_band"])]
        .sort_index()
        .copy()
    )
    mag_limit = _compute_rolling_m5(oversampled_mag_limit, config["sequence_duration"])
    good_mag_limit = mag_limit.query(f'm5>{config["mag_limit"]}')
    # Offset between the calendar night MJD and the "night" counter
    # carried in the m5 table.
    night_epoch_mjd = (
        compute_night_mjd(mag_limit.iloc[0].mjd, location) - mag_limit.iloc[0].night
    )
    gaps = _find_gaps(
        good_mag_limit.mjd,
        config["min_gap"].to(u.day).value,
        config["season_gap"].to(u.day).value,
        location,
        night_epoch_mjd,
    )
    gaps["bridge_mjd"] = np.nan
    gaps["has_bridge"] = False
    max_gap = config["max_gap"].to(u.day).value
    for mjd, gap in gaps.iterrows():
        # Candidate windows strictly inside the gap, close enough to
        # either edge that neither resulting sub-gap exceeds max_gap.
        candidate_bridges = mag_limit.query(
            f"(night > {gap.night_before}) and (night < {gap.night_after})"
        ).query(f"(mjd < {gap.start+max_gap}) and (mjd > {gap.end-max_gap})")
        if len(candidate_bridges) == 0:
            continue
        best_bridge = candidate_bridges.loc[candidate_bridges["m5"].idxmax()]
        # Sometimes there can be two time windows with the same starting,
        # differing by a sample time.
        if isinstance(best_bridge, pd.DataFrame):
            best_bridge = best_bridge.sort_values("count").iloc[-1]
        # NOTE(review): this sets the WHOLE has_bridge column to True once
        # any gap gets a bridge; ``gaps.loc[mjd, "has_bridge"] = True`` was
        # probably intended — confirm before changing.
        gaps["has_bridge"] = True
        gaps.loc[mjd, "bridge_mjd"] = best_bridge["mjd"]
    # Gaps without a bridge get mjd 0 here, i.e. a sentinel night number.
    gaps["bridge_night_mjd"] = compute_night_mjd(gaps["bridge_mjd"].fillna(0), location)
    gaps["night_before_mjd"] = (gaps["night_before"] + night_epoch_mjd).astype(int)
    gaps["night_after_mjd"] = (gaps["night_after"] + night_epoch_mjd).astype(int)
    return gaps
def _compute_scheduled(m5_limits, schedule, sequence_duration):
    """Return a boolean Series (indexed like ``m5_limits``) marking every
    m5 sample that falls inside one of the scheduled sequence windows;
    previously-True entries are preserved."""
    flags = m5_limits["scheduled"].reset_index()
    flags = flags.set_index("mjd", drop=False).sort_index().copy()
    window_days = sequence_duration.to(u.day).value
    # Flag every sample whose mjd lies within [start, start + duration].
    for _, seq_row in schedule.iterrows():
        window_start = seq_row.mjd
        window_end = window_start + window_days
        flags.loc[window_start:window_end, "scheduled"] = True
    flags.set_index(m5_limits.index.names, inplace=True)
    return flags["scheduled"]
def main():
    """Parse command-line arguments, load the m5 table, run the
    pre-scheduler, and write the schedule as a TSV file.

    Returns 0 on success, for use as the process exit status.
    """
    arg_parser = ArgumentParser()
    arg_parser.add_argument("config", help="configuration file")
    arg_parser.add_argument("m5", help="file from which to load limiting magnitudes")
    arg_parser.add_argument("output", help="file in which to write results")
    args = arg_parser.parse_args()

    config = read_config(args.config)

    logger.info("Reading m5 from %s", args.m5)
    # Keep only astronomically dark samples (sun below -18 degrees) and
    # index by field/band/time; nothing starts out scheduled.
    m5_limits = pd.read_hdf(args.m5).reset_index()
    m5_limits = m5_limits.query("sun_alt < -18")
    m5_limits = m5_limits.set_index(["field_name", "band", "mjd"], drop=False)
    m5_limits = m5_limits.assign(scheduled=False)

    schedule = schedule_all(m5_limits, config["location"], config)
    schedule.to_csv(args.output, sep="\t", index=False, header=True)
    return 0
def _init_logger(log_level=logging.DEBUG):
    """Create and configure the "ddfpresched" logger.

    A stream handler with a tab-separated timestamp/name/level/message
    format is attached; both the logger and the handler use ``log_level``.
    """
    log = logging.getLogger("ddfpresched")
    log.setLevel(log_level)
    stream_handler = logging.StreamHandler()
    stream_handler.setLevel(log_level)
    stream_handler.setFormatter(
        logging.Formatter("%(asctime)s\t%(name)s\t%(levelname)s\t%(message)s")
    )
    log.addHandler(stream_handler)
    return log
if __name__ == "__main__":
logger = _init_logger()
status = main() # pylint: disable=invalid-name
sys.exit(status)
| [
"logging.getLogger",
"astropy.units.Quantity",
"logging.StreamHandler",
"argparse.ArgumentParser",
"numpy.round",
"logging.Formatter",
"numpy.sort",
"pandas.read_hdf",
"sys.exit",
"pandas.DataFrame",
"pandas.concat",
"pandas.to_datetime"
] | [((6370, 6403), 'pandas.DataFrame', 'pd.DataFrame', (['scheduled_sequences'], {}), '(scheduled_sequences)\n', (6382, 6403), True, 'import pandas as pd\n'), ((8518, 8586), 'pandas.to_datetime', 'pd.to_datetime', (['(mag_limit.mjd + 2400000.5)'], {'origin': '"""julian"""', 'unit': '"""D"""'}), "(mag_limit.mjd + 2400000.5, origin='julian', unit='D')\n", (8532, 8586), True, 'import pandas as pd\n'), ((8923, 8995), 'pandas.to_datetime', 'pd.to_datetime', (['(min_mag_limit.mjd + 2400000.5)'], {'origin': '"""julian"""', 'unit': '"""D"""'}), "(min_mag_limit.mjd + 2400000.5, origin='julian', unit='D')\n", (8937, 8995), True, 'import pandas as pd\n'), ((12939, 12955), 'argparse.ArgumentParser', 'ArgumentParser', ([], {}), '()\n', (12953, 12955), False, 'from argparse import ArgumentParser\n'), ((13874, 13906), 'logging.getLogger', 'logging.getLogger', (['"""ddfpresched"""'], {}), "('ddfpresched')\n", (13891, 13906), False, 'import logging\n'), ((13964, 13987), 'logging.StreamHandler', 'logging.StreamHandler', ([], {}), '()\n', (13985, 13987), False, 'import logging\n'), ((14036, 14106), 'logging.Formatter', 'logging.Formatter', (['"""%(asctime)s\t%(name)s\t%(levelname)s\t%(message)s"""'], {}), "('%(asctime)s\\t%(name)s\\t%(levelname)s\\t%(message)s')\n", (14053, 14106), False, 'import logging\n'), ((14329, 14345), 'sys.exit', 'sys.exit', (['status'], {}), '(status)\n', (14337, 14345), False, 'import sys\n'), ((6831, 6871), 'numpy.round', 'np.round', (['(mjd + location.lon.deg / 360.0)'], {}), '(mjd + location.lon.deg / 360.0)\n', (6839, 6871), True, 'import numpy as np\n'), ((7440, 7483), 'astropy.units.Quantity', 'u.Quantity', (["seq_config['sequence_duration']"], {}), "(seq_config['sequence_duration'])\n", (7450, 7483), True, 'import astropy.units as u\n'), ((7551, 7584), 'astropy.units.Quantity', 'u.Quantity', (["seq_config['max_gap']"], {}), "(seq_config['max_gap'])\n", (7561, 7584), True, 'import astropy.units as u\n'), ((7627, 7660), 'astropy.units.Quantity', 
'u.Quantity', (["seq_config['min_gap']"], {}), "(seq_config['min_gap'])\n", (7637, 7660), True, 'import astropy.units as u\n'), ((7706, 7742), 'astropy.units.Quantity', 'u.Quantity', (["seq_config['season_gap']"], {}), "(seq_config['season_gap'])\n", (7716, 7742), True, 'import astropy.units as u\n'), ((7791, 7830), 'astropy.units.Quantity', 'u.Quantity', (["seq_config['canonical_gap']"], {}), "(seq_config['canonical_gap'])\n", (7801, 7830), True, 'import astropy.units as u\n'), ((9910, 9923), 'numpy.sort', 'np.sort', (['mjds'], {}), '(mjds)\n', (9917, 9923), True, 'import numpy as np\n'), ((1999, 2023), 'pandas.concat', 'pd.concat', (['seq_schedules'], {}), '(seq_schedules)\n', (2008, 2023), True, 'import pandas as pd\n'), ((13404, 13425), 'pandas.read_hdf', 'pd.read_hdf', (['m5_fname'], {}), '(m5_fname)\n', (13415, 13425), True, 'import pandas as pd\n')] |
from utils import *
from tqdm import tqdm
import numpy as np
import gzip
import pickle
from qa_bert_based import InputFeatures, Example
from argparse import ArgumentParser
import gc
def iter_data(features, example_dict, query_entity_path):
    """Build simplified entity graphs for every feature case.

    Parameters
    ----------
    features : list of InputFeatures
        Each case gains a ``query_entities`` attribute before the graph
        is built.
    example_dict : dict
        Maps qas_id -> Example.  Only needed by the commented-out answer
        lookup below; kept to preserve the call interface.
    query_entity_path : str
        Path to a JSON file mapping qas_id to (entity, ...) tuples.

    Returns
    -------
    dict mapping qas_id to a reduced graph dict with keys
    'entity_length', 'start_entities', 'entity_mapping', and 'adj'.

    NOTE(review): ``json`` and ``create_entity_graph`` come from the
    ``from utils import *`` star import at the top of the file.
    """
    def foo(features, examples, query_entities):
        entity_cnt = []
        entity_graphs = {}
        for case in tqdm(features):
            # case.__dict__['answer'] = examples[case.qas_id].orig_answer_text
            case.__dict__['query_entities'] = [ent[0] for ent in query_entities[case.qas_id]]
            graph = create_entity_graph(case, 80, 512, 'sent', False, False, relational=False)
            entity_cnt.append(graph['entity_length'])
            # Simplify Graph dicts: keep only the fields the model consumes.
            targets = ['entity_length', 'start_entities', 'entity_mapping', 'adj']
            simp_graph = dict([(t, graph[t]) for t in targets])
            entity_graphs[case.qas_id] = simp_graph
        entity_cnt = np.array(entity_cnt)
        # Report what fraction of cases exceed each entity-count threshold.
        for thr in range(40, 100, 10):
            print(len(np.where(entity_cnt > thr)[0]) / len(entity_cnt), f'> {thr}')
        # del features
        # del examples
        # del query_entities
        # gc.collect()
        return entity_graphs
        # pickle.dump(entity_graphs, gzip.open(args.graph_path, 'wb'))
    # with gzip.open(args.example_path, 'rb') as fin:
    #     examples = pickle.load(fin)
    # example_dict = {e.qas_id: e for e in examples}
    #
    # with gzip.open(args.feature_path, 'rb') as fin:
    #     features = pickle.load(fin)
    #
    with open(query_entity_path, 'r') as fin:
        query_entities = json.load(fin)
    # del examples
    entity_graphs = foo(features, example_dict, query_entities)
    # del features
    # del example_dict
    # del query_entities
    gc.collect()
    return entity_graphs
class JsonEncoder(json.JSONEncoder):
    """JSON encoder that understands numpy scalar and array types.

    numpy integers/floats are converted to the builtin equivalents and
    arrays to (nested) lists; anything else falls through to the base
    class, which raises TypeError for unsupported objects.
    """

    def default(self, obj):
        if isinstance(obj, np.integer):
            return int(obj)
        if isinstance(obj, np.floating):
            return float(obj)
        if isinstance(obj, np.ndarray):
            return obj.tolist()
        # BUG FIX: the original fell off the end and implicitly returned
        # None, silently serializing every unsupported object as null.
        return super(JsonEncoder, self).default(obj)
if __name__ == '__main__':
    parser = ArgumentParser()
    parser.add_argument('--example_path', default=r"E:\DATA\HotpotQA\output\example.pkl.gz", type=str)
    parser.add_argument('--feature_path', default=r"E:\DATA\HotpotQA\output\feature.pkl.gz", type=str)
    parser.add_argument('--query_entity_path', default=r"E:\DATA\HotpotQA\entities\train_query_entities.json", type=str)
    parser.add_argument('--graph_path', default=r"E:\DATA\HotpotQA\entities\train_graph.json", type=str)
    args = parser.parse_args()
    # BUG FIX: the original called ``iter_data()`` with no arguments even
    # though all three parameters are required, which raised a TypeError.
    # Load the pickled examples/features (as in the commented-out code
    # inside iter_data) and pass them in.
    with gzip.open(args.example_path, 'rb') as fin:
        examples = pickle.load(fin)
    example_dict = {e.qas_id: e for e in examples}
    with gzip.open(args.feature_path, 'rb') as fin:
        features = pickle.load(fin)
    entity_graphs = iter_data(features, example_dict, args.query_entity_path)
| [
"argparse.ArgumentParser",
"numpy.where",
"tqdm.tqdm",
"numpy.array",
"gc.collect"
] | [((1796, 1808), 'gc.collect', 'gc.collect', ([], {}), '()\n', (1806, 1808), False, 'import gc\n'), ((2564, 2580), 'argparse.ArgumentParser', 'ArgumentParser', ([], {}), '()\n', (2578, 2580), False, 'from argparse import ArgumentParser\n'), ((362, 376), 'tqdm.tqdm', 'tqdm', (['features'], {}), '(features)\n', (366, 376), False, 'from tqdm import tqdm\n'), ((957, 977), 'numpy.array', 'np.array', (['entity_cnt'], {}), '(entity_cnt)\n', (965, 977), True, 'import numpy as np\n'), ((1039, 1065), 'numpy.where', 'np.where', (['(entity_cnt > thr)'], {}), '(entity_cnt > thr)\n', (1047, 1065), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Oct 14 21:10:30 2018
@author: Kazuki
"""
import numpy as np
import pandas as pd
import os, gc
import utils
PREF = 'f003'  # feature-name prefix for this script's outputs

# Remove any stale data/feature files left over from a previous run.
os.system(f'rm ../data/t*_{PREF}*')
os.system(f'rm ../feature/t*_{PREF}*')
def quantile(n):
    """Build a named aggregation callable computing the n-th percentile.

    The returned function is named ``q<n>`` so that pandas ``agg`` labels
    the resulting column accordingly.
    """
    def _percentile(x):
        return np.percentile(x, n)
    _percentile.__name__ = 'q%s' % n
    return _percentile
# Aggregations applied (per object) to the first differences of each
# light-curve column; ``quantile`` supplies named percentile reducers.
num_aggregations = {
    'mjd_diff': ['min', 'max', 'size'],
    'passband_diff': ['min', 'max', 'mean', 'median', 'std', quantile(25), quantile(75)],
    'flux_diff': ['min', 'max', 'mean', 'median', 'std', quantile(25), quantile(75)],
    'flux_err_diff': ['min', 'max', 'mean', 'median', 'std', quantile(25), quantile(75)],
    'detected_diff': ['min', 'max', 'mean', 'median', 'std', quantile(25), quantile(75)],
    }
def aggregate(df, output_path):
    """Aggregate per-object first differences and write them to feather.

    ``df`` holds light-curve rows with an ``object_id`` column; rows of
    the same object are assumed to be contiguous (the shift-based masking
    below relies on it) — TODO confirm upstream sorting.  For every
    object the row-to-row differences are aggregated with
    ``num_aggregations``, std/mean and max/min ratio features are added,
    and the result is saved to ``output_path`` with the ``PREF`` prefix.
    """
    df_diff = df.diff().add_suffix('_diff')
    # Null out the diffs that straddle a boundary between two objects.
    df_diff.loc[df['object_id'] != df['object_id'].shift()] = np.nan
    df_diff.drop('object_id_diff', axis=1, inplace=True)
    df_diff['object_id'] = df['object_id']
    del df; gc.collect()
    df_agg = df_diff.groupby('object_id').agg(num_aggregations)
    # Flatten the (column, stat) MultiIndex into "<column>_<stat>" names.
    df_agg.columns = pd.Index([e[0] + "_" + e[1] for e in df_agg.columns.tolist()])
    # std / mean
    col_std = [c for c in df_agg.columns if c.endswith('_std')]
    for c in col_std:
        df_agg[f'{c}-d-mean'] = df_agg[c]/df_agg[c.replace('_std', '_mean')]
    # max / min
    col_max = [c for c in df_agg.columns if c.endswith('_max')]
    for c in col_max:
        df_agg[f'{c}-d-min'] = df_agg[c]/df_agg[c.replace('_max', '_min')]
    df_agg.reset_index(drop=True, inplace=True)
    df_agg.add_prefix(PREF+'_').to_feather(output_path)
    return
# =============================================================================
# main
# =============================================================================
if __name__ == "__main__":
utils.start(__file__)
aggregate(pd.read_feather('../data/train_log.f'), f'../data/train_{PREF}.f')
aggregate(pd.read_feather('../data/test_log.f'), f'../data/test_{PREF}.f')
utils.end(__file__)
| [
"pandas.read_feather",
"utils.start",
"utils.end",
"numpy.percentile",
"gc.collect",
"os.system"
] | [((191, 226), 'os.system', 'os.system', (['f"""rm ../data/t*_{PREF}*"""'], {}), "(f'rm ../data/t*_{PREF}*')\n", (200, 226), False, 'import os, gc\n'), ((227, 265), 'os.system', 'os.system', (['f"""rm ../feature/t*_{PREF}*"""'], {}), "(f'rm ../feature/t*_{PREF}*')\n", (236, 265), False, 'import os, gc\n'), ((1108, 1120), 'gc.collect', 'gc.collect', ([], {}), '()\n', (1118, 1120), False, 'import os, gc\n'), ((1965, 1986), 'utils.start', 'utils.start', (['__file__'], {}), '(__file__)\n', (1976, 1986), False, 'import utils\n'), ((2162, 2181), 'utils.end', 'utils.end', (['__file__'], {}), '(__file__)\n', (2171, 2181), False, 'import utils\n'), ((321, 340), 'numpy.percentile', 'np.percentile', (['x', 'n'], {}), '(x, n)\n', (334, 340), True, 'import numpy as np\n'), ((2006, 2044), 'pandas.read_feather', 'pd.read_feather', (['"""../data/train_log.f"""'], {}), "('../data/train_log.f')\n", (2021, 2044), True, 'import pandas as pd\n'), ((2087, 2124), 'pandas.read_feather', 'pd.read_feather', (['"""../data/test_log.f"""'], {}), "('../data/test_log.f')\n", (2102, 2124), True, 'import pandas as pd\n')] |
'''
Example of MBEANN in Python solving XOR.
'''
import multiprocessing
import os
import pickle
import random
import time
import numpy as np
from examples.xor.settings import SettingsEA, SettingsMBEANN
from mbeann.base import Individual, ToolboxMBEANN
from mbeann.visualize import visualizeIndividual
def evaluateIndividual(ind):
    """Evaluate one MBEANN individual on the XOR task.

    The fitness is the summed absolute error of the network output over
    the four XOR input patterns (so lower is better).
    """
    # XOR truth table.  The original optionally appended a third 0.5
    # column as a bias input:
    # np.array([[0.0, 0.0, 0.5], [0.0, 1.0, 0.5], [1.0, 0.0, 0.5], [1.0, 1.0, 0.5]])
    # This variant runs without bias inputs.
    xor_inputs = np.array([[0.0, 0.0], [0.0, 1.0], [1.0, 0.0], [1.0, 1.0]])
    xor_targets = np.array([[0.0], [1.0], [1.0], [0.0]])
    network_outputs = [ind.calculateNetwork(pattern) for pattern in xor_inputs]
    fitness = 0.0
    for target, actual in zip(xor_targets, network_outputs):
        fitness += np.abs(target - actual)
    return fitness
if __name__ == '__main__':
    # Number of worker processes to run evolution.
    numProcesses = multiprocessing.cpu_count()
    # Evolutionary algorithm settings.
    popSize = SettingsEA.popSize
    maxGeneration = SettingsEA.maxGeneration
    isMaximizingFit = SettingsEA.isMaximizingFit
    eliteSize = SettingsEA.eliteSize
    tournamentSize = SettingsEA.tournamentSize
    tournamentBestN = SettingsEA.tournamentBestN
    randomSeed = 0  # int(time.time())
    random.seed(randomSeed)
    st = random.getstate()
    data_dir = os.path.join(os.path.dirname(__file__), 'results_xor_{}'.format(randomSeed))
    os.makedirs(data_dir, exist_ok=True)
    with open('{}/random_state.pkl'.format(data_dir), mode='wb') as out_pkl:
        # Saving the random state just in case.
        pickle.dump(st, out_pkl)
    if numProcesses > 1:
        pool = multiprocessing.Pool(processes=numProcesses)
    # Initial population, built entirely from the MBEANN settings module.
    pop = [Individual(SettingsMBEANN.inSize, SettingsMBEANN.outSize, SettingsMBEANN.hidSize,
                      SettingsMBEANN.initialConnection,
                      SettingsMBEANN.maxWeight, SettingsMBEANN.minWeight, SettingsMBEANN.initialWeightType,
                      SettingsMBEANN.initialWeighMean, SettingsMBEANN.initialWeightScale,
                      SettingsMBEANN.maxBias, SettingsMBEANN.minBias, SettingsMBEANN.initialBiasType,
                      SettingsMBEANN.initialBiasMean, SettingsMBEANN.initialBiasScale,
                      SettingsMBEANN.isReccurent, SettingsMBEANN.activationFunc,
                      SettingsMBEANN.actFunc_Alpha, SettingsMBEANN.actFunc_Beta) for i in range(popSize)]
    # Mutation/selection operators configured once for the whole run.
    tools = ToolboxMBEANN(SettingsMBEANN.p_addNode, SettingsMBEANN.p_addLink,
                          SettingsMBEANN.p_weight, SettingsMBEANN.p_bias,
                          SettingsMBEANN.weightMutationType, SettingsMBEANN.weightMutationScale,
                          SettingsMBEANN.biasMutationType, SettingsMBEANN.biasMutationScale,
                          SettingsMBEANN.addNodeWeightValue)
    # Write a header record; subsequent generations append to the same pickle.
    log_stats = ['Gen', 'Mean', 'Std', 'Max', 'Min']
    with open('{}/log_stats.pkl'.format(data_dir), mode='wb') as out_pkl:
        pickle.dump(log_stats, out_pkl)
    for gen in range(maxGeneration):
        print("------")
        print("Gen {}".format(gen))
        # Evaluate the population, in parallel when a pool is available.
        if numProcesses > 1:
            fitnessValues = pool.map(evaluateIndividual, pop)
        else:
            fitnessValues = []
            for ind in pop:
                fitnessValues += [evaluateIndividual(ind)]
        for ind, fit in zip(pop, fitnessValues):
            ind.fitness = fit[0]
        log_stats = [gen, np.mean(fitnessValues), np.std(fitnessValues),
                     np.max(fitnessValues), np.min(fitnessValues)]
        with open('{}/log_stats.pkl'.format(data_dir), mode='ab') as out_pkl:
            pickle.dump(log_stats, out_pkl)
        print("Mean: " + str(np.mean(fitnessValues)) +
              "\tStd: " + str(np.std(fitnessValues)) +
              "\tMax: " + str(np.max(fitnessValues)) +
              "\tMin: " + str(np.min(fitnessValues)))
        # Save the best individual.
        with open('{}/data_ind_gen{:0>4}.pkl'.format(data_dir, gen),
                  mode='wb') as out_pkl:
            pop.sort(key=lambda ind: ind.fitness, reverse=isMaximizingFit)
            pickle.dump(pop[0], out_pkl)
        visualizeIndividual(
            pop[0], '{}/mbeann_ind_gen{:0>4}.pdf'.format(data_dir, gen))
        # Selection, optional elitism, then mutation of the new population.
        tools.selectionSettings(pop, popSize, isMaximizingFit, eliteSize)
        if eliteSize > 0:
            elite = tools.preserveElite()
        # pop = tools.selectionRandom()
        pop = tools.selectionTournament(tournamentSize, tournamentBestN)
        for i, ind in enumerate(pop):
            tools.mutateWeightValue(ind)
            tools.mutateBiasValue(ind)
            tools.mutateAddNode(ind)
            tools.mutateAddLink(ind)
        if eliteSize > 0:
            pop = elite + pop
| [
"numpy.abs",
"numpy.mean",
"pickle.dump",
"os.makedirs",
"mbeann.base.Individual",
"numpy.min",
"multiprocessing.cpu_count",
"random.getstate",
"random.seed",
"numpy.array",
"os.path.dirname",
"mbeann.base.ToolboxMBEANN",
"numpy.max",
"multiprocessing.Pool",
"numpy.std"
] | [((552, 610), 'numpy.array', 'np.array', (['[[0.0, 0.0], [0.0, 1.0], [1.0, 0.0], [1.0, 1.0]]'], {}), '([[0.0, 0.0], [0.0, 1.0], [1.0, 0.0], [1.0, 1.0]])\n', (560, 610), True, 'import numpy as np\n'), ((628, 666), 'numpy.array', 'np.array', (['[[0.0], [1.0], [1.0], [0.0]]'], {}), '([[0.0], [1.0], [1.0], [0.0]])\n', (636, 666), True, 'import numpy as np\n'), ((1010, 1037), 'multiprocessing.cpu_count', 'multiprocessing.cpu_count', ([], {}), '()\n', (1035, 1037), False, 'import multiprocessing\n'), ((1382, 1405), 'random.seed', 'random.seed', (['randomSeed'], {}), '(randomSeed)\n', (1393, 1405), False, 'import random\n'), ((1415, 1432), 'random.getstate', 'random.getstate', ([], {}), '()\n', (1430, 1432), False, 'import random\n'), ((1530, 1566), 'os.makedirs', 'os.makedirs', (['data_dir'], {'exist_ok': '(True)'}), '(data_dir, exist_ok=True)\n', (1541, 1566), False, 'import os\n'), ((2548, 2853), 'mbeann.base.ToolboxMBEANN', 'ToolboxMBEANN', (['SettingsMBEANN.p_addNode', 'SettingsMBEANN.p_addLink', 'SettingsMBEANN.p_weight', 'SettingsMBEANN.p_bias', 'SettingsMBEANN.weightMutationType', 'SettingsMBEANN.weightMutationScale', 'SettingsMBEANN.biasMutationType', 'SettingsMBEANN.biasMutationScale', 'SettingsMBEANN.addNodeWeightValue'], {}), '(SettingsMBEANN.p_addNode, SettingsMBEANN.p_addLink,\n SettingsMBEANN.p_weight, SettingsMBEANN.p_bias, SettingsMBEANN.\n weightMutationType, SettingsMBEANN.weightMutationScale, SettingsMBEANN.\n biasMutationType, SettingsMBEANN.biasMutationScale, SettingsMBEANN.\n addNodeWeightValue)\n', (2561, 2853), False, 'from mbeann.base import Individual, ToolboxMBEANN\n'), ((877, 890), 'numpy.abs', 'np.abs', (['(a - b)'], {}), '(a - b)\n', (883, 890), True, 'import numpy as np\n'), ((1462, 1487), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (1477, 1487), False, 'import os\n'), ((1701, 1725), 'pickle.dump', 'pickle.dump', (['st', 'out_pkl'], {}), '(st, out_pkl)\n', (1712, 1725), False, 'import pickle\n'), ((1767, 
1811), 'multiprocessing.Pool', 'multiprocessing.Pool', ([], {'processes': 'numProcesses'}), '(processes=numProcesses)\n', (1787, 1811), False, 'import multiprocessing\n'), ((1824, 2388), 'mbeann.base.Individual', 'Individual', (['SettingsMBEANN.inSize', 'SettingsMBEANN.outSize', 'SettingsMBEANN.hidSize', 'SettingsMBEANN.initialConnection', 'SettingsMBEANN.maxWeight', 'SettingsMBEANN.minWeight', 'SettingsMBEANN.initialWeightType', 'SettingsMBEANN.initialWeighMean', 'SettingsMBEANN.initialWeightScale', 'SettingsMBEANN.maxBias', 'SettingsMBEANN.minBias', 'SettingsMBEANN.initialBiasType', 'SettingsMBEANN.initialBiasMean', 'SettingsMBEANN.initialBiasScale', 'SettingsMBEANN.isReccurent', 'SettingsMBEANN.activationFunc', 'SettingsMBEANN.actFunc_Alpha', 'SettingsMBEANN.actFunc_Beta'], {}), '(SettingsMBEANN.inSize, SettingsMBEANN.outSize, SettingsMBEANN.\n hidSize, SettingsMBEANN.initialConnection, SettingsMBEANN.maxWeight,\n SettingsMBEANN.minWeight, SettingsMBEANN.initialWeightType,\n SettingsMBEANN.initialWeighMean, SettingsMBEANN.initialWeightScale,\n SettingsMBEANN.maxBias, SettingsMBEANN.minBias, SettingsMBEANN.\n initialBiasType, SettingsMBEANN.initialBiasMean, SettingsMBEANN.\n initialBiasScale, SettingsMBEANN.isReccurent, SettingsMBEANN.\n activationFunc, SettingsMBEANN.actFunc_Alpha, SettingsMBEANN.actFunc_Beta)\n', (1834, 2388), False, 'from mbeann.base import Individual, ToolboxMBEANN\n'), ((3075, 3106), 'pickle.dump', 'pickle.dump', (['log_stats', 'out_pkl'], {}), '(log_stats, out_pkl)\n', (3086, 3106), False, 'import pickle\n'), ((3539, 3561), 'numpy.mean', 'np.mean', (['fitnessValues'], {}), '(fitnessValues)\n', (3546, 3561), True, 'import numpy as np\n'), ((3563, 3584), 'numpy.std', 'np.std', (['fitnessValues'], {}), '(fitnessValues)\n', (3569, 3584), True, 'import numpy as np\n'), ((3607, 3628), 'numpy.max', 'np.max', (['fitnessValues'], {}), '(fitnessValues)\n', (3613, 3628), True, 'import numpy as np\n'), ((3630, 3651), 'numpy.min', 'np.min', 
(['fitnessValues'], {}), '(fitnessValues)\n', (3636, 3651), True, 'import numpy as np\n'), ((3744, 3775), 'pickle.dump', 'pickle.dump', (['log_stats', 'out_pkl'], {}), '(log_stats, out_pkl)\n', (3755, 3775), False, 'import pickle\n'), ((4230, 4258), 'pickle.dump', 'pickle.dump', (['pop[0]', 'out_pkl'], {}), '(pop[0], out_pkl)\n', (4241, 4258), False, 'import pickle\n'), ((3972, 3993), 'numpy.min', 'np.min', (['fitnessValues'], {}), '(fitnessValues)\n', (3978, 3993), True, 'import numpy as np\n'), ((3917, 3938), 'numpy.max', 'np.max', (['fitnessValues'], {}), '(fitnessValues)\n', (3923, 3938), True, 'import numpy as np\n'), ((3862, 3883), 'numpy.std', 'np.std', (['fitnessValues'], {}), '(fitnessValues)\n', (3868, 3883), True, 'import numpy as np\n'), ((3806, 3828), 'numpy.mean', 'np.mean', (['fitnessValues'], {}), '(fitnessValues)\n', (3813, 3828), True, 'import numpy as np\n')] |
import numpy as np
from flask import Flask, request, jsonify, render_template
import joblib
import pandas as pd
import datetime as dt
# Flask application instance for the employee-attrition web UI.
app = Flask(__name__)
# Pre-trained attrition model, deserialized once at import time.
# NOTE(review): assumes "Employee_attrition.joblib" exists in the working
# directory -- verify deployment layout.
model=joblib.load(open("Employee_attrition.joblib", 'rb'))
@app.route('/')
def home():
    """Render the landing page containing the prediction input form."""
    return render_template('index.html')
def age(DOB):
    """Return the whole-year age today for the given date of birth.

    One year is subtracted when the birthday has not yet occurred in the
    current calendar year (tuple comparison on (month, day)).
    """
    born = pd.to_datetime(DOB)
    now = dt.date.today()
    years = now.year - born.year
    if (now.month, now.day) < (born.month, born.day):
        years -= 1
    return years
def vintage(joing_date):
    """Return the tenure in whole months between the joining date and now."""
    joined = pd.to_datetime(joing_date)
    elapsed = dt.datetime.now() - joined
    months = elapsed / np.timedelta64(1, "M")
    return int(months)
@app.route('/predict',methods=['POST'])
def predict():
    """Handle the prediction form POST and render the attrition probability.

    Builds a one-row feature frame from the submitted form, derives
    Age, joining week-of-year and Employee_Vintage, drops the raw
    name/date columns, and scores with the pre-loaded model.
    """
    int_features = request.form.to_dict()
    df=pd.DataFrame(int_features,index=[0])
    employee=df['Employee_Name'][0]
    df['Age']=df['Employee_DOB'].apply(age)
    df['week']=pd.to_datetime(df["Employee_Joining_Date"]).dt.week
    df['Employee_Vintage']=df['Employee_Joining_Date'].apply(vintage)
    # BUG FIX: DataFrame.drop returns a NEW frame; the original discarded the
    # result, so the raw text/date columns were still passed to the model.
    df = df.drop(['Employee_Name','Employee_DOB','Employee_Joining_Date'],axis=1)
    # probability of the positive class (attrition), rounded for display
    output=np.round(model.predict_proba(df)[0][1],2)
    return render_template('index.html', prediction_text=f'{employee} will leave the Organization in next 6 month probability is {output}')
if __name__ == "__main__":
    # Run the Flask development server (debug mode; not for production use).
    app.run(debug=True)
"flask.render_template",
"flask.Flask",
"datetime.datetime.now",
"flask.request.form.to_dict",
"numpy.timedelta64",
"pandas.DataFrame",
"datetime.date.today",
"pandas.to_datetime"
] | [((141, 156), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (146, 156), False, 'from flask import Flask, request, jsonify, render_template\n'), ((256, 285), 'flask.render_template', 'render_template', (['"""index.html"""'], {}), "('index.html')\n", (271, 285), False, 'from flask import Flask, request, jsonify, render_template\n'), ((311, 330), 'pandas.to_datetime', 'pd.to_datetime', (['DOB'], {}), '(DOB)\n', (325, 330), True, 'import pandas as pd\n'), ((343, 358), 'datetime.date.today', 'dt.date.today', ([], {}), '()\n', (356, 358), True, 'import datetime as dt\n'), ((485, 511), 'pandas.to_datetime', 'pd.to_datetime', (['joing_date'], {}), '(joing_date)\n', (499, 511), True, 'import pandas as pd\n'), ((524, 541), 'datetime.datetime.now', 'dt.datetime.now', ([], {}), '()\n', (539, 541), True, 'import datetime as dt\n'), ((723, 738), 'datetime.date.today', 'dt.date.today', ([], {}), '()\n', (736, 738), True, 'import datetime as dt\n'), ((758, 780), 'flask.request.form.to_dict', 'request.form.to_dict', ([], {}), '()\n', (778, 780), False, 'from flask import Flask, request, jsonify, render_template\n'), ((788, 825), 'pandas.DataFrame', 'pd.DataFrame', (['int_features'], {'index': '[0]'}), '(int_features, index=[0])\n', (800, 825), True, 'import pandas as pd\n'), ((1183, 1321), 'flask.render_template', 'render_template', (['"""index.html"""'], {'prediction_text': 'f"""{employee} will leave the Organization in next 6 month probability is {output}"""'}), "('index.html', prediction_text=\n f'{employee} will leave the Organization in next 6 month probability is {output}'\n )\n", (1198, 1321), False, 'from flask import Flask, request, jsonify, render_template\n'), ((577, 599), 'numpy.timedelta64', 'np.timedelta64', (['(1)', '"""M"""'], {}), "(1, 'M')\n", (591, 599), True, 'import numpy as np\n'), ((920, 963), 'pandas.to_datetime', 'pd.to_datetime', (["df['Employee_Joining_Date']"], {}), "(df['Employee_Joining_Date'])\n", (934, 963), True, 'import pandas as 
pd\n')] |
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
MAST Portal
===========
Module to query the <NAME> Archive for Space Telescopes (MAST).
"""
from __future__ import print_function, division
import warnings
import json
import time
import os
import re
#import keyring
import io
import numpy as np
from requests import HTTPError
from getpass import getpass
from base64 import b64encode
import astropy.units as u
import astropy.coordinates as coord
from astropy.table import Table, Row, vstack, MaskedColumn
from astropy.extern.six.moves.urllib.parse import quote as urlencode
from astropy.extern.six.moves.http_cookiejar import Cookie
from astropy.utils.exceptions import AstropyWarning
from astropy.logger import log
from ..query import BaseQuery
from ..utils import commons, async_to_sync
from ..utils.class_or_instance import class_or_instance
from ..exceptions import (TimeoutError, InvalidQueryError, RemoteServiceError,
LoginError, ResolverError, MaxResultsWarning,
NoResultsWarning, InputWarning, AuthenticationWarning)
from . import conf
__all__ = ['Registry', 'RegistryClass']
#
# Functions to help replace bytes with strings in astropy tables that came from VOTABLEs
#
def sval(val):
    """
    Return *val* as a string, decoding bytes as UTF-8.

    Parameters
    ----------
    val : object
        The object to convert.

    Returns
    -------
    string
        The input value converted (if needed) to a string.
    """
    return val.decode('utf-8') if isinstance(val, bytes) else str(val)


# Element-wise version of sval() for applying to a whole column at once.
svalv = np.vectorize(sval)
def sval_whole_column(single_column):
    """
    Return a stringified copy of *single_column*.

    Every value is converted via sval(); the metadata attached to the
    input column is carried over onto the result.

    Parameters
    ----------
    single_column : astropy.table.Column
        The input column to stringify.

    Returns
    -------
    astropy.table.Column
        Stringified version of input column.
    """
    converted = svalv(single_column)
    converted.meta = single_column.meta
    return converted
def stringify_table(t):
    """
    Substitutes strings for bytes values in the given table (in place).

    Parameters
    ----------
    t : astropy.table.Table
        An astropy table assumed to have been created from a VOTABLE.

    Returns
    -------
    astropy.table.Table
        The same table as input, but with bytes-valued cells replaced by strings.
    """
    # BUG FIX: the original wrote "len(t) is 0" -- identity comparison with an
    # int literal is implementation-dependent (and a SyntaxWarning on 3.8+);
    # use value equality instead.
    if len(t) == 0:
        return  # Nothing to convert

    # Collect the names of object-dtype columns whose cells are bytes.
    scols = []
    for col in t.columns:
        colobj = t.columns[col]
        if (colobj.dtype == 'object' and isinstance(t[colobj.name][0], bytes)):
            scols.append(colobj.name)

    # Replace each identified column with its stringified counterpart.
    for colname in scols:
        t[colname] = sval_whole_column(t[colname])
class RegistryClass(BaseQuery):
    """
    Registry query class.

    Queries a Virtual Observatory registry TAP service for resources
    matching a service type, waveband, keyword and/or publishing source.
    """

    def __init__(self):
        super(RegistryClass, self).__init__()
        # Synchronous TAP endpoint of the registry service.
        self._REGISTRY_TAP_SYNC_URL = conf.registry_tap_url + "/sync"

    def query(self, **kwargs):
        """
        Build an ADQL query from the keyword arguments, send it to the
        registry TAP service and return the result as an astropy Table.

        Recognized keywords are documented in _build_adql(); additionally
        'method' selects the HTTP method (default 'POST') and 'debug'
        enables diagnostic printing.
        """
        adql = self._build_adql(**kwargs)

        if 'debug' in kwargs and kwargs['debug']==True: print ('Registry: sending query ADQL = {}\n'.format(adql))

        if 'method' in kwargs:
            # BUG FIX: the original read from a misspelled name "kewargs",
            # which raised NameError whenever 'method' was supplied.
            method = kwargs['method']
        else:
            method = 'POST'

        url = self._REGISTRY_TAP_SYNC_URL

        tap_params = {
            "request": "doQuery",
            "lang": "ADQL",
            "query": adql
        }

        response = self._request(method, url, data=tap_params)

        if 'debug' in kwargs and kwargs['debug']==True: print('Queried: {}\n'.format(response.url))

        aptable = self._astropy_table_from_votable_response(response)

        return aptable

    def _build_adql(self, **kwargs):
        """
        Build an ADQL query string against the rr.* registry tables.

        Recognized keywords
        -------------------
        service_type : str
            Matched loosely ('image', 'spectr', 'cone'); anything else
            falls back to 'tableaccess'.
        keyword : str
            Free-text match on description, title and ivoid.
        waveband, source, order_by : str
            Substring/ordering constraints.
        logic_string : str
            Boolean operator joining the WHERE clauses (default " and ").
        """
        # Default values
        service_type=""
        keyword=""
        waveband=""
        source=""
        order_by=""
        logic_string=" and "

        # Find the keywords we recognize
        for key,val in kwargs.items():
            if (key == 'service_type'):
                service_type = val
            elif (key == 'keyword'):
                keyword = val
            elif (key == 'waveband'):
                waveband = val
            elif (key == 'source'):
                source = val
            elif (key == 'order_by'):
                order_by = val
            elif (key == 'logic_string'):
                logic_string = val

        # Map loose service-type names onto registry capability types.
        if "image" in service_type.lower():
            service_type="simpleimageaccess"
        elif "spectr" in service_type.lower():
            service_type="simplespectralaccess"
        elif "cone" in service_type.lower():
            service_type="conesearch"
        else:
            service_type="tableaccess"

        query_retcols="""
        select res.waveband,res.short_name,cap.ivoid,res.res_description,
        int.access_url, res.reference_url
        from rr.capability cap
        natural join rr.resource res
        natural join rr.interface int
        """

        query_where="where "

        # BUG FIX: the original compared strings with "is not ''", which is an
        # identity test and implementation-dependent; use value comparison.
        wheres=[]
        if service_type != "":
            wheres.append("cap.cap_type='{}'".format(service_type))
        if source != "":
            wheres.append("cap.ivoid like '%{}%'".format(source))
        if waveband != "":
            wheres.append("res.waveband like '%{}%'".format(waveband))
        if keyword != "":
            keyword_where = """
            (res.res_description like '%{}%' or
            res.res_title like '%{}%' or
            cap.ivoid like '%{}%')
            """.format(keyword, keyword, keyword)
            wheres.append(keyword_where)

        query_where=query_where+logic_string.join(wheres)

        if order_by != "":
            query_order="order by {}".format(order_by)
        else: query_order=""

        query=query_retcols+query_where+query_order

        return query

    def _astropy_table_from_votable_response(self, response):
        """
        Takes a VOTABLE response from a web service and returns an astropy table.

        Parameters
        ----------
        response : requests.Response
            Response whose contents are assumed to be a VOTABLE.

        Returns
        -------
        astropy.table.Table
            Astropy Table containing the data from the first TABLE in the VOTABLE.
        """
        # The astropy table reader would like a file-like object, so convert
        # the response content a byte stream. This assumes Python 3.x.
        file_like_content = io.BytesIO(response.content)

        # The astropy table reader will auto-detect that the content is a VOTABLE
        # and parse it appropriately.
        aptable = Table.read(file_like_content)

        # String values in the VOTABLE are stored in the astropy Table as bytes
        # instead of strings. To make accessing them more convenient, convert
        # all those bytes values to strings.
        stringify_table(aptable)

        return aptable
# Module-level singleton instance used by astroquery callers.
Registry = RegistryClass()
| [
"numpy.vectorize",
"io.BytesIO",
"astropy.table.Table.read"
] | [((1759, 1777), 'numpy.vectorize', 'np.vectorize', (['sval'], {}), '(sval)\n', (1771, 1777), True, 'import numpy as np\n'), ((7806, 7834), 'io.BytesIO', 'io.BytesIO', (['response.content'], {}), '(response.content)\n', (7816, 7834), False, 'import io\n'), ((7982, 8011), 'astropy.table.Table.read', 'Table.read', (['file_like_content'], {}), '(file_like_content)\n', (7992, 8011), False, 'from astropy.table import Table, Row, vstack, MaskedColumn\n')] |
######### imports #########
from ast import arg
from datetime import timedelta
import sys
sys.path.insert(0, "TP_model")
sys.path.insert(0, "TP_model/fit_and_forecast")
from Reff_constants import *
from Reff_functions import *
import glob
import os
from sys import argv
import arviz as az
import seaborn as sns
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import matplotlib
from math import ceil
import pickle
from cmdstanpy import CmdStanModel
matplotlib.use("Agg")
from params import (
truncation_days,
start_date,
third_start_date,
alpha_start_date,
omicron_start_date,
omicron_only_date,
omicron_dominance_date,
pop_sizes,
num_forecast_days,
get_all_p_detect_old,
get_all_p_detect,
)
def process_vax_data_array(
    data_date,
    third_states,
    third_end_date,
    variant="Delta",
    print_latest_date_in_ts=False,
):
    """
    Processes the vaccination data to an array for either the Omicron or Delta strain.

    Reads data/vaccine_effect_timeseries_<data_date>.csv, keeps only the
    requested variant and fitting states, restricts dates to
    [third_start_date - 1 day, third_end_date], pivots to a state x date
    matrix, forward-fills any missing trailing dates with the latest
    available column, and returns the matrix (dropping the first column,
    which serves as the initial value) as a numpy array.

    Parameters
    ----------
    data_date : pandas.Timestamp
        Snapshot date used to locate the input CSV.
    third_states : sequence of str
        State abbreviations included in the third-wave fit.
    third_end_date : datetime-like
        Last date to include.
    variant : str
        "Delta" or "Omicron"; must match the 'variant' column values.
    print_latest_date_in_ts : bool
        If True, print the most recent date present in the NSW series.
    """
    # Load in vaccination data by state and date
    vaccination_by_state = pd.read_csv(
        "data/vaccine_effect_timeseries_" + data_date.strftime("%Y-%m-%d") + ".csv",
        parse_dates=["date"],
    )
    # there are a couple NA's early on in the time series but is likely due to slightly
    # different start dates
    vaccination_by_state.fillna(1, inplace=True)
    vaccination_by_state = vaccination_by_state.loc[
        vaccination_by_state["variant"] == variant
    ]
    vaccination_by_state = vaccination_by_state[["state", "date", "effect"]]

    if print_latest_date_in_ts:
        # display the latest available date in the NSW data (will be the same date between states)
        print(
            "Latest date in vaccine data is {}".format(
                vaccination_by_state[vaccination_by_state.state == "NSW"].date.values[-1]
            )
        )

    # Get only the dates we need + 1 (this serves as the initial value)
    vaccination_by_state = vaccination_by_state[
        (
            vaccination_by_state.date
            >= pd.to_datetime(third_start_date) - timedelta(days=1)
        )
        & (vaccination_by_state.date <= third_end_date)
    ]
    vaccination_by_state = vaccination_by_state[
        vaccination_by_state["state"].isin(third_states)
    ]  # Isolate fitting states
    vaccination_by_state = vaccination_by_state.pivot(
        index="state", columns="date", values="effect"
    )  # Convert to matrix form

    # If we are missing recent vaccination data, fill it in with the most recent available data.
    latest_vacc_data = vaccination_by_state.columns[-1]
    if latest_vacc_data < pd.to_datetime(third_end_date):
        # pad with copies of the last observed column, one per missing day
        vaccination_by_state = pd.concat(
            [vaccination_by_state]
            + [
                pd.Series(vaccination_by_state[latest_vacc_data], name=day)
                for day in pd.date_range(start=latest_vacc_data, end=third_end_date)
            ],
            axis=1,
        )

    # Convert to simple array only useful to pass to stan (index 1 onwards)
    vaccination_by_state_array = vaccination_by_state.iloc[:, 1:].to_numpy()

    return vaccination_by_state_array
def get_data_for_posterior(data_date):
    """
    Read in the various datastreams and combine the samples into a dictionary that we then
    dump to a pickle file.

    Inputs read from disk: microdistancing and mask-wearing Barometer
    surveys, EpyReff Delta/Omicron estimates, NNDSS case linelist, Google
    mobility, and the vaccination effect timeseries (via
    process_vax_data_array). The assembled stan input dictionary is
    written to results/stan_input_data.pkl; a CSV of case-detection
    probabilities (CA_<date>.csv) and the case file are also saved.

    Parameters
    ----------
    data_date : str or datetime-like
        Snapshot date of the data; determines file names and the right
        truncation of the fitting window.

    Returns
    -------
    None
    """
    print("Performing inference on state level Reff")
    data_date = pd.to_datetime(data_date)  # Define data date
    print("Data date is {}".format(data_date.strftime("%d%b%Y")))
    fit_date = pd.to_datetime(data_date - timedelta(days=truncation_days))
    print("Last date in fitting {}".format(fit_date.strftime("%d%b%Y")))

    # * Note: 2020-09-09 won't work (for some reason)

    # read in microdistancing survey data
    surveys = pd.DataFrame()
    path = "data/md/Barometer wave*.csv"
    for file in glob.glob(path):
        surveys = surveys.append(pd.read_csv(file, parse_dates=["date"]))

    surveys = surveys.sort_values(by="date")
    print("Latest Microdistancing survey is {}".format(surveys.date.values[-1]))

    surveys["state"] = surveys["state"].map(states_initials).fillna(surveys["state"])
    surveys["proportion"] = surveys["count"] / surveys.respondents
    surveys.date = pd.to_datetime(surveys.date)

    always = surveys.loc[surveys.response == "Always"].set_index(["state", "date"])
    always = always.unstack(["state"])
    # If you get an error here saying 'cannot create a new series when the index is not unique',
    # then you have a duplicated md file.
    idx = pd.date_range("2020-03-01", pd.to_datetime("today"))
    always = always.reindex(idx, fill_value=np.nan)
    always.index.name = "date"

    # fill back to earlier and between weeks.
    # Assume survey on day x applies for all days up to x - 6
    always = always.fillna(method="bfill")
    # assume values continue forward if survey hasn't completed
    always = always.fillna(method="ffill")
    always = always.stack(["state"])

    # Zero out before first survey 20th March
    always = always.reset_index().set_index("date")
    always.loc[:"2020-03-20", "count"] = 0
    always.loc[:"2020-03-20", "respondents"] = 0
    always.loc[:"2020-03-20", "proportion"] = 0

    always = always.reset_index().set_index(["state", "date"])

    # NOTE(review): survey_X is assembled but not used below -- kept for parity
    # with other scripts; confirm before removing.
    survey_X = pd.pivot_table(
        data=always, index="date", columns="state", values="proportion"
    )
    survey_counts_base = (
        pd.pivot_table(data=always, index="date", columns="state", values="count")
        .drop(["Australia", "Other"], axis=1)
        .astype(int)
    )
    survey_respond_base = (
        pd.pivot_table(data=always, index="date", columns="state", values="respondents")
        .drop(["Australia", "Other"], axis=1)
        .astype(int)
    )

    # read in and process mask wearing data
    mask_wearing = pd.DataFrame()
    path = "data/face_coverings/face_covering_*_.csv"
    for file in glob.glob(path):
        mask_wearing = mask_wearing.append(pd.read_csv(file, parse_dates=["date"]))

    mask_wearing = mask_wearing.sort_values(by="date")
    print("Latest Mask wearing survey is {}".format(mask_wearing.date.values[-1]))

    mask_wearing["state"] = (
        mask_wearing["state"].map(states_initials).fillna(mask_wearing["state"])
    )
    mask_wearing["proportion"] = mask_wearing["count"] / mask_wearing.respondents
    mask_wearing.date = pd.to_datetime(mask_wearing.date)

    mask_wearing_always = mask_wearing.loc[
        mask_wearing.face_covering == "Always"
    ].set_index(["state", "date"])
    mask_wearing_always = mask_wearing_always.unstack(["state"])

    idx = pd.date_range("2020-03-01", pd.to_datetime("today"))
    mask_wearing_always = mask_wearing_always.reindex(idx, fill_value=np.nan)
    mask_wearing_always.index.name = "date"

    # fill back to earlier and between weeks.
    # Assume survey on day x applies for all days up to x - 6
    mask_wearing_always = mask_wearing_always.fillna(method="bfill")
    # assume values continue forward if survey hasn't completed
    mask_wearing_always = mask_wearing_always.fillna(method="ffill")
    mask_wearing_always = mask_wearing_always.stack(["state"])

    # Zero out before first survey 20th March
    mask_wearing_always = mask_wearing_always.reset_index().set_index("date")
    mask_wearing_always.loc[:"2020-03-20", "count"] = 0
    mask_wearing_always.loc[:"2020-03-20", "respondents"] = 0
    mask_wearing_always.loc[:"2020-03-20", "proportion"] = 0

    # NOTE(review): mask_wearing_X appears unused below as well.
    mask_wearing_X = pd.pivot_table(
        data=mask_wearing_always, index="date", columns="state", values="proportion"
    )
    mask_wearing_counts_base = pd.pivot_table(
        data=mask_wearing_always, index="date", columns="state", values="count"
    ).astype(int)
    mask_wearing_respond_base = pd.pivot_table(
        data=mask_wearing_always, index="date", columns="state", values="respondents"
    ).astype(int)

    # EpyReff estimates for the Delta strain
    df_Reff = pd.read_csv(
        "results/EpyReff/Reff_delta" + data_date.strftime("%Y-%m-%d") + "tau_4.csv",
        parse_dates=["INFECTION_DATES"],
    )
    df_Reff["date"] = df_Reff.INFECTION_DATES
    df_Reff["state"] = df_Reff.STATE

    # EpyReff estimates for the Omicron strain
    df_Reff_omicron = pd.read_csv(
        "results/EpyReff/Reff_omicron" + data_date.strftime("%Y-%m-%d") + "tau_4.csv",
        parse_dates=["INFECTION_DATES"],
    )
    df_Reff_omicron["date"] = df_Reff_omicron.INFECTION_DATES
    df_Reff_omicron["state"] = df_Reff_omicron.STATE

    # relabel some of the columns to avoid replication in the merged dataframe
    col_names_replace = {
        "mean": "mean_omicron",
        "lower": "lower_omicron",
        "upper": "upper_omicron",
        "top": "top_omicron",
        "bottom": "bottom_omicron",
        "std": "std_omicron",
    }

    df_Reff_omicron.rename(col_names_replace, axis=1, inplace=True)

    # read in NNDSS/linelist data
    # If this errors it may be missing a leading zero on the date.
    df_state = read_in_cases(
        case_file_date=data_date.strftime("%d%b%Y"),
        apply_delay_at_read=True,
        apply_inc_at_read=True,
    )

    # save the case file for convenience
    df_state.to_csv("results/cases_" + data_date.strftime("%Y-%m-%d") + ".csv")

    df_Reff = df_Reff.merge(
        df_state,
        how="left",
        left_on=["state", "date"],
        right_on=["STATE", "date_inferred"],
    )  # how = left to use Reff days, NNDSS missing dates

    # merge in the omicron stuff
    df_Reff = df_Reff.merge(
        df_Reff_omicron,
        how="left",
        left_on=["state", "date"],
        right_on=["state", "date"],
    )

    # 7-day rolling mean of the import fraction per state
    df_Reff["rho_moving"] = df_Reff.groupby(["state"])["rho"].transform(
        lambda x: x.rolling(7, 1).mean()
    )  # minimum number of 1

    # some days have no cases, so need to fillna
    df_Reff["rho_moving"] = df_Reff.rho_moving.fillna(method="bfill")

    # counts are already aligned with infection date by subtracting a random incubation period
    df_Reff["local"] = df_Reff.local.fillna(0)
    df_Reff["imported"] = df_Reff.imported.fillna(0)

    ######### Read in Google mobility results #########
    sys.path.insert(0, "../")

    df_google = read_in_google(moving=True, moving_window=7)
    # df_google = read_in_google(moving=False)
    df = df_google.merge(df_Reff[[
        "date",
        "state",
        "mean",
        "lower",
        "upper",
        "top",
        "bottom",
        "std",
        "mean_omicron",
        "lower_omicron",
        "upper_omicron",
        "top_omicron",
        "bottom_omicron",
        "std_omicron",
        "rho",
        "rho_moving",
        "local",
        "imported",
    ]],
        on=["date", "state"],
        how="inner",
    )

    ######### Create useable dataset #########
    # ACT and NT not in original estimates, need to extrapolated sorting keeps consistent
    # with sort in data_by_state
    # Note that as we now consider the third wave for ACT, we include it in the third
    # wave fitting only!
    states_to_fit_all_waves = sorted(
        ["NSW", "VIC", "QLD", "SA", "WA", "TAS", "ACT", "NT"]
    )
    first_states = sorted(["NSW", "VIC", "QLD", "SA", "WA", "TAS"])
    fit_post_March = True
    ban = "2020-03-20"
    first_end_date = "2020-03-31"

    # data for the first wave
    first_date_range = {
        "NSW": pd.date_range(start="2020-03-01", end=first_end_date).values,
        "QLD": pd.date_range(start="2020-03-01", end=first_end_date).values,
        "SA": pd.date_range(start="2020-03-01", end=first_end_date).values,
        "TAS": pd.date_range(start="2020-03-01", end=first_end_date).values,
        "VIC": pd.date_range(start="2020-03-01", end=first_end_date).values,
        "WA": pd.date_range(start="2020-03-01", end=first_end_date).values,
    }

    # Second wave inputs
    sec_states = sorted([
        "NSW",
        # "VIC",
    ])
    sec_start_date = "2020-06-01"
    sec_end_date = "2021-01-19"

    # choose dates for each state for sec wave
    sec_date_range = {
        "NSW": pd.date_range(start="2020-06-01", end="2021-01-19").values,
        # "VIC": pd.date_range(start="2020-06-01", end="2020-10-28").values,
    }

    # Third wave inputs
    third_states = sorted([
        "NSW",
        "VIC",
        "ACT",
        "QLD",
        "SA",
        "TAS",
        # "NT",
        "WA",
    ])
    # Subtract the truncation days to avoid right truncation as we consider infection dates
    # and not symptom onset dates
    third_end_date = data_date - pd.Timedelta(days=truncation_days)

    # choose dates for each state for third wave
    # Note that as we now consider the third wave for ACT, we include it in
    # the third wave fitting only!
    third_date_range = {
        "ACT": pd.date_range(start="2021-08-15", end=third_end_date).values,
        "NSW": pd.date_range(start="2021-06-25", end=third_end_date).values,
        # "NT": pd.date_range(start="2021-12-20", end=third_end_date).values,
        "QLD": pd.date_range(start="2021-07-30", end=third_end_date).values,
        "SA": pd.date_range(start="2021-12-10", end=third_end_date).values,
        "TAS": pd.date_range(start="2021-12-20", end=third_end_date).values,
        "VIC": pd.date_range(start="2021-07-10", end=third_end_date).values,
        "WA": pd.date_range(start="2022-01-01", end=third_end_date).values,
    }

    # boolean masks selecting the rows belonging to each fitting wave
    fit_mask = df.state.isin(first_states)
    if fit_post_March:
        fit_mask = (fit_mask) & (df.date >= start_date)

    fit_mask = (fit_mask) & (df.date <= first_end_date)

    second_wave_mask = df.state.isin(sec_states)
    second_wave_mask = (second_wave_mask) & (df.date >= sec_start_date)
    second_wave_mask = (second_wave_mask) & (df.date <= sec_end_date)

    # Add third wave stuff here
    third_wave_mask = df.state.isin(third_states)
    third_wave_mask = (third_wave_mask) & (df.date >= third_start_date)
    third_wave_mask = (third_wave_mask) & (df.date <= third_end_date)

    predictors = mov_values.copy()
    # predictors.extend(['driving_7days','transit_7days','walking_7days','pc'])

    # remove residential to see if it improves fit
    # predictors.remove("residential_7days")

    df["post_policy"] = (df.date >= ban).astype(int)

    dfX = df.loc[fit_mask].sort_values("date")
    df2X = df.loc[second_wave_mask].sort_values("date")
    df3X = df.loc[third_wave_mask].sort_values("date")

    # per-state indicator columns: 1 on dates inside each wave's range
    dfX["is_first_wave"] = 0
    for state in first_states:
        dfX.loc[dfX.state == state, "is_first_wave"] = (
            dfX.loc[dfX.state == state]
            .date.isin(first_date_range[state])
            .astype(int)
            .values
        )

    df2X["is_sec_wave"] = 0
    for state in sec_states:
        df2X.loc[df2X.state == state, "is_sec_wave"] = (
            df2X.loc[df2X.state == state]
            .date.isin(sec_date_range[state])
            .astype(int)
            .values
        )

    # used to index what dates are featured in omicron AND third wave
    omicron_date_range = pd.date_range(start=omicron_start_date, end=third_end_date)

    df3X["is_third_wave"] = 0
    for state in third_states:
        df3X.loc[df3X.state == state, "is_third_wave"] = (
            df3X.loc[df3X.state == state]
            .date.isin(third_date_range[state])
            .astype(int)
            .values
        )
        # condition on being in third wave AND omicron
        df3X.loc[df3X.state == state, "is_omicron_wave"] = (
            (
                df3X.loc[df3X.state == state].date.isin(omicron_date_range)
                * df3X.loc[df3X.state == state].date.isin(third_date_range[state])
            )
            .astype(int)
            .values
        )

    # date x state matrices of the Reff summary stats and case counts,
    # one dict per wave
    data_by_state = {}
    sec_data_by_state = {}
    third_data_by_state = {}

    for value in ["mean", "std", "local", "imported"]:
        data_by_state[value] = pd.pivot(
            dfX[["state", value, "date"]],
            index="date",
            columns="state",
            values=value,
        ).sort_index(axis="columns")
        # account for dates pre pre second wave
        if df2X.loc[df2X.state == sec_states[0]].shape[0] == 0:
            print("making empty")
            sec_data_by_state[value] = pd.DataFrame(columns=sec_states).astype(float)
        else:
            sec_data_by_state[value] = pd.pivot(
                df2X[["state", value, "date"]],
                index="date",
                columns="state",
                values=value,
            ).sort_index(axis="columns")
        # account for dates pre pre third wave
        if df3X.loc[df3X.state == third_states[0]].shape[0] == 0:
            print("making empty")
            third_data_by_state[value] = pd.DataFrame(columns=third_states).astype(
                float
            )
        else:
            third_data_by_state[value] = pd.pivot(
                df3X[["state", value, "date"]],
                index="date",
                columns="state",
                values=value,
            ).sort_index(axis="columns")

    # now add in the summary stats for Omicron Reff
    for value in ["mean_omicron", "std_omicron"]:
        if df3X.loc[df3X.state == third_states[0]].shape[0] == 0:
            print("making empty")
            third_data_by_state[value] = pd.DataFrame(columns=third_states).astype(
                float
            )
        else:
            third_data_by_state[value] = pd.pivot(
                df3X[["state", value, "date"]],
                index="date",
                columns="state",
                values=value,
            ).sort_index(axis="columns")

    # FIRST PHASE
    mobility_by_state = []
    mobility_std_by_state = []
    count_by_state = []
    respond_by_state = []
    mask_wearing_count_by_state = []
    mask_wearing_respond_by_state = []
    include_in_first_wave = []

    # filtering survey responses to dates before this wave fitting
    survey_respond = survey_respond_base.loc[: dfX.date.values[-1]]
    survey_counts = survey_counts_base.loc[: dfX.date.values[-1]]
    mask_wearing_respond = mask_wearing_respond_base.loc[: dfX.date.values[-1]]
    mask_wearing_counts = mask_wearing_counts_base.loc[: dfX.date.values[-1]]

    for state in first_states:
        mobility_by_state.append(dfX.loc[dfX.state == state, predictors].values / 100)
        mobility_std_by_state.append(
            dfX.loc[dfX.state == state, [val + "_std" for val in predictors]].values / 100
        )
        count_by_state.append(survey_counts.loc[start_date:first_end_date, state].values)
        respond_by_state.append(survey_respond.loc[start_date:first_end_date, state].values)
        mask_wearing_count_by_state.append(
            mask_wearing_counts.loc[start_date:first_end_date, state].values
        )
        mask_wearing_respond_by_state.append(
            mask_wearing_respond.loc[start_date:first_end_date, state].values
        )
        include_in_first_wave.append(
            dfX.loc[dfX.state == state, "is_first_wave"].values
        )

    # SECOND PHASE
    sec_mobility_by_state = []
    sec_mobility_std_by_state = []
    sec_count_by_state = []
    sec_respond_by_state = []
    sec_mask_wearing_count_by_state = []
    sec_mask_wearing_respond_by_state = []
    include_in_sec_wave = []

    # filtering survey responses to dates before this wave fitting
    survey_respond = survey_respond_base.loc[: df2X.date.values[-1]]
    survey_counts = survey_counts_base.loc[: df2X.date.values[-1]]
    mask_wearing_respond = mask_wearing_respond_base.loc[: df2X.date.values[-1]]
    mask_wearing_counts = mask_wearing_counts_base.loc[: df2X.date.values[-1]]

    for state in sec_states:
        sec_mobility_by_state.append(
            df2X.loc[df2X.state == state, predictors].values / 100
        )
        sec_mobility_std_by_state.append(
            df2X.loc[df2X.state == state, [val + "_std" for val in predictors]].values / 100
        )
        sec_count_by_state.append(
            survey_counts.loc[sec_start_date:sec_end_date, state].values
        )
        sec_respond_by_state.append(
            survey_respond.loc[sec_start_date:sec_end_date, state].values
        )
        sec_mask_wearing_count_by_state.append(
            mask_wearing_counts.loc[sec_start_date:sec_end_date, state].values
        )
        sec_mask_wearing_respond_by_state.append(
            mask_wearing_respond.loc[sec_start_date:sec_end_date, state].values
        )
        include_in_sec_wave.append(df2X.loc[df2X.state == state, "is_sec_wave"].values)

    # THIRD WAVE
    third_mobility_by_state = []
    third_mobility_std_by_state = []
    third_count_by_state = []
    third_respond_by_state = []
    third_mask_wearing_count_by_state = []
    third_mask_wearing_respond_by_state = []
    include_in_third_wave = []
    include_in_omicron_wave = []

    # filtering survey responses to dates before this wave fitting
    survey_respond = survey_respond_base.loc[: df3X.date.values[-1]]
    survey_counts = survey_counts_base.loc[: df3X.date.values[-1]]
    mask_wearing_respond = mask_wearing_respond_base.loc[: df3X.date.values[-1]]
    mask_wearing_counts = mask_wearing_counts_base.loc[: df3X.date.values[-1]]

    for state in third_states:
        third_mobility_by_state.append(
            df3X.loc[df3X.state == state, predictors].values / 100
        )
        third_mobility_std_by_state.append(
            df3X.loc[df3X.state == state, [val + "_std" for val in predictors]].values / 100
        )
        third_count_by_state.append(
            survey_counts.loc[third_start_date:third_end_date, state].values
        )
        third_respond_by_state.append(
            survey_respond.loc[third_start_date:third_end_date, state].values
        )
        third_mask_wearing_count_by_state.append(
            mask_wearing_counts.loc[third_start_date:third_end_date, state].values
        )
        third_mask_wearing_respond_by_state.append(
            mask_wearing_respond.loc[third_start_date:third_end_date, state].values
        )
        include_in_third_wave.append(
            df3X.loc[df3X.state == state, "is_third_wave"].values
        )
        include_in_omicron_wave.append(
            df3X.loc[df3X.state == state, "is_omicron_wave"].values
        )

    # policy boolean flag for after travel ban in each wave
    policy = dfX.loc[
        dfX.state == first_states[0], "post_policy"
    ]  # this is the post ban policy
    policy_sec_wave = [1] * df2X.loc[df2X.state == sec_states[0]].shape[0]
    policy_third_wave = [1] * df3X.loc[df3X.state == third_states[0]].shape[0]

    # read in the vaccination data
    delta_vaccination_by_state_array = process_vax_data_array(
        data_date=data_date,
        third_states=third_states,
        third_end_date=third_end_date,
        variant="Delta",
        print_latest_date_in_ts=True,
    )
    omicron_vaccination_by_state_array = process_vax_data_array(
        data_date=data_date,
        third_states=third_states,
        third_end_date=third_end_date,
        variant="Omicron",
    )

    # Make state by state arrays
    state_index = {state: i + 1 for i, state in enumerate(states_to_fit_all_waves)}

    # dates to apply alpha in the second wave (this won't allow for VIC to be added as
    # the date_ranges are different)
    apply_alpha_sec_wave = (
        sec_date_range["NSW"] >= pd.to_datetime(alpha_start_date)
    ).astype(int)
    # day offsets (relative to third_start_date) at which Omicron begins,
    # becomes the only circulating strain, and heterogeneity starts
    omicron_start_day = (
        pd.to_datetime(omicron_start_date) - pd.to_datetime(third_start_date)
    ).days
    omicron_only_day = (
        pd.to_datetime(omicron_only_date) - pd.to_datetime(third_start_date)
    ).days

    heterogeneity_start_day = (
        pd.to_datetime("2021-08-20") - pd.to_datetime(third_start_date)
    ).days

    # number of days we fit the average VE over
    tau_vax_block_size = 3

    # get pop size array
    pop_size_array = []
    for s in states_to_fit_all_waves:
        pop_size_array.append(pop_sizes[s])

    # case-detection probability over the third-wave window
    p_detect = get_all_p_detect_old(
        states=third_states,
        end_date=third_end_date,
        num_days=df3X.loc[df3X.state == "NSW"].shape[0],
    )
    df_p_detect = pd.DataFrame(p_detect, columns=third_states)
    df_p_detect["date"] = third_date_range["NSW"]
    df_p_detect.to_csv("results/CA_" + data_date.strftime("%Y-%m-%d") + ".csv")

    # p_detect = get_all_p_detect(
    #     end_date=third_end_date,
    #     num_days=df3X.loc[df3X.state == "NSW"].shape[0],
    # )

    # input data block for stan model
    input_data = {
        "j_total": len(states_to_fit_all_waves),
        "N_first": dfX.loc[dfX.state == first_states[0]].shape[0],
        "K": len(predictors),
        "j_first": len(first_states),
        "Reff": data_by_state["mean"].values,
        "mob": mobility_by_state,
        "mob_std": mobility_std_by_state,
        "sigma2": data_by_state["std"].values ** 2,
        "policy": policy.values,
        "local": data_by_state["local"].values,
        "imported": data_by_state["imported"].values,
        "N_sec": df2X.loc[df2X.state == sec_states[0]].shape[0],
        "j_sec": len(sec_states),
        "Reff_sec": sec_data_by_state["mean"].values,
        "mob_sec": sec_mobility_by_state,
        "mob_sec_std": sec_mobility_std_by_state,
        "sigma2_sec": sec_data_by_state["std"].values ** 2,
        "policy_sec": policy_sec_wave,
        "local_sec": sec_data_by_state["local"].values,
        "imported_sec": sec_data_by_state["imported"].values,
        "apply_alpha_sec": apply_alpha_sec_wave,
        "N_third": df3X.loc[df3X.state == "NSW"].shape[0],
        "j_third": len(third_states),
        "Reff_third": third_data_by_state["mean"].values,
        "Reff_omicron": third_data_by_state["mean_omicron"].values,
        "mob_third": third_mobility_by_state,
        "mob_third_std": third_mobility_std_by_state,
        "sigma2_third": third_data_by_state["std"].values ** 2,
        "sigma2_omicron": third_data_by_state["std_omicron"].values ** 2,
        "policy_third": policy_third_wave,
        "local_third": third_data_by_state["local"].values,
        "imported_third": third_data_by_state["imported"].values,
        "count_md": count_by_state,
        "respond_md": respond_by_state,
        "count_md_sec": sec_count_by_state,
        "respond_md_sec": sec_respond_by_state,
        "count_md_third": third_count_by_state,
        "respond_md_third": third_respond_by_state,
        "count_masks": mask_wearing_count_by_state,
        "respond_masks": mask_wearing_respond_by_state,
        "count_masks_sec": sec_mask_wearing_count_by_state,
        "respond_masks_sec": sec_mask_wearing_respond_by_state,
        "count_masks_third": third_mask_wearing_count_by_state,
        "respond_masks_third": third_mask_wearing_respond_by_state,
        "map_to_state_index_first": [state_index[state] for state in first_states],
        "map_to_state_index_sec": [state_index[state] for state in sec_states],
        "map_to_state_index_third": [state_index[state] for state in third_states],
        "total_N_p_sec": sum([sum(x) for x in include_in_sec_wave]).item(),
        "total_N_p_third": sum([sum(x) for x in include_in_third_wave]).item(),
        "include_in_first": include_in_first_wave,
        "include_in_sec": include_in_sec_wave,
        "include_in_third": include_in_third_wave,
        "pos_starts_sec": np.cumsum([sum(x) for x in include_in_sec_wave]).astype(int).tolist(),
        "pos_starts_third": np.cumsum(
            [sum(x) for x in include_in_third_wave]
        ).astype(int).tolist(),
        "ve_delta_data": delta_vaccination_by_state_array,
        "ve_omicron_data": omicron_vaccination_by_state_array,
        "omicron_start_day": omicron_start_day,
        "omicron_only_day": omicron_only_day,
        "include_in_omicron": include_in_omicron_wave,
        "total_N_p_third_omicron": int(sum([sum(x) for x in include_in_omicron_wave]).item()),
        "pos_starts_third_omicron": np.cumsum(
            [sum(x) for x in include_in_omicron_wave]
        ).astype(int).tolist(),
        'tau_vax_block_size': tau_vax_block_size,
        'total_N_p_third_blocks': int(
            sum([int(ceil(sum(x)/tau_vax_block_size)) for x in include_in_third_wave])
        ),
        'pos_starts_third_blocks': np.cumsum(
            [int(ceil(sum(x)/tau_vax_block_size)) for x in include_in_third_wave]
        ).astype(int),
        'total_N_p_third_omicron_blocks': int(
            sum([int(ceil(sum(x)/tau_vax_block_size)) for x in include_in_omicron_wave])
        ),
        'pos_starts_third_omicron_blocks': np.cumsum(
            [int(ceil(sum(x)/tau_vax_block_size)) for x in include_in_omicron_wave]
        ).astype(int),
        "pop_size_array": pop_size_array,
        "heterogeneity_start_day": heterogeneity_start_day,
        "p_detect": p_detect,
    }

    # dump the dictionary to a json file
    with open("results/stan_input_data.pkl", "wb") as f:
        pickle.dump(input_data, f)

    return None
def run_stan(
    data_date,
    num_chains=4,
    num_samples=1000,
    num_warmup_samples=500,
    max_treedepth=12,
):
    """
    Load the pickled stan input data and fit the TP model, writing all
    output (posterior draws and parameter summaries) to disk.

    Parameters
    ----------
    data_date : str or datetime-like
        Snapshot date of the data; used to name the output directories
        and files.
    num_chains : int
        Number of MCMC chains to run.
    num_samples : int
        Number of post-warmup sampling iterations per chain.
    num_warmup_samples : int
        Number of warmup iterations per chain.
    max_treedepth : int
        Maximum NUTS tree depth passed to the sampler.

    Returns
    -------
    None
    """
    date = pd.to_datetime(data_date)
    date_str = date.strftime("%Y-%m-%d")
    # the stan input data is produced upstream and serialised as a pickle
    with open("results/stan_input_data.pkl", "rb") as fh:
        stan_data = pickle.load(fh)
    # make results and figs dirs (idempotent)
    figs_dir = "figs/stan_fit/stan_fit_" + date_str + "/"
    results_dir = "results/" + date_str + "/"
    for out_dir in (figs_dir, results_dir):
        os.makedirs(out_dir, exist_ok=True)
    # stan model used: a switchover between Reffs that also incorporates
    # waning of infection-acquired immunity (alternative formulations —
    # plain switchover, gamma mixtures — exist in the same directory)
    stan_file = "TP_model/fit_and_forecast/stan_models/TP_switchover_waning_infection.stan"
    model = CmdStanModel(stan_file=stan_file)
    # obtain a posterior sample from the model conditioned on the data
    fit = model.sample(
        chains=num_chains,
        iter_warmup=num_warmup_samples,
        iter_sampling=num_samples,
        data=stan_data,
        max_treedepth=max_treedepth,
        refresh=10,
    )
    # display convergence diagnostics for the current run
    print("===========")
    print(fit.diagnose())
    print("===========")
    # persist the per-chain CSVs plus a combined posterior-sample CSV
    fit.save_csvfiles(dir=results_dir)
    df_fit = fit.draws_pd()
    df_fit.to_csv(results_dir + "posterior_sample_" + date_str + ".csv")
    # full summary (ESS and ESS/s as well as summary stats) for every parameter
    fit_summary = fit.summary()
    fit_summary.to_csv(figs_dir + "fit_summary_all_parameters" + date_str + ".csv")
    # also save a small summary restricted to the key parameters for easy viewing
    key_pars = ["bet[" + str(i + 1) + "]" for i in range(5)]
    key_pars += ["R_Li[" + str(i + 1) + "]" for i in range(8)]
    key_pars += [
        "R_I",
        "R_L",
        "theta_md",
        "theta_masks",
        "sig",
        "voc_effect_alpha",
        "voc_effect_delta",
        "voc_effect_omicron",
    ]
    # include every phi-related draw column (but not simplex internals)
    key_pars += [col for col in df_fit if "phi" in col and "simplex" not in col]
    fit_summary.loc[key_pars].to_csv(
        figs_dir + "fit_summary_main_parameters" + date_str + ".csv"
    )
    return None
def plot_and_save_posterior_samples(data_date):
"""
Runs the full suite of plotting.
"""
data_date = pd.to_datetime(data_date) # Define data date
figs_dir = (
"figs/stan_fit/stan_fit_"
+ data_date.strftime("%Y-%m-%d")
+ "/"
)
# read in the posterior sample
samples_mov_gamma = pd.read_csv(
"results/"
+ data_date.strftime("%Y-%m-%d")
+ "/posterior_sample_"
+ data_date.strftime("%Y-%m-%d")
+ ".csv"
)
# * Note: 2020-09-09 won't work (for some reason)
######### Read in microdistancing (md) surveys #########
surveys = pd.DataFrame()
path = "data/md/Barometer wave*.csv"
for file in glob.glob(path):
surveys = surveys.append(pd.read_csv(file, parse_dates=["date"]))
surveys = surveys.sort_values(by="date")
print("Latest Microdistancing survey is {}".format(surveys.date.values[-1]))
surveys["state"] = surveys["state"].map(states_initials).fillna(surveys["state"])
surveys["proportion"] = surveys["count"] / surveys.respondents
surveys.date = pd.to_datetime(surveys.date)
always = surveys.loc[surveys.response == "Always"].set_index(["state", "date"])
always = always.unstack(["state"])
# If you get an error here saying 'cannot create a new series when the index is not unique',
# then you have a duplicated md file.
idx = pd.date_range("2020-03-01", pd.to_datetime("today"))
always = always.reindex(idx, fill_value=np.nan)
always.index.name = "date"
# fill back to earlier and between weeks.
# Assume survey on day x applies for all days up to x - 6
always = always.fillna(method="bfill")
# assume values continue forward if survey hasn't completed
always = always.fillna(method="ffill")
always = always.stack(["state"])
# Zero out before first survey 20th March
always = always.reset_index().set_index("date")
always.loc[:"2020-03-20", "count"] = 0
always.loc[:"2020-03-20", "respondents"] = 0
always.loc[:"2020-03-20", "proportion"] = 0
always = always.reset_index().set_index(["state", "date"])
survey_X = pd.pivot_table(
data=always, index="date", columns="state", values="proportion"
)
survey_counts_base = (
pd.pivot_table(data=always, index="date", columns="state", values="count")
.drop(["Australia", "Other"], axis=1)
.astype(int)
)
survey_respond_base = (
pd.pivot_table(data=always, index="date", columns="state", values="respondents")
.drop(["Australia", "Other"], axis=1)
.astype(int)
)
## read in and process mask wearing data
mask_wearing = pd.DataFrame()
path = "data/face_coverings/face_covering_*_.csv"
for file in glob.glob(path):
mask_wearing = mask_wearing.append(pd.read_csv(file, parse_dates=["date"]))
mask_wearing = mask_wearing.sort_values(by="date")
print("Latest Mask wearing survey is {}".format(mask_wearing.date.values[-1]))
mask_wearing["state"] = (
mask_wearing["state"].map(states_initials).fillna(mask_wearing["state"])
)
mask_wearing["proportion"] = mask_wearing["count"] / mask_wearing.respondents
mask_wearing.date = pd.to_datetime(mask_wearing.date)
mask_wearing_always = mask_wearing.loc[
mask_wearing.face_covering == "Always"
].set_index(["state", "date"])
mask_wearing_always = mask_wearing_always.unstack(["state"])
idx = pd.date_range("2020-03-01", pd.to_datetime("today"))
mask_wearing_always = mask_wearing_always.reindex(idx, fill_value=np.nan)
mask_wearing_always.index.name = "date"
# fill back to earlier and between weeks.
# Assume survey on day x applies for all days up to x - 6
mask_wearing_always = mask_wearing_always.fillna(method="bfill")
# assume values continue forward if survey hasn't completed
mask_wearing_always = mask_wearing_always.fillna(method="ffill")
mask_wearing_always = mask_wearing_always.stack(["state"])
# Zero out before first survey 20th March
mask_wearing_always = mask_wearing_always.reset_index().set_index("date")
mask_wearing_always.loc[:"2020-03-20", "count"] = 0
mask_wearing_always.loc[:"2020-03-20", "respondents"] = 0
mask_wearing_always.loc[:"2020-03-20", "proportion"] = 0
mask_wearing_X = pd.pivot_table(
data=mask_wearing_always, index="date", columns="state", values="proportion"
)
mask_wearing_counts_base = pd.pivot_table(
data=mask_wearing_always, index="date", columns="state", values="count"
).astype(int)
mask_wearing_respond_base = pd.pivot_table(
data=mask_wearing_always, index="date", columns="state", values="respondents"
).astype(int)
df_Reff = pd.read_csv(
"results/EpyReff/Reff_delta" + data_date.strftime("%Y-%m-%d") + "tau_4.csv",
parse_dates=["INFECTION_DATES"],
)
df_Reff["date"] = df_Reff.INFECTION_DATES
df_Reff["state"] = df_Reff.STATE
df_Reff_omicron = pd.read_csv(
"results/EpyReff/Reff_omicron" + data_date.strftime("%Y-%m-%d") + "tau_4.csv",
parse_dates=["INFECTION_DATES"],
)
df_Reff_omicron["date"] = df_Reff_omicron.INFECTION_DATES
df_Reff_omicron["state"] = df_Reff_omicron.STATE
# relabel some of the columns to avoid replication in the merged dataframe
col_names_replace = {
"mean": "mean_omicron",
"lower": "lower_omicron",
"upper": "upper_omicron",
"top": "top_omicron",
"bottom": "bottom_omicron",
"std": "std_omicron",
}
df_Reff_omicron.rename(col_names_replace, axis=1, inplace=True)
# read in NNDSS/linelist data
# If this errors it may be missing a leading zero on the date.
df_state = read_in_cases(
case_file_date=data_date.strftime("%d%b%Y"),
apply_delay_at_read=True,
apply_inc_at_read=True,
)
df_Reff = df_Reff.merge(
df_state,
how="left",
left_on=["state", "date"],
right_on=["STATE", "date_inferred"],
) # how = left to use Reff days, NNDSS missing dates
# merge in the omicron stuff
df_Reff = df_Reff.merge(
df_Reff_omicron,
how="left",
left_on=["state", "date"],
right_on=["state", "date"],
)
df_Reff["rho_moving"] = df_Reff.groupby(["state"])["rho"].transform(
lambda x: x.rolling(7, 1).mean()
) # minimum number of 1
# some days have no cases, so need to fillna
df_Reff["rho_moving"] = df_Reff.rho_moving.fillna(method="bfill")
# counts are already aligned with infection date by subtracting a random incubation period
df_Reff["local"] = df_Reff.local.fillna(0)
df_Reff["imported"] = df_Reff.imported.fillna(0)
######### Read in Google mobility results #########
sys.path.insert(0, "../")
df_google = read_in_google(moving=True)
df = df_google.merge(
df_Reff[
[
"date",
"state",
"mean",
"lower",
"upper",
"top",
"bottom",
"std",
"mean_omicron",
"lower_omicron",
"upper_omicron",
"top_omicron",
"bottom_omicron",
"std_omicron",
"rho",
"rho_moving",
"local",
"imported",
]
],
on=["date", "state"],
how="inner",
)
# ACT and NT not in original estimates, need to extrapolated sorting keeps consistent
# with sort in data_by_state
# Note that as we now consider the third wave for ACT, we include it in the third
# wave fitting only!
states_to_fit_all_waves = sorted(
["NSW", "VIC", "QLD", "SA", "WA", "TAS", "ACT", "NT"]
)
first_states = sorted(["NSW", "VIC", "QLD", "SA", "WA", "TAS"])
fit_post_March = True
ban = "2020-03-20"
first_end_date = "2020-03-31"
# data for the first wave
first_date_range = {
"NSW": pd.date_range(start="2020-03-01", end=first_end_date).values,
"QLD": pd.date_range(start="2020-03-01", end=first_end_date).values,
"SA": pd.date_range(start="2020-03-01", end=first_end_date).values,
"TAS": pd.date_range(start="2020-03-01", end=first_end_date).values,
"VIC": pd.date_range(start="2020-03-01", end=first_end_date).values,
"WA": pd.date_range(start="2020-03-01", end=first_end_date).values,
}
# Second wave inputs
sec_states = sorted([
'NSW',
# 'VIC',
])
sec_start_date = "2020-06-01"
sec_end_date = "2021-01-19"
# choose dates for each state for sec wave
sec_date_range = {
"NSW": pd.date_range(start="2020-06-01", end="2021-01-19").values,
# "VIC": pd.date_range(start="2020-06-01", end="2020-10-28").values,
}
# Third wave inputs
third_states = sorted([
"NSW",
"VIC",
"ACT",
"QLD",
"SA",
"TAS",
# "NT",
"WA",
])
# Subtract the truncation days to avoid right truncation as we consider infection dates
# and not symptom onset dates
third_end_date = data_date - pd.Timedelta(days=truncation_days)
# choose dates for each state for third wave
# Note that as we now consider the third wave for ACT, we include it in
# the third wave fitting only!
third_date_range = {
"ACT": pd.date_range(start="2021-08-15", end=third_end_date).values,
"NSW": pd.date_range(start="2021-06-25", end=third_end_date).values,
# "NT": pd.date_range(start="2021-12-20", end=third_end_date).values,
"QLD": pd.date_range(start="2021-07-30", end=third_end_date).values,
"SA": pd.date_range(start="2021-12-10", end=third_end_date).values,
"TAS": pd.date_range(start="2021-12-20", end=third_end_date).values,
"VIC": pd.date_range(start="2021-07-10", end=third_end_date).values,
"WA": pd.date_range(start="2022-01-01", end=third_end_date).values,
}
fit_mask = df.state.isin(first_states)
if fit_post_March:
fit_mask = (fit_mask) & (df.date >= start_date)
fit_mask = (fit_mask) & (df.date <= first_end_date)
second_wave_mask = df.state.isin(sec_states)
second_wave_mask = (second_wave_mask) & (df.date >= sec_start_date)
second_wave_mask = (second_wave_mask) & (df.date <= sec_end_date)
# Add third wave stuff here
third_wave_mask = df.state.isin(third_states)
third_wave_mask = (third_wave_mask) & (df.date >= third_start_date)
third_wave_mask = (third_wave_mask) & (df.date <= third_end_date)
predictors = mov_values.copy()
# predictors.extend(['driving_7days','transit_7days','walking_7days','pc'])
# remove residential to see if it improves fit
# predictors.remove("residential_7days")
df["post_policy"] = (df.date >= ban).astype(int)
dfX = df.loc[fit_mask].sort_values("date")
df2X = df.loc[second_wave_mask].sort_values("date")
df3X = df.loc[third_wave_mask].sort_values("date")
dfX["is_first_wave"] = 0
for state in first_states:
dfX.loc[dfX.state == state, "is_first_wave"] = (
dfX.loc[dfX.state == state]
.date.isin(first_date_range[state])
.astype(int)
.values
)
df2X["is_sec_wave"] = 0
for state in sec_states:
df2X.loc[df2X.state == state, "is_sec_wave"] = (
df2X.loc[df2X.state == state]
.date.isin(sec_date_range[state])
.astype(int)
.values
)
# used to index what dates are also featured in omicron
omicron_date_range = pd.date_range(start=omicron_start_date, end=third_end_date)
df3X["is_third_wave"] = 0
for state in third_states:
df3X.loc[df3X.state == state, "is_third_wave"] = (
df3X.loc[df3X.state == state]
.date.isin(third_date_range[state])
.astype(int)
.values
)
# condition on being in third wave AND omicron
df3X.loc[df3X.state == state, "is_omicron_wave"] = (
(
df3X.loc[df3X.state == state].date.isin(omicron_date_range)
* df3X.loc[df3X.state == state].date.isin(third_date_range[state])
)
.astype(int)
.values
)
data_by_state = {}
sec_data_by_state = {}
third_data_by_state = {}
for value in ["mean", "std", "local", "imported"]:
data_by_state[value] = pd.pivot(
dfX[["state", value, "date"]], index="date", columns="state", values=value
).sort_index(axis="columns")
# account for dates pre pre second wave
if df2X.loc[df2X.state == sec_states[0]].shape[0] == 0:
print("making empty")
sec_data_by_state[value] = pd.DataFrame(columns=sec_states).astype(float)
else:
sec_data_by_state[value] = pd.pivot(
df2X[["state", value, "date"]],
index="date",
columns="state",
values=value,
).sort_index(axis="columns")
# account for dates pre pre third wave
if df3X.loc[df3X.state == third_states[0]].shape[0] == 0:
print("making empty")
third_data_by_state[value] = pd.DataFrame(columns=third_states).astype(
float
)
else:
third_data_by_state[value] = pd.pivot(
df3X[["state", value, "date"]],
index="date",
columns="state",
values=value,
).sort_index(axis="columns")
# now add in the summary stats for Omicron Reff
for value in ["mean_omicron", "std_omicron"]:
if df3X.loc[df3X.state == third_states[0]].shape[0] == 0:
print("making empty")
third_data_by_state[value] = pd.DataFrame(columns=third_states).astype(
float
)
else:
third_data_by_state[value] = pd.pivot(
df3X[["state", value, "date"]],
index="date",
columns="state",
values=value,
).sort_index(axis="columns")
# FIRST PHASE
mobility_by_state = []
mobility_std_by_state = []
count_by_state = []
respond_by_state = []
mask_wearing_count_by_state = []
mask_wearing_respond_by_state = []
include_in_first_wave = []
# filtering survey responses to dates before this wave fitting
survey_respond = survey_respond_base.loc[: dfX.date.values[-1]]
survey_counts = survey_counts_base.loc[: dfX.date.values[-1]]
mask_wearing_respond = mask_wearing_respond_base.loc[: dfX.date.values[-1]]
mask_wearing_counts = mask_wearing_counts_base.loc[: dfX.date.values[-1]]
for state in first_states:
mobility_by_state.append(dfX.loc[dfX.state == state, predictors].values / 100)
mobility_std_by_state.append(
dfX.loc[dfX.state == state, [val + "_std" for val in predictors]].values
/ 100
)
count_by_state.append(survey_counts.loc[start_date:first_end_date, state].values)
respond_by_state.append(survey_respond.loc[start_date:first_end_date, state].values)
mask_wearing_count_by_state.append(
mask_wearing_counts.loc[start_date:first_end_date, state].values
)
mask_wearing_respond_by_state.append(
mask_wearing_respond.loc[start_date:first_end_date, state].values
)
include_in_first_wave.append(
dfX.loc[dfX.state == state, "is_first_wave"].values
)
# SECOND PHASE
sec_mobility_by_state = []
sec_mobility_std_by_state = []
sec_count_by_state = []
sec_respond_by_state = []
sec_mask_wearing_count_by_state = []
sec_mask_wearing_respond_by_state = []
include_in_sec_wave = []
# filtering survey responses to dates before this wave fitting
survey_respond = survey_respond_base.loc[: df2X.date.values[-1]]
survey_counts = survey_counts_base.loc[: df2X.date.values[-1]]
mask_wearing_respond = mask_wearing_respond_base.loc[: df2X.date.values[-1]]
mask_wearing_counts = mask_wearing_counts_base.loc[: df2X.date.values[-1]]
for state in sec_states:
sec_mobility_by_state.append(
df2X.loc[df2X.state == state, predictors].values / 100
)
sec_mobility_std_by_state.append(
df2X.loc[df2X.state == state, [val + "_std" for val in predictors]].values
/ 100
)
sec_count_by_state.append(
survey_counts.loc[sec_start_date:sec_end_date, state].values
)
sec_respond_by_state.append(
survey_respond.loc[sec_start_date:sec_end_date, state].values
)
sec_mask_wearing_count_by_state.append(
mask_wearing_counts.loc[sec_start_date:sec_end_date, state].values
)
sec_mask_wearing_respond_by_state.append(
mask_wearing_respond.loc[sec_start_date:sec_end_date, state].values
)
include_in_sec_wave.append(df2X.loc[df2X.state == state, "is_sec_wave"].values)
# THIRD WAVE
third_mobility_by_state = []
third_mobility_std_by_state = []
third_count_by_state = []
third_respond_by_state = []
third_mask_wearing_count_by_state = []
third_mask_wearing_respond_by_state = []
include_in_third_wave = []
include_in_omicron_wave = []
# filtering survey responses to dates before this wave fitting
survey_respond = survey_respond_base.loc[: df3X.date.values[-1]]
survey_counts = survey_counts_base.loc[: df3X.date.values[-1]]
mask_wearing_respond = mask_wearing_respond_base.loc[: df3X.date.values[-1]]
mask_wearing_counts = mask_wearing_counts_base.loc[: df3X.date.values[-1]]
for state in third_states:
third_mobility_by_state.append(
df3X.loc[df3X.state == state, predictors].values / 100
)
third_mobility_std_by_state.append(
df3X.loc[df3X.state == state, [val + "_std" for val in predictors]].values
/ 100
)
third_count_by_state.append(
survey_counts.loc[third_start_date:third_end_date, state].values
)
third_respond_by_state.append(
survey_respond.loc[third_start_date:third_end_date, state].values
)
third_mask_wearing_count_by_state.append(
mask_wearing_counts.loc[third_start_date:third_end_date, state].values
)
third_mask_wearing_respond_by_state.append(
mask_wearing_respond.loc[third_start_date:third_end_date, state].values
)
include_in_third_wave.append(
df3X.loc[df3X.state == state, "is_third_wave"].values
)
include_in_omicron_wave.append(
df3X.loc[df3X.state == state, "is_omicron_wave"].values
)
# Make state by state arrays
state_index = {state: i for i, state in enumerate(states_to_fit_all_waves)}
# get pop size array
pop_size_array = []
for s in states_to_fit_all_waves:
pop_size_array.append(pop_sizes[s])
# First phase
# rho calculated at data entry
if isinstance(df_state.index, pd.MultiIndex):
df_state = df_state.reset_index()
states = sorted(["NSW", "QLD", "VIC", "TAS", "SA", "WA", "ACT", "NT"])
fig, ax = plt.subplots(figsize=(24, 9), ncols=len(states), sharey=True)
states_to_fitd = {state: i + 1 for i, state in enumerate(first_states)}
for i, state in enumerate(states):
if state in first_states:
dates = df_Reff.loc[
(df_Reff.date >= start_date)
& (df_Reff.state == state)
& (df_Reff.date <= first_end_date)
].date
rho_samples = samples_mov_gamma[
[
"brho[" + str(j + 1) + "," + str(states_to_fitd[state]) + "]"
for j in range(dfX.loc[dfX.state == first_states[0]].shape[0])
]
]
ax[i].plot(dates, rho_samples.median(), label="fit", color="C0")
ax[i].fill_between(
dates,
rho_samples.quantile(0.25),
rho_samples.quantile(0.75),
color="C0",
alpha=0.4,
)
ax[i].fill_between(
dates,
rho_samples.quantile(0.05),
rho_samples.quantile(0.95),
color="C0",
alpha=0.4,
)
else:
sns.lineplot(
x="date_inferred",
y="rho",
data=df_state.loc[
(df_state.date_inferred >= start_date)
& (df_state.STATE == state)
& (df_state.date_inferred <= first_end_date)
],
ax=ax[i],
color="C1",
label="data",
)
sns.lineplot(
x="date",
y="rho",
data=df_Reff.loc[
(df_Reff.date >= start_date)
& (df_Reff.state == state)
& (df_Reff.date <= first_end_date)
],
ax=ax[i],
color="C1",
label="data",
)
sns.lineplot(
x="date",
y="rho_moving",
data=df_Reff.loc[
(df_Reff.date >= start_date)
& (df_Reff.state == state)
& (df_Reff.date <= first_end_date)
],
ax=ax[i],
color="C2",
label="moving",
)
dates = dfX.loc[dfX.state == first_states[0]].date
ax[i].tick_params("x", rotation=90)
ax[i].xaxis.set_major_locator(plt.MaxNLocator(4))
ax[i].set_title(state)
ax[0].set_ylabel("Proportion of imported cases")
plt.legend()
plt.savefig(
figs_dir + data_date.strftime("%Y-%m-%d") + "rho_first_phase.png", dpi=144
)
# Second phase
if df2X.shape[0] > 0:
fig, ax = plt.subplots(
figsize=(24, 9), ncols=len(sec_states), sharey=True, squeeze=False
)
states_to_fitd = {state: i + 1 for i, state in enumerate(sec_states)}
pos = 0
for i, state in enumerate(sec_states):
# Google mobility only up to a certain date, so take only up to that value
dates = df2X.loc[
(df2X.state == state) & (df2X.is_sec_wave == 1)
].date.values
rho_samples = samples_mov_gamma[
[
"brho_sec[" + str(j + 1) + "]"
for j in range(
pos, pos + df2X.loc[df2X.state == state].is_sec_wave.sum()
)
]
]
pos = pos + df2X.loc[df2X.state == state].is_sec_wave.sum()
ax[0, i].plot(dates, rho_samples.median(), label="fit", color="C0")
ax[0, i].fill_between(
dates,
rho_samples.quantile(0.25),
rho_samples.quantile(0.75),
color="C0",
alpha=0.4,
)
ax[0, i].fill_between(
dates,
rho_samples.quantile(0.05),
rho_samples.quantile(0.95),
color="C0",
alpha=0.4,
)
sns.lineplot(
x="date_inferred",
y="rho",
data=df_state.loc[
(df_state.date_inferred >= sec_start_date)
& (df_state.STATE == state)
& (df_state.date_inferred <= sec_end_date)
],
ax=ax[0, i],
color="C1",
label="data",
)
sns.lineplot(
x="date",
y="rho",
data=df_Reff.loc[
(df_Reff.date >= sec_start_date)
& (df_Reff.state == state)
& (df_Reff.date <= sec_end_date)
],
ax=ax[0, i],
color="C1",
label="data",
)
sns.lineplot(
x="date",
y="rho_moving",
data=df_Reff.loc[
(df_Reff.date >= sec_start_date)
& (df_Reff.state == state)
& (df_Reff.date <= sec_end_date)
],
ax=ax[0, i],
color="C2",
label="moving",
)
dates = dfX.loc[dfX.state == sec_states[0]].date
ax[0, i].tick_params("x", rotation=90)
ax[0, i].xaxis.set_major_locator(plt.MaxNLocator(4))
ax[0, i].set_title(state)
ax[0, 0].set_ylabel("Proportion of imported cases")
plt.legend()
plt.savefig(
figs_dir + data_date.strftime("%Y-%m-%d") + "rho_sec_phase.png", dpi=144
)
df_rho_third_all_states = pd.DataFrame()
df_rho_third_tmp = pd.DataFrame()
# Third phase
if df3X.shape[0] > 0:
fig, ax = plt.subplots(
figsize=(9, 24), nrows=len(third_states), sharex=True, squeeze=False
)
states_to_fitd = {state: i + 1 for i, state in enumerate(third_states)}
pos = 0
for i, state in enumerate(third_states):
# Google mobility only up to a certain date, so take only up to that value
dates = df3X.loc[
(df3X.state == state) & (df3X.is_third_wave == 1)
].date.values
rho_samples = samples_mov_gamma[
[
"brho_third[" + str(j + 1) + "]"
for j in range(
pos, pos + df3X.loc[df3X.state == state].is_third_wave.sum()
)
]
]
pos = pos + df3X.loc[df3X.state == state].is_third_wave.sum()
df_rho_third_tmp = rho_samples.T
df_rho_third_tmp["date"] = dates
df_rho_third_tmp["state"] = state
df_rho_third_all_states = pd.concat([df_rho_third_all_states, df_rho_third_tmp])
ax[i, 0].plot(dates, rho_samples.median(), label="fit", color="C0")
ax[i, 0].fill_between(
dates,
rho_samples.quantile(0.25),
rho_samples.quantile(0.75),
color="C0",
alpha=0.4,
)
ax[i, 0].fill_between(
dates,
rho_samples.quantile(0.05),
rho_samples.quantile(0.95),
color="C0",
alpha=0.4,
)
sns.lineplot(
x="date_inferred",
y="rho",
data=df_state.loc[
(df_state.date_inferred >= third_start_date)
& (df_state.STATE == state)
& (df_state.date_inferred <= third_end_date)
],
ax=ax[i, 0],
color="C1",
label="data",
)
sns.lineplot(
x="date",
y="rho",
data=df_Reff.loc[
(df_Reff.date >= third_start_date)
& (df_Reff.state == state)
& (df_Reff.date <= third_end_date)
],
ax=ax[i, 0],
color="C1",
label="data",
)
sns.lineplot(
x="date",
y="rho_moving",
data=df_Reff.loc[
(df_Reff.date >= third_start_date)
& (df_Reff.state == state)
& (df_Reff.date <= third_end_date)
],
ax=ax[i, 0],
color="C2",
label="moving",
)
dates = dfX.loc[dfX.state == third_states[0]].date
ax[i, 0].tick_params("x", rotation=90)
ax[i, 0].xaxis.set_major_locator(plt.MaxNLocator(4))
ax[i, 0].set_title(state)
ax[i, 0].set_ylabel("Proportion of imported cases")
plt.legend()
plt.savefig(
figs_dir + data_date.strftime("%Y-%m-%d") + "rho_third_phase.png", dpi=144,
)
df_rho_third_all_states.to_csv(
"results/"
+ data_date.strftime("%Y-%m-%d")
+ "/rho_samples"
+ data_date.strftime("%Y-%m-%d")
+ ".csv"
)
# plotting
fig, ax = plt.subplots(figsize=(12, 9))
# sample from the priors for RL and RI
samples_mov_gamma["R_L_prior"] = np.random.gamma(
1.8 * 1.8 / 0.05, 0.05 / 1.8, size=samples_mov_gamma.shape[0]
)
samples_mov_gamma["R_I_prior"] = np.random.gamma(
0.5 ** 2 / 0.2, 0.2 / 0.5, size=samples_mov_gamma.shape[0]
)
samples_mov_gamma["R_L_national"] = np.random.gamma(
samples_mov_gamma.R_L.values ** 2 / samples_mov_gamma.sig.values,
samples_mov_gamma.sig.values / samples_mov_gamma.R_L.values,
)
sns.violinplot(
x="variable",
y="value",
data=pd.melt(
samples_mov_gamma[[
col for col in samples_mov_gamma
if "R" in col and col not in ("R_I0", "R_I0_omicron")
]]
),
ax=ax,
cut=0,
)
ax.set_yticks(
[1],
minor=True,
)
ax.set_yticks([0, 2, 3], minor=False)
ax.set_yticklabels([0, 2, 3], minor=False)
ax.set_ylim((0, 3))
# state labels in alphabetical
ax.set_xticklabels(
[
"R_I",
"R_I_omicron",
"R_L0 mean",
"R_L0 ACT",
"R_L0 NSW",
"R_L0 NT",
"R_L0 QLD",
"R_L0 SA",
"R_L0 TAS",
"R_L0 VIC",
"R_L0 WA",
"R_L0 prior",
"R_I prior",
"R_L0 national",
]
)
ax.set_xlabel("")
ax.set_ylabel("Effective reproduction number")
ax.tick_params("x", rotation=90)
ax.yaxis.grid(which="minor", linestyle="--", color="black", linewidth=2)
plt.tight_layout()
plt.savefig(figs_dir + data_date.strftime("%Y-%m-%d") + "R_priors.png", dpi=144)
# Making a new figure that doesn't include the priors
fig, ax = plt.subplots(figsize=(12, 9))
small_plot_cols = ["R_Li[" + str(i) + "]" for i in range(1, 9)] + ["R_I"]
sns.violinplot(
x="variable",
y="value",
data=pd.melt(samples_mov_gamma[small_plot_cols]),
ax=ax,
cut=0,
)
ax.set_yticks(
[1],
minor=True,
)
ax.set_yticks([0, 2, 3], minor=False)
ax.set_yticklabels([0, 2, 3], minor=False)
ax.set_ylim((0, 3))
# state labels in alphabetical
ax.set_xticklabels(
[
"$R_L0$ ACT",
"$R_L0$ NSW",
"$R_L0$ NT",
"$R_L0$ QLD",
"$R_L0$ SA",
"$R_L0$ TAS",
"$R_L0$ VIC",
"$R_L0$ WA",
"$R_I$",
]
)
ax.tick_params("x", rotation=90)
ax.set_xlabel("")
ax.set_ylabel("Effective reproduction number")
ax.yaxis.grid(which="minor", linestyle="--", color="black", linewidth=2)
plt.tight_layout()
plt.savefig(
figs_dir + data_date.strftime("%Y-%m-%d") + "R_priors_(without_priors).png",
dpi=288,
)
# Making a new figure that doesn't include the priors
fig, ax = plt.subplots(figsize=(12, 9))
samples_mov_gamma["voc_effect_third_prior"] = np.random.gamma(
1.5 * 1.5 / 0.05, 0.05 / 1.5, size=samples_mov_gamma.shape[0]
)
small_plot_cols = [
"voc_effect_third_prior",
"voc_effect_delta",
"voc_effect_omicron",
]
sns.violinplot(
x="variable",
y="value",
data=pd.melt(samples_mov_gamma[small_plot_cols]),
ax=ax,
cut=0,
)
ax.set_yticks([1], minor=True)
# ax.set_yticks([0, 0.5, 1, 1.5, 2, 2.5, 3], minor=False)
# ax.set_yticklabels([0, 0.5, 1, 1.5, 2, 2.5, 3], minor=False)
# ax.set_ylim((0, 1))
# state labels in alphabetical
ax.set_xticklabels(["VoC (prior)", "VoC (Delta)", "VoC (Omicron)"])
# ax.tick_params('x', rotation=90)
ax.set_xlabel("")
ax.set_ylabel("value")
ax.yaxis.grid(which="minor", linestyle="--", color="black", linewidth=2)
plt.tight_layout()
plt.savefig(
figs_dir + data_date.strftime("%Y-%m-%d") + "voc_effect_posteriors.png",
dpi=288,
)
posterior = samples_mov_gamma[["bet[" + str(i + 1) + "]" for i in range(len(predictors))]]
split = True
md = "power" # samples_mov_gamma.md.values
posterior.columns = [val for val in predictors]
long = pd.melt(posterior)
fig, ax2 = plt.subplots(figsize=(12, 9))
ax2 = sns.violinplot(x="variable", y="value", data=long, ax=ax2, color="C0")
ax2.plot([0] * len(predictors), linestyle="dashed", alpha=0.6, color="grey")
ax2.tick_params(axis="x", rotation=90)
ax2.set_title("Coefficients of mobility indices")
ax2.set_xlabel("Social mobility index")
ax2.set_xticklabels([var[:-6] for var in predictors])
ax2.set_xticklabels(
[
"Retail and Recreation",
"Grocery and Pharmacy",
"Parks",
"Transit Stations",
"Workplaces",
"Residential",
]
)
ax2.tick_params("x", rotation=15)
plt.tight_layout()
plt.savefig(
figs_dir + data_date.strftime("%Y-%m-%d") + "mobility_posteriors.png",
dpi=288,
)
# plot the TP's
RL_by_state = {
state: samples_mov_gamma["R_Li[" + str(i + 1) + "]"].values
for state, i in state_index.items()
}
ax3 = predict_plot(
samples_mov_gamma,
df.loc[(df.date >= start_date) & (df.date <= first_end_date)],
moving=True,
grocery=True,
rho=first_states,
)
for ax in ax3:
for a in ax:
a.set_ylim((0, 2.5))
a.set_xlim((pd.to_datetime(start_date), pd.to_datetime(first_end_date)))
plt.savefig(
figs_dir + data_date.strftime("%Y-%m-%d") + "Reff_first_phase.png",
dpi=144,
)
if df2X.shape[0] > 0:
df["is_sec_wave"] = 0
for state in sec_states:
df.loc[df.state == state, "is_sec_wave"] = (
df.loc[df.state == state]
.date.isin(sec_date_range[state])
.astype(int)
.values
)
# plot only if there is second phase data - have to have second_phase=True
ax4 = predict_plot(
samples_mov_gamma,
df.loc[(df.date >= sec_start_date) & (df.date <= sec_end_date)],
moving=True,
grocery=True,
rho=sec_states,
second_phase=True,
)
for ax in ax4:
for a in ax:
a.set_ylim((0, 2.5))
plt.savefig(
figs_dir + data_date.strftime("%Y-%m-%d") + "Reff_sec_phase.png", dpi=144
)
# remove plots from memory
fig.clear()
plt.close(fig)
# Load in vaccination data by state and date
vaccination_by_state = pd.read_csv(
"data/vaccine_effect_timeseries_" + data_date.strftime("%Y-%m-%d") + ".csv",
parse_dates=["date"],
)
# there are a couple NA's early on in the time series but is likely due to slightly
# different start dates
vaccination_by_state.fillna(1, inplace=True)
# we take the whole set of estimates up to the end of the forecast period
# (with 10 days padding which won't be used in the forecast)
vaccination_by_state = vaccination_by_state[
(
vaccination_by_state.date
>= pd.to_datetime(third_start_date) - timedelta(days=1)
)
& (
vaccination_by_state.date
<= pd.to_datetime(data_date) + timedelta(days=num_forecast_days + 10)
)
]
vaccination_by_state_delta = vaccination_by_state.loc[
vaccination_by_state["variant"] == "Delta"
][["state", "date", "effect"]]
vaccination_by_state_omicron = vaccination_by_state.loc[
vaccination_by_state["variant"] == "Omicron"
][["state", "date", "effect"]]
vaccination_by_state_delta = vaccination_by_state_delta.pivot(
index="state", columns="date", values="effect"
) # Convert to matrix form
vaccination_by_state_omicron = vaccination_by_state_omicron.pivot(
index="state", columns="date", values="effect"
) # Convert to matrix form
# If we are missing recent vaccination data, fill it in with the most recent available data.
latest_vacc_data = vaccination_by_state_omicron.columns[-1]
if latest_vacc_data < pd.to_datetime(third_end_date):
vaccination_by_state_delta = pd.concat(
[vaccination_by_state_delta]
+ [
pd.Series(vaccination_by_state_delta[latest_vacc_data], name=day)
for day in pd.date_range(start=latest_vacc_data, end=third_end_date)
],
axis=1,
)
vaccination_by_state_omicron = pd.concat(
[vaccination_by_state_omicron]
+ [
pd.Series(vaccination_by_state_omicron[latest_vacc_data], name=day)
for day in pd.date_range(start=latest_vacc_data, end=third_end_date)
],
axis=1,
)
# get the dates for vaccination
dates = vaccination_by_state_delta.columns
third_days = {k: v.shape[0] for (k, v) in third_date_range.items()}
third_days_cumulative = np.append([0], np.cumsum([v for v in third_days.values()]))
delta_ve_idx_ranges = {
k: range(third_days_cumulative[i], third_days_cumulative[i + 1])
for (i, k) in enumerate(third_days.keys())
}
third_days_tot = sum(v for v in third_days.values())
# construct a range of dates for omicron which starts at the maximum of the start date
# for that state or the Omicron start date
third_omicron_date_range = {
k: pd.date_range(
start=max(v[0], pd.to_datetime(omicron_start_date)), end=v[-1]
).values
for (k, v) in third_date_range.items()
}
third_omicron_days = {k: v.shape[0] for (k, v) in third_omicron_date_range.items()}
third_omicron_days_cumulative = np.append(
[0], np.cumsum([v for v in third_omicron_days.values()])
)
omicron_ve_idx_ranges = {
k: range(third_omicron_days_cumulative[i], third_omicron_days_cumulative[i + 1])
for (i, k) in enumerate(third_omicron_days.keys())
}
third_omicron_days_tot = sum(v for v in third_omicron_days.values())
# extrac the samples
delta_ve_samples = samples_mov_gamma[
["ve_delta[" + str(j + 1) + "]" for j in range(third_days_tot)]
].T
omicron_ve_samples = samples_mov_gamma[
["ve_omicron[" + str(j + 1) + "]" for j in range(third_omicron_days_tot)]
].T
# now we plot and save the adjusted ve time series to be read in by the forecasting
plot_adjusted_ve(
data_date,
samples_mov_gamma,
states,
vaccination_by_state_delta,
third_states,
third_date_range,
delta_ve_samples,
delta_ve_idx_ranges,
figs_dir,
"delta",
)
plot_adjusted_ve(
data_date,
samples_mov_gamma,
states,
vaccination_by_state_omicron,
third_states,
third_omicron_date_range,
omicron_ve_samples,
omicron_ve_idx_ranges,
figs_dir,
"omicron",
)
if df3X.shape[0] > 0:
df["is_third_wave"] = 0
for state in third_states:
df.loc[df.state == state, "is_third_wave"] = (
df.loc[df.state == state]
.date.isin(third_date_range[state])
.astype(int)
.values
)
# plot only if there is third phase data - have to have third_phase=True
ax4 = macro_factor_plots(
samples_mov_gamma,
df.loc[(df.date >= third_start_date) & (df.date <= third_end_date)],
) # by states....
for ax in ax4:
for a in ax:
a.set_ylim((0, 1.25))
# a.set_xlim((start_date,end_date))
plt.savefig(
figs_dir + data_date.strftime("%Y-%m-%d") + "macro_factor_comp.png",
dpi=144,
)
# remove plots from memory
fig.clear()
plt.close(fig)
df["is_third_wave"] = 0
for state in third_states:
df.loc[df.state == state, "is_third_wave"] = (
df.loc[df.state == state]
.date.isin(third_date_range[state])
.astype(int)
.values
)
# plot only if there is third phase data - have to have third_phase=True
ax4 = predict_plot(
samples_mov_gamma,
df.loc[(df.date >= third_start_date) & (df.date <= third_end_date)],
moving=True,
grocery=True,
rho=third_states,
third_phase=True,
) # by states....
for ax in ax4:
for a in ax:
a.set_ylim((0, 2.5))
# a.set_xlim((start_date,end_date))
plt.savefig(
figs_dir + data_date.strftime("%Y-%m-%d") + "Reff_third_phase_combined.png",
dpi=144,
)
# remove plots from memory
fig.clear()
plt.close(fig)
# plot only if there is third phase data - have to have third_phase=True
ax4 = predict_plot(
samples_mov_gamma,
df.loc[(df.date >= third_start_date) & (df.date <= third_end_date)],
moving=True,
grocery=True,
rho=third_states,
third_phase=True,
third_plot_type="delta"
) # by states....
for ax in ax4:
for a in ax:
a.set_ylim((0, 2.5))
# a.set_xlim((start_date,end_date))
plt.savefig(
figs_dir + data_date.strftime("%Y-%m-%d") + "Reff_third_phase_delta.png",
dpi=144,
)
# remove plots from memory
fig.clear()
plt.close(fig)
for param in ("micro", "macro", "susceptibility"):
# plot only if there is third phase data - have to have third_phase=True
ax4 = predict_multiplier_plot(
samples_mov_gamma,
df.loc[(df.date >= third_start_date) & (df.date <= third_end_date)],
param=param,
) # by states....
for ax in ax4:
for a in ax:
if param == "macro":
a.set_ylim((0, 1.25))
else:
a.set_ylim((0, 1.1))
plt.savefig(
figs_dir + data_date.strftime("%Y-%m-%d") + param + "_factor.png",
dpi=144,
)
# remove plots from memory
fig.clear()
plt.close(fig)
if df3X.shape[0] > 0:
df["is_omicron_wave"] = 0
for state in third_states:
df.loc[df.state == state, "is_omicron_wave"] = (
df.loc[df.state == state]
.date.isin(third_omicron_date_range[state])
.astype(int)
.values
)
# plot only if there is third phase data - have to have third_phase=True
ax4 = predict_plot(
samples_mov_gamma,
df.loc[(df.date >= omicron_start_date) & (df.date <= third_end_date)],
moving=True,
grocery=True,
rho=third_states,
third_phase=True,
third_plot_type="omicron"
) # by states....
for ax in ax4:
for a in ax:
a.set_ylim((0, 2.5))
# a.set_xlim((start_date,end_date))
plt.savefig(
figs_dir + data_date.strftime("%Y-%m-%d") + "Reff_third_phase_omicron.png",
dpi=144,
)
# remove plots from memory
fig.clear()
plt.close(fig)
# plot the omicron proportion
# create a range of dates from the beginning of Omicron to use for producing the Omicron
# proportion
omicron_date_range = pd.date_range(
omicron_start_date, pd.to_datetime(data_date) + timedelta(45)
)
prop_omicron_to_delta = np.array([])
# create array of times to plot against
t = np.tile(range(len(omicron_date_range)), (samples_mov_gamma.shape[0], 1)).T
fig, ax = plt.subplots(figsize=(15, 12), nrows=4, ncols=2, sharex=True, sharey=True)
for (i, state) in enumerate(third_states):
m0 = np.tile(samples_mov_gamma.loc[:, "m0[" + str(i + 1) + "]"], (len(omicron_date_range), 1))
m1 = np.tile(samples_mov_gamma.loc[:, "m1[" + str(i + 1) + "]"], (len(omicron_date_range), 1))
# m1 = 1.0
r = np.tile(samples_mov_gamma.loc[:, "r[" + str(i + 1) + "]"], (len(omicron_date_range), 1))
tau = np.tile(samples_mov_gamma.loc[:, "tau[" + str(i + 1) + "]"] , (len(omicron_date_range), 1))
omicron_start_date_tmp = max(
pd.to_datetime(omicron_start_date), third_date_range[state][0]
)
omicron_date_range_tmp = pd.date_range(
omicron_start_date_tmp, third_date_range[state][-1]
)
# if state in {"TAS", "WA", "NT"}:
# prop_omicron_to_delta_tmp = m1
# else:
# prop_omicron_to_delta_tmp = m0 + (m1 - m0) / (1 + np.exp(-r * (t - tau)))
prop_omicron_to_delta_tmp = m0 + (m1 - m0) / (1 + np.exp(-r * (t - tau)))
ax[i // 2, i % 2].plot(
omicron_date_range,
np.median(prop_omicron_to_delta_tmp, axis=1),
)
ax[i // 2, i % 2].fill_between(
omicron_date_range,
np.quantile(prop_omicron_to_delta_tmp, 0.05, axis=1),
np.quantile(prop_omicron_to_delta_tmp, 0.95, axis=1),
alpha=0.2,
)
ax[i // 2, i % 2].axvline(
omicron_date_range_tmp[0], ls="--", c="k", lw=1
)
ax[i // 2, i % 2].axvline(
omicron_date_range_tmp[-1], ls="--", c="k", lw=1
)
ax[i // 2, i % 2].set_title(state)
ax[i // 2, i % 2].xaxis.set_major_locator(plt.MaxNLocator(3))
ax[i // 2, 0].set_ylabel("Proportion of Omicron\ncases to Delta")
if len(prop_omicron_to_delta) == 0:
prop_omicron_to_delta = prop_omicron_to_delta_tmp[:, -len(omicron_date_range_tmp):]
else:
prop_omicron_to_delta = np.hstack(
(
prop_omicron_to_delta,
prop_omicron_to_delta_tmp[:, -len(omicron_date_range_tmp):],
)
)
fig.tight_layout()
plt.savefig(
figs_dir + data_date.strftime("%Y-%m-%d") + "omicron_proportion.png", dpi=144
)
# need to rotate to put into a good format
prop_omicron_to_delta = prop_omicron_to_delta.T
df_prop_omicron_to_delta = pd.DataFrame(
prop_omicron_to_delta,
columns=[
"prop_omicron_to_delta." + str(i+1) for i in range(prop_omicron_to_delta.shape[1])
]
)
df_prop_omicron_to_delta.to_csv(
"results/"
+ data_date.strftime("%Y-%m-%d")
+ "/prop_omicron_to_delta"
+ data_date.strftime("%Y-%m-%d")
+ ".csv"
)
# saving the final processed posterior samples to h5 for generate_RL_forecasts.py
var_to_csv = predictors
samples_mov_gamma[predictors] = samples_mov_gamma[
["bet[" + str(i + 1) + "]" for i in range(len(predictors))]
]
# var_to_csv = [
# "R_I",
# "R_I_omicron",
# "R_L",
# "sig",
# "theta_masks",
# "theta_md",
# "voc_effect_alpha",
# "voc_effect_delta",
# "voc_effect_omicron",
# "sus_dep_factor",
# ]
var_to_csv = [
"R_I",
"R_I_omicron",
"R_L",
"sig",
"theta_masks",
"theta_md",
"voc_effect_alpha",
"voc_effect_delta",
"voc_effect_omicron",
]
var_to_csv = var_to_csv + [col for col in samples_mov_gamma if "phi" in col]
var_to_csv = (
var_to_csv
+ predictors
+ ["R_Li[" + str(i + 1) + "]" for i in range(len(states_to_fit_all_waves))]
)
var_to_csv = var_to_csv + ["ve_delta[" + str(j + 1) + "]" for j in range(third_days_tot)]
var_to_csv = var_to_csv + [
"ve_omicron[" + str(j + 1) + "]" for j in range(third_omicron_days_tot)
]
var_to_csv = var_to_csv + ["r[" + str(j + 1) + "]" for j in range(len(third_states))]
var_to_csv = var_to_csv + ["tau[" + str(j + 1) + "]" for j in range(len(third_states))]
var_to_csv = var_to_csv + ["m0[" + str(j + 1) + "]" for j in range(len(third_states))]
var_to_csv = var_to_csv + ["m1[" + str(j + 1) + "]" for j in range(len(third_states))]
# save the posterior
samples_mov_gamma[var_to_csv].to_hdf(
"results/"
+ data_date.strftime("%Y-%m-%d")
+ "/soc_mob_posterior"
+ data_date.strftime("%Y-%m-%d")
+ ".h5",
key="samples",
)
return None
def main(data_date, run_flag=0):
    """
    Drive the stan fitting pipeline in separable stages to limit peak memory.

    Parameters
    ----------
    data_date : date-like
        Date stamp of the data snapshot being fitted.
    run_flag : int, optional
        Selects which stage(s) to execute:
        0 (default) : full pipeline — data prep, inference and plotting.
        1 : generate the input data and save it.
        2 : run the inference using the data produced by stage 1.
        3 : run the plotting / post-processing methods only.
    """
    run_data = run_flag in (0, 1)
    run_inference = run_flag in (0, 2)
    run_plots = run_flag in (0, 3)

    if run_data:
        get_data_for_posterior(data_date=data_date)

    if run_inference:
        # Sampler configuration for the stan run.
        run_stan(
            data_date=data_date,
            num_chains=4,
            num_samples=1000,
            num_warmup_samples=500,
            max_treedepth=12,
        )

    if run_plots:
        # Strip the susceptibility-depletion term out of Reff for each strain
        # before producing the posterior plots.
        for strain in ("Delta", "Omicron"):
            # remove_sus_from_Reff(strain=strain, data_date=data_date)
            remove_sus_with_waning_from_Reff(strain=strain, data_date=data_date)
        plot_and_save_posterior_samples(data_date=data_date)

    return None
if __name__ == "__main__":
"""
If we are running the script here (which is always) then this ensures things run appropriately.
"""
data_date = argv[1]
try:
run_flag = int(argv[2])
except:
run_flag = 0
main(data_date, run_flag=run_flag) | [
"sys.path.insert",
"pandas.read_csv",
"numpy.array",
"seaborn.violinplot",
"datetime.timedelta",
"pandas.to_datetime",
"pandas.date_range",
"cmdstanpy.CmdStanModel",
"pandas.pivot_table",
"params.get_all_p_detect_old",
"matplotlib.pyplot.close",
"numpy.exp",
"numpy.random.gamma",
"pandas.D... | [((91, 121), 'sys.path.insert', 'sys.path.insert', (['(0)', '"""TP_model"""'], {}), "(0, 'TP_model')\n", (106, 121), False, 'import sys\n'), ((122, 169), 'sys.path.insert', 'sys.path.insert', (['(0)', '"""TP_model/fit_and_forecast"""'], {}), "(0, 'TP_model/fit_and_forecast')\n", (137, 169), False, 'import sys\n'), ((473, 494), 'matplotlib.use', 'matplotlib.use', (['"""Agg"""'], {}), "('Agg')\n", (487, 494), False, 'import matplotlib\n'), ((3431, 3456), 'pandas.to_datetime', 'pd.to_datetime', (['data_date'], {}), '(data_date)\n', (3445, 3456), True, 'import pandas as pd\n'), ((3802, 3816), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (3814, 3816), True, 'import pandas as pd\n'), ((3874, 3889), 'glob.glob', 'glob.glob', (['path'], {}), '(path)\n', (3883, 3889), False, 'import glob\n'), ((4265, 4293), 'pandas.to_datetime', 'pd.to_datetime', (['surveys.date'], {}), '(surveys.date)\n', (4279, 4293), True, 'import pandas as pd\n'), ((5320, 5399), 'pandas.pivot_table', 'pd.pivot_table', ([], {'data': 'always', 'index': '"""date"""', 'columns': '"""state"""', 'values': '"""proportion"""'}), "(data=always, index='date', columns='state', values='proportion')\n", (5334, 5399), True, 'import pandas as pd\n'), ((5852, 5866), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (5864, 5866), True, 'import pandas as pd\n'), ((5937, 5952), 'glob.glob', 'glob.glob', (['path'], {}), '(path)\n', (5946, 5952), False, 'import glob\n'), ((6401, 6434), 'pandas.to_datetime', 'pd.to_datetime', (['mask_wearing.date'], {}), '(mask_wearing.date)\n', (6415, 6434), True, 'import pandas as pd\n'), ((7513, 7609), 'pandas.pivot_table', 'pd.pivot_table', ([], {'data': 'mask_wearing_always', 'index': '"""date"""', 'columns': '"""state"""', 'values': '"""proportion"""'}), "(data=mask_wearing_always, index='date', columns='state',\n values='proportion')\n", (7527, 7609), True, 'import pandas as pd\n'), ((10134, 10159), 'sys.path.insert', 'sys.path.insert', (['(0)', 
'"""../"""'], {}), "(0, '../')\n", (10149, 10159), False, 'import sys\n'), ((15072, 15131), 'pandas.date_range', 'pd.date_range', ([], {'start': 'omicron_start_date', 'end': 'third_end_date'}), '(start=omicron_start_date, end=third_end_date)\n', (15085, 15131), True, 'import pandas as pd\n'), ((24084, 24204), 'params.get_all_p_detect_old', 'get_all_p_detect_old', ([], {'states': 'third_states', 'end_date': 'third_end_date', 'num_days': "df3X.loc[df3X.state == 'NSW'].shape[0]"}), "(states=third_states, end_date=third_end_date, num_days\n =df3X.loc[df3X.state == 'NSW'].shape[0])\n", (24104, 24204), False, 'from params import truncation_days, start_date, third_start_date, alpha_start_date, omicron_start_date, omicron_only_date, omicron_dominance_date, pop_sizes, num_forecast_days, get_all_p_detect_old, get_all_p_detect\n'), ((24255, 24299), 'pandas.DataFrame', 'pd.DataFrame', (['p_detect'], {'columns': 'third_states'}), '(p_detect, columns=third_states)\n', (24267, 24299), True, 'import pandas as pd\n'), ((29426, 29451), 'pandas.to_datetime', 'pd.to_datetime', (['data_date'], {}), '(data_date)\n', (29440, 29451), True, 'import pandas as pd\n'), ((29854, 29890), 'os.makedirs', 'os.makedirs', (['figs_dir'], {'exist_ok': '(True)'}), '(figs_dir, exist_ok=True)\n', (29865, 29890), False, 'import os\n'), ((29895, 29934), 'os.makedirs', 'os.makedirs', (['results_dir'], {'exist_ok': '(True)'}), '(results_dir, exist_ok=True)\n', (29906, 29934), False, 'import os\n'), ((30883, 30922), 'cmdstanpy.CmdStanModel', 'CmdStanModel', ([], {'stan_file': 'rho_model_gamma'}), '(stan_file=rho_model_gamma)\n', (30895, 30922), False, 'from cmdstanpy import CmdStanModel\n'), ((32926, 32951), 'pandas.to_datetime', 'pd.to_datetime', (['data_date'], {}), '(data_date)\n', (32940, 32951), True, 'import pandas as pd\n'), ((33448, 33462), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (33460, 33462), True, 'import pandas as pd\n'), ((33520, 33535), 'glob.glob', 'glob.glob', (['path'], {}), 
'(path)\n', (33529, 33535), False, 'import glob\n'), ((33911, 33939), 'pandas.to_datetime', 'pd.to_datetime', (['surveys.date'], {}), '(surveys.date)\n', (33925, 33939), True, 'import pandas as pd\n'), ((34966, 35045), 'pandas.pivot_table', 'pd.pivot_table', ([], {'data': 'always', 'index': '"""date"""', 'columns': '"""state"""', 'values': '"""proportion"""'}), "(data=always, index='date', columns='state', values='proportion')\n", (34980, 35045), True, 'import pandas as pd\n'), ((35499, 35513), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (35511, 35513), True, 'import pandas as pd\n'), ((35584, 35599), 'glob.glob', 'glob.glob', (['path'], {}), '(path)\n', (35593, 35599), False, 'import glob\n'), ((36048, 36081), 'pandas.to_datetime', 'pd.to_datetime', (['mask_wearing.date'], {}), '(mask_wearing.date)\n', (36062, 36081), True, 'import pandas as pd\n'), ((37160, 37256), 'pandas.pivot_table', 'pd.pivot_table', ([], {'data': 'mask_wearing_always', 'index': '"""date"""', 'columns': '"""state"""', 'values': '"""proportion"""'}), "(data=mask_wearing_always, index='date', columns='state',\n values='proportion')\n", (37174, 37256), True, 'import pandas as pd\n'), ((39654, 39679), 'sys.path.insert', 'sys.path.insert', (['(0)', '"""../"""'], {}), "(0, '../')\n", (39669, 39679), False, 'import sys\n'), ((44579, 44638), 'pandas.date_range', 'pd.date_range', ([], {'start': 'omicron_start_date', 'end': 'third_end_date'}), '(start=omicron_start_date, end=third_end_date)\n', (44592, 44638), True, 'import pandas as pd\n'), ((54845, 54857), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (54855, 54857), True, 'import matplotlib.pyplot as plt\n'), ((57989, 58003), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (58001, 58003), True, 'import pandas as pd\n'), ((58027, 58041), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (58039, 58041), True, 'import pandas as pd\n'), ((61557, 61586), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': 
'(12, 9)'}), '(figsize=(12, 9))\n', (61569, 61586), True, 'import matplotlib.pyplot as plt\n'), ((61668, 61746), 'numpy.random.gamma', 'np.random.gamma', (['(1.8 * 1.8 / 0.05)', '(0.05 / 1.8)'], {'size': 'samples_mov_gamma.shape[0]'}), '(1.8 * 1.8 / 0.05, 0.05 / 1.8, size=samples_mov_gamma.shape[0])\n', (61683, 61746), True, 'import numpy as np\n'), ((61798, 61873), 'numpy.random.gamma', 'np.random.gamma', (['(0.5 ** 2 / 0.2)', '(0.2 / 0.5)'], {'size': 'samples_mov_gamma.shape[0]'}), '(0.5 ** 2 / 0.2, 0.2 / 0.5, size=samples_mov_gamma.shape[0])\n', (61813, 61873), True, 'import numpy as np\n'), ((61929, 62076), 'numpy.random.gamma', 'np.random.gamma', (['(samples_mov_gamma.R_L.values ** 2 / samples_mov_gamma.sig.values)', '(samples_mov_gamma.sig.values / samples_mov_gamma.R_L.values)'], {}), '(samples_mov_gamma.R_L.values ** 2 / samples_mov_gamma.sig.\n values, samples_mov_gamma.sig.values / samples_mov_gamma.R_L.values)\n', (61944, 62076), True, 'import numpy as np\n'), ((63181, 63199), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (63197, 63199), True, 'import matplotlib.pyplot as plt\n'), ((63358, 63387), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(12, 9)'}), '(figsize=(12, 9))\n', (63370, 63387), True, 'import matplotlib.pyplot as plt\n'), ((64297, 64315), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (64313, 64315), True, 'import matplotlib.pyplot as plt\n'), ((64514, 64543), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(12, 9)'}), '(figsize=(12, 9))\n', (64526, 64543), True, 'import matplotlib.pyplot as plt\n'), ((64594, 64672), 'numpy.random.gamma', 'np.random.gamma', (['(1.5 * 1.5 / 0.05)', '(0.05 / 1.5)'], {'size': 'samples_mov_gamma.shape[0]'}), '(1.5 * 1.5 / 0.05, 0.05 / 1.5, size=samples_mov_gamma.shape[0])\n', (64609, 64672), True, 'import numpy as np\n'), ((65432, 65450), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (65448, 65450), 
True, 'import matplotlib.pyplot as plt\n'), ((65798, 65816), 'pandas.melt', 'pd.melt', (['posterior'], {}), '(posterior)\n', (65805, 65816), True, 'import pandas as pd\n'), ((65833, 65862), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(12, 9)'}), '(figsize=(12, 9))\n', (65845, 65862), True, 'import matplotlib.pyplot as plt\n'), ((65874, 65944), 'seaborn.violinplot', 'sns.violinplot', ([], {'x': '"""variable"""', 'y': '"""value"""', 'data': 'long', 'ax': 'ax2', 'color': '"""C0"""'}), "(x='variable', y='value', data=long, ax=ax2, color='C0')\n", (65888, 65944), True, 'import seaborn as sns\n'), ((66499, 66517), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (66515, 66517), True, 'import matplotlib.pyplot as plt\n'), ((77637, 77649), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (77645, 77649), True, 'import numpy as np\n'), ((77793, 77867), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(15, 12)', 'nrows': '(4)', 'ncols': '(2)', 'sharex': '(True)', 'sharey': '(True)'}), '(figsize=(15, 12), nrows=4, ncols=2, sharex=True, sharey=True)\n', (77805, 77867), True, 'import matplotlib.pyplot as plt\n'), ((2661, 2691), 'pandas.to_datetime', 'pd.to_datetime', (['third_end_date'], {}), '(third_end_date)\n', (2675, 2691), True, 'import pandas as pd\n'), ((4597, 4620), 'pandas.to_datetime', 'pd.to_datetime', (['"""today"""'], {}), "('today')\n", (4611, 4620), True, 'import pandas as pd\n'), ((6666, 6689), 'pandas.to_datetime', 'pd.to_datetime', (['"""today"""'], {}), "('today')\n", (6680, 6689), True, 'import pandas as pd\n'), ((12591, 12625), 'pandas.Timedelta', 'pd.Timedelta', ([], {'days': 'truncation_days'}), '(days=truncation_days)\n', (12603, 12625), True, 'import pandas as pd\n'), ((29165, 29191), 'pickle.dump', 'pickle.dump', (['input_data', 'f'], {}), '(input_data, f)\n', (29176, 29191), False, 'import pickle\n'), ((29576, 29590), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (29587, 29590), False, 
'import pickle\n'), ((34243, 34266), 'pandas.to_datetime', 'pd.to_datetime', (['"""today"""'], {}), "('today')\n", (34257, 34266), True, 'import pandas as pd\n'), ((36313, 36336), 'pandas.to_datetime', 'pd.to_datetime', (['"""today"""'], {}), "('today')\n", (36327, 36336), True, 'import pandas as pd\n'), ((42108, 42142), 'pandas.Timedelta', 'pd.Timedelta', ([], {'days': 'truncation_days'}), '(days=truncation_days)\n', (42120, 42142), True, 'import pandas as pd\n'), ((53930, 54115), 'seaborn.lineplot', 'sns.lineplot', ([], {'x': '"""date"""', 'y': '"""rho"""', 'data': 'df_Reff.loc[(df_Reff.date >= start_date) & (df_Reff.state == state) & (\n df_Reff.date <= first_end_date)]', 'ax': 'ax[i]', 'color': '"""C1"""', 'label': '"""data"""'}), "(x='date', y='rho', data=df_Reff.loc[(df_Reff.date >=\n start_date) & (df_Reff.state == state) & (df_Reff.date <=\n first_end_date)], ax=ax[i], color='C1', label='data')\n", (53942, 54115), True, 'import seaborn as sns\n'), ((54261, 54455), 'seaborn.lineplot', 'sns.lineplot', ([], {'x': '"""date"""', 'y': '"""rho_moving"""', 'data': 'df_Reff.loc[(df_Reff.date >= start_date) & (df_Reff.state == state) & (\n df_Reff.date <= first_end_date)]', 'ax': 'ax[i]', 'color': '"""C2"""', 'label': '"""moving"""'}), "(x='date', y='rho_moving', data=df_Reff.loc[(df_Reff.date >=\n start_date) & (df_Reff.state == state) & (df_Reff.date <=\n first_end_date)], ax=ax[i], color='C2', label='moving')\n", (54273, 54455), True, 'import seaborn as sns\n'), ((57829, 57841), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (57839, 57841), True, 'import matplotlib.pyplot as plt\n'), ((61204, 61216), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (61214, 61216), True, 'import matplotlib.pyplot as plt\n'), ((68181, 68195), 'matplotlib.pyplot.close', 'plt.close', (['fig'], {}), '(fig)\n', (68190, 68195), True, 'import matplotlib.pyplot as plt\n'), ((69835, 69865), 'pandas.to_datetime', 'pd.to_datetime', (['third_end_date'], {}), 
'(third_end_date)\n', (69849, 69865), True, 'import pandas as pd\n'), ((73596, 73610), 'matplotlib.pyplot.close', 'plt.close', (['fig'], {}), '(fig)\n', (73605, 73610), True, 'import matplotlib.pyplot as plt\n'), ((74620, 74634), 'matplotlib.pyplot.close', 'plt.close', (['fig'], {}), '(fig)\n', (74629, 74634), True, 'import matplotlib.pyplot as plt\n'), ((75380, 75394), 'matplotlib.pyplot.close', 'plt.close', (['fig'], {}), '(fig)\n', (75389, 75394), True, 'import matplotlib.pyplot as plt\n'), ((77323, 77337), 'matplotlib.pyplot.close', 'plt.close', (['fig'], {}), '(fig)\n', (77332, 77337), True, 'import matplotlib.pyplot as plt\n'), ((78513, 78579), 'pandas.date_range', 'pd.date_range', (['omicron_start_date_tmp', 'third_date_range[state][-1]'], {}), '(omicron_start_date_tmp, third_date_range[state][-1])\n', (78526, 78579), True, 'import pandas as pd\n'), ((3585, 3616), 'datetime.timedelta', 'timedelta', ([], {'days': 'truncation_days'}), '(days=truncation_days)\n', (3594, 3616), False, 'from datetime import timedelta\n'), ((3924, 3963), 'pandas.read_csv', 'pd.read_csv', (['file'], {'parse_dates': "['date']"}), "(file, parse_dates=['date'])\n", (3935, 3963), True, 'import pandas as pd\n'), ((5997, 6036), 'pandas.read_csv', 'pd.read_csv', (['file'], {'parse_dates': "['date']"}), "(file, parse_dates=['date'])\n", (6008, 6036), True, 'import pandas as pd\n'), ((7651, 7742), 'pandas.pivot_table', 'pd.pivot_table', ([], {'data': 'mask_wearing_always', 'index': '"""date"""', 'columns': '"""state"""', 'values': '"""count"""'}), "(data=mask_wearing_always, index='date', columns='state',\n values='count')\n", (7665, 7742), True, 'import pandas as pd\n'), ((7797, 7894), 'pandas.pivot_table', 'pd.pivot_table', ([], {'data': 'mask_wearing_always', 'index': '"""date"""', 'columns': '"""state"""', 'values': '"""respondents"""'}), "(data=mask_wearing_always, index='date', columns='state',\n values='respondents')\n", (7811, 7894), True, 'import pandas as pd\n'), ((11408, 11461), 
'pandas.date_range', 'pd.date_range', ([], {'start': '"""2020-03-01"""', 'end': 'first_end_date'}), "(start='2020-03-01', end=first_end_date)\n", (11421, 11461), True, 'import pandas as pd\n'), ((11485, 11538), 'pandas.date_range', 'pd.date_range', ([], {'start': '"""2020-03-01"""', 'end': 'first_end_date'}), "(start='2020-03-01', end=first_end_date)\n", (11498, 11538), True, 'import pandas as pd\n'), ((11561, 11614), 'pandas.date_range', 'pd.date_range', ([], {'start': '"""2020-03-01"""', 'end': 'first_end_date'}), "(start='2020-03-01', end=first_end_date)\n", (11574, 11614), True, 'import pandas as pd\n'), ((11638, 11691), 'pandas.date_range', 'pd.date_range', ([], {'start': '"""2020-03-01"""', 'end': 'first_end_date'}), "(start='2020-03-01', end=first_end_date)\n", (11651, 11691), True, 'import pandas as pd\n'), ((11715, 11768), 'pandas.date_range', 'pd.date_range', ([], {'start': '"""2020-03-01"""', 'end': 'first_end_date'}), "(start='2020-03-01', end=first_end_date)\n", (11728, 11768), True, 'import pandas as pd\n'), ((11791, 11844), 'pandas.date_range', 'pd.date_range', ([], {'start': '"""2020-03-01"""', 'end': 'first_end_date'}), "(start='2020-03-01', end=first_end_date)\n", (11804, 11844), True, 'import pandas as pd\n'), ((12103, 12154), 'pandas.date_range', 'pd.date_range', ([], {'start': '"""2020-06-01"""', 'end': '"""2021-01-19"""'}), "(start='2020-06-01', end='2021-01-19')\n", (12116, 12154), True, 'import pandas as pd\n'), ((12828, 12881), 'pandas.date_range', 'pd.date_range', ([], {'start': '"""2021-08-15"""', 'end': 'third_end_date'}), "(start='2021-08-15', end=third_end_date)\n", (12841, 12881), True, 'import pandas as pd\n'), ((12905, 12958), 'pandas.date_range', 'pd.date_range', ([], {'start': '"""2021-06-25"""', 'end': 'third_end_date'}), "(start='2021-06-25', end=third_end_date)\n", (12918, 12958), True, 'import pandas as pd\n'), ((13060, 13113), 'pandas.date_range', 'pd.date_range', ([], {'start': '"""2021-07-30"""', 'end': 'third_end_date'}), 
"(start='2021-07-30', end=third_end_date)\n", (13073, 13113), True, 'import pandas as pd\n'), ((13136, 13189), 'pandas.date_range', 'pd.date_range', ([], {'start': '"""2021-12-10"""', 'end': 'third_end_date'}), "(start='2021-12-10', end=third_end_date)\n", (13149, 13189), True, 'import pandas as pd\n'), ((13213, 13266), 'pandas.date_range', 'pd.date_range', ([], {'start': '"""2021-12-20"""', 'end': 'third_end_date'}), "(start='2021-12-20', end=third_end_date)\n", (13226, 13266), True, 'import pandas as pd\n'), ((13290, 13343), 'pandas.date_range', 'pd.date_range', ([], {'start': '"""2021-07-10"""', 'end': 'third_end_date'}), "(start='2021-07-10', end=third_end_date)\n", (13303, 13343), True, 'import pandas as pd\n'), ((13366, 13419), 'pandas.date_range', 'pd.date_range', ([], {'start': '"""2022-01-01"""', 'end': 'third_end_date'}), "(start='2022-01-01', end=third_end_date)\n", (13379, 13419), True, 'import pandas as pd\n'), ((23533, 23567), 'pandas.to_datetime', 'pd.to_datetime', (['omicron_start_date'], {}), '(omicron_start_date)\n', (23547, 23567), True, 'import pandas as pd\n'), ((23570, 23602), 'pandas.to_datetime', 'pd.to_datetime', (['third_start_date'], {}), '(third_start_date)\n', (23584, 23602), True, 'import pandas as pd\n'), ((23647, 23680), 'pandas.to_datetime', 'pd.to_datetime', (['omicron_only_date'], {}), '(omicron_only_date)\n', (23661, 23680), True, 'import pandas as pd\n'), ((23683, 23715), 'pandas.to_datetime', 'pd.to_datetime', (['third_start_date'], {}), '(third_start_date)\n', (23697, 23715), True, 'import pandas as pd\n'), ((23767, 23795), 'pandas.to_datetime', 'pd.to_datetime', (['"""2021-08-20"""'], {}), "('2021-08-20')\n", (23781, 23795), True, 'import pandas as pd\n'), ((23798, 23830), 'pandas.to_datetime', 'pd.to_datetime', (['third_start_date'], {}), '(third_start_date)\n', (23812, 23830), True, 'import pandas as pd\n'), ((33570, 33609), 'pandas.read_csv', 'pd.read_csv', (['file'], {'parse_dates': "['date']"}), "(file, 
parse_dates=['date'])\n", (33581, 33609), True, 'import pandas as pd\n'), ((35644, 35683), 'pandas.read_csv', 'pd.read_csv', (['file'], {'parse_dates': "['date']"}), "(file, parse_dates=['date'])\n", (35655, 35683), True, 'import pandas as pd\n'), ((37298, 37389), 'pandas.pivot_table', 'pd.pivot_table', ([], {'data': 'mask_wearing_always', 'index': '"""date"""', 'columns': '"""state"""', 'values': '"""count"""'}), "(data=mask_wearing_always, index='date', columns='state',\n values='count')\n", (37312, 37389), True, 'import pandas as pd\n'), ((37444, 37541), 'pandas.pivot_table', 'pd.pivot_table', ([], {'data': 'mask_wearing_always', 'index': '"""date"""', 'columns': '"""state"""', 'values': '"""respondents"""'}), "(data=mask_wearing_always, index='date', columns='state',\n values='respondents')\n", (37458, 37541), True, 'import pandas as pd\n'), ((40925, 40978), 'pandas.date_range', 'pd.date_range', ([], {'start': '"""2020-03-01"""', 'end': 'first_end_date'}), "(start='2020-03-01', end=first_end_date)\n", (40938, 40978), True, 'import pandas as pd\n'), ((41002, 41055), 'pandas.date_range', 'pd.date_range', ([], {'start': '"""2020-03-01"""', 'end': 'first_end_date'}), "(start='2020-03-01', end=first_end_date)\n", (41015, 41055), True, 'import pandas as pd\n'), ((41078, 41131), 'pandas.date_range', 'pd.date_range', ([], {'start': '"""2020-03-01"""', 'end': 'first_end_date'}), "(start='2020-03-01', end=first_end_date)\n", (41091, 41131), True, 'import pandas as pd\n'), ((41155, 41208), 'pandas.date_range', 'pd.date_range', ([], {'start': '"""2020-03-01"""', 'end': 'first_end_date'}), "(start='2020-03-01', end=first_end_date)\n", (41168, 41208), True, 'import pandas as pd\n'), ((41232, 41285), 'pandas.date_range', 'pd.date_range', ([], {'start': '"""2020-03-01"""', 'end': 'first_end_date'}), "(start='2020-03-01', end=first_end_date)\n", (41245, 41285), True, 'import pandas as pd\n'), ((41308, 41361), 'pandas.date_range', 'pd.date_range', ([], {'start': 
'"""2020-03-01"""', 'end': 'first_end_date'}), "(start='2020-03-01', end=first_end_date)\n", (41321, 41361), True, 'import pandas as pd\n'), ((41620, 41671), 'pandas.date_range', 'pd.date_range', ([], {'start': '"""2020-06-01"""', 'end': '"""2021-01-19"""'}), "(start='2020-06-01', end='2021-01-19')\n", (41633, 41671), True, 'import pandas as pd\n'), ((42345, 42398), 'pandas.date_range', 'pd.date_range', ([], {'start': '"""2021-08-15"""', 'end': 'third_end_date'}), "(start='2021-08-15', end=third_end_date)\n", (42358, 42398), True, 'import pandas as pd\n'), ((42422, 42475), 'pandas.date_range', 'pd.date_range', ([], {'start': '"""2021-06-25"""', 'end': 'third_end_date'}), "(start='2021-06-25', end=third_end_date)\n", (42435, 42475), True, 'import pandas as pd\n'), ((42577, 42630), 'pandas.date_range', 'pd.date_range', ([], {'start': '"""2021-07-30"""', 'end': 'third_end_date'}), "(start='2021-07-30', end=third_end_date)\n", (42590, 42630), True, 'import pandas as pd\n'), ((42653, 42706), 'pandas.date_range', 'pd.date_range', ([], {'start': '"""2021-12-10"""', 'end': 'third_end_date'}), "(start='2021-12-10', end=third_end_date)\n", (42666, 42706), True, 'import pandas as pd\n'), ((42730, 42783), 'pandas.date_range', 'pd.date_range', ([], {'start': '"""2021-12-20"""', 'end': 'third_end_date'}), "(start='2021-12-20', end=third_end_date)\n", (42743, 42783), True, 'import pandas as pd\n'), ((42807, 42860), 'pandas.date_range', 'pd.date_range', ([], {'start': '"""2021-07-10"""', 'end': 'third_end_date'}), "(start='2021-07-10', end=third_end_date)\n", (42820, 42860), True, 'import pandas as pd\n'), ((42883, 42936), 'pandas.date_range', 'pd.date_range', ([], {'start': '"""2022-01-01"""', 'end': 'third_end_date'}), "(start='2022-01-01', end=third_end_date)\n", (42896, 42936), True, 'import pandas as pd\n'), ((53523, 53741), 'seaborn.lineplot', 'sns.lineplot', ([], {'x': '"""date_inferred"""', 'y': '"""rho"""', 'data': 'df_state.loc[(df_state.date_inferred >= start_date) & 
(df_state.STATE ==\n state) & (df_state.date_inferred <= first_end_date)]', 'ax': 'ax[i]', 'color': '"""C1"""', 'label': '"""data"""'}), "(x='date_inferred', y='rho', data=df_state.loc[(df_state.\n date_inferred >= start_date) & (df_state.STATE == state) & (df_state.\n date_inferred <= first_end_date)], ax=ax[i], color='C1', label='data')\n", (53535, 53741), True, 'import seaborn as sns\n'), ((54736, 54754), 'matplotlib.pyplot.MaxNLocator', 'plt.MaxNLocator', (['(4)'], {}), '(4)\n', (54751, 54754), True, 'import matplotlib.pyplot as plt\n'), ((56363, 56590), 'seaborn.lineplot', 'sns.lineplot', ([], {'x': '"""date_inferred"""', 'y': '"""rho"""', 'data': 'df_state.loc[(df_state.date_inferred >= sec_start_date) & (df_state.STATE ==\n state) & (df_state.date_inferred <= sec_end_date)]', 'ax': 'ax[0, i]', 'color': '"""C1"""', 'label': '"""data"""'}), "(x='date_inferred', y='rho', data=df_state.loc[(df_state.\n date_inferred >= sec_start_date) & (df_state.STATE == state) & (\n df_state.date_inferred <= sec_end_date)], ax=ax[0, i], color='C1',\n label='data')\n", (56375, 56590), True, 'import seaborn as sns\n'), ((56778, 56968), 'seaborn.lineplot', 'sns.lineplot', ([], {'x': '"""date"""', 'y': '"""rho"""', 'data': 'df_Reff.loc[(df_Reff.date >= sec_start_date) & (df_Reff.state == state) & (\n df_Reff.date <= sec_end_date)]', 'ax': 'ax[0, i]', 'color': '"""C1"""', 'label': '"""data"""'}), "(x='date', y='rho', data=df_Reff.loc[(df_Reff.date >=\n sec_start_date) & (df_Reff.state == state) & (df_Reff.date <=\n sec_end_date)], ax=ax[0, i], color='C1', label='data')\n", (56790, 56968), True, 'import seaborn as sns\n'), ((57162, 57361), 'seaborn.lineplot', 'sns.lineplot', ([], {'x': '"""date"""', 'y': '"""rho_moving"""', 'data': 'df_Reff.loc[(df_Reff.date >= sec_start_date) & (df_Reff.state == state) & (\n df_Reff.date <= sec_end_date)]', 'ax': 'ax[0, i]', 'color': '"""C2"""', 'label': '"""moving"""'}), "(x='date', y='rho_moving', data=df_Reff.loc[(df_Reff.date >=\n 
sec_start_date) & (df_Reff.state == state) & (df_Reff.date <=\n sec_end_date)], ax=ax[0, i], color='C2', label='moving')\n", (57174, 57361), True, 'import seaborn as sns\n'), ((59140, 59194), 'pandas.concat', 'pd.concat', (['[df_rho_third_all_states, df_rho_third_tmp]'], {}), '([df_rho_third_all_states, df_rho_third_tmp])\n', (59149, 59194), True, 'import pandas as pd\n'), ((59720, 59951), 'seaborn.lineplot', 'sns.lineplot', ([], {'x': '"""date_inferred"""', 'y': '"""rho"""', 'data': 'df_state.loc[(df_state.date_inferred >= third_start_date) & (df_state.STATE ==\n state) & (df_state.date_inferred <= third_end_date)]', 'ax': 'ax[i, 0]', 'color': '"""C1"""', 'label': '"""data"""'}), "(x='date_inferred', y='rho', data=df_state.loc[(df_state.\n date_inferred >= third_start_date) & (df_state.STATE == state) & (\n df_state.date_inferred <= third_end_date)], ax=ax[i, 0], color='C1',\n label='data')\n", (59732, 59951), True, 'import seaborn as sns\n'), ((60139, 60333), 'seaborn.lineplot', 'sns.lineplot', ([], {'x': '"""date"""', 'y': '"""rho"""', 'data': 'df_Reff.loc[(df_Reff.date >= third_start_date) & (df_Reff.state == state) &\n (df_Reff.date <= third_end_date)]', 'ax': 'ax[i, 0]', 'color': '"""C1"""', 'label': '"""data"""'}), "(x='date', y='rho', data=df_Reff.loc[(df_Reff.date >=\n third_start_date) & (df_Reff.state == state) & (df_Reff.date <=\n third_end_date)], ax=ax[i, 0], color='C1', label='data')\n", (60151, 60333), True, 'import seaborn as sns\n'), ((60527, 60730), 'seaborn.lineplot', 'sns.lineplot', ([], {'x': '"""date"""', 'y': '"""rho_moving"""', 'data': 'df_Reff.loc[(df_Reff.date >= third_start_date) & (df_Reff.state == state) &\n (df_Reff.date <= third_end_date)]', 'ax': 'ax[i, 0]', 'color': '"""C2"""', 'label': '"""moving"""'}), "(x='date', y='rho_moving', data=df_Reff.loc[(df_Reff.date >=\n third_start_date) & (df_Reff.state == state) & (df_Reff.date <=\n third_end_date)], ax=ax[i, 0], color='C2', label='moving')\n", (60539, 60730), True, 'import seaborn 
as sns\n'), ((62170, 62290), 'pandas.melt', 'pd.melt', (["samples_mov_gamma[[col for col in samples_mov_gamma if 'R' in col and col\n not in ('R_I0', 'R_I0_omicron')]]"], {}), "(samples_mov_gamma[[col for col in samples_mov_gamma if 'R' in col and\n col not in ('R_I0', 'R_I0_omicron')]])\n", (62177, 62290), True, 'import pandas as pd\n'), ((63542, 63585), 'pandas.melt', 'pd.melt', (['samples_mov_gamma[small_plot_cols]'], {}), '(samples_mov_gamma[small_plot_cols])\n', (63549, 63585), True, 'import pandas as pd\n'), ((64884, 64927), 'pandas.melt', 'pd.melt', (['samples_mov_gamma[small_plot_cols]'], {}), '(samples_mov_gamma[small_plot_cols])\n', (64891, 64927), True, 'import pandas as pd\n'), ((76222, 76236), 'matplotlib.pyplot.close', 'plt.close', (['fig'], {}), '(fig)\n', (76231, 76236), True, 'import matplotlib.pyplot as plt\n'), ((77561, 77586), 'pandas.to_datetime', 'pd.to_datetime', (['data_date'], {}), '(data_date)\n', (77575, 77586), True, 'import pandas as pd\n'), ((77589, 77602), 'datetime.timedelta', 'timedelta', (['(45)'], {}), '(45)\n', (77598, 77602), False, 'from datetime import timedelta\n'), ((78407, 78441), 'pandas.to_datetime', 'pd.to_datetime', (['omicron_start_date'], {}), '(omicron_start_date)\n', (78421, 78441), True, 'import pandas as pd\n'), ((78981, 79025), 'numpy.median', 'np.median', (['prop_omicron_to_delta_tmp'], {'axis': '(1)'}), '(prop_omicron_to_delta_tmp, axis=1)\n', (78990, 79025), True, 'import numpy as np\n'), ((79130, 79182), 'numpy.quantile', 'np.quantile', (['prop_omicron_to_delta_tmp', '(0.05)'], {'axis': '(1)'}), '(prop_omicron_to_delta_tmp, 0.05, axis=1)\n', (79141, 79182), True, 'import numpy as np\n'), ((79196, 79248), 'numpy.quantile', 'np.quantile', (['prop_omicron_to_delta_tmp', '(0.95)'], {'axis': '(1)'}), '(prop_omicron_to_delta_tmp, 0.95, axis=1)\n', (79207, 79248), True, 'import numpy as np\n'), ((79614, 79632), 'matplotlib.pyplot.MaxNLocator', 'plt.MaxNLocator', (['(3)'], {}), '(3)\n', (79629, 79632), True, 'import 
matplotlib.pyplot as plt\n'), ((15923, 16011), 'pandas.pivot', 'pd.pivot', (["dfX[['state', value, 'date']]"], {'index': '"""date"""', 'columns': '"""state"""', 'values': 'value'}), "(dfX[['state', value, 'date']], index='date', columns='state',\n values=value)\n", (15931, 16011), True, 'import pandas as pd\n'), ((23448, 23480), 'pandas.to_datetime', 'pd.to_datetime', (['alpha_start_date'], {}), '(alpha_start_date)\n', (23462, 23480), True, 'import pandas as pd\n'), ((45430, 45518), 'pandas.pivot', 'pd.pivot', (["dfX[['state', value, 'date']]"], {'index': '"""date"""', 'columns': '"""state"""', 'values': 'value'}), "(dfX[['state', value, 'date']], index='date', columns='state',\n values=value)\n", (45438, 45518), True, 'import pandas as pd\n'), ((57702, 57720), 'matplotlib.pyplot.MaxNLocator', 'plt.MaxNLocator', (['(4)'], {}), '(4)\n', (57717, 57720), True, 'import matplotlib.pyplot as plt\n'), ((61073, 61091), 'matplotlib.pyplot.MaxNLocator', 'plt.MaxNLocator', (['(4)'], {}), '(4)\n', (61088, 61091), True, 'import matplotlib.pyplot as plt\n'), ((2076, 2108), 'pandas.to_datetime', 'pd.to_datetime', (['third_start_date'], {}), '(third_start_date)\n', (2090, 2108), True, 'import pandas as pd\n'), ((2111, 2128), 'datetime.timedelta', 'timedelta', ([], {'days': '(1)'}), '(days=1)\n', (2120, 2128), False, 'from datetime import timedelta\n'), ((2802, 2861), 'pandas.Series', 'pd.Series', (['vaccination_by_state[latest_vacc_data]'], {'name': 'day'}), '(vaccination_by_state[latest_vacc_data], name=day)\n', (2811, 2861), True, 'import pandas as pd\n'), ((5449, 5523), 'pandas.pivot_table', 'pd.pivot_table', ([], {'data': 'always', 'index': '"""date"""', 'columns': '"""state"""', 'values': '"""count"""'}), "(data=always, index='date', columns='state', values='count')\n", (5463, 5523), True, 'import pandas as pd\n'), ((5634, 5719), 'pandas.pivot_table', 'pd.pivot_table', ([], {'data': 'always', 'index': '"""date"""', 'columns': '"""state"""', 'values': '"""respondents"""'}), 
"(data=always, index='date', columns='state', values='respondents'\n )\n", (5648, 5719), True, 'import pandas as pd\n'), ((16280, 16312), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': 'sec_states'}), '(columns=sec_states)\n', (16292, 16312), True, 'import pandas as pd\n'), ((16380, 16469), 'pandas.pivot', 'pd.pivot', (["df2X[['state', value, 'date']]"], {'index': '"""date"""', 'columns': '"""state"""', 'values': 'value'}), "(df2X[['state', value, 'date']], index='date', columns='state',\n values=value)\n", (16388, 16469), True, 'import pandas as pd\n'), ((16760, 16794), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': 'third_states'}), '(columns=third_states)\n', (16772, 16794), True, 'import pandas as pd\n'), ((16894, 16983), 'pandas.pivot', 'pd.pivot', (["df3X[['state', value, 'date']]"], {'index': '"""date"""', 'columns': '"""state"""', 'values': 'value'}), "(df3X[['state', value, 'date']], index='date', columns='state',\n values=value)\n", (16902, 16983), True, 'import pandas as pd\n'), ((17343, 17377), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': 'third_states'}), '(columns=third_states)\n', (17355, 17377), True, 'import pandas as pd\n'), ((17477, 17566), 'pandas.pivot', 'pd.pivot', (["df3X[['state', value, 'date']]"], {'index': '"""date"""', 'columns': '"""state"""', 'values': 'value'}), "(df3X[['state', value, 'date']], index='date', columns='state',\n values=value)\n", (17485, 17566), True, 'import pandas as pd\n'), ((35095, 35169), 'pandas.pivot_table', 'pd.pivot_table', ([], {'data': 'always', 'index': '"""date"""', 'columns': '"""state"""', 'values': '"""count"""'}), "(data=always, index='date', columns='state', values='count')\n", (35109, 35169), True, 'import pandas as pd\n'), ((35280, 35365), 'pandas.pivot_table', 'pd.pivot_table', ([], {'data': 'always', 'index': '"""date"""', 'columns': '"""state"""', 'values': '"""respondents"""'}), "(data=always, index='date', columns='state', values='respondents'\n )\n", (35294, 35365), True, 
'import pandas as pd\n'), ((45750, 45782), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': 'sec_states'}), '(columns=sec_states)\n', (45762, 45782), True, 'import pandas as pd\n'), ((45850, 45939), 'pandas.pivot', 'pd.pivot', (["df2X[['state', value, 'date']]"], {'index': '"""date"""', 'columns': '"""state"""', 'values': 'value'}), "(df2X[['state', value, 'date']], index='date', columns='state',\n values=value)\n", (45858, 45939), True, 'import pandas as pd\n'), ((46230, 46264), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': 'third_states'}), '(columns=third_states)\n', (46242, 46264), True, 'import pandas as pd\n'), ((46364, 46453), 'pandas.pivot', 'pd.pivot', (["df3X[['state', value, 'date']]"], {'index': '"""date"""', 'columns': '"""state"""', 'values': 'value'}), "(df3X[['state', value, 'date']], index='date', columns='state',\n values=value)\n", (46372, 46453), True, 'import pandas as pd\n'), ((46826, 46860), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': 'third_states'}), '(columns=third_states)\n', (46838, 46860), True, 'import pandas as pd\n'), ((46960, 47049), 'pandas.pivot', 'pd.pivot', (["df3X[['state', value, 'date']]"], {'index': '"""date"""', 'columns': '"""state"""', 'values': 'value'}), "(df3X[['state', value, 'date']], index='date', columns='state',\n values=value)\n", (46968, 47049), True, 'import pandas as pd\n'), ((67091, 67117), 'pandas.to_datetime', 'pd.to_datetime', (['start_date'], {}), '(start_date)\n', (67105, 67117), True, 'import pandas as pd\n'), ((67119, 67149), 'pandas.to_datetime', 'pd.to_datetime', (['first_end_date'], {}), '(first_end_date)\n', (67133, 67149), True, 'import pandas as pd\n'), ((68829, 68861), 'pandas.to_datetime', 'pd.to_datetime', (['third_start_date'], {}), '(third_start_date)\n', (68843, 68861), True, 'import pandas as pd\n'), ((68864, 68881), 'datetime.timedelta', 'timedelta', ([], {'days': '(1)'}), '(days=1)\n', (68873, 68881), False, 'from datetime import timedelta\n'), ((68957, 68982), 
'pandas.to_datetime', 'pd.to_datetime', (['data_date'], {}), '(data_date)\n', (68971, 68982), True, 'import pandas as pd\n'), ((68985, 69023), 'datetime.timedelta', 'timedelta', ([], {'days': '(num_forecast_days + 10)'}), '(days=num_forecast_days + 10)\n', (68994, 69023), False, 'from datetime import timedelta\n'), ((69988, 70053), 'pandas.Series', 'pd.Series', (['vaccination_by_state_delta[latest_vacc_data]'], {'name': 'day'}), '(vaccination_by_state_delta[latest_vacc_data], name=day)\n', (69997, 70053), True, 'import pandas as pd\n'), ((70309, 70376), 'pandas.Series', 'pd.Series', (['vaccination_by_state_omicron[latest_vacc_data]'], {'name': 'day'}), '(vaccination_by_state_omicron[latest_vacc_data], name=day)\n', (70318, 70376), True, 'import pandas as pd\n'), ((78871, 78893), 'numpy.exp', 'np.exp', (['(-r * (t - tau))'], {}), '(-r * (t - tau))\n', (78877, 78893), True, 'import numpy as np\n'), ((2889, 2946), 'pandas.date_range', 'pd.date_range', ([], {'start': 'latest_vacc_data', 'end': 'third_end_date'}), '(start=latest_vacc_data, end=third_end_date)\n', (2902, 2946), True, 'import pandas as pd\n'), ((70081, 70138), 'pandas.date_range', 'pd.date_range', ([], {'start': 'latest_vacc_data', 'end': 'third_end_date'}), '(start=latest_vacc_data, end=third_end_date)\n', (70094, 70138), True, 'import pandas as pd\n'), ((70404, 70461), 'pandas.date_range', 'pd.date_range', ([], {'start': 'latest_vacc_data', 'end': 'third_end_date'}), '(start=latest_vacc_data, end=third_end_date)\n', (70417, 70461), True, 'import pandas as pd\n'), ((71194, 71228), 'pandas.to_datetime', 'pd.to_datetime', (['omicron_start_date'], {}), '(omicron_start_date)\n', (71208, 71228), True, 'import pandas as pd\n')] |
import numpy as np
import cv2
def computeH(p1, p2):
    """Estimate the homography mapping image-2 points onto image-1 points.

    INPUTS:
        p1 and p2 - Each are size (2 x N) matrices of corresponding (x, y)'
                 coordinates between two images
    OUTPUTS:
        H2to1 - a 3 x 3 matrix encoding the homography that best matches the
             linear equation p1 ~ H2to1 * p2 (in homogeneous coordinates),
             defined up to scale.
    """
    assert p1.shape[1] == p2.shape[1]
    assert p1.shape[0] == 2
    n = p1.shape[1]
    # Build the (2n x 9) DLT system A h = 0, two rows per correspondence:
    #   [-x -y -1   0  0  0   ux uy u]   (u-equation)
    #   [ 0  0  0  -x -y -1   vx vy v]   (v-equation)
    # where (x, y) is a p2 point and (u, v) the matching p1 point.
    homo_p2 = np.concatenate((p2, np.ones((1, n))), axis=0).T
    seg1 = np.zeros((2 * n, 3))
    seg1[::2] = -homo_p2
    seg2 = np.zeros((2 * n, 3))
    seg2[1::2] = -homo_p2
    pp2 = np.repeat(p2.T, 2, axis=0)
    col_p1 = p1.T.flatten()
    pp1 = np.repeat(col_p1[np.newaxis, :], 2, 0).T
    A = np.concatenate((seg1, seg2, pp1 * pp2, col_p1.reshape(-1, 1)), axis=1)
    # The least-squares null vector of A is the right singular vector with the
    # smallest singular value.  Taking the SVD of A directly is numerically
    # preferable to eigendecomposing A^T A with np.linalg.eig: forming A^T A
    # squares the condition number, and the generic eig routine may return
    # spurious complex components for a (numerically) symmetric matrix.
    _, _, vt = np.linalg.svd(A)
    H2to1 = vt[-1].reshape((3, 3))
    return H2to1
| [
"numpy.repeat",
"numpy.ones",
"numpy.dot",
"numpy.zeros",
"numpy.argmin"
] | [((530, 550), 'numpy.zeros', 'np.zeros', (['(2 * n, 3)'], {}), '((2 * n, 3))\n', (538, 550), True, 'import numpy as np\n'), ((588, 608), 'numpy.zeros', 'np.zeros', (['(2 * n, 3)'], {}), '((2 * n, 3))\n', (596, 608), True, 'import numpy as np\n'), ((646, 672), 'numpy.repeat', 'np.repeat', (['p2.T', '(2)'], {'axis': '(0)'}), '(p2.T, 2, axis=0)\n', (655, 672), True, 'import numpy as np\n'), ((711, 749), 'numpy.repeat', 'np.repeat', (['col_p1[np.newaxis, :]', '(2)', '(0)'], {}), '(col_p1[np.newaxis, :], 2, 0)\n', (720, 749), True, 'import numpy as np\n'), ((897, 911), 'numpy.dot', 'np.dot', (['A.T', 'A'], {}), '(A.T, A)\n', (903, 911), True, 'import numpy as np\n'), ((937, 955), 'numpy.argmin', 'np.argmin', (['e_value'], {}), '(e_value)\n', (946, 955), True, 'import numpy as np\n'), ((490, 505), 'numpy.ones', 'np.ones', (['(1, n)'], {}), '((1, n))\n', (497, 505), True, 'import numpy as np\n')] |
import numpy as np
from scipy import stats
import pandas as pd
# One-sample t-test for H0: mu = 1000 against a left-tailed alternative.
xbar = 990
h0 = 1000
s = 12.5
n = 30
st = (xbar - h0) / (s / np.sqrt(float(n)))
#print(st)
# Critical value from the t table (significance level alpha, df = n-1).
alpha = 0.05
t_alpha = stats.t.ppf(alpha, n-1)  # confidence level, df
#print(t_alpha) # the test statistic is below the critical value -> reject H0
# P-VALUE (one-sided survival function of |t|)
p_val = stats.t.sf(np.abs(st), n-1)
#print(p_val) # 0.05 > 0.0007 --> reject H0
## X2 ##
# Chi-square test of independence between smoking and exercise habits.
survey = pd.read_csv("Chapter01/survey.csv")
survey_tab = pd.crosstab(survey.Smoke, survey.Exer, margins=True)
# DataFrame.ix was deprecated and removed from pandas; .iloc is the
# positional replacement and drops the same "All" margins row/column here.
observed = survey_tab.iloc[:-1, :-1]
contg = stats.chi2_contingency(observed=observed)
p_value = round(contg[1], 3)
# p_value = 0.483 -> no significant association
# One-way ANOVA across three fertilizer groups.
fet = pd.read_csv("Chapter01/fetilizers.csv")
anova = stats.f_oneway(fet.fertilizer1, fet.fertilizer2, fet.fertilizer3)
# F_onewayResult(statistic=3.6634935025687523, pvalue=0.05063590143901569)
# H0 not rejected: none of the three groups differ significantly.
# 기각 X // 세 집단 중 어느 집단도 차이가 보이지 않는다. | [
"numpy.abs",
"scipy.stats.chi2_contingency",
"pandas.read_csv",
"scipy.stats.f_oneway",
"pandas.crosstab",
"scipy.stats.t.ppf"
] | [((190, 215), 'scipy.stats.t.ppf', 'stats.t.ppf', (['alpha', '(n - 1)'], {}), '(alpha, n - 1)\n', (201, 215), False, 'from scipy import stats\n'), ((369, 404), 'pandas.read_csv', 'pd.read_csv', (['"""Chapter01/survey.csv"""'], {}), "('Chapter01/survey.csv')\n", (380, 404), True, 'import pandas as pd\n'), ((418, 470), 'pandas.crosstab', 'pd.crosstab', (['survey.Smoke', 'survey.Exer'], {'margins': '(True)'}), '(survey.Smoke, survey.Exer, margins=True)\n', (429, 470), True, 'import pandas as pd\n'), ((514, 555), 'scipy.stats.chi2_contingency', 'stats.chi2_contingency', ([], {'observed': 'observed'}), '(observed=observed)\n', (536, 555), False, 'from scipy import stats\n'), ((618, 657), 'pandas.read_csv', 'pd.read_csv', (['"""Chapter01/fetilizers.csv"""'], {}), "('Chapter01/fetilizers.csv')\n", (629, 657), True, 'import pandas as pd\n'), ((666, 731), 'scipy.stats.f_oneway', 'stats.f_oneway', (['fet.fertilizer1', 'fet.fertilizer2', 'fet.fertilizer3'], {}), '(fet.fertilizer1, fet.fertilizer2, fet.fertilizer3)\n', (680, 731), False, 'from scipy import stats\n'), ((296, 306), 'numpy.abs', 'np.abs', (['st'], {}), '(st)\n', (302, 306), True, 'import numpy as np\n')] |
#!/usr/bin/python3
import sys
import os
import argparse
import traceback
import logging
import json
import math
import random
import hashlib
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import data
from board import Board, IllegalMoveError
from genboard_common import Model
class ShuffledDataset(torch.utils.data.IterableDataset):
    # Wraps another IterableDataset and yields its items in approximately
    # shuffled order via a fixed-size reservoir ("shuffle buffer"): fill the
    # buffer first, then for each newly read item emit a randomly chosen
    # buffered item and store the new item in the vacated slot.
    # Items that are Exception instances bypass the buffer and are yielded
    # immediately (the upstream dataset forwards reader errors as items).
    def __init__(self, dataset, shuffle_buffer_size):
        # dataset: the underlying IterableDataset to shuffle.
        # shuffle_buffer_size: number of items held for mixing; larger gives
        #   better shuffling at the cost of memory.
        super().__init__()
        self.dataset = dataset
        self.shuffle_buffer_size = shuffle_buffer_size
    def __iter__(self):
        worker_info = torch.utils.data.get_worker_info()
        if worker_info is None:
            # Single-process loading: seed purely from OS entropy.
            rand = random.Random(os.urandom(32))
        else:
            # Multi-worker loading: mix the worker id into the seed so each
            # worker produces a different shuffle order.
            rand = random.Random(os.urandom(32)+ "#ShuffledDataset#".encode() + str(worker_info.id).encode())
        shuffle_buffer = []
        try:
            it = iter(self.dataset)
            # Fill the reservoir before yielding any (non-exception) items.
            while len(shuffle_buffer) < self.shuffle_buffer_size:
                item = next(it)
                if isinstance(item, Exception):
                    yield item
                else:
                    shuffle_buffer.append(item)
        except StopIteration:
            # Dataset smaller than the buffer: shrink the buffer size to fit
            # so the randint bound below stays valid.
            self.shuffle_buffer_size = len(shuffle_buffer)
        print("Initial shuffle buffer filled", flush=True)
        rand.shuffle(shuffle_buffer)
        try:
            while True:
                try:
                    item = next(it)
                    if isinstance(item, Exception):
                        yield item
                    else:
                        # Swap the new item into a random slot and emit the
                        # item that previously occupied it.
                        idx = rand.randint(0, self.shuffle_buffer_size-1)
                        old_item = shuffle_buffer[idx]
                        shuffle_buffer[idx] = item
                        yield old_item
                except StopIteration:
                    break
            # Source exhausted: drain whatever is still buffered.
            while len(shuffle_buffer) > 0:
                yield shuffle_buffer.pop()
        except GeneratorExit:
            pass
def rand_triangular(rand, maxvalue):
    """Draw an integer in [0, maxvalue] with linearly decreasing probability.

    Uses inverse-transform sampling of a triangular density peaked at 0, so
    small values are drawn more often than large ones.  Consumes exactly one
    call to rand.random().
    """
    sample = (maxvalue + 1) * (1.0 - math.sqrt(rand.random()))
    clipped = int(math.floor(sample))
    # Clamp into the valid range (floor can land at -0.x edge cases, and the
    # continuous sample can reach maxvalue+1 exactly at random()==0).
    return min(max(clipped, 0), maxvalue)
def random_subinterval(rand, size):
    """Return a random ordered pair (lo, hi) of coordinates with lo <= hi.

    Half the time the endpoints are drawn with an edge-biased triangular
    distribution (and may be -1, i.e. just off the board); otherwise they are
    uniform in [0, size-1].  With probability 0.5 both endpoints are then
    mirrored, which biases anchored rectangles toward either board edge.
    """
    # Anchor rectangles near the edge more often
    edge_biased = rand.random() < 0.5
    if edge_biased:
        lo = rand_triangular(rand, size) - 1
        hi = rand_triangular(rand, size) - 1
    else:
        lo = rand.randint(0, size - 1)
        hi = rand.randint(0, size - 1)
    if rand.random() < 0.5:
        lo, hi = size - lo - 1, size - hi - 1
    return (hi, lo) if lo > hi else (lo, hi)
class SgfDataset(torch.utils.data.IterableDataset):
    # Streams training samples out of a collection of SGF game records.
    # For sampled positions it yields (inputs, result, aux) float32 tensors:
    #   inputs: 8 x size x size feature planes (see channel comments below)
    #   result: length-3 one-hot of the stone at the inference point
    #   aux: 3 x size x size one-hot planes of the full board
    # Fixes vs. the original: removed a stray no-op `result` expression
    # statement, and the is_kgs check now tests "\\kgs" instead of testing
    # "\\KGS" twice (mirroring the is_fox line).
    def __init__(self, files, max_turn, break_prob_per_turn, sample_prob, endless):
        # files: list of SGF file paths (shuffled in place each pass).
        # max_turn: stop reading a game after this many moves.
        # break_prob_per_turn: chance per move of abandoning the game early.
        # sample_prob: chance per move of emitting a training sample.
        # endless: if True, loop over the file list forever.
        self.files = files
        self.max_turn = max_turn
        self.break_prob_per_turn = break_prob_per_turn
        self.sample_prob = sample_prob
        self.endless = endless
    def __iter__(self):
        worker_info = torch.utils.data.get_worker_info()
        if worker_info is None:
            rand = random.Random(os.urandom(32))
        else:
            # Mix the worker id into the seed so each loader worker reads
            # the files in a different order.
            rand = random.Random(os.urandom(32)+ "#SgfDataset#".encode() + str(worker_info.id).encode())
        files = self.files
        cpudevice = torch.device("cpu")
        try:
            while True:
                rand.shuffle(files)
                file_count = 0
                error_count = 0
                print("Iterator beginning reading of files %d / %d" % (file_count, len(files)), flush=True)
                for filename in files:
                    try:
                        (metadata,setup,moves,rules) = data.load_sgf_moves_exn(filename)
                    except Exception as e:
                        error_count += 1
                        continue
                    # Only even 19x19 games!
                    if metadata.size != 19 or len(setup) != 0 or (metadata.handicap is not None and metadata.handicap != 0):
                        continue
                    board = Board(size=metadata.size)
                    turn_number = 0
                    for (pla,loc) in moves:
                        if rand.random() < self.sample_prob:
                            inputs = torch.zeros((8,metadata.size,metadata.size),dtype=torch.float32,device=cpudevice)
                            result = torch.zeros((3,),dtype=torch.float32,device=cpudevice)
                            aux = torch.zeros((3,metadata.size,metadata.size),dtype=torch.float32,device=cpudevice)
                            # Rectangle of positions whose stones are always revealed.
                            (alwaysknownxmin,alwaysknownxmax) = random_subinterval(rand,metadata.size)
                            (alwaysknownymin,alwaysknownymax) = random_subinterval(rand,metadata.size)
                            if alwaysknownxmin <= 0 and alwaysknownxmax >= metadata.size-1 and alwaysknownymin <= 0 and alwaysknownymax >= metadata.size-1:
                                # Rectangle covers the whole board - nothing to infer, skip.
                                pass
                            else:
                                # Channel 1: On-board
                                inputs[1,:,:].fill_(1.0)
                                num_always_known_poses = 0
                                if alwaysknownxmax < 0 or alwaysknownxmin >= metadata.size or alwaysknownymax < 0 or alwaysknownymin >= metadata.size:
                                    num_always_known_poses = 0
                                else:
                                    num_always_known_poses = (
                                        ( min(alwaysknownxmax, metadata.size-1) - max(alwaysknownxmin, 0) + 1) *
                                        ( min(alwaysknownymax, metadata.size-1) - max(alwaysknownymin, 0) + 1)
                                    )
                                num_not_always_known_poses = metadata.size * metadata.size - num_always_known_poses
                                # Position (in scan order over non-always-known points) to predict.
                                inferenceidx = rand.randint(0,num_not_always_known_poses-1)
                                # Random dihedral symmetry applied when reading the board.
                                flipx = rand.random() < 0.5
                                flipy = rand.random() < 0.5
                                swapxy = rand.random() < 0.5
                                idx = 0
                                for y in range(metadata.size):
                                    for x in range(metadata.size):
                                        pos = y * metadata.size + x
                                        always_known = (x >= alwaysknownxmin and x <= alwaysknownxmax and y >= alwaysknownymin and y <= alwaysknownymax)
                                        sx = x
                                        sy = y
                                        if flipx:
                                            sx = metadata.size - sx - 1
                                        if flipy:
                                            sy = metadata.size - sy - 1
                                        if swapxy:
                                            tmp = sx
                                            sx = sy
                                            sy = tmp
                                        stone = board.board[board.loc(sx,sy)]
                                        # Channel 4: Unknown
                                        if idx > inferenceidx and not always_known:
                                            inputs[4,y,x] = 1.0
                                        # Channel 0: Next inference point
                                        elif idx == inferenceidx and not always_known:
                                            inputs[0,y,x] = 1.0
                                            if stone == Board.BLACK:
                                                result[1] = 1.0
                                            elif stone == Board.WHITE:
                                                result[2] = 1.0
                                            else:
                                                result[0] = 1.0
                                        else:
                                            # Channel 2: Black
                                            if stone == Board.BLACK:
                                                inputs[2,y,x] = 1.0
                                            # Channel 3: White
                                            elif stone == Board.WHITE:
                                                inputs[3,y,x] = 1.0
                                        if stone == Board.BLACK:
                                            aux[1,y,x] = 1.0
                                        elif stone == Board.WHITE:
                                            aux[2,y,x] = 1.0
                                        else:
                                            aux[0,y,x] = 1.0
                                        if not always_known:
                                            idx += 1
                                assert(idx == num_not_always_known_poses)
                                if rand.random() < 0.3:
                                    turn_noise_stdev = 0.0
                                    reported_turn = turn_number
                                else:
                                    turn_noise_stdev = (rand.random() ** 2.0) * 100
                                    reported_turn = turn_number + rand.normalvariate(0.0,turn_noise_stdev)
                                # Channel 5: Turn number / 100
                                inputs[5,:,:].fill_(reported_turn / 100.0)
                                # Channel 6: Noise stdev in turn number / 50
                                inputs[6,:,:].fill_(turn_noise_stdev / 50.0)
                                # Channel 7: Source
                                is_kgs = ("/kgs" in filename) or ("\\kgs" in filename) or ("/KGS" in filename) or ("\\KGS" in filename)
                                is_fox = ("/fox" in filename) or ("\\fox" in filename) or ("/FOX" in filename) or ("\\FOX" in filename)
                                if is_kgs:
                                    inputs[7,:,:].fill_(1.0)
                                elif is_fox:
                                    inputs[7,:,:].fill_(-1.0)
                                # Random output-side dihedral augmentation.
                                if rand.random() < 0.5:
                                    if rand.random() < 0.5:
                                        inputs = torch.flip(inputs,[1,2])
                                        aux = torch.flip(aux,[1,2])
                                    else:
                                        inputs = torch.flip(inputs,[1])
                                        aux = torch.flip(aux,[1])
                                else:
                                    if rand.random() < 0.5:
                                        inputs = torch.flip(inputs,[2])
                                        aux = torch.flip(aux,[2])
                                    else:
                                        pass
                                if rand.random() < 0.5:
                                    inputs = torch.transpose(inputs,1,2)
                                    aux = torch.transpose(aux,1,2)
                                yield (inputs,result,aux)
                        try:
                            board.play(pla,loc)
                        except IllegalMoveError as e:
                            # On illegal move in the SGF, don't attempt to recover, just move on to new game
                            print("Illegal move, skipping file " + filename + ":" + str(e), flush=True)
                            break
                        turn_number += 1
                        if turn_number > self.max_turn:
                            break
                        if rand.random() < self.break_prob_per_turn:
                            break
                    file_count += 1
                    if file_count % 200 == 0:
                        print("Read through file %d / %d (error count %d)" % (file_count, len(files), error_count), flush=True)
                if not self.endless:
                    break
        except GeneratorExit:
            pass
        except Exception as e:
            print("EXCEPTION IN GENERATOR: " + str(e))
            traceback.print_exc()
            print("---",flush=True)
            yield e
def save_json(data,filename):
    """Write *data* as JSON to *filename* and force it to disk.

    Opens the file with an explicit UTF-8 encoding (the default text encoding
    is locale-dependent, which can corrupt non-ASCII content), then flushes
    and fsyncs so the bytes are durably on disk before returning - important
    because this is used to checkpoint training state.
    """
    with open(filename,"w",encoding="utf-8") as f:
        json.dump(data,f)
        f.flush()
        os.fsync(f.fileno())
def load_json(filename):
    """Read and return the JSON value stored in *filename*.

    Uses an explicit UTF-8 encoding to mirror save_json, so checkpoint files
    round-trip regardless of the platform's locale encoding.
    """
    with open(filename,encoding="utf-8") as f:
        data = json.load(f)
    return data
if __name__ == '__main__':
    # CLI entry point: trains the position-inference net on SGF files found
    # under -datadirs, checkpointing model/optimizer/progress into -traindir.
    description = """
    Train net to predict Go positions one stone at a time
    """
    parser = argparse.ArgumentParser(description=description)
    parser.add_argument('-traindir', help='Dir to write to for recording training results', required=True)
    parser.add_argument('-datadirs', help='Directory with sgfs', required=True)
    parser.add_argument('-testprop', help='Proportion of data for test', type=float, required=True)
    parser.add_argument('-lr-scale', help='LR multiplier', type=float, required=False)
    parser.add_argument('-channels', help='Channels', type=int, required=True)
    parser.add_argument('-blocks', help='Blocks', type=int, required=True)
    parser.add_argument('-grad-clip-scale', help='Gradient clip multiplier', type=float, required=False)
    parser.add_argument('-num-data-workers', help='Number of processes for data loading', type=int, required=False)
    args = vars(parser.parse_args())

    traindir = args["traindir"]
    datadirs = args["datadirs"]
    testprop = args["testprop"]
    lr_scale = args["lr_scale"]
    num_channels = args["channels"]
    num_blocks = args["blocks"]
    grad_clip_scale = args["grad_clip_scale"]
    num_data_workers = args["num_data_workers"]
    logfilemode = "a"
    # Defaults for the optional arguments.
    if lr_scale is None:
        lr_scale = 1.0
    if grad_clip_scale is None:
        grad_clip_scale = 1.0
    if num_data_workers is None:
        num_data_workers = 0

    if not os.path.exists(traindir):
        os.mkdir(traindir)

    # Logging goes both to traindir/train.log (appended) and stdout.
    bareformatter = logging.Formatter("%(asctime)s %(message)s")
    fh = logging.FileHandler(os.path.join(traindir,"train.log"), mode=logfilemode)
    fh.setFormatter(bareformatter)
    stdouthandler = logging.StreamHandler(sys.stdout)
    stdouthandler.setFormatter(bareformatter)
    trainlogger = logging.getLogger("trainlogger")
    trainlogger.setLevel(logging.INFO)
    trainlogger.addHandler(fh)
    trainlogger.addHandler(stdouthandler)
    trainlogger.propagate=False
    np.set_printoptions(linewidth=150)
    def trainlog(s):
        # Log a line and flush stdout so progress shows up promptly.
        trainlogger.info(s)
        sys.stdout.flush()

    shuffle_buffer_size = 100000
    # Walk the data directories and split .sgf files deterministically into
    # train/test by hashing each path (stable across runs, unlike random()).
    files_found = 0
    trainfiles = []
    testfiles = []
    for datadir in datadirs.split(","):
        for parent, subdirs, files in os.walk(datadir):
            for name in files:
                if name.endswith(".sgf"):
                    files_found += 1
                    if files_found % 10000 == 0:
                        trainlog("Found %d sgfs..." % files_found)
                    # Uniform-in-[0,1) value derived from the md5 of the path.
                    r = float.fromhex("0."+hashlib.md5(os.path.join(parent,name).encode()).hexdigest()[:16])
                    if r < testprop:
                        testfiles.append(os.path.join(parent,name))
                    else:
                        trainfiles.append(os.path.join(parent,name))
    trainlog("Found %d training sgfs" % len(trainfiles))
    trainlog("Found %d testing sgfs" % len(testfiles))

    max_turn = 300
    break_prob_per_turn = 0.01
    traindataset = ShuffledDataset(SgfDataset(trainfiles,max_turn,break_prob_per_turn,sample_prob=0.5,endless=True),shuffle_buffer_size)
    testdataset = SgfDataset(testfiles,max_turn,break_prob_per_turn,sample_prob=0.2,endless=True)
    batch_size = 128
    trainloader = torch.utils.data.DataLoader(traindataset, batch_size=batch_size, shuffle=False, num_workers=num_data_workers, drop_last=True)
    testloader = torch.utils.data.DataLoader(testdataset, batch_size=batch_size, shuffle=False, num_workers=num_data_workers, drop_last=True)
    trainlog("Made data loaders")

    samples_per_epoch = 400000
    samples_per_test = 25600
    batches_per_epoch = samples_per_epoch // batch_size
    batches_per_test = samples_per_test // batch_size

    def lossfunc(inputs, results, preds, aux, auxpreds):
        # Cross-entropy on the inference point, plus a 0.3-weighted auxiliary
        # cross-entropy over the unknown region (channel 4 mask), normalized
        # by the on-board area (channel 1).
        assert(preds.size()[1] == 3)
        assert(auxpreds.size()[1] == 3)
        main_loss = -torch.sum(results * F.log_softmax(preds,dim=1))
        aux_loss = -torch.sum(aux * F.log_softmax(auxpreds,dim=1) * inputs[:,4:5,:,:] / torch.sum(inputs[:,1:2,:,:], dim=[2,3], keepdim=True)) * 0.3
        return main_loss, aux_loss

    cpudevice = torch.device("cpu")
    if torch.cuda.is_available():
        trainlog("CUDA is available, using it")
        gpudevice = torch.device("cuda:0")
    else:
        gpudevice = cpudevice

    modelpath = os.path.join(traindir,"model.data")
    optimpath = os.path.join(traindir,"optim.data")
    traindatapath = os.path.join(traindir,"traindata.json")
    # Resume from an existing checkpoint if present, otherwise start fresh
    # and immediately write an initial checkpoint.
    if os.path.exists(modelpath):
        trainlog("Loading preexisting model!")
        model = Model.load_from_file(modelpath).to(gpudevice)
        if model.num_channels != num_channels:
            raise Exception("Number of channels in model is %d but command line arg was %d" % (model.num_channels,num_channels))
        if model.num_blocks != num_blocks:
            raise Exception("Number of blocks in model is %d but command line arg was %d" % (model.num_blocks,num_blocks))
        optimizer = optim.SGD(model.parameters(), lr=0.00001*lr_scale, momentum=0.9)
        optimizer.load_state_dict(torch.load(optimpath))
        traindata = load_json(traindatapath)
    else:
        model = Model(num_channels=num_channels, num_blocks=num_blocks).to(gpudevice)
        optimizer = optim.SGD(model.parameters(), lr=0.00001*lr_scale, momentum=0.9)
        traindata = {"samples_so_far":0, "batches_so_far":0}
        trainlog("Saving!")
        model.save_to_file(modelpath)
        torch.save(optimizer.state_dict(), optimpath)
        save_json(traindata,traindatapath)

    grad_clip_max = 400 * grad_clip_scale
    #Loosen gradient clipping as we shift to smaller learning rates
    grad_clip_max = grad_clip_max / math.sqrt(lr_scale)

    # Running sums for periodic train-loss reporting.
    running_batch_count = 0
    running_main_loss = 0.0
    running_aux_loss = 0.0
    running_gnorm = 0.0
    running_ewms_exgnorm = 0.0
    print_every_batches = 100

    trainiter = iter(trainloader)
    testiter = iter(testloader)

    # Main training loop: one "epoch" = batches_per_epoch batches, then a
    # checkpoint save and a held-out evaluation pass. Runs indefinitely.
    while True:
        for i in range(batches_per_epoch):
            inputs, results, auxs = next(trainiter)
            inputs = inputs.to(gpudevice)
            results = results.to(gpudevice)
            auxs = auxs.to(gpudevice)

            optimizer.zero_grad()
            preds, auxpreds = model(inputs)
            main_loss,aux_loss = lossfunc(inputs, results, preds, auxs, auxpreds)
            loss = main_loss + aux_loss
            loss.backward()
            gnorm = torch.nn.utils.clip_grad_norm_(model.parameters(), grad_clip_max)
            optimizer.step()

            traindata["samples_so_far"] += batch_size
            traindata["batches_so_far"] += 1

            running_batch_count += 1
            running_main_loss += main_loss.item()
            running_aux_loss += aux_loss.item()
            running_gnorm += gnorm
            # Track how much gradient norm exceeded the clip threshold.
            running_ewms_exgnorm += max(0.0, gnorm - grad_clip_max)

            if running_batch_count >= print_every_batches:
                trainlog("TRAIN samples: %d, batches: %d, main loss: %.5f, aux loss: %.5f, gnorm: %.2f, ewms_exgnorm: %.3g" % (
                    traindata["samples_so_far"],
                    traindata["batches_so_far"],
                    running_main_loss / (running_batch_count * batch_size),
                    running_aux_loss / (running_batch_count * batch_size),
                    running_gnorm / (running_batch_count),
                    running_ewms_exgnorm / (running_batch_count),
                ))
                running_batch_count = 0
                running_main_loss = 0.0
                running_aux_loss = 0.0
                running_gnorm = 0.0
                # Decay (rather than zero) so spikes remain visible a while.
                running_ewms_exgnorm *= 0.5

        trainlog("Saving!")
        model.save_to_file(modelpath)
        torch.save(optimizer.state_dict(), optimpath)
        save_json(traindata,traindatapath)

        trainlog("Testing!")
        test_samples = 0
        test_main_loss = 0.0
        test_aux_loss = 0.0
        with torch.no_grad():
            for i in range(batches_per_test):
                inputs, results, auxs = next(testiter)
                inputs = inputs.to(gpudevice)
                results = results.to(gpudevice)
                auxs = auxs.to(gpudevice)
                preds, auxpreds = model(inputs)
                main_loss, aux_loss = lossfunc(inputs, results, preds, auxs, auxpreds)
                test_samples += batch_size
                test_main_loss += main_loss.item()
                test_aux_loss += aux_loss.item()
        trainlog("TEST samples %d, main loss: %.5f, aux loss %.5f" % (test_samples, test_main_loss / test_samples, test_aux_loss / test_samples))

    # NOTE(review): unreachable - the while True above never breaks.
    trainlog('Finished Training')
| [
"logging.getLogger",
"logging.StreamHandler",
"math.floor",
"data.load_sgf_moves_exn",
"math.sqrt",
"genboard_common.Model.load_from_file",
"torch.cuda.is_available",
"torch.sum",
"torch.flip",
"os.walk",
"os.path.exists",
"argparse.ArgumentParser",
"os.mkdir",
"sys.stdout.flush",
"trace... | [((9998, 10046), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': 'description'}), '(description=description)\n', (10021, 10046), False, 'import argparse\n'), ((11346, 11390), 'logging.Formatter', 'logging.Formatter', (['"""%(asctime)s %(message)s"""'], {}), "('%(asctime)s %(message)s')\n", (11363, 11390), False, 'import logging\n'), ((11523, 11556), 'logging.StreamHandler', 'logging.StreamHandler', (['sys.stdout'], {}), '(sys.stdout)\n', (11544, 11556), False, 'import logging\n'), ((11617, 11649), 'logging.getLogger', 'logging.getLogger', (['"""trainlogger"""'], {}), "('trainlogger')\n", (11634, 11649), False, 'import logging\n'), ((11788, 11822), 'numpy.set_printoptions', 'np.set_printoptions', ([], {'linewidth': '(150)'}), '(linewidth=150)\n', (11807, 11822), True, 'import numpy as np\n'), ((12924, 13054), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['traindataset'], {'batch_size': 'batch_size', 'shuffle': '(False)', 'num_workers': 'num_data_workers', 'drop_last': '(True)'}), '(traindataset, batch_size=batch_size, shuffle=\n False, num_workers=num_data_workers, drop_last=True)\n', (12951, 13054), False, 'import torch\n'), ((13065, 13194), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['testdataset'], {'batch_size': 'batch_size', 'shuffle': '(False)', 'num_workers': 'num_data_workers', 'drop_last': '(True)'}), '(testdataset, batch_size=batch_size, shuffle=\n False, num_workers=num_data_workers, drop_last=True)\n', (13092, 13194), False, 'import torch\n'), ((13767, 13786), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (13779, 13786), False, 'import torch\n'), ((13792, 13817), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (13815, 13817), False, 'import torch\n'), ((13951, 13987), 'os.path.join', 'os.path.join', (['traindir', '"""model.data"""'], {}), "(traindir, 'model.data')\n", (13963, 13987), False, 'import os\n'), ((14001, 14037), 
'os.path.join', 'os.path.join', (['traindir', '"""optim.data"""'], {}), "(traindir, 'optim.data')\n", (14013, 14037), False, 'import os\n'), ((14055, 14095), 'os.path.join', 'os.path.join', (['traindir', '"""traindata.json"""'], {}), "(traindir, 'traindata.json')\n", (14067, 14095), False, 'import os\n'), ((14100, 14125), 'os.path.exists', 'os.path.exists', (['modelpath'], {}), '(modelpath)\n', (14114, 14125), False, 'import os\n'), ((596, 630), 'torch.utils.data.get_worker_info', 'torch.utils.data.get_worker_info', ([], {}), '()\n', (628, 630), False, 'import torch\n'), ((1819, 1832), 'math.floor', 'math.floor', (['r'], {}), '(r)\n', (1829, 1832), False, 'import math\n'), ((2634, 2668), 'torch.utils.data.get_worker_info', 'torch.utils.data.get_worker_info', ([], {}), '()\n', (2666, 2668), False, 'import torch\n'), ((2889, 2908), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (2901, 2908), False, 'import torch\n'), ((9725, 9743), 'json.dump', 'json.dump', (['data', 'f'], {}), '(data, f)\n', (9734, 9743), False, 'import json\n'), ((9847, 9859), 'json.load', 'json.load', (['f'], {}), '(f)\n', (9856, 9859), False, 'import json\n'), ((11278, 11302), 'os.path.exists', 'os.path.exists', (['traindir'], {}), '(traindir)\n', (11292, 11302), False, 'import os\n'), ((11308, 11326), 'os.mkdir', 'os.mkdir', (['traindir'], {}), '(traindir)\n', (11316, 11326), False, 'import os\n'), ((11418, 11453), 'os.path.join', 'os.path.join', (['traindir', '"""train.log"""'], {}), "(traindir, 'train.log')\n", (11430, 11453), False, 'import os\n'), ((11870, 11888), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (11886, 11888), False, 'import sys\n'), ((12047, 12063), 'os.walk', 'os.walk', (['datadir'], {}), '(datadir)\n', (12054, 12063), False, 'import os\n'), ((13879, 13901), 'torch.device', 'torch.device', (['"""cuda:0"""'], {}), "('cuda:0')\n", (13891, 13901), False, 'import torch\n'), ((15242, 15261), 'math.sqrt', 'math.sqrt', (['lr_scale'], {}), 
'(lr_scale)\n', (15251, 15261), False, 'import math\n'), ((14661, 14682), 'torch.load', 'torch.load', (['optimpath'], {}), '(optimpath)\n', (14671, 14682), False, 'import torch\n'), ((17201, 17216), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (17214, 17216), False, 'import torch\n'), ((686, 700), 'os.urandom', 'os.urandom', (['(32)'], {}), '(32)\n', (696, 700), False, 'import os\n'), ((2724, 2738), 'os.urandom', 'os.urandom', (['(32)'], {}), '(32)\n', (2734, 2738), False, 'import os\n'), ((9591, 9612), 'traceback.print_exc', 'traceback.print_exc', ([], {}), '()\n', (9610, 9612), False, 'import traceback\n'), ((14182, 14213), 'genboard_common.Model.load_from_file', 'Model.load_from_file', (['modelpath'], {}), '(modelpath)\n', (14202, 14213), False, 'from genboard_common import Model\n'), ((14745, 14800), 'genboard_common.Model', 'Model', ([], {'num_channels': 'num_channels', 'num_blocks': 'num_blocks'}), '(num_channels=num_channels, num_blocks=num_blocks)\n', (14750, 14800), False, 'from genboard_common import Model\n'), ((3507, 3532), 'board.Board', 'Board', ([], {'size': 'metadata.size'}), '(size=metadata.size)\n', (3512, 3532), False, 'from board import Board, IllegalMoveError\n'), ((13548, 13575), 'torch.nn.functional.log_softmax', 'F.log_softmax', (['preds'], {'dim': '(1)'}), '(preds, dim=1)\n', (13561, 13575), True, 'import torch.nn.functional as F\n'), ((739, 753), 'os.urandom', 'os.urandom', (['(32)'], {}), '(32)\n', (749, 753), False, 'import os\n'), ((2777, 2791), 'os.urandom', 'os.urandom', (['(32)'], {}), '(32)\n', (2787, 2791), False, 'import os\n'), ((3201, 3234), 'data.load_sgf_moves_exn', 'data.load_sgf_moves_exn', (['filename'], {}), '(filename)\n', (3224, 3234), False, 'import data\n'), ((13660, 13717), 'torch.sum', 'torch.sum', (['inputs[:, 1:2, :, :]'], {'dim': '[2, 3]', 'keepdim': '(True)'}), '(inputs[:, 1:2, :, :], dim=[2, 3], keepdim=True)\n', (13669, 13717), False, 'import torch\n'), ((3666, 3756), 'torch.zeros', 'torch.zeros', (['(8, 
metadata.size, metadata.size)'], {'dtype': 'torch.float32', 'device': 'cpudevice'}), '((8, metadata.size, metadata.size), dtype=torch.float32, device=\n cpudevice)\n', (3677, 3756), False, 'import torch\n'), ((3771, 3827), 'torch.zeros', 'torch.zeros', (['(3,)'], {'dtype': 'torch.float32', 'device': 'cpudevice'}), '((3,), dtype=torch.float32, device=cpudevice)\n', (3782, 3827), False, 'import torch\n'), ((3846, 3936), 'torch.zeros', 'torch.zeros', (['(3, metadata.size, metadata.size)'], {'dtype': 'torch.float32', 'device': 'cpudevice'}), '((3, metadata.size, metadata.size), dtype=torch.float32, device=\n cpudevice)\n', (3857, 3936), False, 'import torch\n'), ((12400, 12426), 'os.path.join', 'os.path.join', (['parent', 'name'], {}), '(parent, name)\n', (12412, 12426), False, 'import os\n'), ((12473, 12499), 'os.path.join', 'os.path.join', (['parent', 'name'], {}), '(parent, name)\n', (12485, 12499), False, 'import os\n'), ((13608, 13638), 'torch.nn.functional.log_softmax', 'F.log_softmax', (['auxpreds'], {'dim': '(1)'}), '(auxpreds, dim=1)\n', (13621, 13638), True, 'import torch.nn.functional as F\n'), ((8656, 8685), 'torch.transpose', 'torch.transpose', (['inputs', '(1)', '(2)'], {}), '(inputs, 1, 2)\n', (8671, 8685), False, 'import torch\n'), ((8708, 8734), 'torch.transpose', 'torch.transpose', (['aux', '(1)', '(2)'], {}), '(aux, 1, 2)\n', (8723, 8734), False, 'import torch\n'), ((8182, 8208), 'torch.flip', 'torch.flip', (['inputs', '[1, 2]'], {}), '(inputs, [1, 2])\n', (8192, 8208), False, 'import torch\n'), ((8233, 8256), 'torch.flip', 'torch.flip', (['aux', '[1, 2]'], {}), '(aux, [1, 2])\n', (8243, 8256), False, 'import torch\n'), ((8308, 8331), 'torch.flip', 'torch.flip', (['inputs', '[1]'], {}), '(inputs, [1])\n', (8318, 8331), False, 'import torch\n'), ((8357, 8377), 'torch.flip', 'torch.flip', (['aux', '[1]'], {}), '(aux, [1])\n', (8367, 8377), False, 'import torch\n'), ((8470, 8493), 'torch.flip', 'torch.flip', (['inputs', '[2]'], {}), '(inputs, [2])\n', 
(8480, 8493), False, 'import torch\n'), ((8519, 8539), 'torch.flip', 'torch.flip', (['aux', '[2]'], {}), '(aux, [2])\n', (8529, 8539), False, 'import torch\n'), ((12290, 12316), 'os.path.join', 'os.path.join', (['parent', 'name'], {}), '(parent, name)\n', (12302, 12316), False, 'import os\n')] |
import sys
import numpy as np
import torch
from torch.nn import CrossEntropyLoss
from torch.optim.lr_scheduler import LambdaLR
from torchsummary import summary
from tqdm import tqdm
from src.utils import Utils
class TrainModel:
    """Training/evaluation harness for an image classifier.

    Tracks per-epoch loss/accuracy history, checkpoints whenever test
    accuracy matches or beats the best seen so far, and provides training
    drivers for a cyclic learning-rate schedule and an LR-range finder.
    The model summary assumes 3x32x32 inputs (see showmodelsummary).
    """
    def __init__(self):
        # Per-epoch history lists, appended to by train()/test().
        self.train_losses = []
        self.test_losses = []
        self.train_acc = []
        self.test_acc = []
        self.reg_loss_l1 = []
        self.factor = 0  # L1 regularization weight (e.g. 0.000005); 0 disables it
        self.loss_type = self.getlossfunction()
        self.t_acc_max = 0  # best test accuracy seen so far (checkpoint trigger)
        self.optimizer = None  # set by train(); used when checkpointing in test()
    def showmodelsummary(self, model):
        """Print a layer-by-layer summary for a 3x32x32 input on CUDA."""
        summary(model, input_size=(3, 32, 32), device="cuda")
    def train(self, model, device, train_loader, optimizer, epoch):
        """Run one training epoch over ``train_loader``.

        Records per-batch running accuracy into ``self.train_acc`` and the
        last batch's loss into ``self.train_losses``.
        NOTE(review): the ``epoch`` argument is not used inside this method.
        """
        model.train()
        pbar = tqdm(train_loader)
        correct = 0
        processed = 0
        self.optimizer = optimizer
        for batch_idx, (data, target) in enumerate(pbar):
            # get samples
            data, target = data.to(device), target.to(device)
            # Init
            optimizer.zero_grad()
            # In PyTorch, we need to set the gradients to zero before starting to do backpropragation because PyTorch
            # accumulates the gradients on subsequent backward passes. Because of this, when you start your training
            # loop, ideally you should zero out the gradients so that you do the parameter update correctly.
            # Predict
            y_pred = model(data)
            # # Calculate L1 loss (disabled)
            # l1_crit = torch.nn.L1Loss(size_average=False)
            # reg_loss = 0
            # for param in model.parameters():
            #     spare_matrix = torch.randn_like(param) * 0
            #     reg_loss += l1_crit(param, spare_matrix)
            #
            # self.reg_loss_l1.append(reg_loss)
            # Calculate loss
            loss = self.loss_type(y_pred, target)
            # loss += self.factor * reg_loss
            # self.train_losses.append(loss)
            # Backpropagation
            loss.backward()
            optimizer.step()
            # Update pbar-tqdm
            pred = y_pred.argmax(dim=1, keepdim=True)  # get the index of the max log-probability
            correct += pred.eq(target.view_as(pred)).sum().item()
            processed += len(data)
            pbar.set_description(
                desc=f'Loss={loss.item()} Batch_id={batch_idx} Accuracy={100 * correct / processed:0.2f}')
            self.train_acc.append(100 * correct / processed)
            # NOTE(review): appends the loss *tensor* (not .item()), which keeps
            # its autograd graph alive in the history list — confirm intended.
            self.train_losses.append(loss)
    def test(self, model, device, test_loader, class_correct, class_total, epoch, lr_data):
        """Evaluate on ``test_loader`` and return the test accuracy in percent.

        Appends the average loss and accuracy to the history lists, and saves
        a checkpoint via Utils.savemodel whenever accuracy >= self.t_acc_max.
        """
        model.eval()
        test_loss = 0
        correct = 0
        t_acc = 0
        # pbar = tqdm(test_loader)
        with torch.no_grad():
            for batch_idx, (data, target) in enumerate(test_loader):
                data, target = data.to(device), target.to(device)
                output = model(data)
                test_loss += self.loss_type(output, target).item()  # sum up batch loss
                pred = output.argmax(dim=1, keepdim=True)  # get the index of the max log-probability
                correct_tensor = pred.eq(target.data.view_as(pred))
                correct += pred.eq(target.view_as(pred)).sum().item()
                correct_new = np.squeeze(correct_tensor.cpu().numpy())
                # calculate test accuracy for each object class
                # for i in range(10):
                #     label = target.data[i]
                #     class_correct[label] += correct_new[i].item()
                #     class_total[label] += 1
        test_loss /= len(test_loader.dataset)
        self.test_losses.append(test_loss)
        print('\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.2f}%)\n'.format(
            test_loss, correct, len(test_loader.dataset),
            100. * correct / len(test_loader.dataset)))
        self.test_acc.append(100. * correct / len(test_loader.dataset))
        t_acc = 100. * correct / len(test_loader.dataset)
        # save model if validation accuracy has not decreased
        if self.t_acc_max <= t_acc:
            print('Validation accuracy increased ({:.6f} --> {:.6f}). Saving model ...'.format(
                self.t_acc_max,
                t_acc))
            from src.utils import Utils
            Utils.savemodel(model=model, epoch=epoch, path="savedmodels/checkpoint.pt",
                            optimizer_state_dict=self.optimizer.state_dict
                            , train_losses=self.train_losses, train_acc=self.train_acc, test_acc=self.test_acc,
                            test_losses=self.test_losses, lr_data=lr_data, class_correct=class_correct,
                            class_total=class_total)
            self.t_acc_max = t_acc
        return t_acc
    def getlossfunction(self):
        """Return the criterion used for both training and evaluation."""
        return CrossEntropyLoss()
    def gettraindata(self):
        """Return (train_losses, train_acc) history lists."""
        return self.train_losses, self.train_acc
    def gettestdata(self):
        """Return (test_losses, test_acc) history lists."""
        return self.test_losses, self.test_acc
    # NOTE(review): declared without ``self`` — as written, calling it on an
    # instance would pass the instance as ``dataiterator``; confirm call sites.
    # NOTE(review): ``dataiterator.next()`` below is the Python-2 iterator
    # protocol; Python 3 spells it ``next(dataiterator)``.
    def getinferredimagesfromdataset(dataiterator, model, classes, batch_size, number=25):
        """Collect up to ``number`` correctly and incorrectly classified images.

        Returns two dicts mapping "Pred=<x> (Act=<y>) <n>" captions to
        single-image tensors: (classified, misclassified).
        """
        try:
            misclassifiedcount = 0
            classifiedcount = 0
            misclassified = {}
            classified = {}
            loop = 0
            while misclassifiedcount < number or classifiedcount < number:
                loop += 1
                # print("loop = {}".format(loop))
                img, labels = dataiterator.next()
                # images = img.numpy()
                # move model inputs to cuda
                images = img.cuda()
                # print(len(img))
                # get sample outputs
                output = model(images)
                # convert output probabilities to predicted class
                _, preds_tensor = torch.max(output, 1)
                preds = np.squeeze(preds_tensor.cpu().numpy())
                for idx in np.arange(batch_size):
                    # print("for")
                    key = "Pred={} (Act={}) ".format(classes[preds[idx]], classes[labels[idx]])
                    # print("m-" + str(misclassifiedcount))
                    # print("c-" + str(classifiedcount))
                    # print("mlen-" + str(len(misclassified)))
                    # print("clen-" + str(len(classified)))
                    # print(preds[idx])
                    # print(labels[idx].item())
                    # print(key)
                    if preds[idx] != labels[idx].item():
                        if misclassifiedcount < number:
                            key = key + str(misclassifiedcount)
                            misclassified[key] = images[idx].unsqueeze(0)
                            misclassifiedcount += 1
                    else:
                        if classifiedcount < number:
                            key = key + str(classifiedcount)
                            classified[key] = images[idx].unsqueeze(0)
                            # images[idx].cpu()
                            classifiedcount += 1
                    if misclassifiedcount >= number and classifiedcount >= number:
                        break
        except OSError as err:
            print("OS error: {0}".format(err))
        except ValueError:
            print("Could not convert data to an integer.")
        except:  # NOTE(review): bare except hides unexpected errors; prefer a narrower type
            print(sys.exc_info()[0])
        return classified, misclassified
    def start_training_cyclic_lr(self, epochs, model, device, test_loader, train_loader, max_lr_epoch, weight_decay
                                 , min_lr=None,
                                 max_lr=None,
                                 cycles=1, annealing=False):
        """Train with a 'triangular2' cyclic LR: rise for ``max_lr_epoch``
        epochs, fall for the remainder; returns (lr_data, class_correct,
        class_total).
        NOTE(review): ``cycles`` and ``annealing`` are only used by the
        commented-out multi-cycle code below.
        """
        lr_data = []
        class_correct = list(0. for i in range(10))
        class_total = list(0. for i in range(10))
        optimizer = self.get_optimizer(model=model, weight_decay=weight_decay)
        scheduler = torch.optim.lr_scheduler.CyclicLR(optimizer=optimizer, base_lr=min_lr, max_lr=max_lr,
                                                      mode='triangular2',
                                                      cycle_momentum=True, step_size_up=max_lr_epoch,
                                                      step_size_down=epochs - max_lr_epoch, )
        self.start_training(epochs, model, device, test_loader, train_loader, optimizer, scheduler, lr_data,
                            class_correct, class_total, path="savedmodels/finalmodelwithdata.pt")
        # scheduler = self.get_cyclic_scheduler(optimizer, epochs=epochs, max_lr_epoch=max_lr_epoch, min_lr=min_lr,
        #                                       max_lr=max_lr)
        #
        # optimizer_state_dict = optimizer.state_dict()
        # scheduler_state_dict = scheduler.state_dict()
        # for count in range(0, cycles):
        #     print("Starting cycle: {}".format(count + 1))
        #     self.start_training(epochs, model, device, test_loader, train_loader, optimizer, scheduler, lr_data,
        #                         class_correct, class_total, path="savedmodels/finalmodelwithdata.pt")
        #     print("Completed cycle: {}".format(count + 1))
        #
        #     if annealing:
        #         diff = max_lr - min_lr
        #         diff = diff / 2
        #         max_lr = diff + min_lr
        #         print("New max_lr: {}".format(max_lr))
        #
        #     min_lr += ((max_lr - min_lr) / max_lr_epoch)
        #
        #     if cycles > 1:
        #         optimizer.load_state_dict(optimizer_state_dict)
        #         scheduler.load_state_dict(scheduler_state_dict)
        return lr_data, class_correct, class_total
    def start_training(self, epochs, model, device, test_loader, train_loader, optimizer, scheduler, lr_data,
                       class_correct, class_total, path):
        """Core train/test loop: for each epoch, log the LR, train, test,
        and step the scheduler; finally save the model (with history) to
        ``path`` and return (lr_data, class_correct, class_total)."""
        for epoch in range(0, epochs):
            print("EPOCH:", epoch)
            for param_groups in optimizer.param_groups:
                print("Learning rate =", param_groups['lr'], " for epoch: ", epoch)  # print LR for different epochs
                lr_data.append(param_groups['lr'])
            self.train(model, device, train_loader, optimizer, epoch)
            t_acc_epoch = self.test(model=model, device=device, test_loader=test_loader,
                                    class_correct=class_correct,
                                    class_total=class_total, epoch=epoch, lr_data=lr_data)
            scheduler.step()
        print('Saving final model after training cycle completion')
        self.save_model(model, epochs, optimizer.state_dict, lr_data, class_correct, class_total,
                        path=path)
        return lr_data, class_correct, class_total
    def get_optimizer(self, model, lr=1, momentum=0.9, weight_decay=0):
        """Build the project's SGD-style optimizer (Nesterov enabled)."""
        optimizer = Utils.createoptimizer(model, lr=lr, momentum=momentum, weight_decay=weight_decay, nesterov=True)
        return optimizer
    def get_cyclic_scheduler(self, optimizer, epochs=25, max_lr_epoch=5, min_lr=0.01, max_lr=0.1):
        """Return a LambdaLR wrapping the project's custom cyclical-LR lambda."""
        from src.train import TrainHelper
        lambda1 = TrainHelper.cyclical_lr(max_lr_epoch=max_lr_epoch, epochs=epochs, min_lr=min_lr, max_lr=max_lr)
        scheduler = LambdaLR(optimizer, lr_lambda=[lambda1])
        return scheduler
    def save_model(self, model, epochs, optimizer_state_dict, lr_data, class_correct, class_total,
                   path="savedmodels/finalmodelwithdata.pt"):
        """Persist the model plus its full training history via Utils.savemodel."""
        train_losses, train_acc = self.gettraindata()
        test_losses, test_acc = self.gettestdata()
        Utils.savemodel(model=model, epoch=epochs, path=path,
                        optimizer_state_dict=optimizer_state_dict
                        , train_losses=train_losses, train_acc=train_acc, test_acc=test_acc,
                        test_losses=test_losses, lr_data=lr_data, class_correct=class_correct,
                        class_total=class_total)
    def start_training_lr_finder(self, epochs, model, device, test_loader, train_loader, lr, weight_decay, lambda_fn):
        """LR-range-finder driver: trains with a LambdaLR built from
        ``lambda_fn`` and returns (lr_data, class_correct, class_total)."""
        lr_data = []
        class_correct = list(0. for i in range(10))
        class_total = list(0. for i in range(10))
        optimizer = self.get_optimizer(model=model, lr=lr, weight_decay=weight_decay)
        scheduler = Utils.create_scheduler_lambda_lr(lambda_fn, optimizer)
        return self.start_training(epochs, model, device, test_loader, train_loader, optimizer, scheduler, lr_data,
                                   class_correct, class_total, path="savedmodels/lrfinder.pt")
| [
"torch.optim.lr_scheduler.LambdaLR",
"torch.nn.CrossEntropyLoss",
"src.train.TrainHelper.cyclical_lr",
"src.utils.Utils.savemodel",
"tqdm.tqdm",
"torch.max",
"torch.optim.lr_scheduler.CyclicLR",
"sys.exc_info",
"src.utils.Utils.create_scheduler_lambda_lr",
"src.utils.Utils.createoptimizer",
"tor... | [((627, 680), 'torchsummary.summary', 'summary', (['model'], {'input_size': '(3, 32, 32)', 'device': '"""cuda"""'}), "(model, input_size=(3, 32, 32), device='cuda')\n", (634, 680), False, 'from torchsummary import summary\n'), ((787, 805), 'tqdm.tqdm', 'tqdm', (['train_loader'], {}), '(train_loader)\n', (791, 805), False, 'from tqdm import tqdm\n'), ((4876, 4894), 'torch.nn.CrossEntropyLoss', 'CrossEntropyLoss', ([], {}), '()\n', (4892, 4894), False, 'from torch.nn import CrossEntropyLoss\n'), ((7950, 8150), 'torch.optim.lr_scheduler.CyclicLR', 'torch.optim.lr_scheduler.CyclicLR', ([], {'optimizer': 'optimizer', 'base_lr': 'min_lr', 'max_lr': 'max_lr', 'mode': '"""triangular2"""', 'cycle_momentum': '(True)', 'step_size_up': 'max_lr_epoch', 'step_size_down': '(epochs - max_lr_epoch)'}), "(optimizer=optimizer, base_lr=min_lr,\n max_lr=max_lr, mode='triangular2', cycle_momentum=True, step_size_up=\n max_lr_epoch, step_size_down=epochs - max_lr_epoch)\n", (7983, 8150), False, 'import torch\n'), ((10865, 10966), 'src.utils.Utils.createoptimizer', 'Utils.createoptimizer', (['model'], {'lr': 'lr', 'momentum': 'momentum', 'weight_decay': 'weight_decay', 'nesterov': '(True)'}), '(model, lr=lr, momentum=momentum, weight_decay=\n weight_decay, nesterov=True)\n', (10886, 10966), False, 'from src.utils import Utils\n'), ((11147, 11247), 'src.train.TrainHelper.cyclical_lr', 'TrainHelper.cyclical_lr', ([], {'max_lr_epoch': 'max_lr_epoch', 'epochs': 'epochs', 'min_lr': 'min_lr', 'max_lr': 'max_lr'}), '(max_lr_epoch=max_lr_epoch, epochs=epochs, min_lr=\n min_lr, max_lr=max_lr)\n', (11170, 11247), False, 'from src.train import TrainHelper\n'), ((11263, 11303), 'torch.optim.lr_scheduler.LambdaLR', 'LambdaLR', (['optimizer'], {'lr_lambda': '[lambda1]'}), '(optimizer, lr_lambda=[lambda1])\n', (11271, 11303), False, 'from torch.optim.lr_scheduler import LambdaLR\n'), ((11604, 11876), 'src.utils.Utils.savemodel', 'Utils.savemodel', ([], {'model': 'model', 'epoch': 'epochs', 
'path': 'path', 'optimizer_state_dict': 'optimizer_state_dict', 'train_losses': 'train_losses', 'train_acc': 'train_acc', 'test_acc': 'test_acc', 'test_losses': 'test_losses', 'lr_data': 'lr_data', 'class_correct': 'class_correct', 'class_total': 'class_total'}), '(model=model, epoch=epochs, path=path, optimizer_state_dict=\n optimizer_state_dict, train_losses=train_losses, train_acc=train_acc,\n test_acc=test_acc, test_losses=test_losses, lr_data=lr_data,\n class_correct=class_correct, class_total=class_total)\n', (11619, 11876), False, 'from src.utils import Utils\n'), ((12310, 12364), 'src.utils.Utils.create_scheduler_lambda_lr', 'Utils.create_scheduler_lambda_lr', (['lambda_fn', 'optimizer'], {}), '(lambda_fn, optimizer)\n', (12342, 12364), False, 'from src.utils import Utils\n'), ((2783, 2798), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (2796, 2798), False, 'import torch\n'), ((4351, 4675), 'src.utils.Utils.savemodel', 'Utils.savemodel', ([], {'model': 'model', 'epoch': 'epoch', 'path': '"""savedmodels/checkpoint.pt"""', 'optimizer_state_dict': 'self.optimizer.state_dict', 'train_losses': 'self.train_losses', 'train_acc': 'self.train_acc', 'test_acc': 'self.test_acc', 'test_losses': 'self.test_losses', 'lr_data': 'lr_data', 'class_correct': 'class_correct', 'class_total': 'class_total'}), "(model=model, epoch=epoch, path='savedmodels/checkpoint.pt',\n optimizer_state_dict=self.optimizer.state_dict, train_losses=self.\n train_losses, train_acc=self.train_acc, test_acc=self.test_acc,\n test_losses=self.test_losses, lr_data=lr_data, class_correct=\n class_correct, class_total=class_total)\n", (4366, 4675), False, 'from src.utils import Utils\n'), ((5837, 5857), 'torch.max', 'torch.max', (['output', '(1)'], {}), '(output, 1)\n', (5846, 5857), False, 'import torch\n'), ((5949, 5970), 'numpy.arange', 'np.arange', (['batch_size'], {}), '(batch_size)\n', (5958, 5970), True, 'import numpy as np\n'), ((7394, 7408), 'sys.exc_info', 'sys.exc_info', ([], {}), 
'()\n', (7406, 7408), False, 'import sys\n')] |
import datetime as dt
from math import pi as PI
import cairo
import numpy as np
# Helper function
def day_of_month(d):
    """Format the day-of-month of *d* with its English ordinal suffix.

    d: Datetime object.  Returns e.g. "1st", "2nd", "3rd", "11th", "21st".
    """
    day = d.day
    # 11th-13th are irregular: they take "th" despite ending in 1/2/3.
    if 10 < day < 14:
        ordinal = "th"
    else:
        ordinal = {1: "st", 2: "nd", 3: "rd"}.get(day % 10, "th")
    return str(day) + ordinal
def my_example(cr):
    """Draw a weekly-planner layout onto the cairo context ``cr``.

    Left half covers Monday-Wednesday (with a month/year title), the right
    half Thursday-Saturday plus weekly/daily goal grids.  Each day gets
    13 ruled lines with hour labels, a row of goal dots per line, and
    exercise check boxes.  All coordinates are in user-space units, laid
    out on a GRID_WIDTH x GRID_HEIGHT grid.
    """
    # weekly info
    year = "2021"
    month = "September"
    week_of = dt.datetime(2021, 9, 6)
    # Labels "6th" .. "11th" for the six planner days of this week.
    days_of_month = [day_of_month(week_of + dt.timedelta(days=d)) for d in range(6)]
    # Colors (RGBA)
    GRAY = (0.3, 0.3, 0.3, 1)
    BLACK = (0, 0, 0, 1)
    LIGHTGRAY = (0.5, 0.5, 0.5, 1)
    # Shape Parameters
    GRID_HEIGHT, GRID_WIDTH = 4, 4
    DOT_RADIUS = 0.2
    # Data Parameters
    DAYS = ["Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday"]
    HOURS = [
        " 8:00",
        " 9:00",
        "10:00",
        "11:00",
        "12:00",
        " 1:00",
        " 2:00",
        " 3:00",
        " 4:00",
        " 5:00",
        " 6:00",
        " 7:00",
    ]
    DAYS_INIT = ["M", "T", "W", "H", "F", "S", "U"]
    # Anchor coordinates of the left half
    X, Y = 0, 0
    # Font Selection
    cr.select_font_face("Sans", cairo.FONT_SLANT_NORMAL, cairo.FONT_WEIGHT_NORMAL)
    # Left Side: month/year title
    cr.move_to(5 * GRID_WIDTH, 3 * GRID_HEIGHT)
    cr.set_font_size(9)
    cr.set_source_rgba(*BLACK)
    cr.show_text(month)
    cr.set_source_rgba(*GRAY)
    cr.show_text(" " + year)
    cr.set_font_size(3)
    cr.set_line_width(0.1)
    # 42 rows = 3 days x 14 rows; every 14th row (line % 14 == 0) is a day
    # header, the other 13 are ruled lines with goal dots.
    for line in range(42):
        x, y = X + GRID_WIDTH, Y + (5 + line) * GRID_HEIGHT
        if line % 14:
            # Day Lines
            cr.move_to(x, y)
            cr.set_source_rgba(*LIGHTGRAY)
            cr.rel_line_to(21 * GRID_WIDTH, 0)
            cr.stroke()
            # Goals
            for dot in range(12):
                cr.arc(x + (22 + dot) * GRID_WIDTH, y, DOT_RADIUS, 0, 2 * PI)
                cr.fill()
                cr.stroke()
        else:
            # Day header, e.g. "Monday 6th"
            cr.move_to((x + 1), y + 0.5 * GRID_HEIGHT)
            cr.set_source_rgba(*BLACK)
            cr.show_text(DAYS[line // 14] + f" {days_of_month[line//14]}")
            # Goals
            cr.move_to((x + 1) + 26 * GRID_WIDTH, y + 0.5 * GRID_HEIGHT)
            cr.set_source_rgba(*GRAY)
            cr.show_text("Goals")
    # Vertical Lines: one per day, with the hour labels next to them
    x, y = X + 8 * GRID_WIDTH, Y + 5 * GRID_HEIGHT
    cr.set_line_width(0.2)
    cr.select_font_face("Consolas", cairo.FONT_SLANT_NORMAL, cairo.FONT_WEIGHT_NORMAL)
    for _ in range(3):
        cr.move_to(x, y + GRID_HEIGHT / 2)
        cr.set_source_rgba(*GRAY)
        cr.rel_line_to(0, 12.5 * GRID_HEIGHT)
        cr.stroke()
        # Meeting Hours
        for hour in range(12):
            cr.move_to(x - 5, y + (hour + 1.45) * GRID_HEIGHT)
            cr.set_font_size(1.5)
            cr.set_source_rgba(*GRAY)
            cr.show_text(HOURS[hour])
        y += 14 * GRID_HEIGHT
    # Exercise boxes: 6x3 grid of cells per day; first column is shaded
    cr.set_line_width(0.17)
    x, y = X + 23 * GRID_WIDTH, Y + 15 * GRID_HEIGHT
    for _ in range(3):
        cr.move_to(x, y)
        cr.set_source_rgba(*BLACK)
        for i in np.arange(0, 6 * GRID_WIDTH, GRID_WIDTH):
            for j in np.arange(0, 3 * GRID_HEIGHT, GRID_HEIGHT):
                if not i:
                    cr.set_source_rgba(*LIGHTGRAY)
                    cr.rectangle(x + i, y + j, GRID_WIDTH, GRID_HEIGHT)
                    cr.fill()
                    cr.stroke()
                    cr.set_source_rgba(*BLACK)
                cr.rectangle(x + i, y + j, GRID_WIDTH, GRID_HEIGHT)
                cr.stroke()
        y += 14 * GRID_HEIGHT
    # Right Side: same layout shifted right, for Thursday-Saturday
    # Font Selection
    cr.select_font_face("Sans", cairo.FONT_SLANT_NORMAL, cairo.FONT_WEIGHT_NORMAL)
    # Anchor coordinates
    X, Y = 34 * GRID_WIDTH + 1, 0
    # Days
    cr.set_font_size(3)
    cr.set_line_width(0.1)
    for line in range(42):
        x, y = X + GRID_WIDTH, Y + (5 + line) * GRID_HEIGHT
        if line % 14:
            # Day Lines
            cr.move_to(x, y)
            cr.set_source_rgba(*LIGHTGRAY)
            cr.rel_line_to(21 * GRID_WIDTH, 0)
            cr.stroke()
            # Goals
            for dot in range(12):
                cr.arc(x + (22 + dot) * GRID_WIDTH, y, DOT_RADIUS, 0, 2 * PI)
                cr.fill()
                cr.stroke()
        else:
            # Day header; offset by 3 to pick Thursday/Friday/Saturday
            cr.move_to((x + 1), y + 0.5 * GRID_HEIGHT)
            cr.set_source_rgba(*BLACK)
            cr.show_text(DAYS[3 + line // 14] + f" {days_of_month[3+line//14]}")
            # Goals
            cr.move_to((x + 1) + 26 * GRID_WIDTH, y + 0.5 * GRID_HEIGHT)
            cr.set_source_rgba(*GRAY)
            cr.show_text("Goals")
    # Vertical Lines
    x, y = X + 8 * GRID_WIDTH, Y + 5 * GRID_HEIGHT
    cr.set_line_width(0.2)
    cr.select_font_face("Consolas", cairo.FONT_SLANT_NORMAL, cairo.FONT_WEIGHT_NORMAL)
    for _ in range(3):
        cr.move_to(x, y + GRID_HEIGHT / 2)
        cr.set_source_rgba(*GRAY)
        cr.rel_line_to(0, 12.5 * GRID_HEIGHT)
        cr.stroke()
        # Meeting Hours
        for hour in range(12):
            cr.move_to(x - 5, y + (hour + 1.45) * GRID_HEIGHT)
            cr.set_font_size(1.5)
            cr.set_source_rgba(*GRAY)
            cr.show_text(HOURS[hour])
        y += 14 * GRID_HEIGHT
    # Exercise boxes (only two days' worth on this side)
    cr.set_line_width(0.17)
    x, y = X + 23 * GRID_WIDTH, Y + 15 * GRID_HEIGHT
    for _ in range(2):
        cr.move_to(x, y)
        cr.set_source_rgba(*BLACK)
        for i in np.arange(0, 6 * GRID_WIDTH, GRID_WIDTH):
            for j in np.arange(0, 3 * GRID_HEIGHT, GRID_HEIGHT):
                if not i:
                    cr.set_source_rgba(*LIGHTGRAY)
                    cr.rectangle(x + i, y + j, GRID_WIDTH, GRID_HEIGHT)
                    cr.fill()
                    cr.stroke()
                    cr.set_source_rgba(*BLACK)
                cr.rectangle(x + i, y + j, GRID_WIDTH, GRID_HEIGHT)
                cr.stroke()
        y += 14 * GRID_HEIGHT
    # Weekly Goals: an 11x3 grid of cells
    x, y = X + 23 * GRID_WIDTH, Y + 35 * GRID_HEIGHT
    # Job Searching
    for i in np.arange(0, 11 * GRID_WIDTH, GRID_WIDTH):
        for j in np.arange(0, 3 * GRID_HEIGHT, GRID_HEIGHT):
            if not i:
                cr.set_source_rgba(*LIGHTGRAY)
                cr.rectangle(x + i, y + j, GRID_WIDTH, GRID_HEIGHT)
                cr.fill()
                cr.stroke()
                cr.set_source_rgba(*BLACK)
            cr.rectangle(x + i, y + j, GRID_WIDTH, GRID_HEIGHT)
            cr.stroke()
    # Daily Goals: 8x7 grid, first column shaded and labelled with day initials
    x, y = X + 23 * GRID_WIDTH, Y + 39 * GRID_HEIGHT
    # Font Selection
    cr.select_font_face("Courier New", cairo.FONT_SLANT_NORMAL, cairo.FONT_WEIGHT_BOLD)
    cr.set_font_size(3)
    for i in np.arange(0, 8 * GRID_WIDTH, GRID_WIDTH):
        # Hash marks separating goals
        cr.set_source_rgba(*BLACK)
        cr.move_to(x + i + GRID_WIDTH, y)
        cr.rel_line_to(0, -GRID_HEIGHT / 2)
        cr.stroke()
        for j in np.arange(0, 7 * GRID_HEIGHT, GRID_HEIGHT):
            # Identify first column
            if not i:
                # Shade box
                cr.set_source_rgba(*LIGHTGRAY)
                cr.rectangle(x + i, y + j, GRID_WIDTH, GRID_HEIGHT)
                cr.fill()
                cr.stroke()
                # Daily initials
                text = DAYS_INIT[j // GRID_HEIGHT]
                cr.move_to(x + 1, y + j + GRID_HEIGHT - 1)
                cr.set_source_rgba(*BLACK)
                cr.show_text(text)
            cr.set_source_rgba(*BLACK)
            cr.rectangle(x + i, y + j, GRID_WIDTH, GRID_HEIGHT)
            cr.stroke()
def draw(cr):
    """Draw the word "cairo" centred at (0.5, 0.5) on the context ``cr``,
    plus translucent red helper graphics: a dot at the text origin and a
    cross through the centre of the unit square."""
    cr.set_line_width(0.04)
    text = "cairo"
    cr.select_font_face("Sans", cairo.FONT_SLANT_NORMAL, cairo.FONT_WEIGHT_NORMAL)
    cr.set_font_size(0.2)
    # Centre the ink rectangle about (0.5, 0.5): shift the text origin by
    # half the extents plus the glyph bearings.
    bearing_x, bearing_y, text_w, text_h, advance_x, advance_y = cr.text_extents(text)
    origin_x = 0.5 - (text_w / 2 + bearing_x)
    origin_y = 0.5 - (text_h / 2 + bearing_y)
    cr.move_to(origin_x, origin_y)
    cr.show_text(text)
    # draw helping lines
    cr.set_source_rgba(1, 0.2, 0.2, 0.6)
    cr.arc(origin_x, origin_y, 0.05, 0, 2 * PI)
    cr.fill()
    cr.move_to(0.5, 0)
    cr.rel_line_to(0, 1)
    cr.move_to(0, 0.5)
    cr.rel_line_to(1, 0)
    cr.stroke()
| [
"datetime.datetime",
"datetime.timedelta",
"numpy.arange"
] | [((516, 539), 'datetime.datetime', 'dt.datetime', (['(2021)', '(9)', '(6)'], {}), '(2021, 9, 6)\n', (527, 539), True, 'import datetime as dt\n'), ((6230, 6271), 'numpy.arange', 'np.arange', (['(0)', '(11 * GRID_WIDTH)', 'GRID_WIDTH'], {}), '(0, 11 * GRID_WIDTH, GRID_WIDTH)\n', (6239, 6271), True, 'import numpy as np\n'), ((6874, 6914), 'numpy.arange', 'np.arange', (['(0)', '(8 * GRID_WIDTH)', 'GRID_WIDTH'], {}), '(0, 8 * GRID_WIDTH, GRID_WIDTH)\n', (6883, 6914), True, 'import numpy as np\n'), ((3240, 3280), 'numpy.arange', 'np.arange', (['(0)', '(6 * GRID_WIDTH)', 'GRID_WIDTH'], {}), '(0, 6 * GRID_WIDTH, GRID_WIDTH)\n', (3249, 3280), True, 'import numpy as np\n'), ((5633, 5673), 'numpy.arange', 'np.arange', (['(0)', '(6 * GRID_WIDTH)', 'GRID_WIDTH'], {}), '(0, 6 * GRID_WIDTH, GRID_WIDTH)\n', (5642, 5673), True, 'import numpy as np\n'), ((6290, 6332), 'numpy.arange', 'np.arange', (['(0)', '(3 * GRID_HEIGHT)', 'GRID_HEIGHT'], {}), '(0, 3 * GRID_HEIGHT, GRID_HEIGHT)\n', (6299, 6332), True, 'import numpy as np\n'), ((7113, 7155), 'numpy.arange', 'np.arange', (['(0)', '(7 * GRID_HEIGHT)', 'GRID_HEIGHT'], {}), '(0, 7 * GRID_HEIGHT, GRID_HEIGHT)\n', (7122, 7155), True, 'import numpy as np\n'), ((3303, 3345), 'numpy.arange', 'np.arange', (['(0)', '(3 * GRID_HEIGHT)', 'GRID_HEIGHT'], {}), '(0, 3 * GRID_HEIGHT, GRID_HEIGHT)\n', (3312, 3345), True, 'import numpy as np\n'), ((5696, 5738), 'numpy.arange', 'np.arange', (['(0)', '(3 * GRID_HEIGHT)', 'GRID_HEIGHT'], {}), '(0, 3 * GRID_HEIGHT, GRID_HEIGHT)\n', (5705, 5738), True, 'import numpy as np\n'), ((584, 604), 'datetime.timedelta', 'dt.timedelta', ([], {'days': 'd'}), '(days=d)\n', (596, 604), True, 'import datetime as dt\n')] |
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import sys
import setuptools
from distutils.version import LooseVersion
# Fail fast with an explicit message if setuptools is too old to build gammapy.
if LooseVersion(setuptools.__version__) < "30.3":
    sys.stderr.write("ERROR: setuptools 30.3 or later is required by gammapy\n")
    sys.exit(1)
# TODO: check if setuptools_scm, numpy, ... are OK
# Exit with good error message telling people to install those first if not
from Cython.Build import cythonize
from distutils.extension import Extension
import numpy as np
def make_cython_extension(filename):
    """Build a distutils ``Extension`` for one Cython source file.

    The dotted module name is derived from ``filename`` by removing the
    trailing ``.pyx`` suffix and replacing path separators with dots.

    Bug fix: the previous ``filename.strip(".pyx")`` removed any leading or
    trailing characters drawn from the set ``{'.', 'p', 'y', 'x'}`` — not the
    suffix — and would mangle names such as ``proxy.pyx``.  Slicing off the
    suffix is the correct operation.

    Parameters
    ----------
    filename : str
        Path to the ``.pyx`` source, relative to the package root.

    Returns
    -------
    distutils.extension.Extension
        Extension configured with the NumPy headers on its include path.
    """
    module_name = filename
    if module_name.endswith(".pyx"):
        module_name = module_name[: -len(".pyx")]
    return Extension(
        module_name.replace("/", "."),
        [filename],
        include_dirs=[np.get_include()],
    )
# Cython sources compiled into C extension modules at build time.
cython_files = [
    "gammapy/detect/_test_statistics_cython.pyx",
    "gammapy/stats/fit_statistics_cython.pyx",
]
# cythonize() translates each .pyx to C and returns the Extension list.
ext_modules = cythonize([make_cython_extension(_) for _ in cython_files])
# use_scm_version=True derives the version from git tags via setuptools_scm;
# the remaining package metadata is presumably declared in setup.cfg (not
# visible here).
setuptools.setup(use_scm_version=True, ext_modules=ext_modules)
| [
"sys.exit",
"setuptools.setup",
"sys.stderr.write",
"numpy.get_include",
"distutils.version.LooseVersion"
] | [((882, 945), 'setuptools.setup', 'setuptools.setup', ([], {'use_scm_version': '(True)', 'ext_modules': 'ext_modules'}), '(use_scm_version=True, ext_modules=ext_modules)\n', (898, 945), False, 'import setuptools\n'), ((140, 176), 'distutils.version.LooseVersion', 'LooseVersion', (['setuptools.__version__'], {}), '(setuptools.__version__)\n', (152, 176), False, 'from distutils.version import LooseVersion\n'), ((191, 267), 'sys.stderr.write', 'sys.stderr.write', (['"""ERROR: setuptools 30.3 or later is required by gammapy\n"""'], {}), "('ERROR: setuptools 30.3 or later is required by gammapy\\n')\n", (207, 267), False, 'import sys\n'), ((272, 283), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (280, 283), False, 'import sys\n'), ((663, 679), 'numpy.get_include', 'np.get_include', ([], {}), '()\n', (677, 679), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
"""
Created on Fri Aug 24 09:40:28 2018
@author: <NAME>
"""
import numpy as np
#from numpy import fft
import matplotlib.pyplot as plt
#import scipy.signal as sig
import os
import random
import emgReaderClass as erc
import threading
import multiprocessing
import dataPlotter
bias=0 # If bias = 1, every chromosome gets one extra, frequency-independent DNA slot
maxGen=2000 # The max number of generations
startOver=True # If True, the code will not consider the last simulation
tamPop=100 # Population size
maxFreq=180 # This is the max frequency to consider #240
freqStep=3 # For freqStep=3 -> The code will consider [1,2,3],[3,4,5], etc# 3
taxaMut=0.01 # The mutation rate (reset to taxaMutMax at the start of main())
taxaMutMin=0.01 # Minimum mutation rate
taxaMutMax=10.0 # Maximum mutation rate
chanceMut=4 # The chance of mutation (only for the "absolute" mutation); per-gene probability is chanceMut/1000
bestTypes=[] # Logging variable: marker of each new champion
continuous=False # If True, the code will use a continuous fitness function (not recommended)
binaryFit=False # If True, the fitness of each individual will be 1 for each right guess
                # If False, it will be continuous if "continuous" is True, or 1 point if
                # it guesses correctly, and 1.5 if it guesses with a confidence above
                # a "multFac" threshold
multFac=1.5 # Confidence multiplier used by the non-binary fitness (see above)
binaryCrossChance=0.5 # The chance of occurring a binary cross. 1 minus this
                      # is the chance of a mean crossing
vectorialMutationChance=0.5 # The chance of vectorial mutation. 1 minus this is
                            # chance of an absolute mutation
taxaMutMult=4.0 # The factor by which taxaMut will be multiplied (or divided)
##############################################################################
guid=0 # Individual ID counter (logging variable)
real=[] # DATA: segmented EMG bursts, indexed [file][burst]
origin=[] # DATA: raw signal per file
fv=[] # DATA: FFT of each burst, indexed [file][burst]
frv=[] # DATA: binned frequency vectors, indexed [file][burst]
nArq=0 # DATA: number of data files (set in main())
# per-file [lastValues, topThs, botThs] passed to the emgReader (see readArqs)
parameters={'bicepsinteiro.txt': [400,20,10],\
            'bicepsmetade.txt': [400,20,10],\
            'emgwk.txt': [400,20,10],\
            'emgmed.txt':[400,20,10],\
#            'xoxoxo.txt':[300,40,30],\
            'emgabrindo.txt':[500,20,20],\
            'emgapertando.txt':[400,20,20]}
def countGuesses(indiv):
    """Return how many segmented signals `indiv` assigns to their true file.

    For each burst, the binned frequency vector is multiplied by the
    individual's chromosome; a guess is correct when the highest-scoring
    column matches the file the burst came from.
    """
    arq_names = getArqs()
    hits = 0
    for arq in range(len(arq_names)):
        for sig in range(len(real[arq])):
            feats = np.array(getFreqVector(fv[arq][sig]))
            scores = feats * indiv.cromo.freqFactor
            if np.argmax(scores[0]) == arq:
                hits += 1
    return hits
def sayWho(indiv, real, fv):
    """Return the per-file score vector for one signal.

    Multiplies the binned frequency vector of ``fv`` by ``indiv``'s
    chromosome; the position with the highest score is the file the
    individual "thinks" the signal belongs to.

    ``real`` is unused but kept for interface compatibility with callers.
    (Cleanup: removed the unused ``tam`` local of the original.)
    """
    return np.array(getFreqVector(fv)) * indiv.cromo.freqFactor
def getArqs():
    """Return the '.txt' files in the current directory, listdir order reversed."""
    txt_files = [name for name in os.listdir('.')
                 if os.path.splitext(name)[1] == '.txt']
    txt_files.reverse()
    return txt_files
# Chromosome: an (maxFreq/freqStep + bias) x nArqs weight matrix.  Each column
# corresponds to one data file, each row to a band of freqStep frequencies.
class cromossome:
    def getRandomVec(self, n):
        """Return n uniform random weights in [-1, 1)."""
        v = []
        for i in range(0, n):
            v.append(random.random() * 2 - 1)
        return v

    def __init__(self):
        """Build a random weight matrix, one row per frequency band (+ bias)."""
        self.freqFactor = []
        n = len(getArqs())
        # bug fix: use integer division -- maxFreq/freqStep is a float under
        # Python 3, which range() rejects; // is identical for these ints in
        # Python 2 as well
        for i in range(0, maxFreq // freqStep + bias):
            self.freqFactor.append(self.getRandomVec(n))
        self.freqFactor = np.matrix(self.freqFactor)
# A GA individual: a random chromosome plus bookkeeping fields.
class ind:
    def __init__(self):
        global guid
        self.uid = guid          # unique id, for logging which champion is new
        guid += 1
        self.fit = 0             # cached fitness score
        self.marker = 'none'     # how this individual was produced
        self.cromo = cromossome()
def getFreqVector(fv):
    """Bin an FFT magnitude vector into maxFreq/freqStep frequency bands.

    Walks the first half of the spectrum (real signal), and for every band
    start appends sum(fv[k:k+freqStep])*2/tam.  When bias == 1, one extra
    frequency-independent slot (-1) is appended before stopping.

    Fixes: integer division (``//``) so the code also runs under Python 3
    (identical result under Python 2), and the hard-coded ``k % 3`` is
    generalized to ``k % freqStep`` (equivalent for the current freqStep=3).
    """
    x = []
    tam = len(fv)
    for j in range(0, tam // 2 - 5):
        # map the sample index to a (rounded) frequency bin
        k = int(round(float(j) * 1000 / float(tam)))
        if k % freqStep == 0:
            if len(x) == maxFreq // freqStep:
                ##### BIAS ######
                if bias == 1:
                    x.append(-1)
                #################
                break
            x.append(sum(fv[k:k + freqStep]) * 2 / tam)
    return x
def readArqs():
    """Load every '.txt' EMG file into the module-level data lists.

    Fills origin (raw signal per file), real (segmented bursts), fv (FFT per
    burst) and frv (binned frequency vectors), all indexed [file][burst].
    """
    arq_names = getArqs()
    reader = erc.emgReader()
    for idx, name in enumerate(arq_names):
        origin.append([])
        real.append([])
        fv.append([])
        frv.append([])
        # per-file segmentation thresholds, see the `parameters` dict
        reader.lastValues, reader.topThs, reader.botThs = parameters[name]
        origin[idx], real[idx], fv[idx] = reader.analyzeEmg(name, 1000)
    for idx in range(len(arq_names)):
        for spectrum in fv[idx]:
            frv[idx].append(getFreqVector(spectrum))
def fitness(indiv):
    """Score `indiv` over every segmented EMG burst.

    A burst contributes only when the highest-scoring column equals its
    source file index.  Scoring mode (module flags):
      - binaryFit:  1 point per correct guess;
      - continuous: a spread/mean confidence measure (not recommended);
      - otherwise:  1 point, or 1.5 when the top score is at least
        multFac * mean(scores).

    Cleanup: removed the unused ``tam``/``test`` locals and flattened the
    nested conditionals of the original (behaviour unchanged).
    """
    global nArq
    score = 0
    for arq in range(0, nArq):
        for i in range(0, len(fv[arq])):
            # score vector: one entry per data file
            pont = np.array(frv[arq][i]) * indiv.cromo.freqFactor
            if np.argmax(pont) != arq:
                continue
            if binaryFit:
                score += 1
            elif continuous:
                # confidence: spread normalised by the shifted mean
                score += (np.max(pont[0]) - np.min(pont[0])) / np.mean(pont[0] - np.min(pont[0]))
            elif np.max(np.array(pont)) >= multFac * np.mean(np.array(pont)):
                score += 1.5
            else:
                score += 1
    return score
# Population class
class population:
    """A pool of GA individuals."""

    def __init__(self):
        self.population = []

    def initPop(self, tamPop):
        """Fill the pool with tamPop fresh random individuals."""
        for i in range(0, tamPop):
            self.population.append(ind())

    def evaluateAll(self):
        """Recompute and cache the fitness of every individual."""
        for indiv in self.population:
            indiv.fit = fitness(indiv)

    def getBest(self):
        """Return the fittest individual.

        Bug fix: the original applied np.argmax to the list of `ind` objects
        (comparing objects, not scores); take the argmax of the fitnesses.
        """
        fits = [indiv.fit for indiv in self.population]
        return self.population[int(np.argmax(fits))]
# Mutation method. The mutation can be vectorial or absolute.
def mutate(indiv):
    """Mutate `indiv` in place and tag indiv.marker with the mutation type.

    Vectorial (probability vectorialMutationChance): draw a random
    chromosome, normalise it to unit length, scale by taxaMut*random()
    and add it to the whole genome.
    Absolute (otherwise): each gene mutates with probability chanceMut/1000
    by a step of random sign and magnitude in [0, taxaMut).
    """
    global taxaMut,chanceMut
    if random.random()<vectorialMutationChance:
        # random direction in gene space, normalised to unit length
        vec=ind().cromo.freqFactor
        amp=np.sqrt(np.sum(pow(i,2) for i in vec.A1))
        vec/=amp
        vec*=taxaMut*random.random()
        indiv.cromo.freqFactor+=vec
        indiv.marker='vectorial'
#        for line in indiv.cromo.freqFactor:
#            for i in range(0,len(np.array(line)[0])):
#                if random.random()*1000<chanceMut:
#                    line[0,i]+=mut*random.random()
    else:
        # per-gene "absolute" mutation with a random sign
        for line in indiv.cromo.freqFactor:
            for i in range(0,len(np.array(line)[0])):
                if random.random()*1000<chanceMut:
                    if random.random()<0.5:
                        mut = taxaMut
                    else:
                        mut = -taxaMut
                    line[0,i]+=mut*random.random()
        indiv.marker='absolute'
def meanCrossover(pais):
    """Offspring whose chromosome is the element-wise mean of the parents'."""
    filho = ind()
    filho.cromo.freqFactor = sum(pai.cromo.freqFactor for pai in pais) / len(pais)
    mutate(filho)
    filho.marker += ' meaned '
    return filho
def binaryCrossover(pais):
    """Offspring: each gene copied from parent 0 or parent 1 with 50% chance."""
    filho = ind()
    n_rows = len(filho.cromo.freqFactor)
    n_cols = len(filho.cromo.freqFactor[0].A1)
    for i in range(n_rows):
        for j in range(n_cols):
            donor = pais[0] if random.random() < 0.5 else pais[1]
            filho.cromo.freqFactor[i, j] = donor.cromo.freqFactor[i, j]
    mutate(filho)
    filho.marker += ' binerized '
    return filho
def weightedCrossover(pais):
    """Pick binary crossover with probability binaryCrossChance, else mean."""
    use_binary = random.random() < binaryCrossChance
    return binaryCrossover(pais) if use_binary else meanCrossover(pais)
def torneio(pop):
    """Return the fittest individual; ties resolve to the last one seen."""
    best = pop.population[0]
    for challenger in pop.population[1:]:
        if challenger.fit >= best.fit:
            best = challenger
    return best
def genNewPop(best, pop):
    """Breed the next generation: keep `best`, cross everyone else with it."""
    newpop = population()
    for indiv in pop.population:
        child = indiv if indiv == best else weightedCrossover([best, indiv])
        newpop.population.append(child)
    return newpop
def removeSuckers(pop, n):
    """Replace the n least-fit individuals with brand-new random ones."""
    pop.population.sort(key=lambda indiv: indiv.fit)  # ascending fitness
    for slot in range(0, n):
        pop.population[slot] = ind()
def getPopMean(pop):
    """Return the arithmetic mean fitness of the population."""
    # start from 0.0 so the division stays float even for integer fitnesses
    total = sum((indiv.fit for indiv in pop.population), 0.0)
    return total / len(pop.population)
def normalizePop(pop):
    """Rescale every chromosome by its largest absolute gene (currently unused)."""
    for indiv in pop.population:
        peak = 0
        for row in indiv.cromo.freqFactor:
            for col in range(len(np.array(row)[0])):
                peak = max(peak, abs(row[0, col]))
        for row in indiv.cromo.freqFactor:
            for col in range(len(np.array(row)[0])):
                row[0, col] /= peak
# Plot a fitness history on the current matplotlib figure
def plotGens(best,mean):
    """Plot `best` as green dots and `mean` as a solid blue line."""
    plt.plot(best,'go')
    plt.plot(mean,'b-')
# Class for controlling the GA variables (adaptive mutation-rate schedule)
class populationControl():
    """Adapts the global mutation rate `taxaMut` while the GA runs.

    After a 25-generation warm-up, if the best fitness fails to grow by
    more than 0.1% for 8 consecutive generations, `taxaMut` is divided by
    taxaMutMult until it falls below taxaMutMin, then multiplied back up
    towards taxaMutMax -- oscillating to help the search escape plateaus.
    """
    global tamPop,\
            taxaMut,\
            chanceMut,\
            bestAll,\
            bias,\
            maxGen,\
            tamPop,\
            taxaMut,\
            taxaMutMax,\
            chanceMut,\
            continuous,\
            binaryFit,\
            multFac,\
            binaryCrossChance,\
            taxaMutMult,\
            taxaMutMin
    def __init__(self):
        # snapshot the module-level GA settings (note: _tamPop and
        # _chanceMut are assigned twice, mirroring the duplicated names above)
        self._tamPop=tamPop
        self._taxaMut=taxaMut
        self._chanceMut=chanceMut
        self._bias=bias
        self._maxGen=maxGen
        self._tamPop=tamPop
        self._taxaMutMin=taxaMutMin
        self._taxaMutMax=taxaMutMax
        self._chanceMut=chanceMut
        self._continuous=continuous
        self._binaryFit=binaryFit
        self._multFac=multFac
        self._binaryCrossChance=binaryCrossChance
        self._taxaMutMult=taxaMutMult
        self._counter=0       # generations without significant improvement
        self._expansion=False # True while taxaMut is being increased
    def control(self,gen,counter,best,last):
        """Update `taxaMut` from this generation's best vs. the previous one.

        NOTE(review): the `counter` argument and `ascendingCounter` are never
        used, and `chanceMut=self._chanceMut` below binds a *local* name only
        (chanceMut is not declared global here) -- confirm intent.
        """
        global taxaMut
#        taxaMut=self._taxaMutMax
        ascendingCounter=0
        if gen>25:
            if best.fit<=last.fit*1.001: #If the fitness doesn't grow by 0.1%
                self._counter+=1
            else:
#                taxaMut=self._taxaMut
                chanceMut=self._chanceMut
                self._expansion=False
                self._counter=0
                ascendingCounter=0
        if self._counter==8: # If the fitness doesn't grow in 8 generations
            if self._expansion: # taxaMut is currently increasing
                if taxaMut<self._taxaMutMax: # If taxaMut is less than the maximum
                    taxaMut*=self._taxaMutMult
                else: # taxaMut reached the maximum: start decreasing
                    self._expansion=False
            else: # taxaMut is currently decreasing
                if taxaMut>self._taxaMutMin: # If it is bigger than the minimum
                    taxaMut/=self._taxaMutMult
                else: # reached the minimum: start increasing
                    self._expansion=True
            self._counter=0
def main():
    """Run the GA: load the EMG data, evolve `maxGen` generations, return pop.

    Side effects: updates several module globals (nArq, taxaMut, bestTypes,
    bestAll via the caller), prints per-generation progress and plots the
    best/mean fitness and the mutation-rate history at the end.
    """
    global maxFreq,\
            freqStep,\
            tamPop,\
            taxaMut,\
            chanceMut,\
            nArq,\
            bestAll,\
            startOver,\
            bestTypes
    nArq=len(getArqs())
    gen=0
    counter=0 # passed to controller.control() but unused there
    last=ind()
    bestVec=[] # best fitness per generation
    meanVec=[] # mean fitness per generation
    taxaVec=[] # mutation-rate history, stored as 20*ln(taxaMut)
    taxaMut=taxaMutMax # start from the maximum mutation rate
    if startOver:
        pop = population()
        pop.initPop(tamPop)
    else:
        # continue from the population kept by the previous run
        pop=bestAll
#    plotter=dataPlotter.dataPlotter('Geracao','Melhor de Todos',bestVec)
#    threading.Thread(target=plotter.start).start()
    controller=populationControl()
    readArqs()
    while gen<maxGen:
        gen+=1
        pop.evaluateAll()
        best=torneio(pop)
        if not last.uid==best.uid:
            bestTypes.append(best.marker) # log how each new champion was produced
        print(gen,best.fit,':',best.marker,tamPop,taxaMut,chanceMut,maxGen)#,':', [p.fit for p in population]
        pop=genNewPop(best,pop)
    ###########################################################################
        # adapt the mutation rate and record this generation's statistics
        controller.control(gen,counter,best,last)
        last=best
        taxaVec.append(20*np.log(taxaMut)) # natural log, not log10
        bestVec.append(last.fit)
        meanVec.append(getPopMean(pop))
    ###########################################################################
#        createSuckers(pop.tamPop/3)
        # NOTE(review): tamPop/5 relies on Python 2 integer division; under
        # Python 3 this passes a float to range() inside removeSuckers
        removeSuckers(pop,tamPop/5)
#        normalizePop(pop)
    plotGens(bestVec,meanVec)
    plotGens(bestVec,taxaVec)
    pop.evaluateAll()
    print([p.fit for p in pop.population])
    return pop
# module entry point: run the GA and keep the final, re-evaluated population
bestAll=main()
"os.listdir",
"matplotlib.pyplot.plot",
"os.path.splitext",
"numpy.argmax",
"numpy.log",
"numpy.max",
"numpy.array",
"emgReaderClass.emgReader",
"random.random",
"numpy.min",
"numpy.matrix"
] | [((3436, 3447), 'numpy.array', 'np.array', (['x'], {}), '(x)\n', (3444, 3447), True, 'import numpy as np\n'), ((3577, 3592), 'os.listdir', 'os.listdir', (['"""."""'], {}), "('.')\n", (3587, 3592), False, 'import os\n'), ((5612, 5627), 'emgReaderClass.emgReader', 'erc.emgReader', ([], {}), '()\n', (5625, 5627), True, 'import emgReaderClass as erc\n'), ((11788, 11808), 'matplotlib.pyplot.plot', 'plt.plot', (['best', '"""go"""'], {}), "(best, 'go')\n", (11796, 11808), True, 'import matplotlib.pyplot as plt\n'), ((11813, 11833), 'matplotlib.pyplot.plot', 'plt.plot', (['mean', '"""b-"""'], {}), "(mean, 'b-')\n", (11821, 11833), True, 'import matplotlib.pyplot as plt\n'), ((4287, 4313), 'numpy.matrix', 'np.matrix', (['self.freqFactor'], {}), '(self.freqFactor)\n', (4296, 4313), True, 'import numpy as np\n'), ((8147, 8162), 'random.random', 'random.random', ([], {}), '()\n', (8160, 8162), False, 'import random\n'), ((9972, 9987), 'random.random', 'random.random', ([], {}), '()\n', (9985, 9987), False, 'import random\n'), ((2917, 2928), 'numpy.array', 'np.array', (['x'], {}), '(x)\n', (2925, 2928), True, 'import numpy as np\n'), ((7981, 8007), 'numpy.argmax', 'np.argmax', (['self.population'], {}), '(self.population)\n', (7990, 8007), True, 'import numpy as np\n'), ((8319, 8334), 'random.random', 'random.random', ([], {}), '()\n', (8332, 8334), False, 'import random\n'), ((3048, 3066), 'numpy.argmax', 'np.argmax', (['pont[0]'], {}), '(pont[0])\n', (3057, 3066), True, 'import numpy as np\n'), ((3606, 3627), 'os.path.splitext', 'os.path.splitext', (['arq'], {}), '(arq)\n', (3622, 3627), False, 'import os\n'), ((6756, 6777), 'numpy.array', 'np.array', (['frv[arq][i]'], {}), '(frv[arq][i])\n', (6764, 6777), True, 'import numpy as np\n'), ((6879, 6894), 'numpy.argmax', 'np.argmax', (['pont'], {}), '(pont)\n', (6888, 6894), True, 'import numpy as np\n'), ((9624, 9639), 'random.random', 'random.random', ([], {}), '()\n', (9637, 9639), False, 'import random\n'), ((15418, 
15433), 'numpy.log', 'np.log', (['taxaMut'], {}), '(taxaMut)\n', (15424, 15433), True, 'import numpy as np\n'), ((4025, 4040), 'random.random', 'random.random', ([], {}), '()\n', (4038, 4040), False, 'import random\n'), ((8688, 8702), 'numpy.array', 'np.array', (['line'], {}), '(line)\n', (8696, 8702), True, 'import numpy as np\n'), ((8729, 8744), 'random.random', 'random.random', ([], {}), '()\n', (8742, 8744), False, 'import random\n'), ((8785, 8800), 'random.random', 'random.random', ([], {}), '()\n', (8798, 8800), False, 'import random\n'), ((8949, 8964), 'random.random', 'random.random', ([], {}), '()\n', (8962, 8964), False, 'import random\n'), ((11463, 11477), 'numpy.array', 'np.array', (['line'], {}), '(line)\n', (11471, 11477), True, 'import numpy as np\n'), ((11673, 11687), 'numpy.array', 'np.array', (['line'], {}), '(line)\n', (11681, 11687), True, 'import numpy as np\n'), ((7088, 7103), 'numpy.max', 'np.max', (['pont[0]'], {}), '(pont[0])\n', (7094, 7103), True, 'import numpy as np\n'), ((7104, 7119), 'numpy.min', 'np.min', (['pont[0]'], {}), '(pont[0])\n', (7110, 7119), True, 'import numpy as np\n'), ((7297, 7311), 'numpy.array', 'np.array', (['pont'], {}), '(pont)\n', (7305, 7311), True, 'import numpy as np\n'), ((7137, 7152), 'numpy.min', 'np.min', (['pont[0]'], {}), '(pont[0])\n', (7143, 7152), True, 'import numpy as np\n'), ((7331, 7345), 'numpy.array', 'np.array', (['pont'], {}), '(pont)\n', (7339, 7345), True, 'import numpy as np\n')] |
'''
image class for ioplin. Store the patch information in the imgage and carry out corresponding necessary operations
'''
import numpy as np
from random import shuffle
# default patch window size and stride for a 1200*900 image
WIN = [300, 300]
S = [300, 300]

class pic:
    """A labelled image split into fixed-size patches for patch-level training.

    The class-level attributes below are shared documentation defaults;
    __init__ overwrites them per instance.
    """
    shape = []          # image shape (height, width, channel)
    pics = []           # the patches after clipping
    label = 0           # image-level label (1 = diseased, 0 = normal)
    labels_bin = []     # per-patch labels: 1 diseased, 0 normal
    disease_list = []   # indices of patches currently labelled diseased
    pre_label = 0       # predicted image-level score pair [normal, diseased]
    pres_bin = []       # per-patch predicted scores
    img = []            # list of patch files
    num_imgs = 0        # number of patches
    patch_weight = []   # per-patch sample weights

    def __init__(self, img_filename="", img=[], label=0, win=WIN, s=S, shuffle=False):
        # bug fix: the original assigned a throwaway *local* `disease_list`,
        # leaving self.disease_list as a class attribute shared by instances
        self.disease_list = []
        self.label = label
        self.shape = img.shape
        self.pics = self.imgcut(img, win, s)
        num_pics = len(self.pics)
        self.num_imgs = num_pics
        self.pres_bin = np.zeros((num_pics, 2), 'float16')
        if self.label == 1:
            self.labels_bin = np.ones((num_pics, 1), 'int8')
            self.pre_label = np.array([0, 1], 'float16')
        else:
            self.labels_bin = np.zeros((num_pics, 1), 'int8')
            self.pre_label = np.array([1, 0], 'float16')
        if shuffle:
            self.shuffle()

    def shuffle(self):
        """Shuffle patches, labels and predictions in unison.

        Returns:
            the permutation applied (list of original indices)
        """
        self.pics = np.array(self.pics)
        index_random = [i for i in range(len(self.labels_bin))]
        shuffle(index_random)
        if len(self.pics) != 0:
            self.pics = self.pics[index_random, :, :, :]
            self.labels_bin = self.labels_bin[index_random, :]
            self.pres_bin = self.pres_bin[index_random, :]
        return index_random

    def imgcut(self, _img, win, s):
        """Clip `_img` into patches by sliding a `win`-sized window with stride `s`.

        Paras:
            _img np.array shape [height, width, channel]
            win  list [height, width] window size
            s    list [height, width] stride
        Return:
            imgs list of patches, each [win_h, win_w, channel]
        """
        imgs = []
        height_src = 0
        height_des = win[0]
        num_row = int((self.shape[1] - win[1]) / s[1]) + 1
        num_col = int((self.shape[0] - win[0]) / s[0]) + 1
        for i in range(0, num_col):
            width_src = 0
            width_des = win[1]
            for k in range(0, num_row):
                imgs.append(_img[height_src:height_des, width_src:width_des, :])
                width_src = width_src + s[1]
                width_des = width_des + s[1]
            height_src = height_src + s[0]
            height_des = height_des + s[0]
        return imgs

    def updateLabel_bin(self, pre=[], _thr=0.5):
        """Re-label patches of a diseased image from predicted scores `pre`.

        Patches scoring above `_thr` become diseased; diseased patches
        scoring below both `_thr` and the 55th percentile revert to normal.
        Also refreshes self.disease_list and the image-level prediction
        (max patch score).

        Returns:
            (number of diseased patches, number of labels changed)
        """
        if len(pre) == 0:
            pre = self.pres_bin
        num_changed = 0
        self.disease_list = []
        self.pres_bin = pre
        pre_max = 0
        # second column = per-patch "diseased" score, sorted for the percentile
        disease_scores = np.hsplit(pre, 2)[1]
        sorted_pre = np.sort(disease_scores, axis=0)
        if self.label == 1:
            for i in range(0, len(pre)):
                if pre_max < pre[i][1]:
                    pre_max = pre[i][1]
                if pre[i][1] > _thr and self.labels_bin[i] == 0:
                    self.labels_bin[i] = 1
                    num_changed = num_changed + 1
                elif pre[i][1] <= _thr and self.labels_bin[i] == 1 \
                        and pre[i][1] <= sorted_pre[int(len(sorted_pre) * 0.55)]:
                    self.labels_bin[i] = 0
                    num_changed = num_changed + 1
                else:
                    if self.labels_bin[i] == 1:
                        self.disease_list.append(i)
            self.pre_label[1] = pre_max
            self.pre_label[0] = 1 - self.pre_label[1]
        return len(self.disease_list), num_changed

    def preNor(self, pre=[], _thr=0.5):
        """Return True when no patch score exceeds `_thr` (image looks normal).

        Side effect: updates self.pre_label from the maximum patch score.
        """
        if len(pre) == 0:
            pre = self.pres_bin
        num_dis = 0
        pre_max = 0
        for row in pre:
            if pre_max < row[1]:
                pre_max = row[1]
            if row[1] > _thr:
                num_dis = num_dis + 1
        self.pre_label[1] = pre_max
        self.pre_label[0] = 1 - self.pre_label[1]
        return num_dis < 1

    def getSampleWeight(self, thr, pre=[]):
        """Return per-patch weights score/thr, floored at 0.1.

        Paras:
            thr float threshold of the binary classification
            pre list per-patch predicted scores (defaults to self.pres_bin)
        Side effects: caches the weights in self.patch_weight and snapshots
        the current labels in self.labels_bin_bfLast.
        """
        if len(pre) == 0:
            pre = self.pres_bin
        s_weight = []
        for i in range(0, len(self.labels_bin)):
            w = pre[i][1] / thr
            if w < 0.1:
                w = 0.1
            s_weight.append(w)
        self.patch_weight = s_weight
        self.labels_bin_bfLast = self.labels_bin
        return s_weight

    def cvtData(self, x=[], y=[], is_x=True, is_y=True, is_del=True):
        """Append this image's patch data/labels to the external lists x / y.

        Each patch is tiled to 3 channels.  Bug fix: the original called
        ``x.append(np.tile(self.pics[i]), (1, 1, 3))`` -- the tile reps tuple
        was passed to list.append instead of np.tile, raising TypeError.

        Paras:
            x, y    external containers to extend
            is_x    whether to process patch data
            is_y    whether to process labels
            is_del  whether to delete self.pics afterwards (free memory)
        """
        num = len(self.labels_bin)
        if is_x:
            for i in range(0, num):
                x.append(np.tile(self.pics[i], (1, 1, 3)))
            if is_del:
                del self.pics
        if is_y:
            for j in range(0, num):
                y.append(self.labels_bin[j])
| [
"numpy.hsplit",
"numpy.tile",
"random.shuffle",
"numpy.ones",
"numpy.sort",
"numpy.array",
"numpy.zeros"
] | [((1078, 1112), 'numpy.zeros', 'np.zeros', (['(num_pics, 2)', '"""float16"""'], {}), "((num_pics, 2), 'float16')\n", (1086, 1112), True, 'import numpy as np\n'), ((1549, 1568), 'numpy.array', 'np.array', (['self.pics'], {}), '(self.pics)\n', (1557, 1568), True, 'import numpy as np\n'), ((1641, 1662), 'random.shuffle', 'shuffle', (['index_random'], {}), '(index_random)\n', (1648, 1662), False, 'from random import shuffle\n'), ((3488, 3505), 'numpy.hsplit', 'np.hsplit', (['pre', '(2)'], {}), '(pre, 2)\n', (3497, 3505), True, 'import numpy as np\n'), ((3554, 3581), 'numpy.sort', 'np.sort', (['sorted_pre'], {'axis': '(0)'}), '(sorted_pre, axis=0)\n', (3561, 3581), True, 'import numpy as np\n'), ((1169, 1199), 'numpy.ones', 'np.ones', (['(num_pics, 1)', '"""int8"""'], {}), "((num_pics, 1), 'int8')\n", (1176, 1199), True, 'import numpy as np\n'), ((1227, 1254), 'numpy.array', 'np.array', (['[0, 1]', '"""float16"""'], {}), "([0, 1], 'float16')\n", (1235, 1254), True, 'import numpy as np\n'), ((1297, 1328), 'numpy.zeros', 'np.zeros', (['(num_pics, 1)', '"""int8"""'], {}), "((num_pics, 1), 'int8')\n", (1305, 1328), True, 'import numpy as np\n'), ((1356, 1383), 'numpy.array', 'np.array', (['[1, 0]', '"""float16"""'], {}), "([1, 0], 'float16')\n", (1364, 1383), True, 'import numpy as np\n'), ((6205, 6226), 'numpy.tile', 'np.tile', (['self.pics[i]'], {}), '(self.pics[i])\n', (6212, 6226), True, 'import numpy as np\n')] |
"""
This script aggregates all the cells in each '*_exp_0106_[auto | corrected].json' file and saves them to a
'*_exp_0106_[auto | corrected]_aggregate.json', then creates a soft links to it that will be read by AIDA.
"""
"""
This file is part of Cytometer
Copyright 2021 Medical Research Council
SPDX-License-Identifier: Apache-2.0
Author: <NAME> <<EMAIL>>
"""
# cross-platform home directory
from pathlib import Path
home = str(Path.home())
import os
import sys
sys.path.extend([os.path.join(home, 'Software/cytometer')])
import glob
import cytometer.data
import openslide
import numpy as np
import shapely
import cytometer.utils
# input/output locations
histology_dir = os.path.join(home, 'scan_srv2_cox/Maz Yon')
area2quantile_dir = os.path.join(home, 'Data/cytometer_data/deepcytometer_pipeline_v8')
annotations_dir = os.path.join(home, 'Data/cytometer_data/aida_data_Klf14_v8/annotations')
# file with area->quantile map precomputed from all automatically segmented slides in klf14_b6ntac_exp_0098_full_slide_size_analysis_v7.py
filename_area2quantile = os.path.join(area2quantile_dir, 'klf14_b6ntac_exp_0098_filename_area2quantile.npz')
# suffixes of annotation filenames
auto_filename_suffix = '_exp_0106_auto.json'
corrected_filename_suffix = '_exp_0106_corrected.json'
# list of annotations (glob the annotations directory for each suffix)
auto_annotation_files_list = os.path.join(annotations_dir, '*' + auto_filename_suffix)
auto_annotation_files_list = glob.glob(auto_annotation_files_list)
corrected_annotation_files_list = os.path.join(annotations_dir, '*' + corrected_filename_suffix)
corrected_annotation_files_list = glob.glob(corrected_annotation_files_list)
# parameters used to reject segmented objects that are unlikely to be cells
cell_prob_thr = 0.5  # threshold for objects to be accepted as cells
min_area = 203 / 2  # (pix^2) smaller objects are rejected
max_area = 44879 * 3  # (pix^2) larger objects are rejected
max_inv_compactness = 2.0  # objects less compact than this are rejected (= more compact^-1)
########################################################################################################################
## Colourmap for AIDA
########################################################################################################################
if os.path.isfile(filename_area2quantile):
    with np.load(filename_area2quantile, allow_pickle=True) as aux:
        f_area2quantile_f = aux['f_area2quantile_f'].item()  # '_f' map (presumably female mice -- confirm)
        f_area2quantile_m = aux['f_area2quantile_m'].item()  # '_m' map (presumably male mice -- confirm)
else:
    raise FileNotFoundError('Cannot find file with area->quantile map precomputed from all automatically segmented' +
                            ' slides in klf14_b6ntac_exp_0098_full_slide_size_analysis_v7.py')
# load AIDA's colourmap
cm = cytometer.data.aida_colourmap()
########################################################################################################################
## Process files for segmentation refinement
########################################################################################################################
def process_annotations(annotation_files_list, overwrite_aggregated_annotation_file=False, create_symlink=False):
    """
    Aggregate per-block cell contours from AIDA annotation JSON files.

    For each input file, all 'White adipocyte' contours are loaded, filtered
    by area, compactness and cell probability, and written to a
    '*_aggregated.json' sibling file.  Optionally a symlink with the name
    AIDA expects ('<slide>.json') is (re)created pointing at it.

    :param annotation_files_list: list of JSON filenames containing annotations.
    :param overwrite_aggregated_annotation_file: regenerate the aggregated
        file even if it already exists.
    :param create_symlink: create/replace the AIDA-visible symlink.
    :return: None
    """
    for i_file, annotation_file in enumerate(annotation_files_list):
        print('File ' + str(i_file) + ': ' + os.path.basename(annotation_file))
        # name of the file that we are going to save the aggregated annotations to
        aggregated_annotation_file = annotation_file.replace('.json', '_aggregated.json')
        # name of the original .ndpi file (strip whichever suffix applies)
        histo_file = os.path.basename(annotation_file).replace(auto_filename_suffix, '.ndpi')
        histo_file = os.path.basename(histo_file).replace(corrected_filename_suffix, '.ndpi')
        histo_file = os.path.join(histology_dir, histo_file)
        im = openslide.OpenSlide(histo_file)
        xres = float(im.properties['openslide.mpp-x'])  # um/pixel
        yres = float(im.properties['openslide.mpp-y'])  # um/pixel
        # aggregate cells from all blocks and write/overwrite a file with them
        if not os.path.isfile(aggregated_annotation_file) or overwrite_aggregated_annotation_file:
            # load contours and their confidence measure from annotation file
            cells, props = cytometer.data.aida_get_contours(annotation_file, layer_name='White adipocyte.*', return_props=True)
            # compute cell measures
            areas = []
            inv_compactnesses = []
            for cell in cells:
                poly_cell = shapely.geometry.Polygon(cell)
                area = poly_cell.area
                if area > 0:
                    # perimeter^2 / (4*pi*area): 1 for a circle, larger = less compact
                    inv_compactness = poly_cell.length ** 2 / (4 * np.pi * area)
                else:
                    # NaN compares False below, so zero-area objects get rejected
                    inv_compactness = np.nan
                areas.append(area)
                inv_compactnesses.append(inv_compactness)
            # prepare for removal objects that are too large or too small
            idx = (np.array(areas) >= min_area) * (np.array(areas) <= max_area)
            # prepare for removal objects that are not compact enough
            idx *= np.array(inv_compactnesses) <= max_inv_compactness
            # prepare for removal objects unlikely to be cells
            idx *= np.array(props['cell_prob']) >= cell_prob_thr
            # execute the removal of objects
            cells = list(np.array(cells)[idx])
            props['cell_prob'] = list(np.array(props['cell_prob'])[idx])
            # areas = list(np.array(areas)[idx])
            # create AIDA items to contain contours
            # NOTE(review): only the male map f_area2quantile_m is used here,
            # although the female map is also loaded at module level -- confirm
            items = cytometer.data.aida_contour_items(cells, f_area2quantile_m, cm='quantiles_aida',
                                                      xres=xres, yres=yres, cell_prob=props['cell_prob'])
            # write contours to single layer AIDA file (one to visualise, one to correct manually)
            cytometer.data.aida_write_new_items(aggregated_annotation_file, items, mode='w', indent=0)
        if create_symlink:
            # name expected by AIDA for annotations
            symlink_name = os.path.basename(histo_file).replace('.ndpi', '.json')
            symlink_name = os.path.join(annotations_dir, symlink_name)
            # create symlink to the aggregated annotation file from the name expected by AIDA
            if os.path.isfile(symlink_name):
                os.remove(symlink_name)
            os.symlink(os.path.basename(aggregated_annotation_file), symlink_name)
    return
# create aggregated annotation files for the automatic segmentations (kept
# on disk only), and for the corrected ones, which AIDA displays via symlink
process_annotations(auto_annotation_files_list, overwrite_aggregated_annotation_file=True, create_symlink=False)
process_annotations(corrected_annotation_files_list, overwrite_aggregated_annotation_file=True, create_symlink=True)
| [
"pathlib.Path.home",
"os.path.join",
"os.path.isfile",
"numpy.array",
"shapely.geometry.Polygon",
"os.path.basename",
"openslide.OpenSlide",
"numpy.load",
"glob.glob",
"os.remove"
] | [((653, 696), 'os.path.join', 'os.path.join', (['home', '"""scan_srv2_cox/Maz Yon"""'], {}), "(home, 'scan_srv2_cox/Maz Yon')\n", (665, 696), False, 'import os\n'), ((717, 784), 'os.path.join', 'os.path.join', (['home', '"""Data/cytometer_data/deepcytometer_pipeline_v8"""'], {}), "(home, 'Data/cytometer_data/deepcytometer_pipeline_v8')\n", (729, 784), False, 'import os\n'), ((803, 875), 'os.path.join', 'os.path.join', (['home', '"""Data/cytometer_data/aida_data_Klf14_v8/annotations"""'], {}), "(home, 'Data/cytometer_data/aida_data_Klf14_v8/annotations')\n", (815, 875), False, 'import os\n'), ((1041, 1128), 'os.path.join', 'os.path.join', (['area2quantile_dir', '"""klf14_b6ntac_exp_0098_filename_area2quantile.npz"""'], {}), "(area2quantile_dir,\n 'klf14_b6ntac_exp_0098_filename_area2quantile.npz')\n", (1053, 1128), False, 'import os\n'), ((1313, 1370), 'os.path.join', 'os.path.join', (['annotations_dir', "('*' + auto_filename_suffix)"], {}), "(annotations_dir, '*' + auto_filename_suffix)\n", (1325, 1370), False, 'import os\n'), ((1400, 1437), 'glob.glob', 'glob.glob', (['auto_annotation_files_list'], {}), '(auto_annotation_files_list)\n', (1409, 1437), False, 'import glob\n'), ((1472, 1534), 'os.path.join', 'os.path.join', (['annotations_dir', "('*' + corrected_filename_suffix)"], {}), "(annotations_dir, '*' + corrected_filename_suffix)\n", (1484, 1534), False, 'import os\n'), ((1569, 1611), 'glob.glob', 'glob.glob', (['corrected_annotation_files_list'], {}), '(corrected_annotation_files_list)\n', (1578, 1611), False, 'import glob\n'), ((2176, 2214), 'os.path.isfile', 'os.path.isfile', (['filename_area2quantile'], {}), '(filename_area2quantile)\n', (2190, 2214), False, 'import os\n'), ((432, 443), 'pathlib.Path.home', 'Path.home', ([], {}), '()\n', (441, 443), False, 'from pathlib import Path\n'), ((484, 524), 'os.path.join', 'os.path.join', (['home', '"""Software/cytometer"""'], {}), "(home, 'Software/cytometer')\n", (496, 524), False, 'import os\n'), ((2225, 
2275), 'numpy.load', 'np.load', (['filename_area2quantile'], {'allow_pickle': '(True)'}), '(filename_area2quantile, allow_pickle=True)\n', (2232, 2275), True, 'import numpy as np\n'), ((3845, 3884), 'os.path.join', 'os.path.join', (['histology_dir', 'histo_file'], {}), '(histology_dir, histo_file)\n', (3857, 3884), False, 'import os\n'), ((3899, 3930), 'openslide.OpenSlide', 'openslide.OpenSlide', (['histo_file'], {}), '(histo_file)\n', (3918, 3930), False, 'import openslide\n'), ((6237, 6280), 'os.path.join', 'os.path.join', (['annotations_dir', 'symlink_name'], {}), '(annotations_dir, symlink_name)\n', (6249, 6280), False, 'import os\n'), ((6391, 6419), 'os.path.isfile', 'os.path.isfile', (['symlink_name'], {}), '(symlink_name)\n', (6405, 6419), False, 'import os\n'), ((3384, 3417), 'os.path.basename', 'os.path.basename', (['annotation_file'], {}), '(annotation_file)\n', (3400, 3417), False, 'import os\n'), ((3657, 3690), 'os.path.basename', 'os.path.basename', (['annotation_file'], {}), '(annotation_file)\n', (3673, 3690), False, 'import os\n'), ((3751, 3779), 'os.path.basename', 'os.path.basename', (['histo_file'], {}), '(histo_file)\n', (3767, 3779), False, 'import os\n'), ((4160, 4202), 'os.path.isfile', 'os.path.isfile', (['aggregated_annotation_file'], {}), '(aggregated_annotation_file)\n', (4174, 4202), False, 'import os\n'), ((4605, 4635), 'shapely.geometry.Polygon', 'shapely.geometry.Polygon', (['cell'], {}), '(cell)\n', (4629, 4635), False, 'import shapely\n'), ((5189, 5216), 'numpy.array', 'np.array', (['inv_compactnesses'], {}), '(inv_compactnesses)\n', (5197, 5216), True, 'import numpy as np\n'), ((5323, 5351), 'numpy.array', 'np.array', (["props['cell_prob']"], {}), "(props['cell_prob'])\n", (5331, 5351), True, 'import numpy as np\n'), ((6437, 6460), 'os.remove', 'os.remove', (['symlink_name'], {}), '(symlink_name)\n', (6446, 6460), False, 'import os\n'), ((6484, 6528), 'os.path.basename', 'os.path.basename', (['aggregated_annotation_file'], {}), 
'(aggregated_annotation_file)\n', (6500, 6528), False, 'import os\n'), ((5038, 5053), 'numpy.array', 'np.array', (['areas'], {}), '(areas)\n', (5046, 5053), True, 'import numpy as np\n'), ((5070, 5085), 'numpy.array', 'np.array', (['areas'], {}), '(areas)\n', (5078, 5085), True, 'import numpy as np\n'), ((5440, 5455), 'numpy.array', 'np.array', (['cells'], {}), '(cells)\n', (5448, 5455), True, 'import numpy as np\n'), ((5500, 5528), 'numpy.array', 'np.array', (["props['cell_prob']"], {}), "(props['cell_prob'])\n", (5508, 5528), True, 'import numpy as np\n'), ((6155, 6183), 'os.path.basename', 'os.path.basename', (['histo_file'], {}), '(histo_file)\n', (6171, 6183), False, 'import os\n')] |
# -*- coding: utf-8 -*-
# Author: <NAME>
# Time: 1/4/2021 12:44 PM
# Copyright 2019. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import sys
import time
import json
import numpy as np
import torch
from LBFGS import FullBatchLBFGS
def get_2d_coor(x3d, y3d, z3d=0.2):
    """Project a 3-D world point onto the 2-D image plane.

    Uses the fixed camera matrix of the rendered scenes and a standard
    homogeneous projection (divide by the last coordinate).

    Args:
        x3d, y3d: world-plane coordinates of the point.
        z3d: height of the point above the ground plane; defaults to the
            object radius (0.2).

    Returns:
        numpy array of shape (2, 1) with the projected pixel coordinates.
    """
    camera = np.array(((-207.8461456298828, 525.0000610351562, -120.00001525878906, 1200.0003662109375),
                       (123.93595886230469, 1.832598354667425e-05, -534.663330078125, 799.9999389648438),
                       (-0.866025447845459, -3.650024282819686e-08, -0.4999999701976776, 5.000000476837158),
                       (0, 0, 0, 1)))
    homogeneous = np.array([[x3d], [y3d], [z3d], [1.0]], dtype=np.float32)
    projected = camera[:3] @ homogeneous
    # Perspective divide: drop the homogeneous coordinate.
    return projected[:-1] / projected[-1]
# Process one scene per index; the range is taken from the command line
# (sys.argv[1] = first index, sys.argv[2] = one past the last index).
for process_index in range(int(sys.argv[1]), int(sys.argv[2])):
    # Observed objects with physics attributes, and the previously simulated
    # output whose state at frame 88 seeds this optimization.
    object_dict = json.load(open(f'../data/object_dicts_with_physics/objects_{process_index:05d}.json'))
    output_dict = json.load(open(f'../data/object_simulated/sim_{process_index:05d}.json'))
    step_88 = output_dict['step_88']
    print(f'===============start processing {process_index}==================')
    device = 'cpu'
    n_balls = len(object_dict)
    steps = 210
    # +1000 marks unobserved entries; the `< 100` mask in the loss (see
    # closure()) zeroes their contribution.
    target_x = torch.zeros((128, n_balls, 2), dtype=torch.float32).to(device) + 1000
    shapes = []
    shape_dict = {
        'sphere': 0,
        'cube': 1,
        'cylinder': 2
    }
    # Fill per-object observed trajectories and integer shape codes.
    for object_index, identity in enumerate(object_dict.keys()):
        locations = torch.tensor(object_dict[identity]['trajectory']).to(device)
        target_x[:locations.shape[0], object_index, :] = locations
        shapes.append(shape_dict[object_dict[identity]['shape']])
    # Keep 21 supervision frames (one per `interval` substeps of the rollout).
    target_x = target_x[-40:-19]
    # Objects that were off-screen at the start are seeded from the simulated
    # frame-88 state instead of the (missing) observation.
    for object_index, identity in enumerate(object_dict.keys()):
        if target_x[0][object_index][0] > 500:
            target_x[0][object_index] = torch.tensor(step_88['x'][object_index])
    shape = torch.tensor(shapes, dtype=torch.int8).to(device)
    # Initial orientation from the simulated state; optimized jointly below.
    angle0 = torch.tensor(step_88['angle'], dtype=torch.float32).to(device)
    angle0.requires_grad = True
    interval = 10
    dt = 1/350
    gravity = 9.806
    radius = 0.2
    inertia = 0.4 * 0.4 / 6
    # Physical coefficients; gradients are enabled even though only
    # [v0, mass, restitution] are handed to the optimizer later.
    frictional = torch.tensor(0.03).to(device)
    frictional.requires_grad = True
    linear_damping = torch.tensor(0.06).to(device)
    linear_damping.requires_grad = True
    v0 = torch.tensor(step_88['v'], dtype=torch.float32).to(device)
    v0.requires_grad = True
    restitution = torch.tensor(step_88['restitution'], dtype=torch.float32).to(device)
    restitution.requires_grad = True
    mass = torch.tensor(step_88['mass'], dtype=torch.float32).to(device)
    mass.requires_grad = True
def norm(vector, degree=2, dim=0):
return torch.norm(vector, degree, dim=dim)
def normalized(vector):
return vector / norm(vector)
    def collide_sphere(x, v, x_inc, impulse, t, i, j, angle, angle_impulse, collisions):
        """Sphere-vs-sphere response: effect of object j on object i at step t.

        Accumulates a velocity impulse into impulse[t + 1, i] and a
        time-of-impact position correction into x_inc[t + 1, i].  `angle` and
        `angle_impulse` are accepted for a uniform handler signature but are
        unused here.  Appends [i, j, frame] to `collisions` once per pair.
        """
        imp = torch.tensor([0.0, 0.0]).to(device)
        x_inc_contrib = torch.tensor([0.0, 0.0]).to(device)
        if i != j:
            # Predicted separation after one substep (both centres advanced by dt).
            dist = (x[t, i] + dt * v[t, i]) - (x[t, j] + dt * v[t, j])
            dist_norm = norm(dist)
            rela_v = v[t, i] - v[t, j]
            if dist_norm < 2 * radius:
                dir = normalized(dist)  # NOTE: shadows the builtin `dir`
                projected_v = dir.dot(rela_v)
                if projected_v < 0:  # approaching, not separating
                    if i < j:
                        # Log the event once per unordered pair: skip if an
                        # entry with the same [i, j] prefix already exists.
                        repeat = False
                        for item in collisions:
                            if json.dumps(item).startswith(json.dumps([i, j])[:-1]):
                                repeat = True
                        if not repeat:
                            # frame index = substep / 10 (10 substeps per frame)
                            collisions.append([i, j, round(t / 10.0)])
                    # Elastic impulse scaled by combined restitution and mass ratio.
                    imp = -(1 + restitution[i] * restitution[j]) * (mass[j] / (mass[i] + mass[j])) * projected_v * dir
                    # Time-of-impact estimate used to back out penetration.
                    toi = (dist_norm - 2 * radius) / min(
                        -1e-3, projected_v)
                    x_inc_contrib = min(toi - dt, 0) * imp
        x_inc[t + 1, i] += x_inc_contrib
        impulse[t + 1, i] += imp
    def sphere_collide_cube(x, v, x_inc, impulse, t, i, j, angle, angle_impulse, collisions):
        """Response of cube j on sphere-like object i at step t.

        The sphere centre is expressed in the cube's body frame (rotation by
        -angle[t, j]); side and corner regions of the cube decide the contact
        normal.  Accumulates impulse[t + 1, i] and x_inc[t + 1, i].
        """
        imp = torch.tensor([0.0, 0.0]).to(device)
        x_inc_contrib = torch.tensor([0.0, 0.0]).to(device)
        if i != j:
            rela_v = v[t, i] - v[t, j]
            pos_xy = x[t, i] - x[t, j]
            # Sphere centre in the cube's local (unrotated) frame.
            rotate_x = pos_xy.dot(torch.tensor([torch.cos(-angle[t, j]), -torch.sin(-angle[t, j])]))
            rotate_y = pos_xy.dot(torch.tensor([torch.sin(-angle[t, j]), torch.cos(-angle[t, j])]))
            moving_direction = torch.tensor([0.0, 0.0])
            dist_norm = 0.0
            collision = True
            # Cube half-extent equals `radius`; contact when the centre is within
            # 2*radius on both axes (side regions) or inside a corner circle.
            if torch.abs(rotate_x) > 2 * radius:
                collision = False
            elif torch.abs(rotate_y) > 2 * radius:
                collision = False
            elif torch.abs(rotate_x) <= radius:
                # Contact against the top/bottom face: normal along local y.
                if rotate_y > 0:
                    moving_direction = torch.tensor([0.0, 1.0])
                    dist_norm = rotate_y
                elif rotate_y < 0:
                    moving_direction = torch.tensor([0.0, -1.0])
                    dist_norm = - rotate_y
            elif torch.abs(rotate_y) <= radius:
                # Contact against the left/right face: normal along local x.
                if rotate_x > 0:
                    moving_direction = torch.tensor([1.0, 0.0])
                    dist_norm = rotate_x
                elif rotate_x < 0:
                    moving_direction = torch.tensor([-1.0, 0.0])
                    dist_norm = - rotate_x
            elif (torch.abs(rotate_x) - radius) ** 2 + (torch.abs(rotate_y) - radius) ** 2 <= radius ** 2:
                # Corner contact: sphere centre lies inside the circle of radius
                # `radius` around the nearest cube corner.
                if rotate_x > radius and rotate_y > radius:
                    moving_direction = normalized(torch.tensor([rotate_x - radius, rotate_y - radius]))
                    dist_norm = norm(torch.tensor([rotate_x - radius, rotate_y - radius])) + radius
                elif rotate_x < -radius and rotate_y > radius:
                    moving_direction = normalized(torch.tensor([rotate_x + radius, rotate_y - radius]))
                    dist_norm = norm(torch.tensor([rotate_x + radius, rotate_y - radius])) + radius
                elif rotate_x > radius and rotate_y < -radius:
                    moving_direction = normalized(torch.tensor([rotate_x - radius, rotate_y + radius]))
                    dist_norm = norm(torch.tensor([rotate_x - radius, rotate_y + radius])) + radius
                elif rotate_x < -radius and rotate_y < -radius:
                    moving_direction = normalized(torch.tensor([rotate_x + radius, rotate_y + radius]))
                    dist_norm = norm(torch.tensor([rotate_x + radius, rotate_y + radius])) + radius
            if collision:
                # Rotate the local contact normal back to the world frame.
                origin_dir = torch.tensor(
                    [moving_direction.dot(torch.tensor([torch.cos(angle[t, j]), -torch.sin(angle[t, j])])),
                     moving_direction.dot(torch.tensor([torch.sin(angle[t, j]), torch.cos(angle[t, j])]))]
                )
                projected_v = origin_dir.dot(rela_v)
                if projected_v < 0:
                    if i < j:
                        # Log once per unordered pair (same dedup as collide_sphere).
                        repeat = False
                        for item in collisions:
                            if json.dumps(item).startswith(json.dumps([i, j])[:-1]):
                                repeat = True
                        if not repeat:
                            collisions.append([i, j, round(t / 10.0)])
                    imp = -(1 + restitution[i] * restitution[j]) * (mass[j] / (mass[i] + mass[j])) * projected_v * origin_dir  # impulse, i.e. the velocity change
                    toi = (dist_norm - 2 * radius) / min(
                        -1e-3, projected_v)
                    x_inc_contrib = min(toi - dt, 0) * imp
        x_inc[t + 1, i] += x_inc_contrib
        impulse[t + 1, i] += imp
    def cube_collide_sphere(x, v, x_inc, impulse, t, i, j, angle, angle_impulse, collisions):
        """Response of sphere-like object j on cube i at step t.

        Like sphere_collide_cube but acting on the cube: besides the linear
        impulse (impulse[t + 1, i]) and position correction (x_inc[t + 1, i])
        it also accumulates an angular impulse (angle_impulse[t + 1, i])
        from the tangential contact force.
        """
        imp = torch.tensor([0.0, 0.0])
        x_inc_contrib = torch.tensor([0.0, 0.0])
        a_rotate = 0.0
        if i != j:
            rela_v = v[t, i] - v[t, j]
            pos_xy = x[t, j] - x[t, i]
            # Sphere centre expressed in the cube's local frame.
            rotate_x = pos_xy.dot(torch.tensor([torch.cos(-angle[t, i]), -torch.sin(-angle[t, i])]))
            rotate_y = pos_xy.dot(torch.tensor([torch.sin(-angle[t, i]), torch.cos(-angle[t, i])]))
            moving_direction = torch.tensor([0.0, 0.0])      # direction the cube is pushed (local frame)
            collision_direction = torch.tensor([0.0, 0.0])   # contact-point direction (local frame)
            dist_norm = 0.0
            r_rotate = 0.0        # lever arm of the contact point
            rotate_dir = False    # whether the induced torque is flipped
            collision = True
            if torch.abs(rotate_x) > 2 * radius:
                collision = False
            elif torch.abs(rotate_y) > 2 * radius:
                collision = False
            elif torch.abs(rotate_x) <= radius:
                # Face contact along local y.
                if rotate_y > 0:
                    moving_direction = torch.tensor([0.0, -1.0])
                    collision_direction = normalized(torch.tensor([-rotate_x, -radius]))
                    dist_norm = rotate_y
                    if rotate_x > 0:
                        rotate_dir = 1
                elif rotate_y < 0:
                    moving_direction = torch.tensor([0.0, 1.0])
                    collision_direction = normalized(torch.tensor([-rotate_x, radius]))
                    dist_norm = - rotate_y
                    if rotate_x < 0:
                        rotate_dir = 1
                r_rotate = norm(torch.tensor([radius, rotate_x]))
            elif torch.abs(rotate_y) <= radius:
                # Face contact along local x.
                if rotate_x > 0:
                    moving_direction = torch.tensor([-1.0, 0.0])
                    collision_direction = normalized(torch.tensor([-radius, -rotate_y]))
                    dist_norm = rotate_x
                    if rotate_y < 0:
                        rotate_dir = 1
                elif rotate_x < 0:
                    moving_direction = torch.tensor([1.0, 0.0])
                    collision_direction = normalized(torch.tensor([radius, -rotate_y]))
                    dist_norm = - rotate_x
                    if rotate_y > 0:
                        rotate_dir = 1
                r_rotate = norm(torch.tensor([radius, rotate_y]))
            elif (torch.abs(rotate_x) - radius) ** 2 + (torch.abs(rotate_y) - radius) ** 2 <= radius ** 2:
                # Corner contact: one case per quadrant.
                if rotate_x > radius and rotate_y > radius:
                    moving_direction = - normalized(torch.tensor([rotate_x - radius, rotate_y - radius]))
                    collision_direction = normalized(torch.tensor([-1.0, -1.0]))
                    dist_norm = norm(torch.tensor([rotate_x - radius, rotate_y - radius])) + radius
                    if rotate_y > rotate_x:
                        rotate_dir = 1
                elif rotate_x < -radius and rotate_y > radius:
                    moving_direction = - normalized(torch.tensor([rotate_x + radius, rotate_y - radius]))
                    collision_direction = normalized(torch.tensor([1.0, -1.0]))
                    dist_norm = norm(torch.tensor([rotate_x + radius, rotate_y - radius])) + radius
                    if -rotate_x > rotate_y:
                        rotate_dir = 1
                elif rotate_x > radius and rotate_y < -radius:
                    moving_direction = - normalized(torch.tensor([rotate_x - radius, rotate_y + radius]))
                    collision_direction = normalized(torch.tensor([-1.0, 1.0]))
                    dist_norm = norm(torch.tensor([rotate_x - radius, rotate_y + radius])) + radius
                    if rotate_x > -rotate_y:
                        rotate_dir = 1
                elif rotate_x < -radius and rotate_y < -radius:
                    moving_direction = - normalized(torch.tensor([rotate_x + radius, rotate_y + radius]))
                    collision_direction = normalized(torch.tensor([1.0, 1.0]))
                    dist_norm = norm(torch.tensor([rotate_x + radius, rotate_y + radius])) + radius
                    if -rotate_y > -rotate_x:
                        rotate_dir = 1
                r_rotate = norm(torch.tensor([radius, radius]))
            if collision:
                # Rotate both local directions back to the world frame.
                origin_moving_dir = torch.tensor(
                    [moving_direction.dot(torch.tensor([torch.cos(angle[t, i]), -torch.sin(angle[t, i])])),
                     moving_direction.dot(torch.tensor([torch.sin(angle[t, i]), torch.cos(angle[t, i])]))]
                )
                origin_collision_dir = torch.tensor(
                    [collision_direction.dot(torch.tensor([torch.cos(angle[t, i]), -torch.sin(angle[t, i])])),
                     collision_direction.dot(torch.tensor([torch.sin(angle[t, i]), torch.cos(angle[t, i])]))]
                )
                projected_v = origin_moving_dir.dot(rela_v)
                if projected_v < 0:
                    if i < j:
                        # Log once per unordered pair (same dedup as collide_sphere).
                        repeat = False
                        for item in collisions:
                            if json.dumps(item).startswith(json.dumps([i, j])[:-1]):
                                repeat = True
                        if not repeat:
                            collisions.append([i, j, round(t / 10.0)])
                    imp = -(1 + restitution[i] * restitution[j]) * (mass[j] / (mass[i] + mass[j])) * projected_v * origin_moving_dir
                    toi = (dist_norm - 2 * radius) / min(
                        -1e-3, projected_v)
                    x_inc_contrib = min(toi - dt, 0) * imp
                    # Tangential component of the contact force drives rotation:
                    # torque = tangential force * lever arm, divided by inertia.
                    f_rotate = (origin_moving_dir - origin_collision_dir.dot(origin_moving_dir) * origin_collision_dir).dot(-projected_v * origin_moving_dir)
                    a_rotate = f_rotate * r_rotate / inertia
                    if rotate_dir:
                        a_rotate = -a_rotate
        x_inc[t + 1, i] += x_inc_contrib
        impulse[t + 1, i] += imp
        angle_impulse[t + 1, i] += a_rotate
def collide(shape, x, v, x_inc, impulse, t, angle, angle_impulse, collisions):
for i in range(n_balls):
for j in range(i):
if shape[i] != 1 and shape[j] != 1:
collide_sphere(x, v, x_inc, impulse, t, i, j, angle, angle_impulse, collisions)
elif shape[i] != 1 and shape[j] == 1:
sphere_collide_cube(x, v, x_inc, impulse, t, i, j, angle, angle_impulse, collisions)
elif shape[i] == 1 and shape[j] != 1:
cube_collide_sphere(x, v, x_inc, impulse, t, i, j, angle, angle_impulse, collisions)
elif shape[i] == 1 and shape[j] == 1:
collide_sphere(x, v, x_inc, impulse, t, i, j, angle, angle_impulse, collisions)
for i in range(n_balls):
for j in range(i + 1, n_balls):
if shape[i] != 1 and shape[j] != 1:
collide_sphere(x, v, x_inc, impulse, t, i, j, angle, angle_impulse, collisions)
elif shape[i] != 1 and shape[j] == 1:
sphere_collide_cube(x, v, x_inc, impulse, t, i, j, angle, angle_impulse, collisions)
elif shape[i] == 1 and shape[j] != 1:
cube_collide_sphere(x, v, x_inc, impulse, t, i, j, angle, angle_impulse, collisions)
elif shape[i] == 1 and shape[j] == 1:
collide_sphere(x, v, x_inc, impulse, t, i, j, angle, angle_impulse, collisions)
    def friction(shape, x, v, x_inc, impulse, v_old, t, i):
        """Write the damped velocity of object i into v[t, i], component-wise.

        Spheres (shape code 0) see only quadratic linear damping; all other
        shapes additionally lose gravity * frictional * dt per substep along
        the motion direction (Coulomb-style friction).  max/min clamp each
        component at zero so damping never reverses the sign of the velocity.
        `x`, `x_inc`, `impulse` are unused; kept for a uniform signature.
        """
        if shape[i] == 0:
            if v_old[0] > 0.0:
                v[t, i][0] = max(0, v_old[0] - linear_damping * dt * v_old[0] * norm(v_old))
            elif v_old[0] < 0.0:
                v[t, i][0] = min(0, v_old[0] - linear_damping * dt * v_old[0] * norm(v_old))
            if v_old[1] > 0.0:
                v[t, i][1] = max(0, v_old[1] - linear_damping * dt * v_old[1] * norm(v_old))
            elif v_old[1] < 0.0:
                v[t, i][1] = min(0, v_old[1] - linear_damping * dt * v_old[1] * norm(v_old))
        else:
            if v_old[0] > 0.0:
                v[t, i][0] = max(0, v_old[0] - gravity * frictional * dt * normalized(v_old)[0] - linear_damping * dt * v_old[0] * norm(v_old))
            elif v_old[0] < 0.0:
                v[t, i][0] = min(0, v_old[0] - gravity * frictional * dt * normalized(v_old)[0] - linear_damping * dt * v_old[0] * norm(v_old))
            if v_old[1] > 0.0:
                v[t, i][1] = max(0, v_old[1] - gravity * frictional * dt * normalized(v_old)[1] - linear_damping * dt * v_old[1] * norm(v_old))
            elif v_old[1] < 0.0:
                v[t, i][1] = min(0, v_old[1] - gravity * frictional * dt * normalized(v_old)[1] - linear_damping * dt * v_old[1] * norm(v_old))
    def advance(shape, x, v, x_inc, impulse, t, angle, delta_angle, angle_impulse):
        """Integrate one substep: apply impulses, friction, then update pose.

        For every object: add the pending collision impulse to the velocity,
        damp it via friction(), advance the position with the trapezoidal
        average of old/new velocity plus the TOI correction, and integrate
        the angular velocity (delta_angle) with its own decay toward zero.
        """
        for i in range(n_balls):
            v_old = v[t - 1, i] + impulse[t, i]
            friction(shape, x, v, x_inc, impulse, v_old, t, i)
            x[t, i] = x[t - 1, i] + dt * (v[t, i] + v_old)/2 + x_inc[t, i]
            delta_angle[t, i] = delta_angle[t - 1, i] + angle_impulse[t, i]
            # Angular velocity decays toward zero (clamped so it never flips sign).
            if delta_angle[t, i] > 0.0:
                delta_angle[t, i] = max(0, delta_angle[t, i] - dt * gravity / 2)
            elif delta_angle[t, i] < 0.0:
                delta_angle[t, i] = min(0, delta_angle[t, i] + dt * gravity / 2)
            angle[t, i] = angle[t - 1, i] + dt * delta_angle[t, i]
def init():
x = torch.zeros((steps, n_balls, 2), dtype=torch.float32).to(device)
v = torch.zeros((steps, n_balls, 2), dtype=torch.float32).to(device)
x_inc = torch.zeros((steps, n_balls, 2), dtype=torch.float32).to(device)
impulse = torch.zeros((steps, n_balls, 2), dtype=torch.float32).to(device)
angle = torch.zeros((steps, n_balls), dtype=torch.float32).to(device)
delta_angle = torch.zeros((steps, n_balls), dtype=torch.float32).to(device)
angle_impulse = torch.zeros((steps, n_balls), dtype=torch.float32).to(device)
x[0, :] = target_x[0]
v[0, :] = v0
angle[0, :] = angle0
return x, v, x_inc, impulse, angle, delta_angle, angle_impulse
    def closure():
        """LBFGS closure: roll the simulator forward 210 substeps and return
        the MSE against the observed trajectory at every `interval`-th step.

        Entries of target_x >= 100 are sentinels for unobserved positions;
        multiplying by the boolean mask (target < 100) zeroes their error.
        """
        optimizer.zero_grad()
        x, v, x_inc, impulse, angle, delta_angle, angle_impulse = init()
        loss = 0
        collisions = []
        for t in range(1, 210):
            collide(shape, x, v, x_inc, impulse, t - 1, angle, angle_impulse, collisions)
            advance(shape, x, v, x_inc, impulse, t, angle, delta_angle, angle_impulse)
            if t % interval == 0:
                loss += (((x[t, :] - target_x[int(t/interval), :]) * (target_x[int(t/interval), :] < 100)) ** 2).mean()
        return loss
def init_inference():
x = torch.zeros((210, n_balls, 2), dtype=torch.float32).to(device)
v = torch.zeros((210, n_balls, 2), dtype=torch.float32).to(device)
x_inc = torch.zeros((210, n_balls, 2), dtype=torch.float32).to(device)
impulse = torch.zeros((210, n_balls, 2), dtype=torch.float32).to(device)
angle = torch.zeros((210, n_balls), dtype=torch.float32).to(device)
delta_angle = torch.zeros((210, n_balls), dtype=torch.float32).to(device)
angle_impulse = torch.zeros((210, n_balls), dtype=torch.float32).to(device)
x[0, :] = target_x[0]
v[0, :] = v0
angle[0, :] = angle0
return x, v, x_inc, impulse, angle, delta_angle, angle_impulse
    # if __name__ == '__main__':
    # Stage 1: fit initial velocity, mass and restitution to the observed
    # trajectory with full-batch LBFGS, then run a clean 210-substep rollout.
    optimizer = FullBatchLBFGS([v0, mass, restitution])
    start = time.time()
    loss = closure()
    loss.backward()
    for i in range(15):
        options = {'closure': closure, 'current_loss': loss, 'max_ls': 10}
        loss, _, lr, _, F_eval, G_eval, _, _ = optimizer.step(options)
        print(loss, lr, v0, mass, restitution)
        # Stop on convergence or when the line search collapses (lr == 0).
        if loss < 0.0002 or lr == 0:
            break
    time_cost = time.time() - start
    print(f'----- learned, cost {time_cost}s')
    collisions = []
    x, v, x_inc, impulse, angle, delta_angle, angle_impulse = init_inference()
    for t in range(1, 210):
        collide(shape, x, v, x_inc, impulse, t - 1, angle, angle_impulse, collisions)  # resolve collisions
        advance(shape, x, v, x_inc, impulse, t, angle, delta_angle, angle_impulse)  # update velocity and position
    # ==================================================================================
    # Merge the simulated collisions (frame >= 88) into the prediction record:
    # drop stale predicted collisions after frame 88, keep earlier ones, and
    # append newly simulated ones that are not already present.
    shapes = []
    shape_dict = {
        'sphere': 0,
        'cube': 1,
        'cylinder': 2
    }
    reverse_shape_dict = {
        0: 'sphere',
        1: 'cube',
        2: 'cylinder'
    }
    colors = []
    materials = []
    for object_index, identity in enumerate(object_dict.keys()):
        shapes.append(shape_dict[object_dict[identity]['shape']])
        colors.append(object_dict[identity]['color'])
        materials.append(object_dict[identity]['material'])
    gt_objects = list(object_dict.keys())
    # Iterate over a copy because entries are removed from the live list.
    old_collisions = output_dict['predictions'][0]['collisions'].copy()
    uniq_collisions = []
    for item in old_collisions:
        if item['frame'] > 88:
            output_dict['predictions'][0]['collisions'].remove(item)
            print('remove collision', item['frame'])
        else:
            # Kept collisions, re-encoded as [index_i, index_j, frame] for the
            # duplicate check below (identity string = color+material+shape).
            uniq_collisions.append([gt_objects.index(item['objects'][0]['color'] + item['objects'][0]['material'] + item['objects'][0]['shape']),
                                    gt_objects.index(item['objects'][1]['color'] + item['objects'][1]['material'] + item['objects'][1]['shape']),
                                    item['frame']])
    for collision_index, item in enumerate(collisions):
        i, j, frame = item
        repeat = False
        # Skip simulated collisions whose (i, j) pair already appears.
        for colli_item in uniq_collisions:
            if json.dumps(colli_item).startswith(json.dumps([i, j])[:-1]):
                repeat = True
        if not repeat:
            output_dict['predictions'][0]['collisions'].append({
                'frame': 88 + frame,
                'objects': [{
                    'color': colors[i],
                    'material': materials[i],
                    'shape': reverse_shape_dict[shapes[i]],
                }, {
                    'color': colors[j],
                    'material': materials[j],
                    'shape': reverse_shape_dict[shapes[j]],
                }]
            })
            print('add collision', 88 + frame)
output_dict['predictions'][0]['trajectory'] = output_dict['predictions'][0]['trajectory'][:18]
print('keep trajectory from 0 to', output_dict['predictions'][0]['trajectory'][-1]['frame_index'])
for frame_index, locations in enumerate(x):
if frame_index % 50 == 20:
frame_info = {'frame_index': 88 + frame_index // 10,
'objects': []}
for object_index, location in enumerate(locations):
xy = get_2d_coor(location[0].cpu().item(), location[1].cpu().item())
xy1 = get_2d_coor(location[0].cpu().item() + radius * 0.7071, location[1].cpu().item(), z3d=radius * (1 - 0.7071))
xy2 = get_2d_coor(location[0].cpu().item() - radius * 0.7071, location[1].cpu().item(), z3d=radius * (1 + 0.7071))
xy3 = get_2d_coor(location[0].cpu().item(), location[1].cpu().item() + radius)
xy4 = get_2d_coor(location[0].cpu().item(), location[1].cpu().item() - radius)
xy5 = get_2d_coor(location[0].cpu().item(), location[1].cpu().item(), z3d=0)
xy6 = get_2d_coor(location[0].cpu().item(), location[1].cpu().item(), z3d=2 * radius)
if (-10 < xy[0] < 490 and -10 < xy[1] < 330) \
or (0 < xy1[0] < 480 and 0 < xy1[1] < 320) \
or (0 < xy2[0] < 480 and 0 < xy2[1] < 320) \
or (0 < xy3[0] < 480 and 0 < xy3[1] < 320) \
or (0 < xy4[0] < 480 and 0 < xy3[1] < 320) \
or (0 < xy5[0] < 480 and 0 < xy3[1] < 320) \
or (0 < xy6[0] < 480 and 0 < xy4[1] < 320):
frame_info['objects'].append({
'x': float(xy[1]) / 3.2,
'y': float(xy[0]) / 3.2,
'color': colors[object_index],
'material': materials[object_index],
'shape': reverse_shape_dict[shapes[object_index]],
})
output_dict['predictions'][0]['trajectory'].append(frame_info)
print('add trajectory', frame_info['frame_index'])
    # Stage 2 setup: re-fit from the last 20 observed frames, seeding the
    # initial state from the end of the stage-1 rollout (x, v, angle at [-1]).
    n_balls = len(object_dict)
    steps = 200
    target_x = torch.zeros((128, n_balls, 2), dtype=torch.float32).to(device) + 1000
    shapes = []
    shape_dict = {
        'sphere': 0,
        'cube': 1,
        'cylinder': 2
    }
    for object_index, identity in enumerate(object_dict.keys()):
        locations = torch.tensor(object_dict[identity]['trajectory']).to(device)
        target_x[:locations.shape[0], object_index, :] = locations
        shapes.append(shape_dict[object_dict[identity]['shape']])
    # Last 20 observed frames are the supervision targets this time.
    target_x = target_x[-20:]
    # Off-screen initial positions come from the stage-1 final state instead.
    for object_index, identity in enumerate(object_dict.keys()):
        if target_x[0][object_index][0] > 500:
            target_x[0][object_index] = torch.tensor(x[-1].detach()[object_index])
    shape = torch.tensor(shapes, dtype=torch.int8).to(device)
    angle0 = angle[-1].detach()
    angle0.requires_grad = True
    interval = 10
    dt = 1/350
    gravity = 9.806
    radius = 0.2
    inertia = 0.4 * 0.4 / 6
    frictional = torch.tensor(0.03).to(device)
    frictional.requires_grad = True
    linear_damping = torch.tensor(0.06).to(device)
    linear_damping.requires_grad = True
    # NOTE(review): torch.tensor(tensor) copies and emits a UserWarning;
    # the recommended spelling is .clone().detach() — behavior is a plain copy.
    v0 = torch.tensor(v[-1].detach(), dtype=torch.float32).to(device)
    v0.requires_grad = True
    restitution = torch.tensor(restitution.detach(), dtype=torch.float32).to(device)
    restitution.requires_grad = True
    mass = torch.tensor(mass.detach(), dtype=torch.float32).to(device)
    mass.requires_grad = True
    def closure_108():
        """Stage-2 LBFGS closure: 200-substep rollout against the last 20
        observed frames; identical masking scheme to closure()."""
        optimizer.zero_grad()
        x, v, x_inc, impulse, angle, delta_angle, angle_impulse = init()
        loss = 0
        collisions = []
        for t in range(1, 200):
            collide(shape, x, v, x_inc, impulse, t - 1, angle, angle_impulse, collisions)
            advance(shape, x, v, x_inc, impulse, t, angle, delta_angle, angle_impulse)
            if t % interval == 0:
                loss += (((x[t, :] - target_x[int(t/interval), :]) * (target_x[int(t/interval), :] < 100)) ** 2).mean()
        return loss
def init_inference_108():
x = torch.zeros((780, n_balls, 2), dtype=torch.float32).to(device)
v = torch.zeros((780, n_balls, 2), dtype=torch.float32).to(device)
x_inc = torch.zeros((780, n_balls, 2), dtype=torch.float32).to(device)
impulse = torch.zeros((780, n_balls, 2), dtype=torch.float32).to(device)
angle = torch.zeros((780, n_balls), dtype=torch.float32).to(device)
delta_angle = torch.zeros((780, n_balls), dtype=torch.float32).to(device)
angle_impulse = torch.zeros((780, n_balls), dtype=torch.float32).to(device)
x[0, :] = target_x[0]
v[0, :] = v0
angle[0, :] = angle0
return x, v, x_inc, impulse, angle, delta_angle, angle_impulse
    # Stage 2: re-fit [v0, mass, restitution] on the last observed frames and
    # roll 780 substeps forward to extrapolate beyond frame 108.
    optimizer = FullBatchLBFGS([v0, mass, restitution])
    start = time.time()
    loss = closure_108()
    loss.backward()
    for i in range(15):
        options = {'closure': closure_108, 'current_loss': loss, 'max_ls': 10}
        loss, _, lr, _, F_eval, G_eval, _, _ = optimizer.step(options)
        print(loss, lr, v0, mass, restitution)
        # Stop on convergence or when the line search collapses (lr == 0).
        if loss < 0.0002 or lr == 0:
            break
    time_cost = time.time() - start
    print(f'----- learned, cost {time_cost}s')
    collisions = []
    x, v, x_inc, impulse, angle, delta_angle, angle_impulse = init_inference_108()
    for t in range(1, 780):
        collide(shape, x, v, x_inc, impulse, t - 1, angle, angle_impulse, collisions)
        advance(shape, x, v, x_inc, impulse, t, angle, delta_angle, angle_impulse)
    # ==================================================================================
    # Merge stage-2 collisions into the record, same scheme as stage 1 but
    # with the cutoff at frame 108.
    shapes = []
    shape_dict = {
        'sphere': 0,
        'cube': 1,
        'cylinder': 2
    }
    reverse_shape_dict = {
        0: 'sphere',
        1: 'cube',
        2: 'cylinder'
    }
    colors = []
    materials = []
    for object_index, identity in enumerate(object_dict.keys()):
        shapes.append(shape_dict[object_dict[identity]['shape']])
        colors.append(object_dict[identity]['color'])
        materials.append(object_dict[identity]['material'])
    gt_objects = list(object_dict.keys())
    # Iterate over a copy because entries are removed from the live list.
    old_collisions = output_dict['predictions'][0]['collisions'].copy()
    uniq_collisions = []
    for item in old_collisions:
        if item['frame'] > 108:
            output_dict['predictions'][0]['collisions'].remove(item)
            print('remove collision', item['frame'])
        else:
            uniq_collisions.append([gt_objects.index(item['objects'][0]['color'] + item['objects'][0]['material'] + item['objects'][0]['shape']),
                                    gt_objects.index(item['objects'][1]['color'] + item['objects'][1]['material'] + item['objects'][1]['shape']),
                                    item['frame']])
    for collision_index, item in enumerate(collisions):
        i, j, frame = item
        repeat = False
        # Skip simulated collisions whose (i, j) pair already appears.
        for colli_item in uniq_collisions:
            if json.dumps(colli_item).startswith(json.dumps([i, j])[:-1]):
                repeat = True
        if not repeat:
            output_dict['predictions'][0]['collisions'].append({
                'frame': 108 + frame,
                'objects': [{
                    'color': colors[i],
                    'material': materials[i],
                    'shape': reverse_shape_dict[shapes[i]],
                }, {
                    'color': colors[j],
                    'material': materials[j],
                    'shape': reverse_shape_dict[shapes[j]],
                }]
            })
            print('add collision', 108 + frame)
output_dict['predictions'][0]['trajectory'] = output_dict['predictions'][0]['trajectory'][:22]
print('keep trajectory from 0 to', output_dict['predictions'][0]['trajectory'][-1]['frame_index'])
for frame_index, locations in enumerate(x):
if frame_index % 50 == 20:
frame_info = {'frame_index': 108 + frame_index // 10,
'objects': []}
for object_index, location in enumerate(locations):
xy = get_2d_coor(location[0].cpu().item(), location[1].cpu().item())
xy1 = get_2d_coor(location[0].cpu().item() + radius * 0.7071, location[1].cpu().item(), z3d=radius * (1 - 0.7071))
xy2 = get_2d_coor(location[0].cpu().item() - radius * 0.7071, location[1].cpu().item(), z3d=radius * (1 + 0.7071))
xy3 = get_2d_coor(location[0].cpu().item(), location[1].cpu().item() + radius)
xy4 = get_2d_coor(location[0].cpu().item(), location[1].cpu().item() - radius)
xy5 = get_2d_coor(location[0].cpu().item(), location[1].cpu().item(), z3d=0)
xy6 = get_2d_coor(location[0].cpu().item(), location[1].cpu().item(), z3d=2 * radius)
if (-10 < xy[0] < 490 and -10 < xy[1] < 330) \
or (0 < xy1[0] < 480 and 0 < xy1[1] < 320) \
or (0 < xy2[0] < 480 and 0 < xy2[1] < 320) \
or (0 < xy3[0] < 480 and 0 < xy3[1] < 320) \
or (0 < xy4[0] < 480 and 0 < xy3[1] < 320) \
or (0 < xy5[0] < 480 and 0 < xy3[1] < 320) \
or (0 < xy6[0] < 480 and 0 < xy4[1] < 320):
frame_info['objects'].append({
'x': float(xy[1]) / 3.2,
'y': float(xy[0]) / 3.2,
'color': colors[object_index],
'material': materials[object_index],
'shape': reverse_shape_dict[shapes[object_index]],
})
output_dict['predictions'][0]['trajectory'].append(frame_info)
print('add trajectory', frame_info['frame_index'])
json.dump(output_dict, open(f'../data/object_updated_results/sim_{process_index:05d}.json', 'w'))
| [
"LBFGS.FullBatchLBFGS",
"torch.abs",
"json.dumps",
"torch.sin",
"numpy.array",
"torch.norm",
"torch.tensor",
"torch.cos",
"time.time",
"torch.zeros"
] | [((885, 1180), 'numpy.array', 'np.array', (['((-207.8461456298828, 525.0000610351562, -120.00001525878906, \n 1200.0003662109375), (123.93595886230469, 1.832598354667425e-05, -\n 534.663330078125, 799.9999389648438), (-0.866025447845459, -\n 3.650024282819686e-08, -0.4999999701976776, 5.000000476837158), (0, 0, \n 0, 1))'], {}), '(((-207.8461456298828, 525.0000610351562, -120.00001525878906, \n 1200.0003662109375), (123.93595886230469, 1.832598354667425e-05, -\n 534.663330078125, 799.9999389648438), (-0.866025447845459, -\n 3.650024282819686e-08, -0.4999999701976776, 5.000000476837158), (0, 0, \n 0, 1)))\n', (893, 1180), True, 'import numpy as np\n'), ((1246, 1302), 'numpy.array', 'np.array', (['[[x3d], [y3d], [z3d], [1.0]]'], {'dtype': 'np.float32'}), '([[x3d], [y3d], [z3d], [1.0]], dtype=np.float32)\n', (1254, 1302), True, 'import numpy as np\n'), ((19995, 20034), 'LBFGS.FullBatchLBFGS', 'FullBatchLBFGS', (['[v0, mass, restitution]'], {}), '([v0, mass, restitution])\n', (20009, 20034), False, 'from LBFGS import FullBatchLBFGS\n'), ((20047, 20058), 'time.time', 'time.time', ([], {}), '()\n', (20056, 20058), False, 'import time\n'), ((27767, 27806), 'LBFGS.FullBatchLBFGS', 'FullBatchLBFGS', (['[v0, mass, restitution]'], {}), '([v0, mass, restitution])\n', (27781, 27806), False, 'from LBFGS import FullBatchLBFGS\n'), ((27819, 27830), 'time.time', 'time.time', ([], {}), '()\n', (27828, 27830), False, 'import time\n'), ((3353, 3388), 'torch.norm', 'torch.norm', (['vector', 'degree'], {'dim': 'dim'}), '(vector, degree, dim=dim)\n', (3363, 3388), False, 'import torch\n'), ((8546, 8570), 'torch.tensor', 'torch.tensor', (['[0.0, 0.0]'], {}), '([0.0, 0.0])\n', (8558, 8570), False, 'import torch\n'), ((8595, 8619), 'torch.tensor', 'torch.tensor', (['[0.0, 0.0]'], {}), '([0.0, 0.0])\n', (8607, 8619), False, 'import torch\n'), ((20390, 20401), 'time.time', 'time.time', ([], {}), '()\n', (20399, 20401), False, 'import time\n'), ((28170, 28181), 'time.time', 'time.time', 
([], {}), '()\n', (28179, 28181), False, 'import time\n'), ((2487, 2527), 'torch.tensor', 'torch.tensor', (["step_88['x'][object_index]"], {}), "(step_88['x'][object_index])\n", (2499, 2527), False, 'import torch\n'), ((2541, 2579), 'torch.tensor', 'torch.tensor', (['shapes'], {'dtype': 'torch.int8'}), '(shapes, dtype=torch.int8)\n', (2553, 2579), False, 'import torch\n'), ((2604, 2655), 'torch.tensor', 'torch.tensor', (["step_88['angle']"], {'dtype': 'torch.float32'}), "(step_88['angle'], dtype=torch.float32)\n", (2616, 2655), False, 'import torch\n'), ((2816, 2834), 'torch.tensor', 'torch.tensor', (['(0.03)'], {}), '(0.03)\n', (2828, 2834), False, 'import torch\n'), ((2903, 2921), 'torch.tensor', 'torch.tensor', (['(0.06)'], {}), '(0.06)\n', (2915, 2921), False, 'import torch\n'), ((2982, 3029), 'torch.tensor', 'torch.tensor', (["step_88['v']"], {'dtype': 'torch.float32'}), "(step_88['v'], dtype=torch.float32)\n", (2994, 3029), False, 'import torch\n'), ((3088, 3145), 'torch.tensor', 'torch.tensor', (["step_88['restitution']"], {'dtype': 'torch.float32'}), "(step_88['restitution'], dtype=torch.float32)\n", (3100, 3145), False, 'import torch\n'), ((3205, 3255), 'torch.tensor', 'torch.tensor', (["step_88['mass']"], {'dtype': 'torch.float32'}), "(step_88['mass'], dtype=torch.float32)\n", (3217, 3255), False, 'import torch\n'), ((5229, 5253), 'torch.tensor', 'torch.tensor', (['[0.0, 0.0]'], {}), '([0.0, 0.0])\n', (5241, 5253), False, 'import torch\n'), ((8973, 8997), 'torch.tensor', 'torch.tensor', (['[0.0, 0.0]'], {}), '([0.0, 0.0])\n', (8985, 8997), False, 'import torch\n'), ((9032, 9056), 'torch.tensor', 'torch.tensor', (['[0.0, 0.0]'], {}), '([0.0, 0.0])\n', (9044, 9056), False, 'import torch\n'), ((25750, 25788), 'torch.tensor', 'torch.tensor', (['shapes'], {'dtype': 'torch.int8'}), '(shapes, dtype=torch.int8)\n', (25762, 25788), False, 'import torch\n'), ((25981, 25999), 'torch.tensor', 'torch.tensor', (['(0.03)'], {}), '(0.03)\n', (25993, 25999), False, 
'import torch\n'), ((26068, 26086), 'torch.tensor', 'torch.tensor', (['(0.06)'], {}), '(0.06)\n', (26080, 26086), False, 'import torch\n'), ((1847, 1898), 'torch.zeros', 'torch.zeros', (['(128, n_balls, 2)'], {'dtype': 'torch.float32'}), '((128, n_balls, 2), dtype=torch.float32)\n', (1858, 1898), False, 'import torch\n'), ((2107, 2156), 'torch.tensor', 'torch.tensor', (["object_dict[identity]['trajectory']"], {}), "(object_dict[identity]['trajectory'])\n", (2119, 2156), False, 'import torch\n'), ((3561, 3585), 'torch.tensor', 'torch.tensor', (['[0.0, 0.0]'], {}), '([0.0, 0.0])\n', (3573, 3585), False, 'import torch\n'), ((3621, 3645), 'torch.tensor', 'torch.tensor', (['[0.0, 0.0]'], {}), '([0.0, 0.0])\n', (3633, 3645), False, 'import torch\n'), ((4804, 4828), 'torch.tensor', 'torch.tensor', (['[0.0, 0.0]'], {}), '([0.0, 0.0])\n', (4816, 4828), False, 'import torch\n'), ((4864, 4888), 'torch.tensor', 'torch.tensor', (['[0.0, 0.0]'], {}), '([0.0, 0.0])\n', (4876, 4888), False, 'import torch\n'), ((5327, 5346), 'torch.abs', 'torch.abs', (['rotate_x'], {}), '(rotate_x)\n', (5336, 5346), False, 'import torch\n'), ((9188, 9207), 'torch.abs', 'torch.abs', (['rotate_x'], {}), '(rotate_x)\n', (9197, 9207), False, 'import torch\n'), ((17962, 18015), 'torch.zeros', 'torch.zeros', (['(steps, n_balls, 2)'], {'dtype': 'torch.float32'}), '((steps, n_balls, 2), dtype=torch.float32)\n', (17973, 18015), False, 'import torch\n'), ((18039, 18092), 'torch.zeros', 'torch.zeros', (['(steps, n_balls, 2)'], {'dtype': 'torch.float32'}), '((steps, n_balls, 2), dtype=torch.float32)\n', (18050, 18092), False, 'import torch\n'), ((18120, 18173), 'torch.zeros', 'torch.zeros', (['(steps, n_balls, 2)'], {'dtype': 'torch.float32'}), '((steps, n_balls, 2), dtype=torch.float32)\n', (18131, 18173), False, 'import torch\n'), ((18203, 18256), 'torch.zeros', 'torch.zeros', (['(steps, n_balls, 2)'], {'dtype': 'torch.float32'}), '((steps, n_balls, 2), dtype=torch.float32)\n', (18214, 18256), False, 'import 
torch\n'), ((18284, 18334), 'torch.zeros', 'torch.zeros', (['(steps, n_balls)'], {'dtype': 'torch.float32'}), '((steps, n_balls), dtype=torch.float32)\n', (18295, 18334), False, 'import torch\n'), ((18368, 18418), 'torch.zeros', 'torch.zeros', (['(steps, n_balls)'], {'dtype': 'torch.float32'}), '((steps, n_balls), dtype=torch.float32)\n', (18379, 18418), False, 'import torch\n'), ((18454, 18504), 'torch.zeros', 'torch.zeros', (['(steps, n_balls)'], {'dtype': 'torch.float32'}), '((steps, n_balls), dtype=torch.float32)\n', (18465, 18504), False, 'import torch\n'), ((19257, 19308), 'torch.zeros', 'torch.zeros', (['(210, n_balls, 2)'], {'dtype': 'torch.float32'}), '((210, n_balls, 2), dtype=torch.float32)\n', (19268, 19308), False, 'import torch\n'), ((19332, 19383), 'torch.zeros', 'torch.zeros', (['(210, n_balls, 2)'], {'dtype': 'torch.float32'}), '((210, n_balls, 2), dtype=torch.float32)\n', (19343, 19383), False, 'import torch\n'), ((19411, 19462), 'torch.zeros', 'torch.zeros', (['(210, n_balls, 2)'], {'dtype': 'torch.float32'}), '((210, n_balls, 2), dtype=torch.float32)\n', (19422, 19462), False, 'import torch\n'), ((19492, 19543), 'torch.zeros', 'torch.zeros', (['(210, n_balls, 2)'], {'dtype': 'torch.float32'}), '((210, n_balls, 2), dtype=torch.float32)\n', (19503, 19543), False, 'import torch\n'), ((19571, 19619), 'torch.zeros', 'torch.zeros', (['(210, n_balls)'], {'dtype': 'torch.float32'}), '((210, n_balls), dtype=torch.float32)\n', (19582, 19619), False, 'import torch\n'), ((19653, 19701), 'torch.zeros', 'torch.zeros', (['(210, n_balls)'], {'dtype': 'torch.float32'}), '((210, n_balls), dtype=torch.float32)\n', (19664, 19701), False, 'import torch\n'), ((19737, 19785), 'torch.zeros', 'torch.zeros', (['(210, n_balls)'], {'dtype': 'torch.float32'}), '((210, n_balls), dtype=torch.float32)\n', (19748, 19785), False, 'import torch\n'), ((25057, 25108), 'torch.zeros', 'torch.zeros', (['(128, n_balls, 2)'], {'dtype': 'torch.float32'}), '((128, n_balls, 2), 
dtype=torch.float32)\n', (25068, 25108), False, 'import torch\n'), ((25317, 25366), 'torch.tensor', 'torch.tensor', (["object_dict[identity]['trajectory']"], {}), "(object_dict[identity]['trajectory'])\n", (25329, 25366), False, 'import torch\n'), ((27057, 27108), 'torch.zeros', 'torch.zeros', (['(780, n_balls, 2)'], {'dtype': 'torch.float32'}), '((780, n_balls, 2), dtype=torch.float32)\n', (27068, 27108), False, 'import torch\n'), ((27132, 27183), 'torch.zeros', 'torch.zeros', (['(780, n_balls, 2)'], {'dtype': 'torch.float32'}), '((780, n_balls, 2), dtype=torch.float32)\n', (27143, 27183), False, 'import torch\n'), ((27211, 27262), 'torch.zeros', 'torch.zeros', (['(780, n_balls, 2)'], {'dtype': 'torch.float32'}), '((780, n_balls, 2), dtype=torch.float32)\n', (27222, 27262), False, 'import torch\n'), ((27292, 27343), 'torch.zeros', 'torch.zeros', (['(780, n_balls, 2)'], {'dtype': 'torch.float32'}), '((780, n_balls, 2), dtype=torch.float32)\n', (27303, 27343), False, 'import torch\n'), ((27371, 27419), 'torch.zeros', 'torch.zeros', (['(780, n_balls)'], {'dtype': 'torch.float32'}), '((780, n_balls), dtype=torch.float32)\n', (27382, 27419), False, 'import torch\n'), ((27453, 27501), 'torch.zeros', 'torch.zeros', (['(780, n_balls)'], {'dtype': 'torch.float32'}), '((780, n_balls), dtype=torch.float32)\n', (27464, 27501), False, 'import torch\n'), ((27537, 27585), 'torch.zeros', 'torch.zeros', (['(780, n_balls)'], {'dtype': 'torch.float32'}), '((780, n_balls), dtype=torch.float32)\n', (27548, 27585), False, 'import torch\n'), ((5412, 5431), 'torch.abs', 'torch.abs', (['rotate_y'], {}), '(rotate_y)\n', (5421, 5431), False, 'import torch\n'), ((9273, 9292), 'torch.abs', 'torch.abs', (['rotate_y'], {}), '(rotate_y)\n', (9282, 9292), False, 'import torch\n'), ((22189, 22211), 'json.dumps', 'json.dumps', (['colli_item'], {}), '(colli_item)\n', (22199, 22211), False, 'import json\n'), ((22223, 22241), 'json.dumps', 'json.dumps', (['[i, j]'], {}), '([i, j])\n', (22233, 22241), 
False, 'import json\n'), ((29957, 29979), 'json.dumps', 'json.dumps', (['colli_item'], {}), '(colli_item)\n', (29967, 29979), False, 'import json\n'), ((29991, 30009), 'json.dumps', 'json.dumps', (['[i, j]'], {}), '([i, j])\n', (30001, 30009), False, 'import json\n'), ((5045, 5068), 'torch.cos', 'torch.cos', (['(-angle[t, j])'], {}), '(-angle[t, j])\n', (5054, 5068), False, 'import torch\n'), ((5146, 5169), 'torch.sin', 'torch.sin', (['(-angle[t, j])'], {}), '(-angle[t, j])\n', (5155, 5169), False, 'import torch\n'), ((5171, 5194), 'torch.cos', 'torch.cos', (['(-angle[t, j])'], {}), '(-angle[t, j])\n', (5180, 5194), False, 'import torch\n'), ((5497, 5516), 'torch.abs', 'torch.abs', (['rotate_x'], {}), '(rotate_x)\n', (5506, 5516), False, 'import torch\n'), ((8788, 8811), 'torch.cos', 'torch.cos', (['(-angle[t, i])'], {}), '(-angle[t, i])\n', (8797, 8811), False, 'import torch\n'), ((8889, 8912), 'torch.sin', 'torch.sin', (['(-angle[t, i])'], {}), '(-angle[t, i])\n', (8898, 8912), False, 'import torch\n'), ((8914, 8937), 'torch.cos', 'torch.cos', (['(-angle[t, i])'], {}), '(-angle[t, i])\n', (8923, 8937), False, 'import torch\n'), ((9358, 9377), 'torch.abs', 'torch.abs', (['rotate_x'], {}), '(rotate_x)\n', (9367, 9377), False, 'import torch\n'), ((5071, 5094), 'torch.sin', 'torch.sin', (['(-angle[t, j])'], {}), '(-angle[t, j])\n', (5080, 5094), False, 'import torch\n'), ((5600, 5624), 'torch.tensor', 'torch.tensor', (['[0.0, 1.0]'], {}), '([0.0, 1.0])\n', (5612, 5624), False, 'import torch\n'), ((5826, 5845), 'torch.abs', 'torch.abs', (['rotate_y'], {}), '(rotate_y)\n', (5835, 5845), False, 'import torch\n'), ((8814, 8837), 'torch.sin', 'torch.sin', (['(-angle[t, i])'], {}), '(-angle[t, i])\n', (8823, 8837), False, 'import torch\n'), ((9461, 9486), 'torch.tensor', 'torch.tensor', (['[0.0, -1.0]'], {}), '([0.0, -1.0])\n', (9473, 9486), False, 'import torch\n'), ((10031, 10063), 'torch.tensor', 'torch.tensor', (['[radius, rotate_x]'], {}), '([radius, rotate_x])\n', 
(10043, 10063), False, 'import torch\n'), ((10082, 10101), 'torch.abs', 'torch.abs', (['rotate_y'], {}), '(rotate_y)\n', (10091, 10101), False, 'import torch\n'), ((5740, 5765), 'torch.tensor', 'torch.tensor', (['[0.0, -1.0]'], {}), '([0.0, -1.0])\n', (5752, 5765), False, 'import torch\n'), ((5929, 5953), 'torch.tensor', 'torch.tensor', (['[1.0, 0.0]'], {}), '([1.0, 0.0])\n', (5941, 5953), False, 'import torch\n'), ((9540, 9574), 'torch.tensor', 'torch.tensor', (['[-rotate_x, -radius]'], {}), '([-rotate_x, -radius])\n', (9552, 9574), False, 'import torch\n'), ((9767, 9791), 'torch.tensor', 'torch.tensor', (['[0.0, 1.0]'], {}), '([0.0, 1.0])\n', (9779, 9791), False, 'import torch\n'), ((10185, 10210), 'torch.tensor', 'torch.tensor', (['[-1.0, 0.0]'], {}), '([-1.0, 0.0])\n', (10197, 10210), False, 'import torch\n'), ((10755, 10787), 'torch.tensor', 'torch.tensor', (['[radius, rotate_y]'], {}), '([radius, rotate_y])\n', (10767, 10787), False, 'import torch\n'), ((4130, 4146), 'json.dumps', 'json.dumps', (['item'], {}), '(item)\n', (4140, 4146), False, 'import json\n'), ((4158, 4176), 'json.dumps', 'json.dumps', (['[i, j]'], {}), '([i, j])\n', (4168, 4176), False, 'import json\n'), ((6069, 6094), 'torch.tensor', 'torch.tensor', (['[-1.0, 0.0]'], {}), '([-1.0, 0.0])\n', (6081, 6094), False, 'import torch\n'), ((7437, 7459), 'torch.cos', 'torch.cos', (['angle[t, j]'], {}), '(angle[t, j])\n', (7446, 7459), False, 'import torch\n'), ((7545, 7567), 'torch.sin', 'torch.sin', (['angle[t, j]'], {}), '(angle[t, j])\n', (7554, 7567), False, 'import torch\n'), ((7569, 7591), 'torch.cos', 'torch.cos', (['angle[t, j]'], {}), '(angle[t, j])\n', (7578, 7591), False, 'import torch\n'), ((7852, 7868), 'json.dumps', 'json.dumps', (['item'], {}), '(item)\n', (7862, 7868), False, 'import json\n'), ((7880, 7898), 'json.dumps', 'json.dumps', (['[i, j]'], {}), '([i, j])\n', (7890, 7898), False, 'import json\n'), ((9845, 9878), 'torch.tensor', 'torch.tensor', (['[-rotate_x, radius]'], {}), 
'([-rotate_x, radius])\n', (9857, 9878), False, 'import torch\n'), ((10264, 10298), 'torch.tensor', 'torch.tensor', (['[-radius, -rotate_y]'], {}), '([-radius, -rotate_y])\n', (10276, 10298), False, 'import torch\n'), ((10491, 10515), 'torch.tensor', 'torch.tensor', (['[1.0, 0.0]'], {}), '([1.0, 0.0])\n', (10503, 10515), False, 'import torch\n'), ((12658, 12688), 'torch.tensor', 'torch.tensor', (['[radius, radius]'], {}), '([radius, radius])\n', (12670, 12688), False, 'import torch\n'), ((12823, 12845), 'torch.cos', 'torch.cos', (['angle[t, i]'], {}), '(angle[t, i])\n', (12832, 12845), False, 'import torch\n'), ((12931, 12953), 'torch.sin', 'torch.sin', (['angle[t, i]'], {}), '(angle[t, i])\n', (12940, 12953), False, 'import torch\n'), ((12955, 12977), 'torch.cos', 'torch.cos', (['angle[t, i]'], {}), '(angle[t, i])\n', (12964, 12977), False, 'import torch\n'), ((13112, 13134), 'torch.cos', 'torch.cos', (['angle[t, i]'], {}), '(angle[t, i])\n', (13121, 13134), False, 'import torch\n'), ((13223, 13245), 'torch.sin', 'torch.sin', (['angle[t, i]'], {}), '(angle[t, i])\n', (13232, 13245), False, 'import torch\n'), ((13247, 13269), 'torch.cos', 'torch.cos', (['angle[t, i]'], {}), '(angle[t, i])\n', (13256, 13269), False, 'import torch\n'), ((13537, 13553), 'json.dumps', 'json.dumps', (['item'], {}), '(item)\n', (13547, 13553), False, 'import json\n'), ((13565, 13583), 'json.dumps', 'json.dumps', (['[i, j]'], {}), '([i, j])\n', (13575, 13583), False, 'import json\n'), ((6355, 6407), 'torch.tensor', 'torch.tensor', (['[rotate_x - radius, rotate_y - radius]'], {}), '([rotate_x - radius, rotate_y - radius])\n', (6367, 6407), False, 'import torch\n'), ((7462, 7484), 'torch.sin', 'torch.sin', (['angle[t, j]'], {}), '(angle[t, j])\n', (7471, 7484), False, 'import torch\n'), ((10569, 10602), 'torch.tensor', 'torch.tensor', (['[radius, -rotate_y]'], {}), '([radius, -rotate_y])\n', (10581, 10602), False, 'import torch\n'), ((11115, 11141), 'torch.tensor', 'torch.tensor', (['[-1.0, 
-1.0]'], {}), '([-1.0, -1.0])\n', (11127, 11141), False, 'import torch\n'), ((12848, 12870), 'torch.sin', 'torch.sin', (['angle[t, i]'], {}), '(angle[t, i])\n', (12857, 12870), False, 'import torch\n'), ((13137, 13159), 'torch.sin', 'torch.sin', (['angle[t, i]'], {}), '(angle[t, i])\n', (13146, 13159), False, 'import torch\n'), ((6156, 6175), 'torch.abs', 'torch.abs', (['rotate_x'], {}), '(rotate_x)\n', (6165, 6175), False, 'import torch\n'), ((6194, 6213), 'torch.abs', 'torch.abs', (['rotate_y'], {}), '(rotate_y)\n', (6203, 6213), False, 'import torch\n'), ((6446, 6498), 'torch.tensor', 'torch.tensor', (['[rotate_x - radius, rotate_y - radius]'], {}), '([rotate_x - radius, rotate_y - radius])\n', (6458, 6498), False, 'import torch\n'), ((6622, 6674), 'torch.tensor', 'torch.tensor', (['[rotate_x + radius, rotate_y - radius]'], {}), '([rotate_x + radius, rotate_y - radius])\n', (6634, 6674), False, 'import torch\n'), ((10807, 10826), 'torch.abs', 'torch.abs', (['rotate_x'], {}), '(rotate_x)\n', (10816, 10826), False, 'import torch\n'), ((10845, 10864), 'torch.abs', 'torch.abs', (['rotate_y'], {}), '(rotate_y)\n', (10854, 10864), False, 'import torch\n'), ((11008, 11060), 'torch.tensor', 'torch.tensor', (['[rotate_x - radius, rotate_y - radius]'], {}), '([rotate_x - radius, rotate_y - radius])\n', (11020, 11060), False, 'import torch\n'), ((11180, 11232), 'torch.tensor', 'torch.tensor', (['[rotate_x - radius, rotate_y - radius]'], {}), '([rotate_x - radius, rotate_y - radius])\n', (11192, 11232), False, 'import torch\n'), ((11548, 11573), 'torch.tensor', 'torch.tensor', (['[1.0, -1.0]'], {}), '([1.0, -1.0])\n', (11560, 11573), False, 'import torch\n'), ((6713, 6765), 'torch.tensor', 'torch.tensor', (['[rotate_x + radius, rotate_y - radius]'], {}), '([rotate_x + radius, rotate_y - radius])\n', (6725, 6765), False, 'import torch\n'), ((6889, 6941), 'torch.tensor', 'torch.tensor', (['[rotate_x - radius, rotate_y + radius]'], {}), '([rotate_x - radius, rotate_y + 
radius])\n', (6901, 6941), False, 'import torch\n'), ((11441, 11493), 'torch.tensor', 'torch.tensor', (['[rotate_x + radius, rotate_y - radius]'], {}), '([rotate_x + radius, rotate_y - radius])\n', (11453, 11493), False, 'import torch\n'), ((11612, 11664), 'torch.tensor', 'torch.tensor', (['[rotate_x + radius, rotate_y - radius]'], {}), '([rotate_x + radius, rotate_y - radius])\n', (11624, 11664), False, 'import torch\n'), ((11981, 12006), 'torch.tensor', 'torch.tensor', (['[-1.0, 1.0]'], {}), '([-1.0, 1.0])\n', (11993, 12006), False, 'import torch\n'), ((6980, 7032), 'torch.tensor', 'torch.tensor', (['[rotate_x - radius, rotate_y + radius]'], {}), '([rotate_x - radius, rotate_y + radius])\n', (6992, 7032), False, 'import torch\n'), ((7157, 7209), 'torch.tensor', 'torch.tensor', (['[rotate_x + radius, rotate_y + radius]'], {}), '([rotate_x + radius, rotate_y + radius])\n', (7169, 7209), False, 'import torch\n'), ((11874, 11926), 'torch.tensor', 'torch.tensor', (['[rotate_x - radius, rotate_y + radius]'], {}), '([rotate_x - radius, rotate_y + radius])\n', (11886, 11926), False, 'import torch\n'), ((12045, 12097), 'torch.tensor', 'torch.tensor', (['[rotate_x - radius, rotate_y + radius]'], {}), '([rotate_x - radius, rotate_y + radius])\n', (12057, 12097), False, 'import torch\n'), ((12415, 12439), 'torch.tensor', 'torch.tensor', (['[1.0, 1.0]'], {}), '([1.0, 1.0])\n', (12427, 12439), False, 'import torch\n'), ((7248, 7300), 'torch.tensor', 'torch.tensor', (['[rotate_x + radius, rotate_y + radius]'], {}), '([rotate_x + radius, rotate_y + radius])\n', (7260, 7300), False, 'import torch\n'), ((12308, 12360), 'torch.tensor', 'torch.tensor', (['[rotate_x + radius, rotate_y + radius]'], {}), '([rotate_x + radius, rotate_y + radius])\n', (12320, 12360), False, 'import torch\n'), ((12478, 12530), 'torch.tensor', 'torch.tensor', (['[rotate_x + radius, rotate_y + radius]'], {}), '([rotate_x + radius, rotate_y + radius])\n', (12490, 12530), False, 'import torch\n')] |
# Copyright (c) MONAI Consortium
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from functools import partial
import numpy as np
import torch
from monai.metrics import MAEMetric, MSEMetric, PSNRMetric, RMSEMetric
from monai.utils import set_determinism
# define a numpy flatten function that only preserves batch dimension
def flatten(data):
    """Collapse every dimension except the leading batch axis.

    An array of shape ``(B, d1, d2, ...)`` becomes ``(B, d1*d2*...)``;
    a 1-D input of shape ``(B,)`` becomes ``(B, 1)``.
    """
    batch = data.shape[0]
    return np.reshape(data, (batch, -1))
# define metrics computation truth functions to check our monai metrics against
def msemetric_np(y_pred, y):
    """Reference mean-squared-error: global mean over all flattened elements."""
    pred_flat = np.reshape(y_pred, (y_pred.shape[0], -1))
    target_flat = np.reshape(y, (y.shape[0], -1))
    return np.mean((pred_flat - target_flat) ** 2)
def maemetric_np(y_pred, y):
    """Reference mean-absolute-error: global mean of |pred - target|."""
    pred_flat = np.reshape(y_pred, (y_pred.shape[0], -1))
    target_flat = np.reshape(y, (y.shape[0], -1))
    return np.mean(np.abs(pred_flat - target_flat))
def rmsemetric_np(y_pred, y):
    """Reference RMSE: per-sample MSE over axis 1, square-rooted, then averaged."""
    pred_flat = np.reshape(y_pred, (y_pred.shape[0], -1))
    target_flat = np.reshape(y, (y.shape[0], -1))
    per_sample_mse = np.mean((pred_flat - target_flat) ** 2, axis=1)
    return np.mean(np.sqrt(per_sample_mse))
def psnrmetric_np(max_val, y_pred, y):
    """Reference PSNR: mean over samples of 20*log10(max_val) - 10*log10(per-sample MSE)."""
    pred_flat = np.reshape(y_pred, (y_pred.shape[0], -1))
    target_flat = np.reshape(y, (y.shape[0], -1))
    mse = np.mean((pred_flat - target_flat) ** 2, axis=1)
    return np.mean(20 * np.log10(max_val) - 10 * np.log10(mse))
class TestRegressionMetrics(unittest.TestCase):
    """Tests for MONAI regression metrics (MSE, MAE, RMSE, PSNR).

    Every test sweeps the same grid of tensor shapes (batch size x spatial
    rank x base dimension) and checks output shapes, agreement with the
    numpy reference implementations defined above, and degenerate inputs.
    The shape grid and metric list were previously copy-pasted in each
    method; they are factored into shared helpers here.
    """

    # shared shape grid swept by every test below
    BATCH_DIMS = [1, 2, 4, 16]
    BASE_DIMS = [16, 32, 64]
    SPATIAL_DIMS = [2, 3, 4]

    def _iter_shapes(self):
        """Yield every tensor shape ``(batch,) + (base,) * (spatial - 1)`` in the grid.

        Iteration order (batch -> spatial -> base) matches the original nested
        loops so RNG consumption after ``set_determinism`` is unchanged.
        """
        for batch in self.BATCH_DIMS:
            for spatial in self.SPATIAL_DIMS:
                for base in self.BASE_DIMS:
                    yield (batch,) + (base,) * (spatial - 1)

    @staticmethod
    def _metric_factories():
        """Return the regression-metric constructors under test."""
        return [MSEMetric, MAEMetric, RMSEMetric, partial(PSNRMetric, max_val=1.0)]

    def test_shape_reduction(self):
        """Check aggregate() output shapes for each reduction mode."""
        set_determinism(seed=123)
        device = "cuda" if torch.cuda.is_available() else "cpu"

        for shape in self._iter_shapes():
            batch = shape[0]
            in_tensor = torch.rand(shape).to(device)
            for mt_fn in self._metric_factories():
                # "mean"/"sum" collapse everything; "*_channel" keeps the batch axis
                mt = mt_fn(reduction="mean")
                mt(in_tensor, in_tensor)
                out_tensor = mt.aggregate()
                self.assertTrue(len(out_tensor.shape) == 1)

                mt = mt_fn(reduction="sum")
                mt(in_tensor, in_tensor)
                out_tensor = mt.aggregate()
                self.assertTrue(len(out_tensor.shape) == 0)

                mt = mt_fn(reduction="mean_channel")
                mt(in_tensor, in_tensor)
                out_tensor = mt.aggregate()
                self.assertTrue(len(out_tensor.shape) == 1 and out_tensor.shape[0] == batch)

                mt = mt_fn(reduction="sum_channel")
                mt(in_tensor, in_tensor)
                out_tensor = mt.aggregate()
                self.assertTrue(len(out_tensor.shape) == 1 and out_tensor.shape[0] == batch)

    def test_compare_numpy(self):
        """Check each metric against its numpy reference implementation."""
        set_determinism(seed=123)
        device = "cuda" if torch.cuda.is_available() else "cpu"
        metrics_np = [msemetric_np, maemetric_np, rmsemetric_np, partial(psnrmetric_np, max_val=1.0)]

        for shape in self._iter_shapes():
            in_tensor_a = torch.rand(shape).to(device)
            in_tensor_b = torch.rand(shape).to(device)
            for mt_fn, mt_fn_np in zip(self._metric_factories(), metrics_np):
                mt = mt_fn(reduction="mean")
                mt(y_pred=in_tensor_a, y=in_tensor_b)
                out_tensor = mt.aggregate()
                out_np = mt_fn_np(y_pred=in_tensor_a.cpu().numpy(), y=in_tensor_b.cpu().numpy())
                np.testing.assert_allclose(out_tensor.cpu().numpy(), out_np, atol=1e-4)

    def test_ill_shape(self):
        """Ill-shaped inputs must raise ValueError."""
        set_determinism(seed=123)
        device = "cuda" if torch.cuda.is_available() else "cpu"
        basedim = 10

        # 1-D input is too small: metrics need at least a (batch, channel) layout
        with self.assertRaises(ValueError):
            in_tensor = torch.rand((basedim,)).to(device)
            for mt_fn in self._metric_factories():
                mt_fn()(in_tensor, in_tensor)

        # prediction/target shape mismatch must also be rejected
        with self.assertRaises(ValueError):
            in_tensor_a = torch.rand((basedim,)).to(device)
            in_tensor_b = torch.rand((basedim, basedim)).to(device)
            for mt_fn in self._metric_factories():
                mt_fn()(y_pred=in_tensor_a, y=in_tensor_b)

    def test_same_input(self):
        """Identical inputs: zero error for MSE/MAE/RMSE, infinite PSNR."""
        set_determinism(seed=123)
        device = "cuda" if torch.cuda.is_available() else "cpu"
        results = [0.0, 0.0, 0.0, float("inf")]

        for shape in self._iter_shapes():
            in_tensor = torch.rand(shape).to(device)
            for mt_fn, rs in zip(self._metric_factories(), results):
                mt = mt_fn(reduction="mean")
                mt(in_tensor, in_tensor)
                out_tensor = mt.aggregate()
                np.testing.assert_allclose(out_tensor.cpu(), rs, atol=1e-4)

    def test_diff_input(self):
        """Zeros vs ones: unit error for MSE/MAE/RMSE, 0 dB PSNR (max_val=1)."""
        set_determinism(seed=123)
        device = "cuda" if torch.cuda.is_available() else "cpu"
        results = [1.0, 1.0, 1.0, 0.0]

        for shape in self._iter_shapes():
            in_tensor_a = torch.zeros(shape).to(device)
            in_tensor_b = torch.ones(shape).to(device)
            for mt_fn, rs in zip(self._metric_factories(), results):
                mt = mt_fn(reduction="mean")
                mt(in_tensor_a, in_tensor_b)
                out_tensor = mt.aggregate()
                np.testing.assert_allclose(out_tensor.cpu(), rs, atol=1e-4)
# Allow running this test module directly with the default unittest runner.
if __name__ == "__main__":
    unittest.main()
| [
"monai.utils.set_determinism",
"numpy.log10",
"numpy.reshape",
"torch.cuda.is_available",
"functools.partial",
"unittest.main",
"torch.zeros",
"torch.rand",
"torch.ones"
] | [((867, 904), 'numpy.reshape', 'np.reshape', (['data', '[data.shape[0], -1]'], {}), '(data, [data.shape[0], -1])\n', (877, 904), True, 'import numpy as np\n'), ((7970, 7985), 'unittest.main', 'unittest.main', ([], {}), '()\n', (7983, 7985), False, 'import unittest\n'), ((1536, 1561), 'monai.utils.set_determinism', 'set_determinism', ([], {'seed': '(123)'}), '(seed=123)\n', (1551, 1561), False, 'from monai.utils import set_determinism\n'), ((3401, 3426), 'monai.utils.set_determinism', 'set_determinism', ([], {'seed': '(123)'}), '(seed=123)\n', (3416, 3426), False, 'from monai.utils import set_determinism\n'), ((4860, 4885), 'monai.utils.set_determinism', 'set_determinism', ([], {'seed': '(123)'}), '(seed=123)\n', (4875, 4885), False, 'from monai.utils import set_determinism\n'), ((5687, 5712), 'monai.utils.set_determinism', 'set_determinism', ([], {'seed': '(123)'}), '(seed=123)\n', (5702, 5712), False, 'from monai.utils import set_determinism\n'), ((6787, 6812), 'monai.utils.set_determinism', 'set_determinism', ([], {'seed': '(123)'}), '(seed=123)\n', (6802, 6812), False, 'from monai.utils import set_determinism\n'), ((1589, 1614), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (1612, 1614), False, 'import torch\n'), ((1718, 1750), 'functools.partial', 'partial', (['PSNRMetric'], {'max_val': '(1.0)'}), '(PSNRMetric, max_val=1.0)\n', (1725, 1750), False, 'from functools import partial\n'), ((3454, 3479), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (3477, 3479), False, 'import torch\n'), ((3616, 3648), 'functools.partial', 'partial', (['PSNRMetric'], {'max_val': '(1.0)'}), '(PSNRMetric, max_val=1.0)\n', (3623, 3648), False, 'from functools import partial\n'), ((3715, 3750), 'functools.partial', 'partial', (['psnrmetric_np'], {'max_val': '(1.0)'}), '(psnrmetric_np, max_val=1.0)\n', (3722, 3750), False, 'from functools import partial\n'), ((4913, 4938), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], 
{}), '()\n', (4936, 4938), False, 'import torch\n'), ((5075, 5107), 'functools.partial', 'partial', (['PSNRMetric'], {'max_val': '(1.0)'}), '(PSNRMetric, max_val=1.0)\n', (5082, 5107), False, 'from functools import partial\n'), ((5740, 5765), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (5763, 5765), False, 'import torch\n'), ((5830, 5862), 'functools.partial', 'partial', (['PSNRMetric'], {'max_val': '(1.0)'}), '(PSNRMetric, max_val=1.0)\n', (5837, 5862), False, 'from functools import partial\n'), ((6840, 6865), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (6863, 6865), False, 'import torch\n'), ((6930, 6962), 'functools.partial', 'partial', (['PSNRMetric'], {'max_val': '(1.0)'}), '(PSNRMetric, max_val=1.0)\n', (6937, 6962), False, 'from functools import partial\n'), ((1402, 1419), 'numpy.log10', 'np.log10', (['max_val'], {}), '(max_val)\n', (1410, 1419), True, 'import numpy as np\n'), ((1427, 1440), 'numpy.log10', 'np.log10', (['mse'], {}), '(mse)\n', (1435, 1440), True, 'import numpy as np\n'), ((5225, 5247), 'torch.rand', 'torch.rand', (['(basedim,)'], {}), '((basedim,))\n', (5235, 5247), False, 'import torch\n'), ((5452, 5474), 'torch.rand', 'torch.rand', (['(basedim,)'], {}), '((basedim,))\n', (5462, 5474), False, 'import torch\n'), ((5512, 5542), 'torch.rand', 'torch.rand', (['(basedim, basedim)'], {}), '((basedim, basedim))\n', (5522, 5542), False, 'import torch\n'), ((2194, 2240), 'torch.rand', 'torch.rand', (['((batch,) + (base,) * (spatial - 1))'], {}), '((batch,) + (base,) * (spatial - 1))\n', (2204, 2240), False, 'import torch\n'), ((4196, 4242), 'torch.rand', 'torch.rand', (['((batch,) + (base,) * (spatial - 1))'], {}), '((batch,) + (base,) * (spatial - 1))\n', (4206, 4242), False, 'import torch\n'), ((4288, 4334), 'torch.rand', 'torch.rand', (['((batch,) + (base,) * (spatial - 1))'], {}), '((batch,) + (base,) * (spatial - 1))\n', (4298, 4334), False, 'import torch\n'), ((6354, 6400), 'torch.rand', 
'torch.rand', (['((batch,) + (base,) * (spatial - 1))'], {}), '((batch,) + (base,) * (spatial - 1))\n', (6364, 6400), False, 'import torch\n'), ((7447, 7494), 'torch.zeros', 'torch.zeros', (['((batch,) + (base,) * (spatial - 1))'], {}), '((batch,) + (base,) * (spatial - 1))\n', (7458, 7494), False, 'import torch\n'), ((7540, 7586), 'torch.ones', 'torch.ones', (['((batch,) + (base,) * (spatial - 1))'], {}), '((batch,) + (base,) * (spatial - 1))\n', (7550, 7586), False, 'import torch\n')] |
# Add parent folder to path
import sys, os
sys.path.insert(1, os.path.join(sys.path[0], '..'))
import unittest
import numpy as np
from numba import vectorize, njit, jit
from time import perf_counter
from src.Equations.Continuity import Continuity
from src.Common import computed_dtype
class test_vectorize(unittest.TestCase):
    """Micro-benchmarks comparing numba ``@vectorize`` ufuncs against ``@njit`` loops.

    The "tests" print timings rather than asserting performance; they mainly
    verify that both compilation paths run and produce results.
    """

    def test(self):
        """Time an element-wise multiply + reduction with both compilation styles."""
        x = np.arange(1e6)
        y = np.arange(1e6)

        # warm-up call so JIT compilation cost is excluded from the timing
        test_vectorize.sum(test_vectorize.vec(x, y))
        start = perf_counter()
        v = test_vectorize.sum(test_vectorize.vec(x, y))
        vTime = perf_counter() - start
        print(f'Vectorize: {vTime:f} [s]')

        test_vectorize.non_vec(x, y)
        start = perf_counter()
        f = test_vectorize.non_vec(x, y)
        nTime = perf_counter() - start
        print(f'njit: {nTime:f} [s]')

        print(f'Vec provides {nTime / vTime}x speed-up.')

    def test_continuity(self):
        """Time the vectorized continuity kernel against the project ``Continuity``."""
        m = np.arange(1e6)
        # NOTE: the original also built unused (N, 2) vij/dwij arrays via
        # np.vstack; they were never passed to any kernel, so they are removed.
        comp = np.zeros_like(m, dtype=computed_dtype)
        comp['m'] = m
        comp['vx'] = m
        comp['vy'] = m
        comp['dw_x'] = m
        comp['dw_y'] = m

        # warm-up both code paths so JIT compilation is excluded from timings
        test_vectorize.sum(test_vectorize.continuity_vec(m, m, m)) + test_vectorize.sum(test_vectorize.continuity_vec(m, m, m))
        Continuity(np.array([]), comp)

        start = perf_counter()
        v = test_vectorize.sum(test_vectorize.continuity_vec(m, m, m)) + test_vectorize.sum(test_vectorize.continuity_vec(m, m, m))
        vTime = perf_counter() - start
        print(f'Vectorize: {vTime:f} [s]')

        start = perf_counter()
        f = Continuity(np.array([]), comp)
        nTime = perf_counter() - start
        print(f'njit: {nTime:f} [s]')

        print(f'Vec provides {nTime / vTime}x speed-up.')

    @staticmethod
    @vectorize('float64(float64, float64, float64)', fastmath=True)
    def continuity_vec(m, vij, dwij):
        # element-wise m * (vij * dwij), compiled to a numpy ufunc
        dot = vij * dwij
        return m * dot

    @staticmethod
    @vectorize('float64(float64, float64)', fastmath=True)
    def vec(x, y):
        # element-wise product, compiled to a numpy ufunc
        return x * y

    @staticmethod
    @njit('float64(float64[:])', fastmath=True)
    def sum(m):
        # explicit loop reduction; accumulator renamed from "_" for clarity
        total = 0.0
        for i in range(len(m)):
            total += m[i]
        return total

    @staticmethod
    @njit('float64(float64[:], float64[:])', fastmath=True)
    def non_vec(x, y):
        # fused multiply + reduction in a single njit loop
        total = 0.0
        for j in range(len(x)):
            total += x[j] * y[j]
        return total
# Run the benchmarks directly (bypassing the unittest runner) when executed as a script.
if __name__ == "__main__":
    test_vectorize().test()
test_vectorize().test_continuity() | [
"numba.vectorize",
"numba.njit",
"os.path.join",
"time.perf_counter",
"numpy.array",
"numpy.vstack",
"numpy.zeros_like",
"numpy.arange"
] | [((62, 93), 'os.path.join', 'os.path.join', (['sys.path[0]', '""".."""'], {}), "(sys.path[0], '..')\n", (74, 93), False, 'import sys, os\n'), ((1856, 1918), 'numba.vectorize', 'vectorize', (['"""float64(float64, float64, float64)"""'], {'fastmath': '(True)'}), "('float64(float64, float64, float64)', fastmath=True)\n", (1865, 1918), False, 'from numba import vectorize, njit, jit\n'), ((2029, 2082), 'numba.vectorize', 'vectorize', (['"""float64(float64, float64)"""'], {'fastmath': '(True)'}), "('float64(float64, float64)', fastmath=True)\n", (2038, 2082), False, 'from numba import vectorize, njit, jit\n'), ((2147, 2189), 'numba.njit', 'njit', (['"""float64(float64[:])"""'], {'fastmath': '(True)'}), "('float64(float64[:])', fastmath=True)\n", (2151, 2189), False, 'from numba import vectorize, njit, jit\n'), ((2324, 2378), 'numba.njit', 'njit', (['"""float64(float64[:], float64[:])"""'], {'fastmath': '(True)'}), "('float64(float64[:], float64[:])', fastmath=True)\n", (2328, 2378), False, 'from numba import vectorize, njit, jit\n'), ((360, 380), 'numpy.arange', 'np.arange', (['(1000000.0)'], {}), '(1000000.0)\n', (369, 380), True, 'import numpy as np\n'), ((387, 407), 'numpy.arange', 'np.arange', (['(1000000.0)'], {}), '(1000000.0)\n', (396, 407), True, 'import numpy as np\n'), ((481, 495), 'time.perf_counter', 'perf_counter', ([], {}), '()\n', (493, 495), False, 'from time import perf_counter\n'), ((689, 703), 'time.perf_counter', 'perf_counter', ([], {}), '()\n', (701, 703), False, 'from time import perf_counter\n'), ((925, 945), 'numpy.arange', 'np.arange', (['(1000000.0)'], {}), '(1000000.0)\n', (934, 945), True, 'import numpy as np\n'), ((1050, 1088), 'numpy.zeros_like', 'np.zeros_like', (['m'], {'dtype': 'computed_dtype'}), '(m, dtype=computed_dtype)\n', (1063, 1088), True, 'import numpy as np\n'), ((1392, 1406), 'time.perf_counter', 'perf_counter', ([], {}), '()\n', (1404, 1406), False, 'from time import perf_counter\n'), ((1638, 1652), 'time.perf_counter', 
'perf_counter', ([], {}), '()\n', (1650, 1652), False, 'from time import perf_counter\n'), ((569, 583), 'time.perf_counter', 'perf_counter', ([], {}), '()\n', (581, 583), False, 'from time import perf_counter\n'), ((761, 775), 'time.perf_counter', 'perf_counter', ([], {}), '()\n', (773, 775), False, 'from time import perf_counter\n'), ((968, 985), 'numpy.vstack', 'np.vstack', (['(m, m)'], {}), '((m, m))\n', (977, 985), True, 'import numpy as np\n'), ((1015, 1032), 'numpy.vstack', 'np.vstack', (['(m, m)'], {}), '((m, m))\n', (1024, 1032), True, 'import numpy as np\n'), ((1355, 1367), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (1363, 1367), True, 'import numpy as np\n'), ((1555, 1569), 'time.perf_counter', 'perf_counter', ([], {}), '()\n', (1567, 1569), False, 'from time import perf_counter\n'), ((1676, 1688), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (1684, 1688), True, 'import numpy as np\n'), ((1712, 1726), 'time.perf_counter', 'perf_counter', ([], {}), '()\n', (1724, 1726), False, 'from time import perf_counter\n')] |
# Copyright (c) MONAI Consortium
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import unittest
from unittest import skipUnless
import numpy as np
from numpy.testing import assert_array_equal
from parameterized import parameterized
from monai.data import PatchWSIDataset
from monai.data.wsi_reader import CuCIMWSIReader, OpenSlideWSIReader
from monai.utils import optional_import
from tests.utils import download_url_or_skip_test, testing_data_config
# Probe the optional whole-slide-image backends; each test class skips itself
# when its backend is absent.
cucim, has_cucim = optional_import("cucim")
# older cucim releases lack CuImage, so require the attribute explicitly
has_cucim = has_cucim and hasattr(cucim, "CuImage")
openslide, has_osl = optional_import("openslide")
imwrite, has_tiff = optional_import("tifffile", name="imwrite")
_, has_codec = optional_import("imagecodecs")
# tifffile needs imagecodecs to decode the compressed test image
has_tiff = has_tiff and has_codec

# Location of the downloadable test WSI and its on-disk cache path.
FILE_KEY = "wsi_img"
FILE_URL = testing_data_config("images", FILE_KEY, "url")
base_name, extension = os.path.basename(f"{FILE_URL}"), ".tiff"
FILE_PATH = os.path.join(os.path.dirname(__file__), "testing_data", "temp_" + base_name + extension)
# Each test case is [PatchWSIDataset kwargs, expected sample(s)].
# Expected "image" values are the CHW uint8 pixel(s) read from the test WSI.

# single 1x1 patch at level 0, patch_size given as a dataset-level kwarg
TEST_CASE_0 = [
    {"data": [{"image": FILE_PATH, "patch_location": [0, 0], "label": [1], "patch_level": 0}], "patch_size": (1, 1)},
    {"image": np.array([[[239]], [[239]], [[239]]], dtype=np.uint8), "label": np.array([1])},
]

# same patch read at pyramid level 1
TEST_CASE_0_L1 = [
    {"data": [{"image": FILE_PATH, "patch_location": [0, 0], "label": [1]}], "patch_size": (1, 1), "patch_level": 1},
    {"image": np.array([[[239]], [[239]], [[239]]], dtype=np.uint8), "label": np.array([1])},
]

# NOTE(review): this case is byte-identical to TEST_CASE_0_L1 ("patch_level": 1)
# despite the _L2 name — looks like a copy-paste; confirm whether level 2 was intended.
TEST_CASE_0_L2 = [
    {"data": [{"image": FILE_PATH, "patch_location": [0, 0], "label": [1]}], "patch_size": (1, 1), "patch_level": 1},
    {"image": np.array([[[239]], [[239]], [[239]]], dtype=np.uint8), "label": np.array([1])},
]

# patch_size supplied per-sample instead of at the dataset level
TEST_CASE_1 = [
    {"data": [{"image": FILE_PATH, "patch_location": [0, 0], "patch_size": 1, "label": [1]}]},
    {"image": np.array([[[239]], [[239]], [[239]]], dtype=np.uint8), "label": np.array([1])},
]

# scalar patch_size and explicit patch_level as dataset-level kwargs
TEST_CASE_2 = [
    {"data": [{"image": FILE_PATH, "patch_location": [0, 0], "label": [1]}], "patch_size": 1, "patch_level": 0},
    {"image": np.array([[[239]], [[239]], [[239]]], dtype=np.uint8), "label": np.array([1])},
]

# 2x2 (multi-class map) label instead of a scalar label
TEST_CASE_3 = [
    {"data": [{"image": FILE_PATH, "patch_location": [0, 0], "label": [[[0, 1], [1, 0]]]}], "patch_size": 1},
    {"image": np.array([[[239]], [[239]], [[239]]], dtype=np.uint8), "label": np.array([[[0, 1], [1, 0]]])},
]

# two samples in one dataset; expected output is a list of samples
TEST_CASE_4 = [
    {
        "data": [
            {"image": FILE_PATH, "patch_location": [0, 0], "label": [[[0, 1], [1, 0]]]},
            {"image": FILE_PATH, "patch_location": [0, 0], "label": [[[1, 0], [0, 0]]]},
        ],
        "patch_size": 1,
    },
    [
        {"image": np.array([[[239]], [[239]], [[239]]], dtype=np.uint8), "label": np.array([[[0, 1], [1, 0]]])},
        {"image": np.array([[[239]], [[239]], [[239]]], dtype=np.uint8), "label": np.array([[[1, 0], [0, 0]]])},
    ],
]

# per-sample patch_size/patch_level at two different locations (level-1 pixels differ)
TEST_CASE_5 = [
    {
        "data": [
            {
                "image": FILE_PATH,
                "patch_location": [0, 0],
                "label": [[[0, 1], [1, 0]]],
                "patch_size": 1,
                "patch_level": 1,
            },
            {
                "image": FILE_PATH,
                "patch_location": [100, 100],
                "label": [[[1, 0], [0, 0]]],
                "patch_size": 1,
                "patch_level": 1,
            },
        ]
    },
    [
        {"image": np.array([[[239]], [[239]], [[239]]], dtype=np.uint8), "label": np.array([[[0, 1], [1, 0]]])},
        {"image": np.array([[[243]], [[243]], [[243]]], dtype=np.uint8), "label": np.array([[[1, 0], [0, 0]]])},
    ],
]
@skipUnless(has_cucim or has_osl or has_tiff, "Requires cucim, openslide, or tifffile!")
def setUpModule():  # noqa: N802
    """Download the test whole-slide image (or skip the module) before any test runs."""
    download_url_or_skip_test(
        FILE_URL,
        FILE_PATH,
        hash_type=testing_data_config("images", FILE_KEY, "hash_type"),
        hash_val=testing_data_config("images", FILE_KEY, "hash_val"),
    )
class PatchWSIDatasetTests:
    """Namespace holding the backend-agnostic PatchWSIDataset test suite.

    Concrete subclasses of ``Tests`` set ``backend`` ("cucim"/"openslide")
    in ``setUpClass``; wrapping the suite in an outer class keeps unittest
    from collecting the abstract base itself.
    """

    class Tests(unittest.TestCase):
        backend = None

        def _assert_sample_matches(self, sample, expected):
            # Shared shape and value checks for one dataset sample.
            self.assertTupleEqual(sample["label"].shape, expected["label"].shape)
            self.assertTupleEqual(sample["image"].shape, expected["image"].shape)
            self.assertIsNone(assert_array_equal(sample["label"], expected["label"]))
            self.assertIsNone(assert_array_equal(sample["image"], expected["image"]))

        @parameterized.expand([TEST_CASE_0, TEST_CASE_0_L1, TEST_CASE_0_L2, TEST_CASE_1, TEST_CASE_2, TEST_CASE_3])
        def test_read_patches_str(self, input_parameters, expected):
            """Reader selected by its backend name (string)."""
            dataset = PatchWSIDataset(reader=self.backend, **input_parameters)
            self._assert_sample_matches(dataset[0], expected)

        @parameterized.expand([TEST_CASE_0, TEST_CASE_0_L1, TEST_CASE_0_L2, TEST_CASE_1, TEST_CASE_2, TEST_CASE_3])
        def test_read_patches_class(self, input_parameters, expected):
            """Reader selected by passing the reader class itself."""
            if self.backend == "openslide":
                reader = OpenSlideWSIReader
            elif self.backend == "cucim":
                reader = CuCIMWSIReader
            else:
                # f-prefix required so the backend name is interpolated.
                raise ValueError(f"Unsupported backend: {self.backend}")
            dataset = PatchWSIDataset(reader=reader, **input_parameters)
            self._assert_sample_matches(dataset[0], expected)

        @parameterized.expand([TEST_CASE_0, TEST_CASE_0_L1, TEST_CASE_0_L2, TEST_CASE_1, TEST_CASE_2, TEST_CASE_3])
        def test_read_patches_object(self, input_parameters, expected):
            """Reader selected by passing a pre-built reader instance."""
            level = input_parameters.get("patch_level", 0)
            if self.backend == "openslide":
                reader = OpenSlideWSIReader(level=level)
            elif self.backend == "cucim":
                reader = CuCIMWSIReader(level=level)
            else:
                raise ValueError(f"Unsupported backend: {self.backend}")
            dataset = PatchWSIDataset(reader=reader, **input_parameters)
            self._assert_sample_matches(dataset[0], expected)

        @parameterized.expand([TEST_CASE_4, TEST_CASE_5])
        def test_read_patches_str_multi(self, input_parameters, expected):
            """Datasets with more than one sample record."""
            dataset = PatchWSIDataset(reader=self.backend, **input_parameters)
            for i in range(len(dataset)):
                self._assert_sample_matches(dataset[i], expected[i])
@skipUnless(has_cucim, "Requires cucim")
class TestPatchWSIDatasetCuCIM(PatchWSIDatasetTests.Tests):
    # Runs the shared test suite against the cuCIM WSI reader backend.
    @classmethod
    def setUpClass(cls):
        cls.backend = "cucim"
@skipUnless(has_osl, "Requires openslide")
class TestPatchWSIDatasetOpenSlide(PatchWSIDatasetTests.Tests):
    # Runs the shared test suite against the OpenSlide WSI reader backend.
    @classmethod
    def setUpClass(cls):
        cls.backend = "openslide"
# Allow running this test module directly.
if __name__ == "__main__":
    unittest.main()
| [
"tests.utils.testing_data_config",
"parameterized.parameterized.expand",
"unittest.skipUnless",
"monai.data.PatchWSIDataset",
"os.path.dirname",
"monai.utils.optional_import",
"numpy.array",
"os.path.basename",
"tests.utils.download_url_or_skip_test",
"unittest.main",
"numpy.testing.assert_array... | [((977, 1001), 'monai.utils.optional_import', 'optional_import', (['"""cucim"""'], {}), "('cucim')\n", (992, 1001), False, 'from monai.utils import optional_import\n'), ((1075, 1103), 'monai.utils.optional_import', 'optional_import', (['"""openslide"""'], {}), "('openslide')\n", (1090, 1103), False, 'from monai.utils import optional_import\n'), ((1124, 1167), 'monai.utils.optional_import', 'optional_import', (['"""tifffile"""'], {'name': '"""imwrite"""'}), "('tifffile', name='imwrite')\n", (1139, 1167), False, 'from monai.utils import optional_import\n'), ((1183, 1213), 'monai.utils.optional_import', 'optional_import', (['"""imagecodecs"""'], {}), "('imagecodecs')\n", (1198, 1213), False, 'from monai.utils import optional_import\n'), ((1281, 1327), 'tests.utils.testing_data_config', 'testing_data_config', (['"""images"""', 'FILE_KEY', '"""url"""'], {}), "('images', FILE_KEY, 'url')\n", (1300, 1327), False, 'from tests.utils import download_url_or_skip_test, testing_data_config\n'), ((4110, 4201), 'unittest.skipUnless', 'skipUnless', (['(has_cucim or has_osl or has_tiff)', '"""Requires cucim, openslide, or tifffile!"""'], {}), "(has_cucim or has_osl or has_tiff,\n 'Requires cucim, openslide, or tifffile!')\n", (4120, 4201), False, 'from unittest import skipUnless\n'), ((7689, 7728), 'unittest.skipUnless', 'skipUnless', (['has_cucim', '"""Requires cucim"""'], {}), "(has_cucim, 'Requires cucim')\n", (7699, 7728), False, 'from unittest import skipUnless\n'), ((7864, 7905), 'unittest.skipUnless', 'skipUnless', (['has_osl', '"""Requires openslide"""'], {}), "(has_osl, 'Requires openslide')\n", (7874, 7905), False, 'from unittest import skipUnless\n'), ((1351, 1382), 'os.path.basename', 'os.path.basename', (['f"""{FILE_URL}"""'], {}), "(f'{FILE_URL}')\n", (1367, 1382), False, 'import os\n'), ((1417, 1442), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (1432, 1442), False, 'import os\n'), ((4247, 4299), 
'tests.utils.testing_data_config', 'testing_data_config', (['"""images"""', 'FILE_KEY', '"""hash_type"""'], {}), "('images', FILE_KEY, 'hash_type')\n", (4266, 4299), False, 'from tests.utils import download_url_or_skip_test, testing_data_config\n'), ((4315, 4366), 'tests.utils.testing_data_config', 'testing_data_config', (['"""images"""', 'FILE_KEY', '"""hash_val"""'], {}), "('images', FILE_KEY, 'hash_val')\n", (4334, 4366), False, 'from tests.utils import download_url_or_skip_test, testing_data_config\n'), ((4371, 4461), 'tests.utils.download_url_or_skip_test', 'download_url_or_skip_test', (['FILE_URL', 'FILE_PATH'], {'hash_type': 'hash_type', 'hash_val': 'hash_val'}), '(FILE_URL, FILE_PATH, hash_type=hash_type,\n hash_val=hash_val)\n', (4396, 4461), False, 'from tests.utils import download_url_or_skip_test, testing_data_config\n'), ((8079, 8094), 'unittest.main', 'unittest.main', ([], {}), '()\n', (8092, 8094), False, 'import unittest\n'), ((1642, 1695), 'numpy.array', 'np.array', (['[[[239]], [[239]], [[239]]]'], {'dtype': 'np.uint8'}), '([[[239]], [[239]], [[239]]], dtype=np.uint8)\n', (1650, 1695), True, 'import numpy as np\n'), ((1706, 1719), 'numpy.array', 'np.array', (['[1]'], {}), '([1])\n', (1714, 1719), True, 'import numpy as np\n'), ((1876, 1929), 'numpy.array', 'np.array', (['[[[239]], [[239]], [[239]]]'], {'dtype': 'np.uint8'}), '([[[239]], [[239]], [[239]]], dtype=np.uint8)\n', (1884, 1929), True, 'import numpy as np\n'), ((1940, 1953), 'numpy.array', 'np.array', (['[1]'], {}), '([1])\n', (1948, 1953), True, 'import numpy as np\n'), ((2110, 2163), 'numpy.array', 'np.array', (['[[[239]], [[239]], [[239]]]'], {'dtype': 'np.uint8'}), '([[[239]], [[239]], [[239]]], dtype=np.uint8)\n', (2118, 2163), True, 'import numpy as np\n'), ((2174, 2187), 'numpy.array', 'np.array', (['[1]'], {}), '([1])\n', (2182, 2187), True, 'import numpy as np\n'), ((2317, 2370), 'numpy.array', 'np.array', (['[[[239]], [[239]], [[239]]]'], {'dtype': 'np.uint8'}), '([[[239]], 
[[239]], [[239]]], dtype=np.uint8)\n', (2325, 2370), True, 'import numpy as np\n'), ((2381, 2394), 'numpy.array', 'np.array', (['[1]'], {}), '([1])\n', (2389, 2394), True, 'import numpy as np\n'), ((2543, 2596), 'numpy.array', 'np.array', (['[[[239]], [[239]], [[239]]]'], {'dtype': 'np.uint8'}), '([[[239]], [[239]], [[239]]], dtype=np.uint8)\n', (2551, 2596), True, 'import numpy as np\n'), ((2607, 2620), 'numpy.array', 'np.array', (['[1]'], {}), '([1])\n', (2615, 2620), True, 'import numpy as np\n'), ((2766, 2819), 'numpy.array', 'np.array', (['[[[239]], [[239]], [[239]]]'], {'dtype': 'np.uint8'}), '([[[239]], [[239]], [[239]]], dtype=np.uint8)\n', (2774, 2819), True, 'import numpy as np\n'), ((2830, 2858), 'numpy.array', 'np.array', (['[[[0, 1], [1, 0]]]'], {}), '([[[0, 1], [1, 0]]])\n', (2838, 2858), True, 'import numpy as np\n'), ((4557, 4667), 'parameterized.parameterized.expand', 'parameterized.expand', (['[TEST_CASE_0, TEST_CASE_0_L1, TEST_CASE_0_L2, TEST_CASE_1, TEST_CASE_2,\n TEST_CASE_3]'], {}), '([TEST_CASE_0, TEST_CASE_0_L1, TEST_CASE_0_L2,\n TEST_CASE_1, TEST_CASE_2, TEST_CASE_3])\n', (4577, 4667), False, 'from parameterized import parameterized\n'), ((5190, 5300), 'parameterized.parameterized.expand', 'parameterized.expand', (['[TEST_CASE_0, TEST_CASE_0_L1, TEST_CASE_0_L2, TEST_CASE_1, TEST_CASE_2,\n TEST_CASE_3]'], {}), '([TEST_CASE_0, TEST_CASE_0_L1, TEST_CASE_0_L2,\n TEST_CASE_1, TEST_CASE_2, TEST_CASE_3])\n', (5210, 5300), False, 'from parameterized import parameterized\n'), ((6079, 6189), 'parameterized.parameterized.expand', 'parameterized.expand', (['[TEST_CASE_0, TEST_CASE_0_L1, TEST_CASE_0_L2, TEST_CASE_1, TEST_CASE_2,\n TEST_CASE_3]'], {}), '([TEST_CASE_0, TEST_CASE_0_L1, TEST_CASE_0_L2,\n TEST_CASE_1, TEST_CASE_2, TEST_CASE_3])\n', (6099, 6189), False, 'from parameterized import parameterized\n'), ((7061, 7109), 'parameterized.parameterized.expand', 'parameterized.expand', (['[TEST_CASE_4, TEST_CASE_5]'], {}), '([TEST_CASE_4, 
TEST_CASE_5])\n', (7081, 7109), False, 'from parameterized import parameterized\n'), ((3149, 3202), 'numpy.array', 'np.array', (['[[[239]], [[239]], [[239]]]'], {'dtype': 'np.uint8'}), '([[[239]], [[239]], [[239]]], dtype=np.uint8)\n', (3157, 3202), True, 'import numpy as np\n'), ((3213, 3241), 'numpy.array', 'np.array', (['[[[0, 1], [1, 0]]]'], {}), '([[[0, 1], [1, 0]]])\n', (3221, 3241), True, 'import numpy as np\n'), ((3262, 3315), 'numpy.array', 'np.array', (['[[[239]], [[239]], [[239]]]'], {'dtype': 'np.uint8'}), '([[[239]], [[239]], [[239]]], dtype=np.uint8)\n', (3270, 3315), True, 'import numpy as np\n'), ((3326, 3354), 'numpy.array', 'np.array', (['[[[1, 0], [0, 0]]]'], {}), '([[[1, 0], [0, 0]]])\n', (3334, 3354), True, 'import numpy as np\n'), ((3890, 3943), 'numpy.array', 'np.array', (['[[[239]], [[239]], [[239]]]'], {'dtype': 'np.uint8'}), '([[[239]], [[239]], [[239]]], dtype=np.uint8)\n', (3898, 3943), True, 'import numpy as np\n'), ((3954, 3982), 'numpy.array', 'np.array', (['[[[0, 1], [1, 0]]]'], {}), '([[[0, 1], [1, 0]]])\n', (3962, 3982), True, 'import numpy as np\n'), ((4003, 4056), 'numpy.array', 'np.array', (['[[[243]], [[243]], [[243]]]'], {'dtype': 'np.uint8'}), '([[[243]], [[243]], [[243]]], dtype=np.uint8)\n', (4011, 4056), True, 'import numpy as np\n'), ((4067, 4095), 'numpy.array', 'np.array', (['[[[1, 0], [0, 0]]]'], {}), '([[[1, 0], [0, 0]]])\n', (4075, 4095), True, 'import numpy as np\n'), ((4755, 4811), 'monai.data.PatchWSIDataset', 'PatchWSIDataset', ([], {'reader': 'self.backend'}), '(reader=self.backend, **input_parameters)\n', (4770, 4811), False, 'from monai.data import PatchWSIDataset\n'), ((5650, 5700), 'monai.data.PatchWSIDataset', 'PatchWSIDataset', ([], {'reader': 'reader'}), '(reader=reader, **input_parameters)\n', (5665, 5700), False, 'from monai.data import PatchWSIDataset\n'), ((6632, 6682), 'monai.data.PatchWSIDataset', 'PatchWSIDataset', ([], {'reader': 'reader'}), '(reader=reader, **input_parameters)\n', (6647, 6682), 
False, 'from monai.data import PatchWSIDataset\n'), ((7207, 7263), 'monai.data.PatchWSIDataset', 'PatchWSIDataset', ([], {'reader': 'self.backend'}), '(reader=self.backend, **input_parameters)\n', (7222, 7263), False, 'from monai.data import PatchWSIDataset\n'), ((5038, 5092), 'numpy.testing.assert_array_equal', 'assert_array_equal', (["sample['label']", "expected['label']"], {}), "(sample['label'], expected['label'])\n", (5056, 5092), False, 'from numpy.testing import assert_array_equal\n'), ((5124, 5178), 'numpy.testing.assert_array_equal', 'assert_array_equal', (["sample['image']", "expected['image']"], {}), "(sample['image'], expected['image'])\n", (5142, 5178), False, 'from numpy.testing import assert_array_equal\n'), ((5927, 5981), 'numpy.testing.assert_array_equal', 'assert_array_equal', (["sample['label']", "expected['label']"], {}), "(sample['label'], expected['label'])\n", (5945, 5981), False, 'from numpy.testing import assert_array_equal\n'), ((6013, 6067), 'numpy.testing.assert_array_equal', 'assert_array_equal', (["sample['image']", "expected['image']"], {}), "(sample['image'], expected['image'])\n", (6031, 6067), False, 'from numpy.testing import assert_array_equal\n'), ((6909, 6963), 'numpy.testing.assert_array_equal', 'assert_array_equal', (["sample['label']", "expected['label']"], {}), "(sample['label'], expected['label'])\n", (6927, 6963), False, 'from numpy.testing import assert_array_equal\n'), ((6995, 7049), 'numpy.testing.assert_array_equal', 'assert_array_equal', (["sample['image']", "expected['image']"], {}), "(sample['image'], expected['image'])\n", (7013, 7049), False, 'from numpy.testing import assert_array_equal\n'), ((7526, 7587), 'numpy.testing.assert_array_equal', 'assert_array_equal', (["dataset[i]['label']", "expected[i]['label']"], {}), "(dataset[i]['label'], expected[i]['label'])\n", (7544, 7587), False, 'from numpy.testing import assert_array_equal\n'), ((7623, 7684), 'numpy.testing.assert_array_equal', 'assert_array_equal', 
(["dataset[i]['image']", "expected[i]['image']"], {}), "(dataset[i]['image'], expected[i]['image'])\n", (7641, 7684), False, 'from numpy.testing import assert_array_equal\n')] |
# -*- coding: utf-8 -*-
"""
@author: WZM
@time: 2021/1/2 17:52
@function: 测试模型精度
"""
from net.ouy_net import Network
import numpy as np
import torch
import os
def load_net(fname, net):
    """Load parameters from an HDF5 file into ``net`` in place.

    Args:
        fname: path to the ``.h5`` weight file; its keys mirror the keys
            of ``net.state_dict()``.
        net: torch module whose parameters are overwritten.
    """
    import h5py
    # Context manager guarantees the file handle is closed even if a key
    # is missing (the original left the file open).
    with h5py.File(fname, mode='r') as h5f:
        for k, v in net.state_dict().items():
            param = torch.from_numpy(np.asarray(h5f[k]))
            v.copy_(param)
def evaluate_model(trained_model, data_loader, index):
    """Load saved weights and evaluate the model on ``data_loader``.

    Args:
        trained_model: path to the ``.h5`` weight file.
        data_loader: iterable yielding batches
            ``(im_data, img_data, dem_data, labels)``.
        index: network index forwarded to ``Network``.

    Returns:
        Tuple of (accuracy, summed validation loss). Assumes the loader is
        non-empty (otherwise the accuracy division raises).
    """
    net = Network(index)
    load_net(trained_model, net)
    device = torch.device('cuda:0')
    if torch.cuda.is_available():
        net = net.to(device)
    net.eval()
    count = 0
    total = 0
    # Remove a stale label-result file left by a previous run, if any.
    lableresultpath = trained_model.replace(".h5", ".txt")
    if os.path.exists(lableresultpath):
        os.remove(lableresultpath)
    valid_loss = 0.0
    for blob in data_loader:
        im_data = blob[0]
        dem_data = blob[2]
        img_data = blob[1]
        gt_data = blob[3].reshape((blob[3].shape[0], 1))
        # NOTE(review): overrides the ``index`` argument on every batch,
        # mirroring evaluate_model1 — preserved; confirm it is intentional.
        index = 61
        # Inference only: disable autograd to save memory, consistent with
        # evaluate_model1. Does not change the computed outputs.
        with torch.no_grad():
            pre_label = net(im_data, dem_data, img_data, index, gt_data)
        pre_label = pre_label.data.cpu().numpy()
        valid_loss += net.loss.item()
        label = pre_label.argmax(axis=1).flatten()
        num = len(label)
        for i in range(0, num):
            if gt_data[i] == label[i]:
                count = count + 1
            total = total + 1
    return 1.0 * count / total, valid_loss
def evaluate_model1(net, data_loader, index):
    """Evaluate an in-memory model on ``data_loader``.

    Args:
        net: model already holding trained weights.
        data_loader: iterable yielding batches
            ``(im_data, img_data, dem_data, labels)``.
        index: network index (overridden to 61 per batch, matching
            ``evaluate_model``).

    Returns:
        Tuple of (accuracy, summed validation loss).
    """
    if torch.cuda.is_available():
        net = net.to(torch.device('cuda:0'))
    net.eval()
    correct, seen, valid_loss = 0, 0, 0.0
    for batch in data_loader:
        scene, patch, elevation = batch[0], batch[1], batch[2]
        truth = batch[3].reshape((batch[3].shape[0], 1))
        index = 61
        with torch.no_grad():
            scores = net(scene, elevation, patch, index, truth)
        scores = scores.data.cpu().numpy()
        valid_loss += net.loss.item()
        predictions = scores.argmax(axis=1).flatten()
        # Per-sample accuracy bookkeeping.
        for j in range(len(predictions)):
            if truth[j] == predictions[j]:
                correct += 1
            seen += 1
    return 1.0 * correct / seen, valid_loss
| [
"os.path.exists",
"numpy.asarray",
"h5py.File",
"os.remove",
"torch.cuda.is_available",
"net.ouy_net.Network",
"torch.no_grad",
"torch.device"
] | [((215, 241), 'h5py.File', 'h5py.File', (['fname'], {'mode': '"""r"""'}), "(fname, mode='r')\n", (224, 241), False, 'import h5py\n'), ((427, 441), 'net.ouy_net.Network', 'Network', (['index'], {}), '(index)\n', (434, 441), False, 'from net.ouy_net import Network\n'), ((489, 511), 'torch.device', 'torch.device', (['"""cuda:0"""'], {}), "('cuda:0')\n", (501, 511), False, 'import torch\n'), ((519, 544), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (542, 544), False, 'import torch\n'), ((685, 716), 'os.path.exists', 'os.path.exists', (['lableresultpath'], {}), '(lableresultpath)\n', (699, 716), False, 'import os\n'), ((1435, 1457), 'torch.device', 'torch.device', (['"""cuda:0"""'], {}), "('cuda:0')\n", (1447, 1457), False, 'import torch\n'), ((1465, 1490), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (1488, 1490), False, 'import torch\n'), ((726, 752), 'os.remove', 'os.remove', (['lableresultpath'], {}), '(lableresultpath)\n', (735, 752), False, 'import os\n'), ((317, 335), 'numpy.asarray', 'np.asarray', (['h5f[k]'], {}), '(h5f[k])\n', (327, 335), True, 'import numpy as np\n'), ((1925, 1940), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (1938, 1940), False, 'import torch\n')] |
import cv2
import numpy as np
from numpy.linalg import inv
class KalmanFilter:
    """Minimal linear Kalman filter.

    Model:
        X' = F X + B M      (state prediction, optional control term B M)
        Z  = H X            (observation)

    Attributes: ``X`` state estimate, ``P`` estimate covariance, ``F``
    transition matrix, ``B``/``M`` control matrix/vector, ``Q`` process
    noise covariance, ``Z`` measurement placeholder, ``H`` observation
    matrix, ``R`` measurement noise covariance.
    """

    def __init__(self, X, F, Q, Z, H, R, P, B=None, M=None):
        self.X = X
        self.P = P
        self.F = F
        # ``None`` sentinels instead of mutable ndarray defaults: a default
        # array would be a single object aliased across every instance.
        self.B = np.array([0]) if B is None else B
        self.M = np.array([0]) if M is None else M
        self.Q = Q
        self.Z = Z
        self.H = H
        self.R = R

    def predict(self):
        """Project the state and covariance one step ahead; return X."""
        self.X = self.F @ self.X + self.B @ self.M
        self.P = self.F @ self.P @ self.F.T + self.Q
        return self.X

    def correct(self, Z):
        """Fold measurement ``Z`` into the estimate; return the updated X."""
        K = self.P @ self.H.T @ inv(self.H @ self.P @ self.H.T + self.R)
        self.X += K @ (Z - self.H @ self.X)
        self.P = self.P - K @ self.H @ self.P
        return self.X
# Window title and the initial near-black 800x800 BGR canvas.
TITLE = "Kalman Filter"
frame = np.ones((800,800,3),np.uint8)
def mousemove(event, x, y, s, p):
    """Mouse callback: feed the cursor position into the Kalman filter and
    redraw the measured (green) and predicted (red) trajectories on
    the global ``frame``.
    """
    global frame, current_measurement, current_prediction, calculated, predicted
    current_measurement = np.array([[np.float32(x)], [np.float32(y)]])
    current_prediction = kalman.predict()
    # Extract plain Python scalars explicitly: ``np.float`` was removed in
    # NumPy 1.24, and size-1 array coordinates are rejected by modern cv2.
    cmx, cmy = float(current_measurement[0, 0]), float(current_measurement[1, 0])
    cpx, cpy = float(current_prediction[0, 0]), float(current_prediction[1, 0])
    frame = np.ones((800, 800, 3), np.uint8)
    cv2.putText(frame, "Measurement: ({:.1f}, {:.1f})".format(cmx, cmy),
                (30, 30), cv2.FONT_HERSHEY_DUPLEX, 0.8, (50, 150, 0))
    cv2.putText(frame, "Kalman: ({:.1f}, {:.1f})".format(cpx, cpy),
                (30, 60), cv2.FONT_HERSHEY_DUPLEX, 0.8, (0, 0, 255))
    # OpenCV drawing functions require integer pixel coordinates.
    cv2.circle(frame, (int(cmx), int(cmy)), 10, (50, 150, 0), -1)  # current measured point
    cv2.circle(frame, (int(cpx), int(cpy)), 10, (0, 0, 255), -1)  # current predicted point
    calculated.append(current_measurement)
    for z in range(len(calculated) - 1):
        p1 = (int(calculated[z][0, 0]), int(calculated[z][1, 0]))
        p2 = (int(calculated[z + 1][0, 0]), int(calculated[z + 1][1, 0]))
        cv2.line(frame, p1, p2, (50, 150, 0), 1)
    predicted.append(current_prediction)
    # ``predicted`` grows in lockstep with ``calculated``; iterate over its
    # own length rather than borrowing the other list's.
    for z in range(len(predicted) - 1):
        p1 = (int(predicted[z][0, 0]), int(predicted[z][1, 0]))
        p2 = (int(predicted[z + 1][0, 0]), int(predicted[z + 1][1, 0]))
        cv2.line(frame, p1, p2, (0, 0, 255), 1)
    kalman.correct(current_measurement)
    return
# Trajectory histories filled by the mouse callback.
calculated=[]
predicted=[]
cv2.namedWindow(TITLE)
cv2.setMouseCallback(TITLE, mousemove)
# Constant-velocity model: state is [x, y, delta_x, delta_y].
stateMatrix = np.zeros((4, 1), np.float32)  # [x, y, delta_x, delta_y]
estimateCovariance = np.eye(stateMatrix.shape[0])
transitionMatrix = np.array([[1, 0, 1, 0],[0, 1, 0, 1], [0, 0, 1, 0], [0, 0, 0, 1]], np.float32)
processNoiseCov = np.array([[1,0,0,0],[0,1,0,0],[0,0,1,0],[0,0,0,1]], np.float32) * 0.001
# Only the (x, y) position is observed.
measurementStateMatrix = np.zeros((2, 1), np.float32)
observationMatrix = np.array([[1,0,0,0],[0,1,0,0]],np.float32)
measurementNoiseCov = np.array([[1,0],[0,1]], np.float32) * 1
kalman = KalmanFilter(X=stateMatrix,
                      P=estimateCovariance,
                      F=transitionMatrix,
                      Q=processNoiseCov,
                      Z=measurementStateMatrix,
                      H=observationMatrix,
                      R=measurementNoiseCov)
# Event loop: redraw until 'q' is pressed.
while True:
    cv2.imshow(TITLE,frame)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
| [
"cv2.setMouseCallback",
"numpy.eye",
"numpy.float",
"numpy.ones",
"numpy.float32",
"cv2.line",
"cv2.imshow",
"numpy.array",
"numpy.zeros",
"cv2.circle",
"numpy.linalg.inv",
"cv2.waitKey",
"cv2.namedWindow"
] | [((763, 795), 'numpy.ones', 'np.ones', (['(800, 800, 3)', 'np.uint8'], {}), '((800, 800, 3), np.uint8)\n', (770, 795), True, 'import numpy as np\n'), ((2215, 2237), 'cv2.namedWindow', 'cv2.namedWindow', (['TITLE'], {}), '(TITLE)\n', (2230, 2237), False, 'import cv2\n'), ((2238, 2276), 'cv2.setMouseCallback', 'cv2.setMouseCallback', (['TITLE', 'mousemove'], {}), '(TITLE, mousemove)\n', (2258, 2276), False, 'import cv2\n'), ((2292, 2320), 'numpy.zeros', 'np.zeros', (['(4, 1)', 'np.float32'], {}), '((4, 1), np.float32)\n', (2300, 2320), True, 'import numpy as np\n'), ((2370, 2398), 'numpy.eye', 'np.eye', (['stateMatrix.shape[0]'], {}), '(stateMatrix.shape[0])\n', (2376, 2398), True, 'import numpy as np\n'), ((2418, 2496), 'numpy.array', 'np.array', (['[[1, 0, 1, 0], [0, 1, 0, 1], [0, 0, 1, 0], [0, 0, 0, 1]]', 'np.float32'], {}), '([[1, 0, 1, 0], [0, 1, 0, 1], [0, 0, 1, 0], [0, 0, 0, 1]], np.float32)\n', (2426, 2496), True, 'import numpy as np\n'), ((2611, 2639), 'numpy.zeros', 'np.zeros', (['(2, 1)', 'np.float32'], {}), '((2, 1), np.float32)\n', (2619, 2639), True, 'import numpy as np\n'), ((2660, 2710), 'numpy.array', 'np.array', (['[[1, 0, 0, 0], [0, 1, 0, 0]]', 'np.float32'], {}), '([[1, 0, 0, 0], [0, 1, 0, 0]], np.float32)\n', (2668, 2710), True, 'import numpy as np\n'), ((1157, 1189), 'numpy.ones', 'np.ones', (['(800, 800, 3)', 'np.uint8'], {}), '((800, 800, 3), np.uint8)\n', (1164, 1189), True, 'import numpy as np\n'), ((1511, 1562), 'cv2.circle', 'cv2.circle', (['frame', '(cmx, cmy)', '(10)', '(50, 150, 0)', '(-1)'], {}), '(frame, (cmx, cmy), 10, (50, 150, 0), -1)\n', (1521, 1562), False, 'import cv2\n'), ((1597, 1647), 'cv2.circle', 'cv2.circle', (['frame', '(cpx, cpy)', '(10)', '(0, 0, 255)', '(-1)'], {}), '(frame, (cpx, cpy), 10, (0, 0, 255), -1)\n', (1607, 1647), False, 'import cv2\n'), ((2514, 2592), 'numpy.array', 'np.array', (['[[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0], [0, 0, 0, 1]]', 'np.float32'], {}), '([[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0], 
[0, 0, 0, 1]], np.float32)\n', (2522, 2592), True, 'import numpy as np\n'), ((2725, 2763), 'numpy.array', 'np.array', (['[[1, 0], [0, 1]]', 'np.float32'], {}), '([[1, 0], [0, 1]], np.float32)\n', (2733, 2763), True, 'import numpy as np\n'), ((3082, 3106), 'cv2.imshow', 'cv2.imshow', (['TITLE', 'frame'], {}), '(TITLE, frame)\n', (3092, 3106), False, 'import cv2\n'), ((127, 140), 'numpy.array', 'np.array', (['[0]'], {}), '([0])\n', (135, 140), True, 'import numpy as np\n'), ((144, 157), 'numpy.array', 'np.array', (['[0]'], {}), '([0])\n', (152, 157), True, 'import numpy as np\n'), ((1872, 1912), 'cv2.line', 'cv2.line', (['frame', 'p1', 'p2', '(50, 150, 0)', '(1)'], {}), '(frame, p1, p2, (50, 150, 0), 1)\n', (1880, 1912), False, 'import cv2\n'), ((2097, 2136), 'cv2.line', 'cv2.line', (['frame', 'p1', 'p2', '(0, 0, 255)', '(1)'], {}), '(frame, p1, p2, (0, 0, 255), 1)\n', (2105, 2136), False, 'import cv2\n'), ((575, 615), 'numpy.linalg.inv', 'inv', (['(self.H @ self.P @ self.H.T + self.R)'], {}), '(self.H @ self.P @ self.H.T + self.R)\n', (578, 615), False, 'from numpy.linalg import inv\n'), ((1249, 1262), 'numpy.float', 'np.float', (['cmx'], {}), '(cmx)\n', (1257, 1262), True, 'import numpy as np\n'), ((1264, 1277), 'numpy.float', 'np.float', (['cmy'], {}), '(cmy)\n', (1272, 1277), True, 'import numpy as np\n'), ((1407, 1420), 'numpy.float', 'np.float', (['cpx'], {}), '(cpx)\n', (1415, 1420), True, 'import numpy as np\n'), ((1422, 1435), 'numpy.float', 'np.float', (['cpy'], {}), '(cpy)\n', (1430, 1435), True, 'import numpy as np\n'), ((3113, 3127), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (3124, 3127), False, 'import cv2\n'), ((945, 958), 'numpy.float32', 'np.float32', (['x'], {}), '(x)\n', (955, 958), True, 'import numpy as np\n'), ((962, 975), 'numpy.float32', 'np.float32', (['y'], {}), '(y)\n', (972, 975), True, 'import numpy as np\n')] |
import os
import pickle as pkl
import numpy as np
import scipy.io as scio
import SimpleITK as sitk
from sklearn.preprocessing import normalize
from hyperg.utils import minmax_scale
from hyperg.utils import print_log
DATA_DIR = os.path.join(os.path.dirname(__file__), 'datasets')
def load_myocardium(test_idx=[4]):
    """Load the myocardium segmentation set and split it by image index.

    Args:
        test_idx: image indices reserved for the test split; every other
            image goes to the training split.
            NOTE(review): mutable default argument — not mutated here, but
            callers should avoid modifying it.

    Returns:
        (X_train, X_test, y_train, y_test): stacked arrays of scaled
        original slices and their segmentation masks.
    """
    heart_seg_dir = os.path.join(DATA_DIR, 'myocardiumSeg')
    ori = os.listdir(os.path.join(heart_seg_dir, 'ori'))
    X = []
    y = []
    for name in ori:
        ori_img = sitk.ReadImage(os.path.join(heart_seg_dir, "ori/{}".format(name)))
        # minmax_scale presumably maps intensities to [0, 1] (hyperg.utils
        # helper — confirm); squeeze drops a singleton axis.
        ori_ary = minmax_scale(sitk.GetArrayFromImage(ori_img).squeeze()) # (y, x)
        X.append(ori_ary)
        # Masks are assumed to share file names with the originals under seg/.
        seg_img = sitk.ReadImage(os.path.join(heart_seg_dir, "seg/{}".format(name)))
        seg_ary = sitk.GetArrayFromImage(seg_img).squeeze()
        y.append(seg_ary)
    X = np.stack(X)
    y = np.stack(y)
    training_idx = [i for i in range(X.shape[0]) if i not in test_idx]
    X_train = X[training_idx]
    X_test = X[test_idx]
    y_train = y[training_idx]
    y_test = y[test_idx]
    return X_train, X_test, y_train, y_test
def load_modelnet(selected_mod):
    """Load ModelNet40 features and labels for the selected modalities.

    Args:
        selected_mod: iterable of modality indices to keep.

    Returns:
        (X_train, X_test, y_train, y_test). When exactly one modality is
        selected, X_train/X_test are that modality's feature array rather
        than a one-element list.
    """
    print_log("selected mod:{}".format(str(selected_mod)))
    modelnet40_dir = os.path.join(DATA_DIR, "modelnet40")

    def _load_pkl(name):
        # Context manager closes the handle (the original leaked four
        # file objects via ``pkl.load(open(...))``).
        with open(os.path.join(modelnet40_dir, name), 'rb') as f:
            return pkl.load(f)

    X_train = _load_pkl('modelnet_train_fts.pkl')
    X_test = _load_pkl('modelnet_test_fts.pkl')
    y_train = _load_pkl('modelnet_train_lbls.pkl')
    y_test = _load_pkl('modelnet_test_lbls.pkl')
    X_train = [X_train[imod] for imod in selected_mod]
    X_test = [X_test[imod] for imod in selected_mod]
    if len(selected_mod) == 1:
        X_train = X_train[0]
        X_test = X_test[0]
    return X_train, X_test, np.array(y_train), np.array(y_test)
def load_MSRGesture3D(i_train=2, i_test=0):
    """Load the MSRGesture3D features and split via the stored train index.

    Args:
        i_train: column of the train-index table selecting the split.
        i_test: row of the train-index table selecting the split.

    Returns:
        (X_train, X_test, y_train, y_test); features are normalized and
        labels are shifted to start at 0.
    """
    msr_gesture_dir = os.path.join(DATA_DIR, "MSRGesture3D")
    data = scio.loadmat(os.path.join(msr_gesture_dir, 'MSRGesture3D.mat'))
    all_indices = scio.loadmat(os.path.join(msr_gesture_dir, 'MSRGesture3DTrainIndex.mat'))['trainIndex']
    i_indices = all_indices[i_test, i_train].reshape(-1)
    X = data['X']
    X = normalize(X)
    # ``np.int`` was removed in NumPy 1.24; the builtin ``int`` is the
    # documented replacement and yields the same default integer dtype.
    y = np.array(data['Y'], dtype=int).reshape(-1)
    y = y - np.min(y)
    X_train = X[i_indices == 1]
    X_test = X[i_indices == 0]
    y_train = y[i_indices == 1]
    y_test = y[i_indices == 0]
    return X_train, X_test, y_train, y_test
# Import-only module; nothing runs when executed directly.
if __name__ == "__main__":
    pass
| [
"os.path.join",
"SimpleITK.GetArrayFromImage",
"numpy.array",
"os.path.dirname",
"numpy.stack",
"numpy.min",
"sklearn.preprocessing.normalize"
] | [((243, 268), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (258, 268), False, 'import os\n'), ((339, 378), 'os.path.join', 'os.path.join', (['DATA_DIR', '"""myocardiumSeg"""'], {}), "(DATA_DIR, 'myocardiumSeg')\n", (351, 378), False, 'import os\n'), ((856, 867), 'numpy.stack', 'np.stack', (['X'], {}), '(X)\n', (864, 867), True, 'import numpy as np\n'), ((876, 887), 'numpy.stack', 'np.stack', (['y'], {}), '(y)\n', (884, 887), True, 'import numpy as np\n'), ((1231, 1267), 'os.path.join', 'os.path.join', (['DATA_DIR', '"""modelnet40"""'], {}), "(DATA_DIR, 'modelnet40')\n", (1243, 1267), False, 'import os\n'), ((1963, 2001), 'os.path.join', 'os.path.join', (['DATA_DIR', '"""MSRGesture3D"""'], {}), "(DATA_DIR, 'MSRGesture3D')\n", (1975, 2001), False, 'import os\n'), ((2267, 2279), 'sklearn.preprocessing.normalize', 'normalize', (['X'], {}), '(X)\n', (2276, 2279), False, 'from sklearn.preprocessing import normalize\n'), ((400, 434), 'os.path.join', 'os.path.join', (['heart_seg_dir', '"""ori"""'], {}), "(heart_seg_dir, 'ori')\n", (412, 434), False, 'import os\n'), ((1857, 1874), 'numpy.array', 'np.array', (['y_train'], {}), '(y_train)\n', (1865, 1874), True, 'import numpy as np\n'), ((1876, 1892), 'numpy.array', 'np.array', (['y_test'], {}), '(y_test)\n', (1884, 1892), True, 'import numpy as np\n'), ((2026, 2075), 'os.path.join', 'os.path.join', (['msr_gesture_dir', '"""MSRGesture3D.mat"""'], {}), "(msr_gesture_dir, 'MSRGesture3D.mat')\n", (2038, 2075), False, 'import os\n'), ((2346, 2355), 'numpy.min', 'np.min', (['y'], {}), '(y)\n', (2352, 2355), True, 'import numpy as np\n'), ((1296, 1350), 'os.path.join', 'os.path.join', (['modelnet40_dir', '"""modelnet_train_fts.pkl"""'], {}), "(modelnet40_dir, 'modelnet_train_fts.pkl')\n", (1308, 1350), False, 'import os\n'), ((1386, 1439), 'os.path.join', 'os.path.join', (['modelnet40_dir', '"""modelnet_test_fts.pkl"""'], {}), "(modelnet40_dir, 'modelnet_test_fts.pkl')\n", (1398, 1439), False, 
'import os\n'), ((1477, 1532), 'os.path.join', 'os.path.join', (['modelnet40_dir', '"""modelnet_train_lbls.pkl"""'], {}), "(modelnet40_dir, 'modelnet_train_lbls.pkl')\n", (1489, 1532), False, 'import os\n'), ((1568, 1622), 'os.path.join', 'os.path.join', (['modelnet40_dir', '"""modelnet_test_lbls.pkl"""'], {}), "(modelnet40_dir, 'modelnet_test_lbls.pkl')\n", (1580, 1622), False, 'import os\n'), ((2108, 2167), 'os.path.join', 'os.path.join', (['msr_gesture_dir', '"""MSRGesture3DTrainIndex.mat"""'], {}), "(msr_gesture_dir, 'MSRGesture3DTrainIndex.mat')\n", (2120, 2167), False, 'import os\n'), ((2288, 2321), 'numpy.array', 'np.array', (["data['Y']"], {'dtype': 'np.int'}), "(data['Y'], dtype=np.int)\n", (2296, 2321), True, 'import numpy as np\n'), ((779, 810), 'SimpleITK.GetArrayFromImage', 'sitk.GetArrayFromImage', (['seg_img'], {}), '(seg_img)\n', (801, 810), True, 'import SimpleITK as sitk\n'), ((597, 628), 'SimpleITK.GetArrayFromImage', 'sitk.GetArrayFromImage', (['ori_img'], {}), '(ori_img)\n', (619, 628), True, 'import SimpleITK as sitk\n')] |
# -*- coding: utf-8 -*-
# Copyright (c) 2020. Distributed under the terms of the MIT License.
from dataclasses import dataclass
from typing import List, Optional, Tuple
import numpy as np
from monty.json import MSONable
from pydefect.corrections.abstract_correction import Correction
@dataclass
class ExtendedFnvCorrection(Correction):
    """
    species: Species except for the defect. e.g., ["Mg", "Mg", ..., "O", ..]
    atomic_coords: Fractional coordinates except for the defect.
    pc_pot (list of float):
        List of point-charge potential from the defect for all the
        atomic sites.
    defect_region_radius (float):
        Maximum radius of a sphere touching to the lattice plane, used
        for defining the outside region of the defect.
    Add units of length and potential
    """
    charge: int
    point_charge_correction: float
    defect_region_radius: float
    sites: List["PotentialSite"]
    defect_coords: Tuple[float, float, float]
    additional_correction: float = 0.0

    @property
    def average_potential_diff(self):
        # Mean potential difference over the sites lying outside the
        # defect sphere.
        outside_diffs = [site.diff_pot for site in self.sites
                         if site.distance > self.defect_region_radius]
        return np.mean(outside_diffs)

    @property
    def alignment_correction(self) -> float:
        # Potential-alignment term: -q * <Delta V>.
        return -self.charge * self.average_potential_diff

    @property
    def correction_energy(self) -> float:
        # Total correction = point-charge + alignment + any extra term.
        total = self.point_charge_correction
        total += self.alignment_correction
        total += self.additional_correction
        return total
@dataclass
class PotentialSite(MSONable):
    """Potential data at one atomic site, relative to the defect.

    Holds the element symbol, the distance from the defect, the calculated
    potential, and the point-charge model potential (``None`` when not
    available).
    """
    specie: str
    distance: float
    potential: float
    pc_potential: Optional[float]

    @property
    def diff_pot(self):
        # Calculated minus point-charge potential; raises TypeError when
        # ``pc_potential`` is None.
        return self.potential - self.pc_potential
| [
"numpy.mean"
] | [((1083, 1171), 'numpy.mean', 'np.mean', (['[s.diff_pot for s in self.sites if s.distance > self.defect_region_radius]'], {}), '([s.diff_pot for s in self.sites if s.distance > self.\n defect_region_radius])\n', (1090, 1171), True, 'import numpy as np\n')] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.