code stringlengths 31 1.05M | apis list | extract_api stringlengths 97 1.91M |
|---|---|---|
#!/usr/bin/env python
# coding=utf-8
import io
import os
import logging
import tempfile
import unittest
import simplejson as json
from copy import deepcopy
from pathlib import Path
import numpy as np
import numpy.testing as npt
from ruamel.yaml import YAML
from ioos_qc.config import QcConfig
from ioos_qc.qartod import ClimatologyConfig
# Module-level logger for the ioos_qc package; replace any existing handlers
# so test output goes straight to the default stream handler.
L = logging.getLogger('ioos_qc')
L.setLevel(logging.INFO)
L.handlers = [logging.StreamHandler()]

# Single shared YAML parser; typ='safe' avoids arbitrary object construction.
yaml = YAML(typ='safe')
class ConfigLoadTest(unittest.TestCase):
    """Verify that QcConfig produces the same config dict from every supported
    input type: parsed dict, YAML string, JSON string, file path, pathlib.Path
    and in-memory streams."""

    def setUp(self):
        """Write a small qartod YAML config to a temp file and record the dict
        every loading path is expected to produce."""
        # the template deliberately mixes a flow list, a block sequence and a
        # YAML null so that all constructs are exercised by the round-trip
        template = """
        qartod:
            gross_range_test:
                suspect_span: [1, 11]
                fail_span:
                    - 0
                    - 12
            goober:
                foo: [1, null]
        """
        self.handle, self.yamlfile = tempfile.mkstemp(suffix='.yaml')
        with open(self.yamlfile, 'w') as f:
            f.write(template)
        # YAML null must come back as Python None
        self.expected_dict = {
            'qartod': {
                'gross_range_test': {
                    'suspect_span': [1, 11],
                    'fail_span': [0, 12],
                },
                'goober': {
                    'foo': [1, None]
                }
            }
        }

    def tearDown(self):
        """Remove the temporary YAML file created in setUp."""
        os.close(self.handle)
        os.remove(self.yamlfile)

    def test_load_yaml_dict_object(self):
        """Load from an already-parsed YAML dict object."""
        with open(self.yamlfile) as f:
            y = yaml.load(f.read())
            qc = QcConfig(y)
        assert qc.config == self.expected_dict

    def test_load_yaml_str(self):
        """Load from a raw YAML string."""
        with open(self.yamlfile) as f:
            qc = QcConfig(f.read())
        assert qc.config == self.expected_dict

    def test_load_json_str(self):
        """Load from a JSON string."""
        with open(self.yamlfile) as f:
            js = json.dumps(yaml.load(f.read()))
            qc = QcConfig(js)
        assert qc.config == self.expected_dict

    def test_load_yaml_file_path(self):
        """Load from a plain string file path."""
        qc = QcConfig(self.yamlfile)
        assert qc.config == self.expected_dict

    def test_load_yaml_path_object(self):
        """Load from a pathlib.Path object."""
        qc = QcConfig(Path(self.yamlfile))
        assert qc.config == self.expected_dict

    def test_load_json_stringio(self):
        """Load from an in-memory stream containing JSON."""
        st = io.StringIO()
        # NOTE(review): this first QcConfig is overwritten below and looks like
        # a leftover; kept as-is since this edit only adds documentation
        qc = QcConfig(self.yamlfile)
        with open(self.yamlfile, 'rt') as f:
            js = json.dumps(yaml.load(f.read()))
            st.write(js)
        qc = QcConfig(st)
        st.close()
        assert qc.config == self.expected_dict

    def test_load_yaml_stringio(self):
        """Load from an in-memory stream containing YAML."""
        st = io.StringIO()
        with open(self.yamlfile, 'rt') as f:
            st.write(f.read())
        qc = QcConfig(st)
        st.close()
        assert qc.config == self.expected_dict
class ConfigRunTest(unittest.TestCase):
    """Exercise QcConfig.run() with various argument/config combinations."""

    def setUp(self):
        self.config = {
            'qartod': {
                'gross_range_test': {
                    'suspect_span': [1, 11],
                    'fail_span': [0, 12],
                }
            }
        }

    def test_run(self):
        """A plain run flags out-of-span values and adds no aggregate result."""
        results = QcConfig(self.config).run(inp=list(range(13)))
        npt.assert_array_equal(
            results['qartod']['gross_range_test'],
            np.array([3, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 3])
        )
        assert 'aggregate' not in results['qartod']

    def test_run_with_agg(self):
        """Two tests in one config each produce their own result array."""
        config = {'qartod': {
            'gross_range_test': {
                'fail_span': [0, 12],
            },
            'spike_test': {
                'suspect_threshold': 3,
                'fail_threshold': 10,
            }
        }}
        samples = [-1, 0, 1, 2, 10, 3]
        results = QcConfig(config).run(inp=samples)
        npt.assert_array_equal(results['qartod']['gross_range_test'], np.array([4, 1, 1, 1, 1, 1]))
        npt.assert_array_equal(results['qartod']['spike_test'], np.array([2, 1, 1, 3, 3, 2]))

    def test_different_kwargs_run(self):
        """Extra kwargs (lat/lon) are routed to the tests that need them."""
        config = deepcopy(self.config)
        config['qartod']['location_test'] = {
            'bbox': [-100, -40, 100, 40]
        }
        lons = [-101, -100, -99, 0, 99, 100, 101]
        lats = [-41, -40, -39, 0, 39, 40, 41]
        results = QcConfig(config).run(inp=list(range(7)), lat=lats, lon=lons)
        npt.assert_array_equal(
            results['qartod']['gross_range_test'],
            np.array([3, 1, 1, 1, 1, 1, 1])
        )
        npt.assert_array_equal(
            results['qartod']['location_test'],
            np.array([4, 1, 1, 1, 1, 1, 4])
        )

    def test_with_values_in_config(self):
        """Inputs embedded in the config are used when run() receives none."""
        config = deepcopy(self.config)
        config['qartod']['location_test'] = {
            'bbox': [-100, -40, 100, 40],
            'lat': [-41, -40, -39, 0, 39, 40, 41],
            'lon': [-101, -100, -99, 0, 99, 100, 101],
        }
        config['qartod']['gross_range_test']['inp'] = list(range(7))
        results = QcConfig(config).run()
        npt.assert_array_equal(
            results['qartod']['gross_range_test'],
            np.array([3, 1, 1, 1, 1, 1, 1])
        )
        npt.assert_array_equal(
            results['qartod']['location_test'],
            np.array([4, 1, 1, 1, 1, 1, 4])
        )

    def test_with_empty_config(self):
        """A test whose config entry is None is skipped entirely."""
        self.config['qartod']['flat_line_test'] = None
        results = QcConfig(self.config).run(inp=list(range(13)))
        assert 'gross_range_test' in results['qartod']
        assert 'flat_line_test' not in results['qartod']
class ClimatologyConfigConversionTest(unittest.TestCase):
    # Verify that we can parse and convert configs into a ClimatologyConfig object

    def setUp(self):
        """Build one climatology definition three equivalent ways: explicit
        ClimatologyConfig.add() calls, a JSON-style dict, and a YAML file."""
        # Explicitly defined config
        self.cc = ClimatologyConfig()
        self.cc.add(
            tspan=(np.datetime64('2011-01'), np.datetime64('2011-07')),
            vspan=(10, 20)
        )
        self.cc.add(
            tspan=(np.datetime64('2011-07'), np.datetime64('2012-01')),
            vspan=(30, 40)
        )
        self.cc.add(
            tspan=(np.datetime64('2012-01'), np.datetime64('2013-01')),
            vspan=(50, 60),
            zspan=(0, 10)
        )
        # periodic span: tspan counts 'month' units rather than timestamps
        self.cc.add(
            tspan=(0, 2),
            vspan=(10, 20),
            period='month'
        )
        # JSON config, same definition as above
        self.json_config = {
            'qartod': {
                'climatology_test': {
                    'config': [
                        {
                            'vspan': (10, 20),
                            'tspan': (np.datetime64('2011-01'), np.datetime64('2011-07')),
                        },
                        {
                            'vspan': (30, 40),
                            'tspan': (np.datetime64('2011-07'), np.datetime64('2012-01')),
                        },
                        {
                            'vspan': (50, 60),
                            'zspan': (0, 10),
                            'tspan': (np.datetime64('2012-01'), np.datetime64('2013-01'))
                        },
                        {
                            'vspan': (10, 20),
                            'tspan': (0, 2),
                            'period': 'month'
                        }
                    ]
                }
            }
        }
        # YAML config, same definition as above
        template = """
        qartod:
            climatology_test:
                config:
                    - vspan: [10, 20]
                      tspan:
                        - !!timestamp 2011-01-01 00:00:00
                        - !!timestamp 2011-07-01 00:00:00
                    - vspan: [30, 40]
                      tspan:
                        - !!timestamp 2011-07-01
                        - !!timestamp 2012-01-01
                    - vspan: [50, 60]
                      zspan: [0, 10]
                      tspan:
                        - !!timestamp 2012-01-01
                        - !!timestamp 2013-01-01
                    - vspan: [10, 20]
                      tspan: [0, 2]
                      period: month
        """
        self.handle, self.yamlfile = tempfile.mkstemp(suffix='.yaml')
        with open(self.yamlfile, 'w') as f:
            f.write(template)

    def tearDown(self):
        """Remove the temporary YAML file created in setUp."""
        os.close(self.handle)
        os.remove(self.yamlfile)

    def test_climatology_config_yaml_conversion(self):
        """YAML-loaded config converts to the explicitly-built ClimatologyConfig."""
        qc = QcConfig(self.yamlfile)
        yaml_climatology_config = ClimatologyConfig.convert(qc.config['qartod']['climatology_test']['config'])
        self._assert_cc_configs_equal(self.cc, yaml_climatology_config)

    def test_climatology_json_conversion(self):
        """Dict-based config converts to the explicitly-built ClimatologyConfig."""
        qc = QcConfig(self.json_config)
        json_climatology_config = ClimatologyConfig.convert(qc.config['qartod']['climatology_test']['config'])
        self._assert_cc_configs_equal(self.cc, json_climatology_config)

    def _assert_cc_configs_equal(self, c1: ClimatologyConfig, c2: ClimatologyConfig):
        """Assert two ClimatologyConfig objects define identical members, in order."""
        assert len(c1.members) == len(c2.members)
        for idx in range(0, len(c1.members)):
            m1 = c1.members[idx]
            m2 = c2.members[idx]
            assert m1.tspan == m2.tspan, f"{idx} tspan did not match"
            assert m1.vspan == m2.vspan, f"{idx} vspan did not match"
            assert m1.zspan == m2.zspan, f"{idx} zspan did not match"
            assert m1.period == m2.period, f"{idx} period did not match"
| [
"logging.getLogger",
"logging.StreamHandler",
"ioos_qc.config.QcConfig",
"ioos_qc.qartod.ClimatologyConfig",
"ioos_qc.qartod.ClimatologyConfig.convert",
"pathlib.Path",
"os.close",
"ruamel.yaml.YAML",
"numpy.array",
"numpy.datetime64",
"copy.deepcopy",
"io.StringIO",
"tempfile.mkstemp",
"n... | [((346, 374), 'logging.getLogger', 'logging.getLogger', (['"""ioos_qc"""'], {}), "('ioos_qc')\n", (363, 374), False, 'import logging\n'), ((447, 463), 'ruamel.yaml.YAML', 'YAML', ([], {'typ': '"""safe"""'}), "(typ='safe')\n", (451, 463), False, 'from ruamel.yaml import YAML\n'), ((414, 437), 'logging.StreamHandler', 'logging.StreamHandler', ([], {}), '()\n', (435, 437), False, 'import logging\n'), ((812, 844), 'tempfile.mkstemp', 'tempfile.mkstemp', ([], {'suffix': '""".yaml"""'}), "(suffix='.yaml')\n", (828, 844), False, 'import tempfile\n'), ((1259, 1280), 'os.close', 'os.close', (['self.handle'], {}), '(self.handle)\n', (1267, 1280), False, 'import os\n'), ((1289, 1313), 'os.remove', 'os.remove', (['self.yamlfile'], {}), '(self.yamlfile)\n', (1298, 1313), False, 'import os\n'), ((1801, 1813), 'ioos_qc.config.QcConfig', 'QcConfig', (['js'], {}), '(js)\n', (1809, 1813), False, 'from ioos_qc.config import QcConfig\n'), ((1915, 1938), 'ioos_qc.config.QcConfig', 'QcConfig', (['self.yamlfile'], {}), '(self.yamlfile)\n', (1923, 1938), False, 'from ioos_qc.config import QcConfig\n'), ((2172, 2185), 'io.StringIO', 'io.StringIO', ([], {}), '()\n', (2183, 2185), False, 'import io\n'), ((2199, 2222), 'ioos_qc.config.QcConfig', 'QcConfig', (['self.yamlfile'], {}), '(self.yamlfile)\n', (2207, 2222), False, 'from ioos_qc.config import QcConfig\n'), ((2355, 2367), 'ioos_qc.config.QcConfig', 'QcConfig', (['st'], {}), '(st)\n', (2363, 2367), False, 'from ioos_qc.config import QcConfig\n'), ((2487, 2500), 'io.StringIO', 'io.StringIO', ([], {}), '()\n', (2498, 2500), False, 'import io\n'), ((2590, 2602), 'ioos_qc.config.QcConfig', 'QcConfig', (['st'], {}), '(st)\n', (2598, 2602), False, 'from ioos_qc.config import QcConfig\n'), ((2986, 3007), 'ioos_qc.config.QcConfig', 'QcConfig', (['self.config'], {}), '(self.config)\n', (2994, 3007), False, 'from ioos_qc.config import QcConfig\n'), ((3090, 3139), 'numpy.array', 'np.array', (['[3, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 3]'], 
{}), '([3, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 3])\n', (3098, 3139), True, 'import numpy as np\n'), ((3148, 3213), 'numpy.testing.assert_array_equal', 'npt.assert_array_equal', (["r['qartod']['gross_range_test']", 'expected'], {}), "(r['qartod']['gross_range_test'], expected)\n", (3170, 3213), True, 'import numpy.testing as npt\n'), ((3341, 3473), 'ioos_qc.config.QcConfig', 'QcConfig', (["{'qartod': {'gross_range_test': {'fail_span': [0, 12]}, 'spike_test': {\n 'suspect_threshold': 3, 'fail_threshold': 10}}}"], {}), "({'qartod': {'gross_range_test': {'fail_span': [0, 12]},\n 'spike_test': {'suspect_threshold': 3, 'fail_threshold': 10}}})\n", (3349, 3473), False, 'from ioos_qc.config import QcConfig\n'), ((3648, 3676), 'numpy.array', 'np.array', (['[4, 1, 1, 1, 1, 1]'], {}), '([4, 1, 1, 1, 1, 1])\n', (3656, 3676), True, 'import numpy as np\n'), ((3702, 3730), 'numpy.array', 'np.array', (['[2, 1, 1, 3, 3, 2]'], {}), '([2, 1, 1, 3, 3, 2])\n', (3710, 3730), True, 'import numpy as np\n'), ((3791, 3868), 'numpy.testing.assert_array_equal', 'npt.assert_array_equal', (["r['qartod']['gross_range_test']", 'expected_gross_range'], {}), "(r['qartod']['gross_range_test'], expected_gross_range)\n", (3813, 3868), True, 'import numpy.testing as npt\n'), ((3877, 3942), 'numpy.testing.assert_array_equal', 'npt.assert_array_equal', (["r['qartod']['spike_test']", 'expected_spike'], {}), "(r['qartod']['spike_test'], expected_spike)\n", (3899, 3942), True, 'import numpy.testing as npt\n'), ((4003, 4024), 'copy.deepcopy', 'deepcopy', (['self.config'], {}), '(self.config)\n', (4011, 4024), False, 'from copy import deepcopy\n'), ((4236, 4252), 'ioos_qc.config.QcConfig', 'QcConfig', (['config'], {}), '(config)\n', (4244, 4252), False, 'from ioos_qc.config import QcConfig\n'), ((4380, 4411), 'numpy.array', 'np.array', (['[3, 1, 1, 1, 1, 1, 1]'], {}), '([3, 1, 1, 1, 1, 1, 1])\n', (4388, 4411), True, 'import numpy as np\n'), ((4420, 4491), 'numpy.testing.assert_array_equal', 
'npt.assert_array_equal', (["r['qartod']['gross_range_test']", 'range_expected'], {}), "(r['qartod']['gross_range_test'], range_expected)\n", (4442, 4491), True, 'import numpy.testing as npt\n'), ((4554, 4585), 'numpy.array', 'np.array', (['[4, 1, 1, 1, 1, 1, 4]'], {}), '([4, 1, 1, 1, 1, 1, 4])\n', (4562, 4585), True, 'import numpy as np\n'), ((4594, 4665), 'numpy.testing.assert_array_equal', 'npt.assert_array_equal', (["r['qartod']['location_test']", 'location_expected'], {}), "(r['qartod']['location_test'], location_expected)\n", (4616, 4665), True, 'import numpy.testing as npt\n'), ((4761, 4782), 'copy.deepcopy', 'deepcopy', (['self.config'], {}), '(self.config)\n', (4769, 4782), False, 'from copy import deepcopy\n'), ((5078, 5094), 'ioos_qc.config.QcConfig', 'QcConfig', (['config'], {}), '(config)\n', (5086, 5094), False, 'from ioos_qc.config import QcConfig\n'), ((5142, 5173), 'numpy.array', 'np.array', (['[3, 1, 1, 1, 1, 1, 1]'], {}), '([3, 1, 1, 1, 1, 1, 1])\n', (5150, 5173), True, 'import numpy as np\n'), ((5182, 5253), 'numpy.testing.assert_array_equal', 'npt.assert_array_equal', (["r['qartod']['gross_range_test']", 'range_expected'], {}), "(r['qartod']['gross_range_test'], range_expected)\n", (5204, 5253), True, 'import numpy.testing as npt\n'), ((5316, 5347), 'numpy.array', 'np.array', (['[4, 1, 1, 1, 1, 1, 4]'], {}), '([4, 1, 1, 1, 1, 1, 4])\n', (5324, 5347), True, 'import numpy as np\n'), ((5356, 5427), 'numpy.testing.assert_array_equal', 'npt.assert_array_equal', (["r['qartod']['location_test']", 'location_expected'], {}), "(r['qartod']['location_test'], location_expected)\n", (5378, 5427), True, 'import numpy.testing as npt\n'), ((5569, 5590), 'ioos_qc.config.QcConfig', 'QcConfig', (['self.config'], {}), '(self.config)\n', (5577, 5590), False, 'from ioos_qc.config import QcConfig\n'), ((5974, 5993), 'ioos_qc.qartod.ClimatologyConfig', 'ClimatologyConfig', ([], {}), '()\n', (5991, 5993), False, 'from ioos_qc.qartod import ClimatologyConfig\n'), 
((8418, 8450), 'tempfile.mkstemp', 'tempfile.mkstemp', ([], {'suffix': '""".yaml"""'}), "(suffix='.yaml')\n", (8434, 8450), False, 'import tempfile\n'), ((8558, 8579), 'os.close', 'os.close', (['self.handle'], {}), '(self.handle)\n', (8566, 8579), False, 'import os\n'), ((8588, 8612), 'os.remove', 'os.remove', (['self.yamlfile'], {}), '(self.yamlfile)\n', (8597, 8612), False, 'import os\n'), ((8682, 8705), 'ioos_qc.config.QcConfig', 'QcConfig', (['self.yamlfile'], {}), '(self.yamlfile)\n', (8690, 8705), False, 'from ioos_qc.config import QcConfig\n'), ((8740, 8816), 'ioos_qc.qartod.ClimatologyConfig.convert', 'ClimatologyConfig.convert', (["qc.config['qartod']['climatology_test']['config']"], {}), "(qc.config['qartod']['climatology_test']['config'])\n", (8765, 8816), False, 'from ioos_qc.qartod import ClimatologyConfig\n'), ((8951, 8977), 'ioos_qc.config.QcConfig', 'QcConfig', (['self.json_config'], {}), '(self.json_config)\n', (8959, 8977), False, 'from ioos_qc.config import QcConfig\n'), ((9012, 9088), 'ioos_qc.qartod.ClimatologyConfig.convert', 'ClimatologyConfig.convert', (["qc.config['qartod']['climatology_test']['config']"], {}), "(qc.config['qartod']['climatology_test']['config'])\n", (9037, 9088), False, 'from ioos_qc.qartod import ClimatologyConfig\n'), ((1449, 1460), 'ioos_qc.config.QcConfig', 'QcConfig', (['y'], {}), '(y)\n', (1457, 1460), False, 'from ioos_qc.config import QcConfig\n'), ((2051, 2070), 'pathlib.Path', 'Path', (['self.yamlfile'], {}), '(self.yamlfile)\n', (2055, 2070), False, 'from pathlib import Path\n'), ((6034, 6058), 'numpy.datetime64', 'np.datetime64', (['"""2011-01"""'], {}), "('2011-01')\n", (6047, 6058), True, 'import numpy as np\n'), ((6060, 6084), 'numpy.datetime64', 'np.datetime64', (['"""2011-07"""'], {}), "('2011-07')\n", (6073, 6084), True, 'import numpy as np\n'), ((6164, 6188), 'numpy.datetime64', 'np.datetime64', (['"""2011-07"""'], {}), "('2011-07')\n", (6177, 6188), True, 'import numpy as np\n'), ((6190, 6214), 
'numpy.datetime64', 'np.datetime64', (['"""2012-01"""'], {}), "('2012-01')\n", (6203, 6214), True, 'import numpy as np\n'), ((6294, 6318), 'numpy.datetime64', 'np.datetime64', (['"""2012-01"""'], {}), "('2012-01')\n", (6307, 6318), True, 'import numpy as np\n'), ((6320, 6344), 'numpy.datetime64', 'np.datetime64', (['"""2013-01"""'], {}), "('2013-01')\n", (6333, 6344), True, 'import numpy as np\n'), ((6806, 6830), 'numpy.datetime64', 'np.datetime64', (['"""2011-01"""'], {}), "('2011-01')\n", (6819, 6830), True, 'import numpy as np\n'), ((6832, 6856), 'numpy.datetime64', 'np.datetime64', (['"""2011-07"""'], {}), "('2011-07')\n", (6845, 6856), True, 'import numpy as np\n'), ((6997, 7021), 'numpy.datetime64', 'np.datetime64', (['"""2011-07"""'], {}), "('2011-07')\n", (7010, 7021), True, 'import numpy as np\n'), ((7023, 7047), 'numpy.datetime64', 'np.datetime64', (['"""2012-01"""'], {}), "('2012-01')\n", (7036, 7047), True, 'import numpy as np\n'), ((7234, 7258), 'numpy.datetime64', 'np.datetime64', (['"""2012-01"""'], {}), "('2012-01')\n", (7247, 7258), True, 'import numpy as np\n'), ((7260, 7284), 'numpy.datetime64', 'np.datetime64', (['"""2013-01"""'], {}), "('2013-01')\n", (7273, 7284), True, 'import numpy as np\n')] |
import numpy as np
class Perceptron:
    """A single neuron with the sigmoid activation function.

    Attributes:
      inputs: The number of inputs in the perceptron, not counting the bias.
      bias: The bias term. By default it's 1.0."""

    def __init__(self, inputs, bias=1.0):
        """Return a new Perceptron object with the specified number of inputs (+1 for the bias)."""
        # Random weights in [-1, 1); the extra slot is the bias weight.
        self.weights = (np.random.rand(inputs + 1) * 2) - 1
        self.bias = bias

    def run(self, x):
        """Run the perceptron. x is a python list with the input values."""
        # Append the bias as a constant extra input, then take the weighted sum.
        # (renamed from `sum`, which shadowed the builtin of the same name)
        weighted_sum = np.dot(np.append(x, self.bias), self.weights)
        return self.sigmoid(weighted_sum)

    def set_weights(self, w_init):
        """Set the weights. w_init is a python list with the weights
        (the last entry is the bias weight)."""
        self.weights = np.array(w_init)

    def sigmoid(self, x):
        """Evaluate the sigmoid function for the floating point input x."""
        return 1 / (1 + np.exp(-x))
class MultiLayerPerceptron:
    """A multilayer perceptron class that uses the Perceptron class above.

    Attributes:
      layers: A python list with the number of elements per layer.
      bias: The bias term. The same bias is used for all neurons.
      eta: The learning rate."""

    def __init__(self, layers, bias = 1.0, eta = 0.5):
        """Return a new MLP object with the specified parameters."""
        self.layers = np.array(layers,dtype=object)
        self.bias = bias
        self.eta = eta
        self.network = [] # The list of lists of neurons
        self.values = [] # The list of lists of output values
        self.d = [] # The list of lists of error terms (lowercase deltas)
        for i in range(len(self.layers)):
            self.values.append([])
            self.d.append([])
            self.network.append([])
            self.values[i] = [0.0 for j in range(self.layers[i])]
            self.d[i] = [0.0 for j in range(self.layers[i])]
            if i > 0: #network[0] is the input layer, so it has no neurons
                for j in range(self.layers[i]):
                    # each neuron takes every output of the previous layer
                    self.network[i].append(Perceptron(inputs = self.layers[i-1], bias = self.bias))
        # dtype=object because the per-layer sub-arrays have ragged lengths
        self.network = np.array([np.array(x) for x in self.network],dtype=object)
        self.values = np.array([np.array(x) for x in self.values],dtype=object)
        self.d = np.array([np.array(x) for x in self.d],dtype=object)

    def set_weights(self, w_init):
        """Set the weights.
        w_init is a list of lists with the weights for all but the input layer."""
        # w_init[0] maps onto network[1]: the input layer has no weights
        for i in range(len(w_init)):
            for j in range(len(w_init[i])):
                self.network[i+1][j].set_weights(w_init[i][j])

    def printWeights(self):
        """Print every neuron's weight vector, one line per neuron."""
        print()
        for i in range(1,len(self.network)):
            for j in range(self.layers[i]):
                print("Layer",i+1,"Neuron",j,self.network[i][j].weights)
        print()

    def run(self, x):
        """Feed a sample x into the MultiLayer Perceptron."""
        x = np.array(x,dtype=object)
        self.values[0] = x
        # forward pass: each layer's outputs feed the next layer's neurons
        for i in range(1,len(self.network)):
            for j in range(self.layers[i]):
                self.values[i][j] = self.network[i][j].run(self.values[i-1])
        return self.values[-1]

    def bp(self, x, y):
        """Run a single (x,y) pair with the backpropagation algorithm."""
        x = np.array(x,dtype=object)
        y = np.array(y,dtype=object)

        # Backpropagation Step by Step:

        # STEP 1: Feed a sample to the network
        outputs = self.run(x)

        # STEP 2: Calculate the MSE
        error = (y - outputs)
        MSE = sum( error ** 2) / self.layers[-1]

        # STEP 3: Calculate the output error terms
        # out * (1 - out) is the derivative of the sigmoid activation
        self.d[-1] = outputs * (1 - outputs) * (error)

        # STEP 4: Calculate the error term of each unit on each layer
        for i in reversed(range(1,len(self.network)-1)):
            for h in range(len(self.network[i])):
                fwd_error = 0.0
                # accumulate error propagated back from every unit in layer i+1
                for k in range(self.layers[i+1]):
                    fwd_error += self.network[i+1][k].weights[h] * self.d[i+1][k]
                self.d[i][h] = self.values[i][h] * (1-self.values[i][h]) * fwd_error

        # STEPS 5 & 6: Calculate the deltas and update the weights
        for i in range(1,len(self.network)):
            for j in range(self.layers[i]):
                for k in range(self.layers[i-1]+1):
                    # the last weight slot (k == layers[i-1]) is the bias weight
                    if k==self.layers[i-1]:
                        delta = self.eta * self.d[i][j] * self.bias
                    else:
                        delta = self.eta * self.d[i][j] * self.values[i-1][k]
                    self.network[i][j].weights[k] += delta
        return MSE
| [
"numpy.append",
"numpy.array",
"numpy.exp",
"numpy.random.rand"
] | [((807, 823), 'numpy.array', 'np.array', (['w_init'], {}), '(w_init)\n', (815, 823), True, 'import numpy as np\n'), ((1423, 1453), 'numpy.array', 'np.array', (['layers'], {'dtype': 'object'}), '(layers, dtype=object)\n', (1431, 1453), True, 'import numpy as np\n'), ((3057, 3082), 'numpy.array', 'np.array', (['x'], {'dtype': 'object'}), '(x, dtype=object)\n', (3065, 3082), True, 'import numpy as np\n'), ((3423, 3448), 'numpy.array', 'np.array', (['x'], {'dtype': 'object'}), '(x, dtype=object)\n', (3431, 3448), True, 'import numpy as np\n'), ((3460, 3485), 'numpy.array', 'np.array', (['y'], {'dtype': 'object'}), '(y, dtype=object)\n', (3468, 3485), True, 'import numpy as np\n'), ((605, 628), 'numpy.append', 'np.append', (['x', 'self.bias'], {}), '(x, self.bias)\n', (614, 628), True, 'import numpy as np\n'), ((425, 451), 'numpy.random.rand', 'np.random.rand', (['(inputs + 1)'], {}), '(inputs + 1)\n', (439, 451), True, 'import numpy as np\n'), ((947, 957), 'numpy.exp', 'np.exp', (['(-x)'], {}), '(-x)\n', (953, 957), True, 'import numpy as np\n'), ((2243, 2254), 'numpy.array', 'np.array', (['x'], {}), '(x)\n', (2251, 2254), True, 'import numpy as np\n'), ((2324, 2335), 'numpy.array', 'np.array', (['x'], {}), '(x)\n', (2332, 2335), True, 'import numpy as np\n'), ((2399, 2410), 'numpy.array', 'np.array', (['x'], {}), '(x)\n', (2407, 2410), True, 'import numpy as np\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2010 <NAME> <<EMAIL>>
# Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html
"""
Automated tests for checking transformation algorithms (the models package).
"""
import logging
import unittest
import os
import os.path
import tempfile
import numpy
import scipy.linalg
from gensim.corpora import mmcorpus, Dictionary
from gensim.models import lsimodel, ldamodel, tfidfmodel, rpmodel, logentropy_model, ldamallet
from gensim import matutils
module_path = os.path.dirname(__file__) # needed because sample data files are located in the same folder


def datapath(fname):
    """Return the full path of test data file `fname` (stored in the
    'test_data' folder next to this module)."""
    # replaces the previous lambda assignment (PEP 8 E731)
    return os.path.join(module_path, 'test_data', fname)


# set up vars used in testing ("Deerwester" from the web tutorial)
texts = [['human', 'interface', 'computer'],
 ['survey', 'user', 'computer', 'system', 'response', 'time'],
 ['eps', 'user', 'interface', 'system'],
 ['system', 'human', 'system', 'eps'],
 ['user', 'response', 'time'],
 ['trees'],
 ['graph', 'trees'],
 ['graph', 'minors', 'trees'],
 ['graph', 'minors', 'survey']]
dictionary = Dictionary(texts)
corpus = [dictionary.doc2bow(text) for text in texts]
def testfile():
    """Return the path of the temporary file models are persisted to during tests."""
    tmp_dir = tempfile.gettempdir()
    return os.path.join(tmp_dir, 'gensim_models.tst')
class TestLsiModel(unittest.TestCase):
    """Tests for the latent semantic indexing (LSI) transformation model."""

    def setUp(self):
        self.corpus = mmcorpus.MmCorpus(datapath('testcorpus.mm'))

    def testTransform(self):
        """Test lsi[vector] transformation."""
        # create the transformation model
        model = lsimodel.LsiModel(self.corpus, num_topics=2)

        # make sure the decomposition is enough accurate
        u, s, vt = scipy.linalg.svd(matutils.corpus2dense(self.corpus, self.corpus.num_terms), full_matrices=False)
        self.assertTrue(numpy.allclose(s[:2], model.projection.s)) # singular values must match

        # transform one document
        doc = list(self.corpus)[0]
        transformed = model[doc]
        vec = matutils.sparse2full(transformed, 2) # convert to dense vector, for easier equality tests
        expected = numpy.array([-0.6594664, 0.142115444]) # scaled LSI version
        # expected = numpy.array([-0.1973928, 0.05591352]) # non-scaled LSI version
        self.assertTrue(numpy.allclose(abs(vec), abs(expected))) # transformed entries must be equal up to sign

    def testCorpusTransform(self):
        """Test lsi[corpus] transformation."""
        model = lsimodel.LsiModel(self.corpus, num_topics=2)
        got = numpy.vstack(matutils.sparse2full(doc, 2) for doc in model[corpus])
        # reference projection of all nine documents onto the two topics
        expected = numpy.array([
            [ 0.65946639, 0.14211544],
            [ 2.02454305, -0.42088759],
            [ 1.54655361, 0.32358921],
            [ 1.81114125, 0.5890525 ],
            [ 0.9336738 , -0.27138939],
            [ 0.01274618, -0.49016181],
            [ 0.04888203, -1.11294699],
            [ 0.08063836, -1.56345594],
            [ 0.27381003, -1.34694159]])
        self.assertTrue(numpy.allclose(abs(got), abs(expected))) # must equal up to sign

    def testOnlineTransform(self):
        """Incremental (online) training must converge to the same projection
        as decomposing the whole corpus at once."""
        corpus = list(self.corpus)
        doc = corpus[0] # use the corpus' first document for testing

        # create the transformation model
        model2 = lsimodel.LsiModel(corpus=corpus, num_topics=5) # compute everything at once
        model = lsimodel.LsiModel(corpus=None, id2word=model2.id2word, num_topics=5) # start with no documents, we will add them later

        # train model on a single document
        model.add_documents([corpus[0]])

        # transform the testing document with this partial transformation
        transformed = model[doc]
        vec = matutils.sparse2full(transformed, model.num_topics) # convert to dense vector, for easier equality tests
        expected = numpy.array([-1.73205078, 0.0, 0.0, 0.0, 0.0]) # scaled LSI version
        self.assertTrue(numpy.allclose(abs(vec), abs(expected), atol=1e-6)) # transformed entries must be equal up to sign

        # train on another 4 documents
        model.add_documents(corpus[1:5], chunksize=2) # train on 4 extra docs, in chunks of 2 documents, for the lols

        # transform a document with this partial transformation
        transformed = model[doc]
        vec = matutils.sparse2full(transformed, model.num_topics) # convert to dense vector, for easier equality tests
        expected = numpy.array([-0.66493785, -0.28314203, -1.56376302, 0.05488682, 0.17123269]) # scaled LSI version
        self.assertTrue(numpy.allclose(abs(vec), abs(expected), atol=1e-6)) # transformed entries must be equal up to sign

        # train on the rest of documents
        model.add_documents(corpus[5:])

        # make sure the final transformation is the same as if we had decomposed the whole corpus at once
        vec1 = matutils.sparse2full(model[doc], model.num_topics)
        vec2 = matutils.sparse2full(model2[doc], model2.num_topics)
        self.assertTrue(numpy.allclose(abs(vec1), abs(vec2), atol=1e-5)) # the two LSI representations must equal up to sign

    def testPersistence(self):
        """Save and reload the model; the projection must survive the round-trip."""
        model = lsimodel.LsiModel(self.corpus, num_topics=2)
        model.save(testfile())
        model2 = lsimodel.LsiModel.load(testfile())
        self.assertEqual(model.num_topics, model2.num_topics)
        self.assertTrue(numpy.allclose(model.projection.u, model2.projection.u))
        self.assertTrue(numpy.allclose(model.projection.s, model2.projection.s))
        tstvec = []
        self.assertTrue(numpy.allclose(model[tstvec], model2[tstvec])) # try projecting an empty vector

    def testLargeMmap(self):
        """Persist internal arrays to separate files and reload them, both
        normally and memory-mapped."""
        model = lsimodel.LsiModel(self.corpus, num_topics=2)

        # test storing the internal arrays into separate files
        model.save(testfile(), sep_limit=0)
        model2 = lsimodel.LsiModel.load(testfile())
        self.assertEqual(model.num_topics, model2.num_topics)
        self.assertTrue(numpy.allclose(model.projection.u, model2.projection.u))
        self.assertTrue(numpy.allclose(model.projection.s, model2.projection.s))
        tstvec = []
        self.assertTrue(numpy.allclose(model[tstvec], model2[tstvec])) # try projecting an empty vector

        # now load the external arrays via mmap
        model2 = lsimodel.LsiModel.load(testfile(), mmap='r')
        self.assertEqual(model.num_topics, model2.num_topics)
        self.assertTrue(numpy.allclose(model.projection.u, model2.projection.u))
        self.assertTrue(numpy.allclose(model.projection.s, model2.projection.s))
        tstvec = []
        self.assertTrue(numpy.allclose(model[tstvec], model2[tstvec])) # try projecting an empty vector
#endclass TestLsiModel
class TestRpModel(unittest.TestCase):
    """Tests for the random-projections transformation model."""

    def setUp(self):
        self.corpus = mmcorpus.MmCorpus(datapath('testcorpus.mm'))

    def testTransform(self):
        """A fixed-seed model projects the first document onto the expected vector."""
        numpy.random.seed(13) # HACK; set fixed seed so that we always get the same random matrix (and can compare against expected results)
        model = rpmodel.RpModel(self.corpus, num_topics=2)

        # apply the model to the first corpus document
        first_doc = list(self.corpus)[0]
        dense = matutils.sparse2full(model[first_doc], 2) # convert to dense vector, for easier equality tests
        expected = numpy.array([-0.70710677, 0.70710677])
        self.assertTrue(numpy.allclose(dense, expected)) # transformed entries must be equal up to sign

    def testPersistence(self):
        """Save and reload the model; the projection must survive the round-trip."""
        model = rpmodel.RpModel(self.corpus, num_topics=2)
        model.save(testfile())
        loaded = rpmodel.RpModel.load(testfile())
        self.assertEqual(model.num_topics, loaded.num_topics)
        self.assertTrue(numpy.allclose(model.projection, loaded.projection))
        empty_doc = []
        self.assertTrue(numpy.allclose(model[empty_doc], loaded[empty_doc])) # try projecting an empty vector
#endclass TestRpModel
class TestLdaModel(unittest.TestCase):
    """Tests for the latent Dirichlet allocation (LDA) transformation model."""

    def setUp(self):
        self.corpus = mmcorpus.MmCorpus(datapath('testcorpus.mm'))

    def testTransform(self):
        """Train LDA and check the topic mixture of the first document."""
        passed = False
        # sometimes, LDA training gets stuck at a local minimum
        # in that case try re-training the model from scratch, hoping for a
        # better random initialization
        for i in range(5): # restart at most 5 times
            # create the transformation model
            model = ldamodel.LdaModel(id2word=dictionary, num_topics=2, passes=100)
            model.update(corpus)

            # transform one document
            doc = list(corpus)[0]
            transformed = model[doc]
            vec = matutils.sparse2full(transformed, 2) # convert to dense vector, for easier equality tests
            expected = [0.13, 0.87]
            passed = numpy.allclose(sorted(vec), sorted(expected), atol=1e-2) # must contain the same values, up to re-ordering
            if passed:
                break
            logging.warning("LDA failed to converge on attempt %i (got %s, expected %s)" %
                            (i, sorted(vec), sorted(expected)))
        self.assertTrue(passed)

    def testTopicSeeding(self):
        """Boosting eta for one word must pull that word into the seeded topic."""
        passed = False
        for topic in range(2):
            # try seeding it both ways round, check you get the same
            # topics out but with which way round they are depending
            # on the way round they're seeded
            for i in range(5): # restart at most 5 times
                eta = numpy.ones((2, len(dictionary))) * 0.5
                system = dictionary.token2id[u'system']
                trees = dictionary.token2id[u'trees']
                # aggressively seed the word 'system', in one of the
                # two topics, 10 times higher than the other words
                eta[topic, system] *= 10
                model = ldamodel.LdaModel(id2word=dictionary, num_topics=2, passes=200, eta=eta)
                model.update(corpus)
                topics = [dict((word, p) for p, word in model.show_topic(j)) for j in range(2)]
                # check that the word system in the topic we seeded, got a high weight,
                # and the word 'trees' (the main word in the other topic) a low weight --
                # and vice versa for the other topic (which we didn't seed with 'system')
                result = [[topics[topic].get(u'system',0), topics[topic].get(u'trees',0)],
                          [topics[1-topic].get(u'system',0), topics[1-topic].get(u'trees',0)]]
                expected = [[0.385, 0.022],
                            [0.025, 0.157]]
                passed = numpy.allclose(result, expected, atol=1e-2)
                if passed:
                    break
                logging.warning("LDA failed to converge on attempt %i (got %s, expected %s)" %
                                (i, result, expected))
        self.assertTrue(passed)

    def testPersistence(self):
        """Save and reload the model; the topic matrix must survive the round-trip."""
        model = ldamodel.LdaModel(self.corpus, num_topics=2)
        model.save(testfile())
        model2 = ldamodel.LdaModel.load(testfile())
        self.assertEqual(model.num_topics, model2.num_topics)
        self.assertTrue(numpy.allclose(model.expElogbeta, model2.expElogbeta))
        tstvec = []
        self.assertTrue(numpy.allclose(model[tstvec], model2[tstvec])) # try projecting an empty vector

    def testLargeMmap(self):
        """Persist internal arrays to separate files and reload them, both
        normally and memory-mapped."""
        model = ldamodel.LdaModel(self.corpus, num_topics=2)

        # simulate storing large arrays separately
        model.save(testfile(), sep_limit=0)
        model2 = ldamodel.LdaModel.load(testfile())
        self.assertEqual(model.num_topics, model2.num_topics)
        self.assertTrue(numpy.allclose(model.expElogbeta, model2.expElogbeta))
        tstvec = []
        self.assertTrue(numpy.allclose(model[tstvec], model2[tstvec])) # try projecting an empty vector

        # test loading the large model arrays with mmap
        model2 = ldamodel.LdaModel.load(testfile(), mmap='r')
        self.assertEqual(model.num_topics, model2.num_topics)
        self.assertTrue(numpy.allclose(model.expElogbeta, model2.expElogbeta))
        tstvec = []
        self.assertTrue(numpy.allclose(model[tstvec], model2[tstvec])) # try projecting an empty vector
#endclass TestLdaModel
class TestLdaMallet(unittest.TestCase):
    """Tests for the Mallet-backed LDA wrapper (skipped when MALLET_HOME is unset)."""
    def setUp(self):
        self.corpus = mmcorpus.MmCorpus(datapath('testcorpus.mm'))
        # the Mallet binary is only usable if the user points MALLET_HOME at an install
        mallet_home = os.environ.get('MALLET_HOME', None)
        self.mallet_path = os.path.join(mallet_home, 'bin', 'mallet') if mallet_home else None
    def testTransform(self):
        """Transforming one document must yield the expected topic mixture."""
        if not self.mallet_path:
            return
        passed = False
        for i in range(5):  # training is stochastic; restart at most 5 times
            # create the transformation model
            model = ldamallet.LdaMallet(self.mallet_path, corpus, id2word=dictionary, num_topics=2, iterations=200)
            # transform one document
            doc = list(corpus)[0]
            transformed = model[doc]
            vec = matutils.sparse2full(transformed, 2)  # convert to dense vector, for easier equality tests
            expected = [0.49, 0.51]
            # must contain the same values, up to re-ordering
            passed = numpy.allclose(sorted(vec), sorted(expected), atol=1e-2)
            if passed:
                break
            logging.warning("LDA failed to converge on attempt %i (got %s, expected %s)" %
                            (i, sorted(vec), sorted(expected)))
        self.assertTrue(passed)
    def testPersistence(self):
        """A saved and reloaded model must be identical to the original."""
        if not self.mallet_path:
            return
        model = ldamallet.LdaMallet(self.mallet_path, self.corpus, num_topics=2, iterations=100)
        model.save(testfile())
        model2 = ldamallet.LdaMallet.load(testfile())
        self.assertEqual(model.num_topics, model2.num_topics)
        self.assertTrue(numpy.allclose(model.wordtopics, model2.wordtopics))
        tstvec = []
        self.assertTrue(numpy.allclose(model[tstvec], model2[tstvec]))  # try projecting an empty vector
    def testLargeMmap(self):
        """Large arrays stored separately must survive save/load, with and without mmap."""
        if not self.mallet_path:
            return
        model = ldamallet.LdaMallet(self.mallet_path, self.corpus, num_topics=2, iterations=100)
        # simulate storing large arrays separately
        model.save(testfile(), sep_limit=0)
        # fixed: load through LdaMallet, not LdaModel (was a copy-paste from TestLdaModel;
        # SaveLoad.load unpickles the stored class either way, but the intent is clearer)
        model2 = ldamallet.LdaMallet.load(testfile())
        self.assertEqual(model.num_topics, model2.num_topics)
        self.assertTrue(numpy.allclose(model.wordtopics, model2.wordtopics))
        tstvec = []
        self.assertTrue(numpy.allclose(model[tstvec], model2[tstvec]))  # try projecting an empty vector
        # test loading the large model arrays with mmap
        model2 = ldamallet.LdaMallet.load(testfile(), mmap='r')
        self.assertEqual(model.num_topics, model2.num_topics)
        self.assertTrue(numpy.allclose(model.wordtopics, model2.wordtopics))
        tstvec = []
        self.assertTrue(numpy.allclose(model[tstvec], model2[tstvec]))  # try projecting an empty vector
#endclass TestLdaMallet
class TestTfidfModel(unittest.TestCase):
    """Tests for the TF-IDF weighting model."""
    def setUp(self):
        self.corpus = mmcorpus.MmCorpus(datapath('testcorpus.mm'))
    def testTransform(self):
        """Check the weighted representation of a single document."""
        # create the transformation model
        model = tfidfmodel.TfidfModel(self.corpus, normalize=True)
        # transform one document
        first_doc = list(self.corpus)[0]
        weighted = model[first_doc]
        expected = [(0, 0.57735026918962573), (1, 0.57735026918962573), (2, 0.57735026918962573)]
        self.assertTrue(numpy.allclose(weighted, expected))
    def testInit(self):
        """Building from a corpus and from a dictionary must agree."""
        # create the transformation model by analyzing a corpus
        # uses the global `corpus`!
        from_corpus = tfidfmodel.TfidfModel(corpus)
        # make sure the dfs<->idfs transformation works
        self.assertEqual(from_corpus.dfs, dictionary.dfs)
        self.assertEqual(from_corpus.idfs, tfidfmodel.precompute_idfs(from_corpus.wglobal, dictionary.dfs, len(corpus)))
        # create the transformation model by directly supplying a term->docfreq
        # mapping from the global var `dictionary`.
        from_dict = tfidfmodel.TfidfModel(dictionary=dictionary)
        self.assertEqual(from_corpus.idfs, from_dict.idfs)
    def testPersistence(self):
        """A reloaded model must keep its idf table and projections."""
        model = tfidfmodel.TfidfModel(self.corpus, normalize=True)
        model.save(testfile())
        restored = tfidfmodel.TfidfModel.load(testfile())
        self.assertTrue(model.idfs == restored.idfs)
        # try projecting an empty vector
        empty_vec = []
        self.assertTrue(numpy.allclose(model[empty_vec], restored[empty_vec]))
#endclass TestTfidfModel
class TestLogEntropyModel(unittest.TestCase):
    """Tests for the log-entropy weighting model."""
    def setUp(self):
        self.corpus_small = mmcorpus.MmCorpus(datapath('test_corpus_small.mm'))
        self.corpus_ok = mmcorpus.MmCorpus(datapath('test_corpus_ok.mm'))
    def testTransform(self):
        """Check the weighted representation of a single document."""
        # create the transformation model
        model = logentropy_model.LogEntropyModel(self.corpus_ok, normalize=False)
        # transform one document
        first_doc = list(self.corpus_ok)[0]
        weighted = model[first_doc]
        expected = [(0, 0.3748900964125389),
                    (1, 0.30730215324230725),
                    (3, 1.20941755462856)]
        self.assertTrue(numpy.allclose(weighted, expected))
    def testPersistence(self):
        """A reloaded model must keep its entropy table and projections."""
        model = logentropy_model.LogEntropyModel(self.corpus_ok, normalize=True)
        model.save(testfile())
        restored = logentropy_model.LogEntropyModel.load(testfile())
        self.assertTrue(model.entr == restored.entr)
        # try projecting an empty vector
        empty_vec = []
        self.assertTrue(numpy.allclose(model[empty_vec], restored[empty_vec]))
#endclass TestLogEntropyModel
if __name__ == '__main__':
    # run the whole suite with timestamped DEBUG-level logging so the
    # stochastic-retry warnings above are visible in the test output
    logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.DEBUG)
    unittest.main()
| [
"gensim.matutils.sparse2full",
"gensim.models.rpmodel.RpModel",
"numpy.array",
"unittest.main",
"gensim.corpora.Dictionary",
"gensim.models.ldamodel.LdaModel",
"numpy.random.seed",
"numpy.allclose",
"gensim.matutils.corpus2dense",
"logging.warning",
"os.path.dirname",
"gensim.models.ldamallet.... | [((543, 568), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (558, 568), False, 'import os\n'), ((1103, 1120), 'gensim.corpora.Dictionary', 'Dictionary', (['texts'], {}), '(texts)\n', (1113, 1120), False, 'from gensim.corpora import mmcorpus, Dictionary\n'), ((660, 705), 'os.path.join', 'os.path.join', (['module_path', '"""test_data"""', 'fname'], {}), "(module_path, 'test_data', fname)\n", (672, 705), False, 'import os\n'), ((17619, 17715), 'logging.basicConfig', 'logging.basicConfig', ([], {'format': '"""%(asctime)s : %(levelname)s : %(message)s"""', 'level': 'logging.DEBUG'}), "(format='%(asctime)s : %(levelname)s : %(message)s',\n level=logging.DEBUG)\n", (17638, 17715), False, 'import logging\n'), ((17716, 17731), 'unittest.main', 'unittest.main', ([], {}), '()\n', (17729, 17731), False, 'import unittest\n'), ((1266, 1287), 'tempfile.gettempdir', 'tempfile.gettempdir', ([], {}), '()\n', (1285, 1287), False, 'import tempfile\n'), ((1574, 1618), 'gensim.models.lsimodel.LsiModel', 'lsimodel.LsiModel', (['self.corpus'], {'num_topics': '(2)'}), '(self.corpus, num_topics=2)\n', (1591, 1618), False, 'from gensim.models import lsimodel, ldamodel, tfidfmodel, rpmodel, logentropy_model, ldamallet\n'), ((2005, 2041), 'gensim.matutils.sparse2full', 'matutils.sparse2full', (['transformed', '(2)'], {}), '(transformed, 2)\n', (2025, 2041), False, 'from gensim import matutils\n'), ((2114, 2152), 'numpy.array', 'numpy.array', (['[-0.6594664, 0.142115444]'], {}), '([-0.6594664, 0.142115444])\n', (2125, 2152), False, 'import numpy\n'), ((2470, 2514), 'gensim.models.lsimodel.LsiModel', 'lsimodel.LsiModel', (['self.corpus'], {'num_topics': '(2)'}), '(self.corpus, num_topics=2)\n', (2487, 2514), False, 'from gensim.models import lsimodel, ldamodel, tfidfmodel, rpmodel, logentropy_model, ldamallet\n'), ((2616, 2882), 'numpy.array', 'numpy.array', (['[[0.65946639, 0.14211544], [2.02454305, -0.42088759], [1.54655361, \n 
0.32358921], [1.81114125, 0.5890525], [0.9336738, -0.27138939], [\n 0.01274618, -0.49016181], [0.04888203, -1.11294699], [0.08063836, -\n 1.56345594], [0.27381003, -1.34694159]]'], {}), '([[0.65946639, 0.14211544], [2.02454305, -0.42088759], [\n 1.54655361, 0.32358921], [1.81114125, 0.5890525], [0.9336738, -\n 0.27138939], [0.01274618, -0.49016181], [0.04888203, -1.11294699], [\n 0.08063836, -1.56345594], [0.27381003, -1.34694159]])\n', (2627, 2882), False, 'import numpy\n'), ((3281, 3327), 'gensim.models.lsimodel.LsiModel', 'lsimodel.LsiModel', ([], {'corpus': 'corpus', 'num_topics': '(5)'}), '(corpus=corpus, num_topics=5)\n', (3298, 3327), False, 'from gensim.models import lsimodel, ldamodel, tfidfmodel, rpmodel, logentropy_model, ldamallet\n'), ((3373, 3441), 'gensim.models.lsimodel.LsiModel', 'lsimodel.LsiModel', ([], {'corpus': 'None', 'id2word': 'model2.id2word', 'num_topics': '(5)'}), '(corpus=None, id2word=model2.id2word, num_topics=5)\n', (3390, 3441), False, 'from gensim.models import lsimodel, ldamodel, tfidfmodel, rpmodel, logentropy_model, ldamallet\n'), ((3699, 3750), 'gensim.matutils.sparse2full', 'matutils.sparse2full', (['transformed', 'model.num_topics'], {}), '(transformed, model.num_topics)\n', (3719, 3750), False, 'from gensim import matutils\n'), ((3823, 3869), 'numpy.array', 'numpy.array', (['[-1.73205078, 0.0, 0.0, 0.0, 0.0]'], {}), '([-1.73205078, 0.0, 0.0, 0.0, 0.0])\n', (3834, 3869), False, 'import numpy\n'), ((4284, 4335), 'gensim.matutils.sparse2full', 'matutils.sparse2full', (['transformed', 'model.num_topics'], {}), '(transformed, model.num_topics)\n', (4304, 4335), False, 'from gensim import matutils\n'), ((4408, 4484), 'numpy.array', 'numpy.array', (['[-0.66493785, -0.28314203, -1.56376302, 0.05488682, 0.17123269]'], {}), '([-0.66493785, -0.28314203, -1.56376302, 0.05488682, 0.17123269])\n', (4419, 4484), False, 'import numpy\n'), ((4833, 4883), 'gensim.matutils.sparse2full', 'matutils.sparse2full', (['model[doc]', 
'model.num_topics'], {}), '(model[doc], model.num_topics)\n', (4853, 4883), False, 'from gensim import matutils\n'), ((4899, 4951), 'gensim.matutils.sparse2full', 'matutils.sparse2full', (['model2[doc]', 'model2.num_topics'], {}), '(model2[doc], model2.num_topics)\n', (4919, 4951), False, 'from gensim import matutils\n'), ((5126, 5170), 'gensim.models.lsimodel.LsiModel', 'lsimodel.LsiModel', (['self.corpus'], {'num_topics': '(2)'}), '(self.corpus, num_topics=2)\n', (5143, 5170), False, 'from gensim.models import lsimodel, ldamodel, tfidfmodel, rpmodel, logentropy_model, ldamallet\n'), ((5648, 5692), 'gensim.models.lsimodel.LsiModel', 'lsimodel.LsiModel', (['self.corpus'], {'num_topics': '(2)'}), '(self.corpus, num_topics=2)\n', (5665, 5692), False, 'from gensim.models import lsimodel, ldamodel, tfidfmodel, rpmodel, logentropy_model, ldamallet\n'), ((6892, 6913), 'numpy.random.seed', 'numpy.random.seed', (['(13)'], {}), '(13)\n', (6909, 6913), False, 'import numpy\n'), ((7041, 7083), 'gensim.models.rpmodel.RpModel', 'rpmodel.RpModel', (['self.corpus'], {'num_topics': '(2)'}), '(self.corpus, num_topics=2)\n', (7056, 7083), False, 'from gensim.models import lsimodel, ldamodel, tfidfmodel, rpmodel, logentropy_model, ldamallet\n'), ((7200, 7236), 'gensim.matutils.sparse2full', 'matutils.sparse2full', (['transformed', '(2)'], {}), '(transformed, 2)\n', (7220, 7236), False, 'from gensim import matutils\n'), ((7310, 7348), 'numpy.array', 'numpy.array', (['[-0.70710677, 0.70710677]'], {}), '([-0.70710677, 0.70710677])\n', (7321, 7348), False, 'import numpy\n'), ((7500, 7542), 'gensim.models.rpmodel.RpModel', 'rpmodel.RpModel', (['self.corpus'], {'num_topics': '(2)'}), '(self.corpus, num_topics=2)\n', (7515, 7542), False, 'from gensim.models import lsimodel, ldamodel, tfidfmodel, rpmodel, logentropy_model, ldamallet\n'), ((10909, 10953), 'gensim.models.ldamodel.LdaModel', 'ldamodel.LdaModel', (['self.corpus'], {'num_topics': '(2)'}), '(self.corpus, num_topics=2)\n', (10926, 
10953), False, 'from gensim.models import lsimodel, ldamodel, tfidfmodel, rpmodel, logentropy_model, ldamallet\n'), ((11348, 11392), 'gensim.models.ldamodel.LdaModel', 'ldamodel.LdaModel', (['self.corpus'], {'num_topics': '(2)'}), '(self.corpus, num_topics=2)\n', (11365, 11392), False, 'from gensim.models import lsimodel, ldamodel, tfidfmodel, rpmodel, logentropy_model, ldamallet\n'), ((12366, 12401), 'os.environ.get', 'os.environ.get', (['"""MALLET_HOME"""', 'None'], {}), "('MALLET_HOME', None)\n", (12380, 12401), False, 'import os\n'), ((13532, 13617), 'gensim.models.ldamallet.LdaMallet', 'ldamallet.LdaMallet', (['self.mallet_path', 'self.corpus'], {'num_topics': '(2)', 'iterations': '(100)'}), '(self.mallet_path, self.corpus, num_topics=2, iterations=100\n )\n', (13551, 13617), False, 'from gensim.models import lsimodel, ldamodel, tfidfmodel, rpmodel, logentropy_model, ldamallet\n'), ((14059, 14144), 'gensim.models.ldamallet.LdaMallet', 'ldamallet.LdaMallet', (['self.mallet_path', 'self.corpus'], {'num_topics': '(2)', 'iterations': '(100)'}), '(self.mallet_path, self.corpus, num_topics=2, iterations=100\n )\n', (14078, 14144), False, 'from gensim.models import lsimodel, ldamodel, tfidfmodel, rpmodel, logentropy_model, ldamallet\n'), ((15177, 15227), 'gensim.models.tfidfmodel.TfidfModel', 'tfidfmodel.TfidfModel', (['self.corpus'], {'normalize': '(True)'}), '(self.corpus, normalize=True)\n', (15198, 15227), False, 'from gensim.models import lsimodel, ldamodel, tfidfmodel, rpmodel, logentropy_model, ldamallet\n'), ((15636, 15665), 'gensim.models.tfidfmodel.TfidfModel', 'tfidfmodel.TfidfModel', (['corpus'], {}), '(corpus)\n', (15657, 15665), False, 'from gensim.models import lsimodel, ldamodel, tfidfmodel, rpmodel, logentropy_model, ldamallet\n'), ((16037, 16081), 'gensim.models.tfidfmodel.TfidfModel', 'tfidfmodel.TfidfModel', ([], {'dictionary': 'dictionary'}), '(dictionary=dictionary)\n', (16058, 16081), False, 'from gensim.models import lsimodel, ldamodel, 
tfidfmodel, rpmodel, logentropy_model, ldamallet\n'), ((16182, 16232), 'gensim.models.tfidfmodel.TfidfModel', 'tfidfmodel.TfidfModel', (['self.corpus'], {'normalize': '(True)'}), '(self.corpus, normalize=True)\n', (16203, 16232), False, 'from gensim.models import lsimodel, ldamodel, tfidfmodel, rpmodel, logentropy_model, ldamallet\n'), ((16832, 16897), 'gensim.models.logentropy_model.LogEntropyModel', 'logentropy_model.LogEntropyModel', (['self.corpus_ok'], {'normalize': '(False)'}), '(self.corpus_ok, normalize=False)\n', (16864, 16897), False, 'from gensim.models import lsimodel, ldamodel, tfidfmodel, rpmodel, logentropy_model, ldamallet\n'), ((17250, 17314), 'gensim.models.logentropy_model.LogEntropyModel', 'logentropy_model.LogEntropyModel', (['self.corpus_ok'], {'normalize': '(True)'}), '(self.corpus_ok, normalize=True)\n', (17282, 17314), False, 'from gensim.models import lsimodel, ldamodel, tfidfmodel, rpmodel, logentropy_model, ldamallet\n'), ((1713, 1770), 'gensim.matutils.corpus2dense', 'matutils.corpus2dense', (['self.corpus', 'self.corpus.num_terms'], {}), '(self.corpus, self.corpus.num_terms)\n', (1734, 1770), False, 'from gensim import matutils\n'), ((1817, 1858), 'numpy.allclose', 'numpy.allclose', (['s[:2]', 'model.projection.s'], {}), '(s[:2], model.projection.s)\n', (1831, 1858), False, 'import numpy\n'), ((5340, 5395), 'numpy.allclose', 'numpy.allclose', (['model.projection.u', 'model2.projection.u'], {}), '(model.projection.u, model2.projection.u)\n', (5354, 5395), False, 'import numpy\n'), ((5421, 5476), 'numpy.allclose', 'numpy.allclose', (['model.projection.s', 'model2.projection.s'], {}), '(model.projection.s, model2.projection.s)\n', (5435, 5476), False, 'import numpy\n'), ((5522, 5567), 'numpy.allclose', 'numpy.allclose', (['model[tstvec]', 'model2[tstvec]'], {}), '(model[tstvec], model2[tstvec])\n', (5536, 5567), False, 'import numpy\n'), ((5940, 5995), 'numpy.allclose', 'numpy.allclose', (['model.projection.u', 'model2.projection.u'], 
{}), '(model.projection.u, model2.projection.u)\n', (5954, 5995), False, 'import numpy\n'), ((6021, 6076), 'numpy.allclose', 'numpy.allclose', (['model.projection.s', 'model2.projection.s'], {}), '(model.projection.s, model2.projection.s)\n', (6035, 6076), False, 'import numpy\n'), ((6122, 6167), 'numpy.allclose', 'numpy.allclose', (['model[tstvec]', 'model2[tstvec]'], {}), '(model[tstvec], model2[tstvec])\n', (6136, 6167), False, 'import numpy\n'), ((6399, 6454), 'numpy.allclose', 'numpy.allclose', (['model.projection.u', 'model2.projection.u'], {}), '(model.projection.u, model2.projection.u)\n', (6413, 6454), False, 'import numpy\n'), ((6480, 6535), 'numpy.allclose', 'numpy.allclose', (['model.projection.s', 'model2.projection.s'], {}), '(model.projection.s, model2.projection.s)\n', (6494, 6535), False, 'import numpy\n'), ((6581, 6626), 'numpy.allclose', 'numpy.allclose', (['model[tstvec]', 'model2[tstvec]'], {}), '(model[tstvec], model2[tstvec])\n', (6595, 6626), False, 'import numpy\n'), ((7373, 7402), 'numpy.allclose', 'numpy.allclose', (['vec', 'expected'], {}), '(vec, expected)\n', (7387, 7402), False, 'import numpy\n'), ((7710, 7761), 'numpy.allclose', 'numpy.allclose', (['model.projection', 'model2.projection'], {}), '(model.projection, model2.projection)\n', (7724, 7761), False, 'import numpy\n'), ((7807, 7852), 'numpy.allclose', 'numpy.allclose', (['model[tstvec]', 'model2[tstvec]'], {}), '(model[tstvec], model2[tstvec])\n', (7821, 7852), False, 'import numpy\n'), ((8389, 8452), 'gensim.models.ldamodel.LdaModel', 'ldamodel.LdaModel', ([], {'id2word': 'dictionary', 'num_topics': '(2)', 'passes': '(100)'}), '(id2word=dictionary, num_topics=2, passes=100)\n', (8406, 8452), False, 'from gensim.models import lsimodel, ldamodel, tfidfmodel, rpmodel, logentropy_model, ldamallet\n'), ((8614, 8650), 'gensim.matutils.sparse2full', 'matutils.sparse2full', (['transformed', '(2)'], {}), '(transformed, 2)\n', (8634, 8650), False, 'from gensim import matutils\n'), 
((11123, 11176), 'numpy.allclose', 'numpy.allclose', (['model.expElogbeta', 'model2.expElogbeta'], {}), '(model.expElogbeta, model2.expElogbeta)\n', (11137, 11176), False, 'import numpy\n'), ((11222, 11267), 'numpy.allclose', 'numpy.allclose', (['model[tstvec]', 'model2[tstvec]'], {}), '(model[tstvec], model2[tstvec])\n', (11236, 11267), False, 'import numpy\n'), ((11628, 11681), 'numpy.allclose', 'numpy.allclose', (['model.expElogbeta', 'model2.expElogbeta'], {}), '(model.expElogbeta, model2.expElogbeta)\n', (11642, 11681), False, 'import numpy\n'), ((11727, 11772), 'numpy.allclose', 'numpy.allclose', (['model[tstvec]', 'model2[tstvec]'], {}), '(model[tstvec], model2[tstvec])\n', (11741, 11772), False, 'import numpy\n'), ((12012, 12065), 'numpy.allclose', 'numpy.allclose', (['model.expElogbeta', 'model2.expElogbeta'], {}), '(model.expElogbeta, model2.expElogbeta)\n', (12026, 12065), False, 'import numpy\n'), ((12111, 12156), 'numpy.allclose', 'numpy.allclose', (['model[tstvec]', 'model2[tstvec]'], {}), '(model[tstvec], model2[tstvec])\n', (12125, 12156), False, 'import numpy\n'), ((12429, 12471), 'os.path.join', 'os.path.join', (['mallet_home', '"""bin"""', '"""mallet"""'], {}), "(mallet_home, 'bin', 'mallet')\n", (12441, 12471), False, 'import os\n'), ((12721, 12820), 'gensim.models.ldamallet.LdaMallet', 'ldamallet.LdaMallet', (['self.mallet_path', 'corpus'], {'id2word': 'dictionary', 'num_topics': '(2)', 'iterations': '(200)'}), '(self.mallet_path, corpus, id2word=dictionary,\n num_topics=2, iterations=200)\n', (12740, 12820), False, 'from gensim.models import lsimodel, ldamodel, tfidfmodel, rpmodel, logentropy_model, ldamallet\n'), ((12945, 12981), 'gensim.matutils.sparse2full', 'matutils.sparse2full', (['transformed', '(2)'], {}), '(transformed, 2)\n', (12965, 12981), False, 'from gensim import matutils\n'), ((13784, 13835), 'numpy.allclose', 'numpy.allclose', (['model.wordtopics', 'model2.wordtopics'], {}), '(model.wordtopics, model2.wordtopics)\n', (13798, 
13835), False, 'import numpy\n'), ((13881, 13926), 'numpy.allclose', 'numpy.allclose', (['model[tstvec]', 'model2[tstvec]'], {}), '(model[tstvec], model2[tstvec])\n', (13895, 13926), False, 'import numpy\n'), ((14375, 14426), 'numpy.allclose', 'numpy.allclose', (['model.wordtopics', 'model2.wordtopics'], {}), '(model.wordtopics, model2.wordtopics)\n', (14389, 14426), False, 'import numpy\n'), ((14472, 14517), 'numpy.allclose', 'numpy.allclose', (['model[tstvec]', 'model2[tstvec]'], {}), '(model[tstvec], model2[tstvec])\n', (14486, 14517), False, 'import numpy\n'), ((14757, 14808), 'numpy.allclose', 'numpy.allclose', (['model.wordtopics', 'model2.wordtopics'], {}), '(model.wordtopics, model2.wordtopics)\n', (14771, 14808), False, 'import numpy\n'), ((14854, 14899), 'numpy.allclose', 'numpy.allclose', (['model[tstvec]', 'model2[tstvec]'], {}), '(model[tstvec], model2[tstvec])\n', (14868, 14899), False, 'import numpy\n'), ((15454, 15491), 'numpy.allclose', 'numpy.allclose', (['transformed', 'expected'], {}), '(transformed, expected)\n', (15468, 15491), False, 'import numpy\n'), ((16415, 16460), 'numpy.allclose', 'numpy.allclose', (['model[tstvec]', 'model2[tstvec]'], {}), '(model[tstvec], model2[tstvec])\n', (16429, 16460), False, 'import numpy\n'), ((17162, 17199), 'numpy.allclose', 'numpy.allclose', (['transformed', 'expected'], {}), '(transformed, expected)\n', (17176, 17199), False, 'import numpy\n'), ((17508, 17553), 'numpy.allclose', 'numpy.allclose', (['model[tstvec]', 'model2[tstvec]'], {}), '(model[tstvec], model2[tstvec])\n', (17522, 17553), False, 'import numpy\n'), ((2542, 2570), 'gensim.matutils.sparse2full', 'matutils.sparse2full', (['doc', '(2)'], {}), '(doc, 2)\n', (2562, 2570), False, 'from gensim import matutils\n'), ((9803, 9875), 'gensim.models.ldamodel.LdaModel', 'ldamodel.LdaModel', ([], {'id2word': 'dictionary', 'num_topics': '(2)', 'passes': '(200)', 'eta': 'eta'}), '(id2word=dictionary, num_topics=2, passes=200, eta=eta)\n', (9820, 9875), 
False, 'from gensim.models import lsimodel, ldamodel, tfidfmodel, rpmodel, logentropy_model, ldamallet\n'), ((10578, 10621), 'numpy.allclose', 'numpy.allclose', (['result', 'expected'], {'atol': '(0.01)'}), '(result, expected, atol=0.01)\n', (10592, 10621), False, 'import numpy\n'), ((10691, 10801), 'logging.warning', 'logging.warning', (["('LDA failed to converge on attempt %i (got %s, expected %s)' % (i, result,\n expected))"], {}), "(\n 'LDA failed to converge on attempt %i (got %s, expected %s)' % (i,\n result, expected))\n", (10706, 10801), False, 'import logging\n')] |
import argparse
import tensorflow as tf
from tensorflow.keras.models import load_model
from tensorflow.keras.datasets import cifar10
import numpy as np
import os
# --- command-line interface -------------------------------------------------
parser = argparse.ArgumentParser(description='DeepJudge Seed Selection Process')
parser.add_argument('--model', required=True, type=str, help='victim model path')
parser.add_argument('--dataset', default='cifar10', type=str, help='dataset for the seed selection')
parser.add_argument('--num', default=1000, type=int, help='number of selected seeds')
parser.add_argument('--order', default='max', type=str, help='largest certainties or least. choice: max/min')
parser.add_argument('--output', default='./seeds', type=str, help='seeds saved dir')
# --- GPU setup: pin to device 0 and enable on-demand memory growth ----------
os.environ['CUDA_VISIBLE_DEVICES'] = '0'
gpus = tf.config.experimental.list_physical_devices('GPU')
if gpus:
    try:
        for gpu in gpus:
            tf.config.experimental.set_memory_growth(gpu, True)
    # memory growth must be set before the GPUs are initialized; TF raises
    # RuntimeError otherwise -- report it but keep going on CPU
    except RuntimeError as e:
        print(e)
def seedSelection(model, x, y, num=1000, order='max'):
    """Pick the `num` samples about which `model` is most (or least) certain.

    Only samples the model already classifies correctly are considered.
    Certainty is scored as the sum of squared output probabilities per
    sample; `order='max'` returns the highest-scoring samples first,
    anything else returns the lowest-scoring ones first.

    Returns the selected `(x, y)` pair, in ranking order.
    """
    # keep only samples whose prediction matches the one-hot label
    predicted = np.argmax(model(x), axis=1)
    labels = np.argmax(y, axis=1)
    correct = np.where(predicted == labels)[0]
    x, y = x[correct], y[correct]
    # certainty score: sum of squared outputs per sample
    certainty = np.sum(np.square(model(x).numpy()), axis=1)
    ranking = np.argsort(-certainty) if order == 'max' else np.argsort(certainty)
    picked = ranking[:num]
    return x[picked], y[picked]
if __name__ == '__main__':
    opt = parser.parse_args()
    # load the test split of the requested dataset; seeds are drawn from it
    if opt.dataset == 'cifar10':
        cifar10 = tf.keras.datasets.cifar10
        (training_images, training_labels), (test_images, test_labels) = cifar10.load_data()
    elif opt.dataset == 'mnist':
        mnist = tf.keras.datasets.mnist
        (training_images, training_labels), (test_images, test_labels) = mnist.load_data()
        # add the channel axis; -1 keeps this correct for any sample count
        # (was hard-coded to 10000)
        test_images = test_images.reshape(-1, 28, 28, 1)
    else:
        raise NotImplementedError()
    # select seeds from the testing dataset
    x_test = test_images / 255.0
    y_test = tf.keras.utils.to_categorical(test_labels, 10)
    victim_model = load_model(opt.model)
    seeds_x, seeds_y = seedSelection(victim_model, x_test, y_test, num=opt.num, order=opt.order)
    # persist the selection; exist_ok replaces the racy exists()+makedirs() pair
    log_dir = opt.output
    os.makedirs(log_dir, exist_ok=True)
    save_path = f"{log_dir}/{opt.dataset}_{opt.order}_{opt.num}seeds.npz"
    np.savez(save_path, seeds_x=seeds_x, seeds_y=seeds_y)
    print('Selected seeds saved at ' + save_path)
| [
"os.path.exists",
"numpy.savez",
"tensorflow.keras.utils.to_categorical",
"argparse.ArgumentParser",
"os.makedirs",
"tensorflow.config.experimental.set_memory_growth",
"numpy.argmax",
"numpy.argsort",
"tensorflow.keras.datasets.cifar10.load_data",
"tensorflow.keras.models.load_model",
"tensorflo... | [((173, 244), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""DeepJudge Seed Selection Process"""'}), "(description='DeepJudge Seed Selection Process')\n", (196, 244), False, 'import argparse\n'), ((758, 809), 'tensorflow.config.experimental.list_physical_devices', 'tf.config.experimental.list_physical_devices', (['"""GPU"""'], {}), "('GPU')\n", (802, 809), True, 'import tensorflow as tf\n'), ((1937, 1983), 'tensorflow.keras.utils.to_categorical', 'tf.keras.utils.to_categorical', (['test_labels', '(10)'], {}), '(test_labels, 10)\n', (1966, 1983), True, 'import tensorflow as tf\n'), ((2004, 2025), 'tensorflow.keras.models.load_model', 'load_model', (['opt.model'], {}), '(opt.model)\n', (2014, 2025), False, 'from tensorflow.keras.models import load_model\n'), ((2305, 2358), 'numpy.savez', 'np.savez', (['save_path'], {'seeds_x': 'seeds_x', 'seeds_y': 'seeds_y'}), '(save_path, seeds_x=seeds_x, seeds_y=seeds_y)\n', (2313, 2358), True, 'import numpy as np\n'), ((1232, 1250), 'numpy.argsort', 'np.argsort', (['(-ginis)'], {}), '(-ginis)\n', (1242, 1250), True, 'import numpy as np\n'), ((1277, 1294), 'numpy.argsort', 'np.argsort', (['ginis'], {}), '(ginis)\n', (1287, 1294), True, 'import numpy as np\n'), ((1552, 1571), 'tensorflow.keras.datasets.cifar10.load_data', 'cifar10.load_data', ([], {}), '()\n', (1569, 1571), False, 'from tensorflow.keras.datasets import cifar10\n'), ((2164, 2187), 'os.path.exists', 'os.path.exists', (['log_dir'], {}), '(log_dir)\n', (2178, 2187), False, 'import os\n'), ((2197, 2217), 'os.makedirs', 'os.makedirs', (['log_dir'], {}), '(log_dir)\n', (2208, 2217), False, 'import os\n'), ((865, 916), 'tensorflow.config.experimental.set_memory_growth', 'tf.config.experimental.set_memory_growth', (['gpu', '(True)'], {}), '(gpu, True)\n', (905, 916), True, 'import tensorflow as tf\n'), ((1076, 1096), 'numpy.argmax', 'np.argmax', (['y'], {'axis': '(1)'}), '(y, axis=1)\n', (1085, 1096), True, 'import numpy as 
np\n')] |
# -*- coding: utf-8 -*-
import logging
import math
import cmath
import os
from functools import reduce
from six import string_types
import numpy as np
# Ditto imports
from ditto.readers.abstract_reader import AbstractReader
from ditto.store import Store
from ditto.models.position import Position
from ditto.models.node import Node
from ditto.models.line import Line
from ditto.models.load import Load
from ditto.models.phase_load import PhaseLoad
from ditto.models.regulator import Regulator
from ditto.models.wire import Wire
from ditto.models.capacitor import Capacitor
from ditto.models.phase_capacitor import PhaseCapacitor
from ditto.models.powertransformer import PowerTransformer
from ditto.models.power_source import PowerSource
from ditto.models.winding import Winding
from ditto.models.phase_winding import PhaseWinding
from ditto.models.feeder_metadata import Feeder_metadata
from ditto.models.photovoltaic import Photovoltaic
from ditto.models.storage import Storage
from ditto.models.phase_storage import PhaseStorage
from ditto.models.base import Unicode
from ditto.modify.system_structure import system_structure_modifier
logger = logging.getLogger(__name__)
class Reader(AbstractReader):
"""
CYME-->DiTTo Reader class
Author: <NAME>. October 2017
.. note::
Different versions of CYME might have different header names for the same object.
The reader class has a mapping between the objects and the header names with the default mapping being for CYME version XXX (see table below).
When using another version of CYME, make sure to modify this mapping to have something consistent:
>>> my_reader.update_header_mapping(modifications)
Here, modification is a dictionary {object: header} of updates to apply to the default mapping.
**Default header mapping:**
+-------------------------------------------+--------------------------------------------+
| Object | Header |
+===========================================+============================================+
| NODE PARSER |
+-------------------------------------------+--------------------------------------------+
| 'node' | '[NODE]' |
| 'node_connector' | '[NODE CONNECTOR]' |
+-------------------------------------------+--------------------------------------------+
| LINE PARSER |
+-------------------------------------------+--------------------------------------------+
| 'overhead_unbalanced_line_settings' | '[OVERHEADLINEUNBALANCED SETTING]' |
+-------------------------------------------+--------------------------------------------+
| 'overhead_line_settings' | '[OVERHEADLINE SETTING]' |
+-------------------------------------------+--------------------------------------------+
| 'overhead_byphase_settings' | '[OVERHEAD BYPHASE SETTING]' |
+-------------------------------------------+--------------------------------------------+
| 'underground_line_settings' | '[UNDERGROUNDLINE SETTING]' |
+-------------------------------------------+--------------------------------------------+
| 'switch_settings' | '[SWITCH SETTING]' |
+-------------------------------------------+--------------------------------------------+
| 'fuse_settings' | '[FUSE SETTING]' |
+-------------------------------------------+--------------------------------------------+
| 'recloser_settings' | '[RECLOSER SETTING]' |
+-------------------------------------------+--------------------------------------------+
| 'section' | '[SECTION]' |
+-------------------------------------------+--------------------------------------------+
| 'line' | '[LINE]' |
+-------------------------------------------+--------------------------------------------+
| 'unbalanced_line' | '[LINE UNBALANCED]' |
+-------------------------------------------+--------------------------------------------+
| 'spacing_table' | '[SPACING TABLE FOR LINE]' |
+-------------------------------------------+--------------------------------------------+
| 'concentric_neutral_cable' | '[CONCENTRIC NEUTRAL CABLE]' |
+-------------------------------------------+--------------------------------------------+
| 'conductor' | '[CONDUCTOR]' |
+-------------------------------------------+--------------------------------------------+
| CAPACITOR PARSER |
+-------------------------------------------+--------------------------------------------+
| 'serie_capacitor_settings' | '[SERIE CAPACITOR SETTING]' |
+-------------------------------------------+--------------------------------------------+
| 'shunt_capacitor_settings' | '[SHUNT CAPACITOR SETTING]' |
+-------------------------------------------+--------------------------------------------+
| 'serie_capacitor' | '[SERIE CAPACITOR]' |
+-------------------------------------------+--------------------------------------------+
| 'shunt_capacitor' | '[SHUNT CAPACITOR]' |
+-------------------------------------------+--------------------------------------------+
| TRANSFORMER PARSER |
+-------------------------------------------+--------------------------------------------+
| 'auto_transformer_settings' | '[AUTO TRANSFORMER SETTING' |
+-------------------------------------------+--------------------------------------------+
| 'grounding_transformer_settings' | '[GROUNDINGTRANSFORMER SETTINGS]' |
+-------------------------------------------+--------------------------------------------+
| 'three_winding_auto_transformer_settings' | '[THREE WINDING AUTO TRANSFORMER SETTING]' |
+-------------------------------------------+--------------------------------------------+
| 'three_winding_transformer_settings' | '[THREE WINDING TRANSFORMER SETTING]' |
+-------------------------------------------+--------------------------------------------+
| 'transformer_settings' | '[TRANSFORMER SETTING]' |
+-------------------------------------------+--------------------------------------------+
| 'auto_transformer' | '[AUTO TRANSFORMER]' |
+-------------------------------------------+--------------------------------------------+
| 'grounding_transformer' | '[GROUNDING TRANSFORMER]' |
+-------------------------------------------+--------------------------------------------+
| 'three_winding_auto_transformer' | '[THREE WINDING AUTO TRANSFORMER]' |
+-------------------------------------------+--------------------------------------------+
| 'three_winding_transformer' | '[THREE WINDING TRANSFORMER]' |
+-------------------------------------------+--------------------------------------------+
| 'transformer' | '[TRANSFORMER]' |
+-------------------------------------------+--------------------------------------------+
| 'phase_shifter_transformer' | '[PHASE SHIFTER TRANSFORMER]' |
+-------------------------------------------+--------------------------------------------+
| REGULATOR PARSER |
+-------------------------------------------+--------------------------------------------+
| 'regulator_settings' | '[REGULATOR SETTING]' |
+-------------------------------------------+--------------------------------------------+
| 'regulator' | '[REGULATOR]' |
+-------------------------------------------+--------------------------------------------+
| LOAD PARSER |
+-------------------------------------------+--------------------------------------------+
| 'customer_loads' | '[CUSTOMER LOADS]' |
+-------------------------------------------+--------------------------------------------+
| 'customer_class' | '[CUSTOMER CLASS]' |
+-------------------------------------------+--------------------------------------------+
| 'loads' | '[LOADS]' |
+-------------------------------------------+--------------------------------------------+
| DISTRIBUTED GENERATION PARSER |
+-------------------------------------------+--------------------------------------------+
| 'converter' | '[CONVERTER]' |
+-------------------------------------------+--------------------------------------------+
| 'converter_control_settings' | '[CONVERTER CONTROL SETTING]' |
+-------------------------------------------+--------------------------------------------+
| 'photovoltaic_settings' ' | [PHOTOVOLTAIC SETTINGS]' |
| | [ELECTRONIC CONVERTER GENERATOR SETTING] |
+-------------------------------------------+--------------------------------------------+
| 'long_term_dynamics_curve_ext' | '[LONG TERM DYNAMICS CURVE EXT]' |
+-------------------------------------------+--------------------------------------------+
| 'dggenerationmodel' | '[DGGENERATIONMODEL]' |
+-------------------------------------------+--------------------------------------------+
| 'bess_settings' | '[BESS SETTINGS]' |
+-------------------------------------------+--------------------------------------------+
| 'bess' | '[BESS]' |
+-------------------------------------------+--------------------------------------------+
| NETWORK EQUIVALENT PARSER |
+-------------------------------------------+--------------------------------------------+
| 'network_equivalent_setting' | '[NETWORK EQUIVALENT SETTING]' |
+-------------------------------------------+--------------------------------------------+
"""
register_names = ["cyme", "Cyme", "CYME"]
    def __init__(self, **kwargs):
        """
        CYME-->DiTTo class constructor.

        Keyword Arguments:
            - data_folder_path: Path to the folder holding the CYME ASCII files. Default: "."
            - network_filename: Name of the network file. Default: "network.txt"
            - equipment_filename: Name of the equipment file. Default: "equipment.txt"
            - load_filename: Name of the load file. Default: "load.txt"
        """
        # Call super
        super(Reader, self).__init__(**kwargs)
        # Setting the file names and path
        #
        # Set the path to the CYME data files
        if "data_folder_path" in kwargs:
            self.data_folder_path = kwargs["data_folder_path"]
        # Default is current directory
        else:
            self.data_folder_path = "."
        # Set the name of the network file
        if "network_filename" in kwargs:
            self.network_filename = kwargs["network_filename"]
        else:
            self.network_filename = "network.txt"
        # Set the name of the equipment file
        if "equipment_filename" in kwargs:
            self.equipment_filename = kwargs["equipment_filename"]
        else:
            self.equipment_filename = "equipment.txt"
        # Set the name of the load file
        if "load_filename" in kwargs:
            self.load_filename = kwargs["load_filename"]
        else:
            self.load_filename = "load.txt"
        # Set the Network Type to be None. This is set in the parse_sections() function
        self.network_type = None
        # dictionary of sections to components. Used for identifying elements which are on the same section (which may cause parallel elements to be created)
        self.section_duplicates = {}
        # Header_mapping.
        #
        # Maps internal object names to the CYME section header(s) that introduce
        # them in the ASCII files. Values are lists because some objects appear
        # under several header variants depending on the CYME version.
        #
        # Modify this structure if the headers of your CYME version are not the default one.
        # Modification done by the 'update_header_mapping' method
        #
        self.header_mapping = {  # NODES
            "node": ["[NODE]"],
            "node_connector": ["[NODE CONNECTOR]"],
            # LINES
            "overhead_unbalanced_line_settings": ["[OVERHEADLINEUNBALANCED SETTING]"],
            "overhead_line_settings": ["[OVERHEADLINE SETTING]"],
            "overhead_byphase_settings": ["[OVERHEAD BYPHASE SETTING]"],
            "underground_line_settings": ["[UNDERGROUNDLINE SETTING]"],
            "switch": ["[SWITCH]"],
            "switch_settings": ["[SWITCH SETTING]"],
            "sectionalizer": ["[SECTIONALIZER]"],
            "sectionalizer_settings": ["[SECTIONALIZER SETTING]"],
            "fuse": ["[FUSE]"],
            "fuse_settings": ["[FUSE SETTING]"],
            "recloser": ["[RECLOSER]"],
            "recloser_settings": ["[RECLOSER SETTING]"],
            "breaker": ["[BREAKER]"],
            "breaker_settings": ["[BREAKER SETTING]"],
            "section": ["[SECTION]"],
            "line": ["[LINE]"],
            "unbalanced_line": ["[LINE UNBALANCED]"],
            "spacing_table": ["[SPACING TABLE FOR LINE]"],
            "conductor": ["[CONDUCTOR]"],
            "cable": ["[CABLE]"],
            "concentric_neutral_cable": [
                "[CABLE CONCENTRIC NEUTRAL]",
                "[CONCENTRIC NEUTRAL CABLE]",
            ],
            "network_protector": ["[NETWORKPROTECTOR]"],
            "network_protector_settings": ["[NETWORKPROTECTOR SETTING]"],
            # CAPACITORS
            "serie_capacitor_settings": ["[SERIES CAPACITOR SETTING]"],
            "shunt_capacitor_settings": ["[SHUNT CAPACITOR SETTING]"],
            "serie_capacitor": ["[SERIES CAPACITOR]"],
            "shunt_capacitor": ["[SHUNT CAPACITOR]"],
            # TRANSFORMERS
            "auto_transformer_settings": ["[AUTO TRANSFORMER SETTING]"],
            "grounding_transformer_settings": ["[GROUNDINGTRANSFORMER SETTINGS]"],
            "three_winding_auto_transformer_settings": [
                "[THREE WINDING AUTO TRANSFORMER SETTING]"
            ],
            "three_winding_transformer_settings": [
                "[THREE WINDING TRANSFORMER SETTING]"
            ],
            "transformer_settings": ["[TRANSFORMER SETTING]"],
            "phase_shifter_transformer_settings": [
                "[PHASE SHIFTER TRANSFORMER SETTING]"
            ],
            "auto_transformer": ["[AUTO TRANSFORMER]"],
            "grounding_transformer": ["[GROUNDING TRANSFORMER]"],
            "three_winding_auto_transformer": ["[THREE WINDING AUTO TRANSFORMER]"],
            "three_winding_transformer": ["[THREE WINDING TRANSFORMER]"],
            "transformer": ["[TRANSFORMER]"],
            "phase_shifter_transformer": ["[PHASE SHIFTER TRANSFORMER]"],
            # REGULATORS
            "regulator_settings": ["[REGULATOR SETTING]"],
            "regulator": ["[REGULATOR]"],
            # LOADS
            "customer_loads": ["[CUSTOMER LOADS]"],
            "customer_class": ["[CUSTOMER CLASS]"],
            "loads": ["[LOADS]"],
            "source": ["[SOURCE]"],
            "headnodes": ["[HEADNODES]"],
            "source_equivalent": ["[SOURCE EQUIVALENT]"],
            # DISTRIBUTED GENERATION
            "converter": ["[CONVERTER]"],
            "converter_control_settings": ["[CONVERTER CONTROL SETTING]"],
            "photovoltaic_settings": [
                "[PHOTOVOLTAIC SETTINGS]",
                "[ELECTRONIC CONVERTER GENERATOR SETTING]",
            ],
            "long_term_dynamics_curve_ext": ["[LONG TERM DYNAMICS CURVE EXT]"],
            "dggenerationmodel": ["[DGGENERATIONMODEL]"],
            "bess_settings": ["[BESS SETTINGS]"],
            "bess": ["[BESS]"],
            # SUBSTATIONS
            "substation": ["[SUBSTATION]"],
            "subnetwork_connections": ["[SUBNETWORK CONNECTIONS]"],
            # NETWORK EQUIVALENT
            "network_equivalent_setting": ["[NETWORK EQUIVALENT SETTING]"],
        }
def update_header_mapping(self, update):
"""
This method changes the default object<->header mapping.
This can be useful when using a different version of CYME for example.
**Usage:**
>>> my_reader.update_header_mapping(modifications)
:param update: New object<->header mapping
:type update: dict
"""
# Check that the update is a Python dict
if not isinstance(update, dict):
raise ValueError(
"update_header_mapping expects a dictionary. A {type} instance was provided".format(
type(update)
)
)
# Instanciate new header mapping
new_mapping = {k: [] for k in self.header_mapping.keys()}
# Loop over the default header mapping and update as requested
for key, value in self.header_mapping.items():
if key in update and update[key] not in value:
new_mapping[key].append(update[key])
else:
new_mapping[key].append(value)
# Basic safety check
if len(new_mapping) != len(self.header_mapping):
raise ValueError("Error in the update header mapping process.")
# Replace the old mapping by the new one
self.header_mapping = new_mapping
def get_file_content(self, filename):
"""
Open the requested file and returns the content.
For convinience, filename can be either the full file path or:
-'network': Will get the content of the network file given in the constructor
-'equipment': Will get the content of the equipment file given in the constructor
-'load': Will get the content of the load file given in the constructor
"""
# Shortcut mapping
if filename == "network":
filename = os.path.join(self.data_folder_path, self.network_filename)
elif filename == "equipment":
filename = os.path.join(self.data_folder_path, self.equipment_filename)
elif filename == "load":
filename = os.path.join(self.data_folder_path, self.load_filename)
# Open the file and get the content
try:
with open(filename, "r") as f:
content_ = f.readlines()
except:
logger.warning("Unable to open file {name}".format(name=filename))
content_ = []
pass
self.content = iter(content_)
def phase_mapping(self, CYME_value):
"""
Maps the CYME phase value format to a list of ABC phases:
+------------+--------------+
| CYME value | Return value |
+============+==============+
| 0 | [None] |
+------------+--------------+
| 1 | ['A'] |
+------------+--------------+
| 2 | ['B'] |
+------------+--------------+
| 3 | ['C'] |
+------------+--------------+
| 4 | ['A','B'] |
+------------+--------------+
| 5 | ['A','C'] |
+------------+--------------+
| 6 | ['B','C'] |
+------------+--------------+
| 7 | ['A','B','C']|
+------------+--------------+
.. note::
If the value provided is not an integer in [0,7], the function assumes that it receives a string like 'ABC'. In this case, it splits the string in a list of phases ['A','B','C'].
"""
if CYME_value == 0:
return [None]
elif CYME_value == 1:
return ["A"]
elif CYME_value == 2:
return ["B"]
elif CYME_value == 3:
return ["C"]
elif CYME_value == 4:
return ["A", "B"]
elif CYME_value == 5:
return ["A", "C"]
elif CYME_value == 6:
return ["B", "C"]
elif CYME_value == 7:
return ["A", "B", "C"]
else:
return list(CYME_value)
def phase_to_num(self, phase):
"""
Maps phase in 'A', 'B', 'C' format in 1, 2, 3 format.
**Mapping:**
+--------+-------+
| letter | digit |
+========+=======+
| 'A' | 1 |
+--------+-------+
| 'B' | 2 |
+--------+-------+
| 'C' | 3 |
+--------+-------+
"""
if phase == "A" or phase == "a":
return "1"
elif phase == "B" or phase == "b":
return "2"
elif phase == "C" or phase == "c":
return "3"
else:
return phase
def load_value_type_mapping(self, load_type, value1, value2):
"""
CYME customer loads provide two values v1 and v2 as well as a load value type:
This function takes these as inputs and outputs P and Q of the load.
:param load_type: CYME load type
:type load_type: int or str (see table below)
:param value1: Value 1
:type value1: float
:param value2: Value 2
:type value2: float
:returns: P and Q
:rtype: KW and KVAR
**Mapping:**
+-----------+------------+-----------------+------------------------------------------+
| type code | type value | P | Q |
+===========+============+=================+==========================================+
| 0 | KW_KVAR | :math:`v_1` | :math:`v_2` |
+-----------+------------+-----------------+------------------------------------------+
| 1 | KVA_PF | :math:`v_1 v_2` | :math:`v_1 \\sqrt{1-v_2^2}` |
+-----------+------------+-----------------+------------------------------------------+
| 2 | KW_PF | :math:`v_1` | :math:`\\frac{v_1}{v_2} \\sqrt{1-v_2^2}` |
+-----------+------------+-----------------+------------------------------------------+
| 3 | AMP_PF | ?? | ?? |
+-----------+------------+-----------------+------------------------------------------+
"""
if not isinstance(value1, float):
try:
value1 = float(value1)
except:
raise ValueError(
"Value1={} could not be converted to float in load_value_type_mapping.".format(
value1
)
)
if not isinstance(value2, float):
try:
value2 = float(value2)
except:
raise ValueError(
"Value2={} could not be converted to float in load_value_type_mapping.".format(
value2
)
)
if isinstance(load_type, string_types):
if load_type == "0" or load_type.lower() == "kw_kvar":
return value1, value2
if load_type == "1" or load_type.lower() == "kva_pf":
return value1 * value2, value1 * np.sqrt(1 - value2 ** 2)
if load_type == "2" or load_type.lower() == "kw_pf":
return value1, value1 / value2 * np.sqrt(1 - value2 ** 2)
if load_type == "3" or load_type.lower() == "amp_pf":
raise NotImplementedError("AMP_PF load type not implemented yet.")
elif isinstance(load_type, int):
if load_type == 0:
return value1, value2
if load_type == 1:
return value1 * value2, value1 * np.sqrt(1 - value2 ** 2)
if load_type == 2:
return value1, value1 / value2 * np.sqrt(1 - value2 ** 2)
if load_type == 3:
raise NotImplementedError("AMP_PF load type not implemented yet.")
else:
raise ValueError(
"load_value_type_mapping expects a string or integer for load_type. {} was provided.".format(
type(load_type)
)
)
def capacitors_connection_mapping(self, conn):
"""
Maps the capacitors connection in CYME (CAP_CONN) to DiTTo connection_type.
:param conn: Connection in CYME
:type conn: integer or string
:returns: Connection in DiTTo
:rtype: str
**Mapping:**
+---------------+-----------------------+
| CYME CAP_CONN | DiTTo connection_type |
+===============+=======================+
| 0 or 'Y' | 'Y' |
+---------------+-----------------------+
| 1 or 'YNG' | 'Y' |
+---------------+-----------------------+
| 2 or 'D' | 'D' |
+---------------+-----------------------+
"""
if not isinstance(conn, (string_types, int)):
raise ValueError(
"capacitors_connection_mapping only accepts int or string. {} was provided.".format(
type(conn)
)
)
if conn == 0 or conn == "0" or conn == "Y":
return "Y"
elif conn == 1 or conn == "1" or conn == "YNG":
return "Y"
elif conn == 2 or conn == "2" or conn == "D":
return "D"
else:
return conn
def connection_configuration_mapping(self, value):
"""
Map the connection configuration from CYME to DiTTo.
**Mapping:**
+----------+----------------+------------+
| Value | CYME | DiTTo |
+==========+================+============+
| 0 or '0' | 'Yg' | 'Y' |
+----------+----------------+------------+
| 1 or '1' | 'Y' | 'Y' |
+----------+----------------+------------+
| 2 or '2' | 'Delta' | 'D' |
+----------+----------------+------------+
| 3 or '3' | 'Open Delta' | 'D' |
+----------+----------------+------------+
| 4 or '4' | 'Closed Delta' | 'D' |
+----------+----------------+------------+
| 5 or '5' | 'Zg' | 'Z' |
+----------+----------------+------------+
| 6 or '6' | 'CT' | NOT MAPPED |
+----------+----------------+------------+
| 7 or '7' | 'Dg' | NOT MAPPED |
+----------+----------------+------------+
"""
if isinstance(value, int):
if value in [0, 1]:
return "Y"
if value in [2, 3, 4]:
return "D"
if value == 5:
return "Z"
if value in [6, 7]:
raise NotImplementedError(
"Connection {} not implemented.".format(value)
)
elif isinstance(value, string_types):
if (
value == "0"
or value.lower() == "yg"
or value == "1"
or value.lower() == "y"
):
return "Y"
if (
value == "2"
or value.lower() == "delta"
or value == "3"
or value.lower() == "open delta"
or value == "4"
or value.lower() == "closed delta"
):
return "D"
if value == "5" or value.lower() == "zg":
return "Z"
if (
value == "6"
or value.lower() == "ct"
or value == "7"
or value.lower() == "dg"
):
raise NotImplementedError(
"Connection {} not implemented.".format(value)
)
else:
raise ValueError(
"connection_configuration_mapping expects an integer or a string. {} was provided.".format(
type(value)
)
)
def transformer_connection_configuration_mapping(self, value, winding):
"""
Map the connection configuration for transformer (2 windings) objects from CYME to DiTTo.
:param value: CYME value (either string or id)
:type value: int or str
:param winding: Number of the winding (0 or 1)
:type winding: int
:returns: DiTTo connection configuration for the requested winding
:rtype: str
**Mapping:**
+----------+----------------+------------+
| Value | CYME | DiTTo |
+----------+----------------+-----+------+
| | | 1st | 2nd |
+==========+================+=====+======+
| 0 or '0' | 'Y_Y' | 'Y' | 'Y' |
+----------+----------------+-----+------+
| 1 or '1' | 'D_Y' | 'D' | 'Y' |
+----------+----------------+-----+------+
| 2 or '2' | 'Y_D' | 'Y' | 'D' |
+----------+----------------+-----+------+
| 3 or '3' | 'YNG_YNG' | 'Y' | 'Y' |
+----------+----------------+-----+------+
| 4 or '4' | 'D_D' | 'D' | 'D' |
+----------+----------------+-----+------+
| 5 or '5' | 'DO_DO' | 'D' | 'D' |
+----------+----------------+-----+------+
| 6 or '6' | 'YO_DO' | 'Y' | 'D' |
+----------+----------------+-----+------+
| 7 or '7' | 'D_YNG' | 'D' | 'Y' |
+----------+----------------+-----+------+
| 8 or '8' | 'YNG_D' | 'Y' | 'D' |
+----------+----------------+-----+------+
| 9 or '9' | 'Y_YNG' | 'Y' | 'Y' |
+----------+----------------+-----+------+
|10 or '10'| 'YNG_Y' | 'Y' | 'Y' |
+----------+----------------+-----+------+
|11 or '11'| 'Yg_Zg' | 'Y' | 'Z' |
+----------+----------------+-----+------+
|12 or '12'| 'D_Zg' | 'D' | 'Z' |
+----------+----------------+-----+------+
"""
if winding not in [0, 1]:
raise ValueError(
"transformer_connection_configuration_mapping expects an integer 0 or 1 for winding arg. {} was provided.".format(
winding
)
)
res = (None, None)
if isinstance(value, int):
if value == 0 or value == 3 or value == 9 or value == 10:
res = ("Y", "Y")
if value == 1 or value == 7:
res = ("D", "Y")
if value == 2 or value == 6 or value == 8:
res = ("Y", "D")
if value == 4 or value == 5:
res = ("D", "D")
if value == 11:
res = ("Y", "Z")
if value == 12:
res = ("D", "Z")
elif isinstance(value, string_types):
if value == "0" or value.lower() == "y_y":
res = ("Y", "Y")
if value == "1" or value.lower() == "d_y":
res = ("D", "Y")
if value == "2" or value.lower() == "y_d":
res = ("Y", "D")
if value == "3" or value.lower() == "yng_yng":
res = ("Y", "Y")
if value == "4" or value.lower() == "d_d":
res = ("D", "D")
if value == "5" or value.lower() == "do_do":
res = ("D", "D")
if value == "6" or value.lower() == "yo_do":
res = ("Y", "D")
if value == "7" or value.lower() == "d_yng":
res = ("D", "Y")
if value == "8" or value.lower() == "yng_d":
res = ("Y", "D")
if value == "9" or value.lower() == "y_yng":
res = ("Y", "Y")
if value == "10" or value.lower() == "yng_y":
res = ("Y", "Y")
if value == "11" or value.lower() == "yg_zg":
res = ("Y", "Z")
if value == "12" or value.lower() == "d_zg":
res = ("D", "Z")
else:
raise ValueError(
"transformer_connection_configuration_mapping expects an integer or a string. {} was provided.".format(
type(value)
)
)
return res[winding]
def check_object_in_line(self, line, obj):
"""
Check if the header corresponding to object is in the given line.
:param line: Text line from CYME ASCII file
:type line: str
:param obj: Object of interest that exists in the mapping
:type obj: str
:returns: True if the header is in line. False otherwise.
:rtype: bool
"""
# Safety checks
if not isinstance(line, string_types):
raise ValueError(
"check_object_in_line expects a string for both line and object. A {type} instance was provided for line.".format(
type=type(line)
)
)
if not isinstance(obj, string_types):
raise ValueError(
"check_object_in_line expects a string for both line and object. A {type} instance was provided for object.".format(
type=type(obj)
)
)
if not obj in self.header_mapping:
raise ValueError(
"{obj} is not a valid object name for the object<->header mapping.{mapp}".format(
obj=obj, mapp=self.header_mapping
)
)
return np.any([x in line for x in self.header_mapping[obj]])
    def parser_helper(self, line, obj_list, attribute_list, mapping, *args, **kwargs):
        """
        .. warning:: This is a helper function for the parsers. Do not use directly.

        Takes as input the list of objects we want to parse as well as the list of attributes we want to extract.
        Also takes the default positions of the attributes (mapping).
        The function returns a list of dictionaries, where each dictionary contains the values of the desired attributes of a CYME object.

        :param line: Current line read from the CYME ASCII file
        :type line: str
        :param obj_list: Object names (keys of self.header_mapping) whose headers may start on this line
        :type obj_list: list
        :param attribute_list: Names of the attributes to extract from each record
        :type attribute_list: list or numpy array
        :param mapping: Default attribute->column-index positions, used when no 'Format=' line follows the header
        :type mapping: dict
        :param args: Optional first positional extra: a dict of additional key/values merged into every record
        :param kwargs: Optional 'additional_attributes_list' for sections that carry a second 'Format=' line
        :returns: Dictionary mapping record IDs to {attribute: value} dicts; empty if no header matched
        :rtype: dict

        .. note::
            Records are consumed from ``self.content`` (set by get_file_content)
            until a short line (len <= 2, i.e. blank/terminator) or the end of
            the file is reached.
        """
        if isinstance(attribute_list, list):
            attribute_list = np.array(attribute_list)
        if not isinstance(attribute_list, np.ndarray):
            raise ValueError("Could not cast attribute list to Numpy array.")
        if args and isinstance(args[0], dict):
            additional_information = args[0]
        else:
            additional_information = {}
        # This is in the case of multiple Format= lines
        if (
            kwargs and "additional_attributes_list" in kwargs
        ):  # Currently assume only one set of additional attributes, but can be modified to allow for multiple attribute lists
            additional_attributes = kwargs["additional_attributes_list"]
        else:
            additional_attributes = []
        result = {}
        # Check the presence of headers in the given line
        checks = [self.check_object_in_line(line, obj) for obj in obj_list]
        # If we have a least one
        if any(checks):
            # Get the next line
            next_line = next(self.content)
            # If the next line provides the format, then grab it
            if "format" in next_line.lower():
                try:
                    # Rebuild the attribute->column mapping from the Format= line.
                    mapping = {}
                    arg_list = next_line.split("=")[1]
                    arg_list = arg_list.split(",")
                    # Put everything in lower case
                    arg_list = map(lambda x: x.lower().strip("\r\n"), arg_list)
                    arg_list = map(lambda x: x.strip("\n"), arg_list)
                    arg_list = map(lambda x: x.strip("\r"), arg_list)
                    # We want the attributes in the attribute list
                    for idx, arg in enumerate(arg_list):
                        temp = np.argwhere(arg == attribute_list).flatten()
                        if len(temp) == 1:
                            idx2 = temp[0]
                            mapping[attribute_list[idx2]] = idx
                except:
                    pass
                next_line = next(self.content)
            # At this point, we should have the mapping for the parameters of interest
            # while next_line[0] not in ['[','',' ','\n','\r\n']:
            while len(next_line) > 2:
                if "=" not in next_line.lower():
                    data = next_line.split(",")
                    ID = data[0].strip()
                    if len(data) > 1:
                        # Duplicate IDs within the section are disambiguated by
                        # appending '*' (consumers rely on this convention).
                        while ID in result:
                            ID += "*"
                        result[ID] = {}
                        for k in attribute_list:
                            try:
                                result[ID][k] = data[mapping[k]]
                            except:
                                pass
                        result[ID].update(additional_information)
                elif additional_attributes is not None and additional_attributes != []:
                    # A second 'Format=' line: switch to the additional attribute
                    # set for the remaining records.
                    try:
                        mapping = {}
                        arg_list = next_line.split("=")[1]
                        arg_list = arg_list.split(",")
                        # Put everything in lower case
                        arg_list = map(lambda x: x.lower().strip("\r\n"), arg_list)
                        arg_list = map(lambda x: x.strip("\n"), arg_list)
                        arg_list = map(lambda x: x.strip("\r"), arg_list)
                        if isinstance(additional_attributes, list):
                            additional_attributes = np.array(additional_attributes)
                        if not isinstance(additional_attributes, np.ndarray):
                            raise ValueError(
                                "Could not cast attribute list to Numpy array."
                            )
                        # We want the attributes in the attribute list
                        for idx, arg in enumerate(arg_list):
                            temp = np.argwhere(arg == additional_attributes).flatten()
                            if len(temp) == 1:
                                idx2 = temp[0]
                                mapping[additional_attributes[idx2]] = idx
                        attribute_list = additional_attributes
                        additional_attributes = []
                    except:
                        logger.warning(
                            "Attempted to apply additional attributes but failed"
                        )
                        pass
                try:
                    next_line = next(self.content)
                except StopIteration:
                    break
        return result
    def parse(self, model, **kwargs):
        """
        Parse the CYME model to DiTTo.

        :param model: DiTTo model
        :type model: DiTTo model
        :param verbose: Set the verbose mode. Optional. Default=False
        :type verbose: bool
        """
        if "verbose" in kwargs and isinstance(kwargs["verbose"], bool):
            self.verbose = kwargs["verbose"]
        else:
            self.verbose = False
        # NOTE(review): only this first log line is guarded by self.verbose;
        # the following ones are emitted unconditionally.
        if self.verbose:
            logger.info("Parsing the header...")
        self.parse_header()
        logger.info("Parsing the sections...")
        self.parse_sections(model)
        logger.info("Parsing the sources...")
        self.parse_sources(model)
        # Call parse method of abstract reader (parses nodes, lines, loads, ...)
        super(Reader, self).parse(model, **kwargs)
        logger.info("Parsing the network equivalents...")
        self.parse_network_equivalent(model)
        # The variable self.network_type is set in the parse_sections() function.
        # i.e. parse_sections
        if self.network_type == "substation":
            logger.info("Parsing the subnetwork connections...")
            self.parse_subnetwork_connections(model)
        else:
            logger.info("Parsing the Headnodes...")
            self.parse_head_nodes(model)
        self.fix_section_overlaps(model)
        model.set_names()
        # Post-processing: propagate nominal voltages through the network.
        modifier = system_structure_modifier(model)
        modifier.set_nominal_voltages_recur()
        modifier.set_nominal_voltages_recur_line()
def parse_header(self):
"""
Parse the information available in the header.
Here, we are interested in the version of CYME used in the provided files, as well as the unit system used.
Since the reader was developed using the documentation for CYME v.8.0, give a warning if the version if different.
The user is then responsible to check the differences betweeen the two versions.
"""
cyme_version = None
self.use_SI = None
# Open any file. For example the network file
self.get_file_content("network")
for line in self.content:
if "cyme_version" in line.lower():
try:
cyme_version = line.split("=")[1].strip()
except:
pass
if cyme_version is not None:
logger.info("---| Cyme_version={v} |---".format(v=cyme_version))
if "." in cyme_version:
try:
a, b = cyme_version.split(".")
except:
pass
if a != 8 and b != 0:
logger.warning(
"Warning. The current CYME--->DiTTo reader was developed with documentation of CYME 8.0. Your version is {}. You might want to check the differences between the two.".format(
cyme_version
)
)
if "[si]" in line.lower():
self.use_SI = True
logger.debug("Unit system used = S.I")
if "[imperial]" in line.lower():
self.use_SI = False
logger.debug("Unit system used = Imperial")
self.cyme_version = cyme_version
if self.use_SI is None:
raise ValueError(
"Could not find [SI] or [IMPERIAL] unit system information. Unable to parse."
)
def parse_subnetwork_connections(self, model):
"""Parse the subnetwork connections.
These specify the interconnection points for a substation
"""
model.set_names()
self.get_file_content("network")
mapp_subnetwork_connections = {"nodeid": 1}
self.subnetwork_connections = {}
for line in self.content:
self.subnetwork_connections.update(
self.parser_helper(
line,
["subnetwork_connections"],
["nodeid"],
mapp_subnetwork_connections,
)
)
for key in self.subnetwork_connections:
model[
self.subnetwork_connections[key]["nodeid"]
].is_substation_connection = True
def parse_head_nodes(self, model):
""" This parses the [HEADNODES] objects and is used to build Feeder_metadata DiTTo objects which define the feeder names and feeder headnodes"""
# Open the network file
self.get_file_content("network")
mapp = {
"nodeid": 0,
"networkid": 1,
} # These correspond to the head node name and the feeder name
headnodes = {}
for line in self.content:
headnodes.update(
self.parser_helper(line, ["headnodes"], ["nodeid", "networkid"], mapp)
)
for sid, headnode in headnodes.items():
feeder_metadata = Feeder_metadata(model)
feeder_metadata.name = headnode["networkid"].strip().lower()
feeder_metadata.headnode = headnode["nodeid"].strip().lower()
    def parse_sources(self, model):
        """Parse the sources.

        Reads [SOURCE] and [SOURCE EQUIVALENT] records from the network file and
        [SUBSTATION] records from the equipment file, then creates DiTTo
        PowerSource objects. If no [SOURCE] records exist, the default
        [SOURCE EQUIVALENT] records are used instead.

        :param model: DiTTo model
        :type model: DiTTo model
        """
        # Open the network file
        self.get_file_content("network")
        # Default column positions, used when the section has no Format= line.
        mapp = {"sourceid": 0, "nodeid": 2, "networkid": 3, "desiredvoltage": 4}
        mapp_source_equivalent = {
            "nodeid": 0,
            "voltage": 1,
            "operatingangle1": 2,
            "operatingangle2": 3,
            "operatingangle3": 4,
            "positivesequenceresistance": 5,
            "positivesequencereactance": 6,
            "zerosequenceresistance": 7,
            "zerosequencereactance": 8,
            "configuration": 9,
        }
        mapp_sub = {"id": 0, "mva": 1, "kvll": 6, "conn": 14}
        sources = {}
        subs = {}
        source_equivalents = {}
        for line in self.content:
            sources.update(
                self.parser_helper(
                    line,
                    ["source"],
                    ["sourceid", "nodeid", "networkid", "desiredvoltage"],
                    mapp,
                )
            )
            source_equivalents.update(
                self.parser_helper(
                    line,
                    ["source_equivalent"],
                    [
                        "nodeid",
                        "voltage",
                        "operatingangle1",
                        "operatingangle2",
                        "operatingangle3",
                        "usesecondlevelimpedance",  # TODO: add logic to look for this parameter and add secondlevel r & x in case it's selected
                        "positivesequenceresistance",
                        "positivesequencereactance",
                        "zerosequencereactance",
                        "zerosequenceresistance",
                        "configuration",
                        "basemva",
                        "loadmodelname",
                        "firstlevelr1",
                        "firstlevelx1",
                        "firstlevelr0",
                        "firstlevelx0"
                    ],
                    mapp_source_equivalent,
                )
            )
        self.get_file_content("equipment")
        for line in self.content:
            subs.update(
                self.parser_helper(
                    line, ["substation"], ["id", "mva", "kvll", "conn"], mapp_sub
                )
            )
        # No [SOURCE] records: build sources from the default source equivalents.
        if len(sources.items()) == 0:
            for sid, source_equivalent_data in source_equivalents.items():
                if source_equivalent_data["loadmodelname"].lower() != "default":
                    continue  # Want to only use the default source equivalent configuration
                # Find the section attached to the source node (either direction).
                for k, v in self.section_phase_mapping.items():
                    if v["fromnodeid"] == source_equivalent_data["nodeid"]:
                        sectionID = k
                        _from = v["fromnodeid"]
                        _to = v["tonodeid"]
                        phases = list(v["phase"])
                    if (
                        v["tonodeid"] == source_equivalent_data["nodeid"]
                    ):  # In case the edge is connected backwards
                        sectionID = k
                        _from = v["tonodeid"]
                        _to = v["fromnodeid"]
                        phases = list(v["phase"])
                try:
                    api_source = PowerSource(model)
                except:
                    pass
                api_source.name = _from + "_src"
                try:
                    # kV -> V
                    api_source.nominal_voltage = (
                        float(source_equivalent_data["voltage"]) * 10 ** 3
                    )
                except:
                    pass
                try:
                    api_source.phases = phases
                except:
                    pass
                api_source.is_sourcebus = True
                try:
                    # NOTE(review): 'mva' is not among the parsed source-equivalent
                    # attributes ('basemva' is) -- this likely always falls into
                    # the except. Verify against the CYME format.
                    api_source.rated_power = 10 ** 3 * float(
                        source_equivalent_data["mva"]
                    )  # Modified from source cases where substations can be used.
                except:
                    pass
                # TODO: connection_type
                try:
                    api_source.phase_angle = source_equivalent_data["operatingangle1"]
                except:
                    pass
                # try:
                if "positivesequenceresistance" in source_equivalent_data:
                    api_source.positive_sequence_impedance = complex(
                        float(source_equivalent_data["positivesequenceresistance"]),
                        float(source_equivalent_data["positivesequencereactance"]),
                    )
                else:
                    # Fall back to the first-level impedance fields.
                    api_source.positive_sequence_impedance = complex(
                        float(source_equivalent_data["firstlevelr1"]),
                        float(source_equivalent_data["firstlevelx1"]),
                    )
                # except:
                #     pass
                if "zerosequenceresistance" in source_equivalent_data:
                    api_source.zero_sequence_impedance = complex(
                        float(source_equivalent_data["zerosequenceresistance"]),
                        float(source_equivalent_data["zerosequencereactance"]),
                    )
                else:
                    api_source.zero_sequence_impedance = complex(
                        float(source_equivalent_data["firstlevelr0"]),
                        float(source_equivalent_data["firstlevelx0"]),
                    )
                try:
                    api_source.connecting_element = _from
                except:
                    pass
        else:
            for sid, sdata in sources.items():
                source_equivalent_data = None
                if "nodeid" in sdata and sdata["nodeid"] in source_equivalents:
                    source_equivalent_data = source_equivalents[sdata["nodeid"]]
                # NOTE(review): if no matching source equivalent is found,
                # source_equivalent_data stays None and the membership tests
                # below raise TypeError -- confirm inputs always provide one.
                if sid in subs:
                    # Find the section
                    for k, v in self.section_phase_mapping.items():
                        if v["fromnodeid"] == sdata["nodeid"]:
                            sectionID = k
                            _from = v["fromnodeid"]
                            _to = v["tonodeid"]
                            phases = list(v["phase"])
                        if v["tonodeid"] == sdata["nodeid"]:  # If it's backwards
                            sectionID = k
                            _to = v["fromnodeid"]
                            _from = v["tonodeid"]
                            phases = list(v["phase"])
                    try:
                        api_source = PowerSource(model)
                    except:
                        pass
                    api_source.name = _from + "_src"
                    try:
                        if "desiredvoltage" in sdata:
                            api_source.nominal_voltage = (
                                float(sdata["desiredvoltage"]) * 10 ** 3
                            )
                        else:
                            api_source.nominal_voltage = (
                                float(source_equivalent_data["voltage"]) * 10 ** 3
                            )
                    except:
                        pass
                    try:
                        api_source.phases = phases
                    except:
                        pass
                    api_source.is_sourcebus = True
                    try:
                        api_source.rated_power = 10 ** 3 * float(subs[sid]["mva"])
                    except:
                        pass
                    # TODO: connection_type
                    try:
                        api_source.phase_angle = source_equivalent_data[
                            "operatingangle1"
                        ]
                    except:
                        pass
                    # try:
                    if "positivesequenceresistance" in source_equivalent_data:
                        api_source.positive_sequence_impedance = complex(
                            float(source_equivalent_data["positivesequenceresistance"]),
                            float(source_equivalent_data["positivesequencereactance"]),
                        )
                    else:
                        api_source.positive_sequence_impedance = complex(
                            float(source_equivalent_data["firstlevelr1"]),
                            float(source_equivalent_data["firstlevelx1"]),
                        )
                    # except:
                    #     pass
                    if "zerosequenceresistance" in source_equivalent_data:
                        api_source.zero_sequence_impedance = complex(
                            float(source_equivalent_data["zerosequenceresistance"]),
                            float(source_equivalent_data["zerosequencereactance"]),
                        )
                    else:
                        api_source.zero_sequence_impedance = complex(
                            float(source_equivalent_data["firstlevelr0"]),
                            float(source_equivalent_data["firstlevelx0"]),
                        )
                    # NOTE(review): this re-assignment passes strings to complex(),
                    # which raises and is swallowed -- it appears to be dead code
                    # shadowed by the block above; confirm before removing.
                    try:
                        api_source.zero_sequence_impedance = complex(
                            source_equivalent_data["zerosequenceresistance"],
                            source_equivalent_data["zerosequencereactance"],
                        )
                    except:
                        pass
                    try:
                        api_source.connecting_element = _from
                    except:
                        pass
                    # try:
                    #     api_transformer=PowerTransformer(model)
                    # except:
                    #     pass
                    # try:
                    #     api_transformer.is_substation=1
                    # except:
                    #     pass
                    # try:
                    #     api_transformer.name=sid
                    # except:
                    #     pass
                    # try:
                    #     api_transformer.rated_power=10**3*float(subs[sid]['mva'])
                    # except:
                    #     pass
                    # try:
                    #     api_transformer.from_element=_from
                    # except:
                    #     pass
                    # try:
                    #     api_transformer.to_element=_to
                    # except:
                    #     pass
                    # for w in range(2):
                    #     try:
                    #         api_winding=Winding(model)
                    #     except:
                    #         pass
                    #     try:
                    #         api_winding.connection_type=self.transformer_connection_configuration_mapping(subs[sid]['conn'])
                    #     except:
                    #         pass
                    #     try:
                    #         api_winding.nominal_voltage=10**3*float(subs[sid]['kvll'])
                    #     except:
                    #         pass
                    #     try:
                    #         api_winding.rated_power=10**6*float(subs[sid]['mva'])
                    #     except:
                    #         pass
                    #     for p in phases:
                    #         try:
                    #             api_phase_winding=PhaseWinding(model)
                    #         except:
                    #             pass
                    #         try:
                    #             api_phase_winding.phase=self.phase_mapping(p)
                    #         except:
                    #             pass
                    #         api_winding.phase_windings.append(api_phase_winding)
                    #     api_transformer.windings.append(api_winding)
    def parse_nodes(self, model):
        """
        Parse the nodes from CYME to DiTTo.

        Builds a DiTTo Node per [NODE] record, attaching coordinates from the
        record itself and, for bus-style records, intermediate points from the
        [NODE CONNECTOR] section. Parsed nodes are collected in ``self._nodes``.

        :param model: DiTTo model
        :type model: DiTTo model
        :returns: 1 on completion
        :rtype: int
        """
        self._nodes = []
        # Open the network file
        self.get_file_content("network")
        # Default mapp (positions if all fields are present in the format)
        mapp = {
            "nodeid": 0,
            "ratedvoltage": 48,
            "coordx": 2,
            "coordy": 3,
            "coordx1": 2,
            "coordy1": 3,
            "coordx2": 4,
            "coordy2": 5,
        }
        nodes = {}
        node_connectors = {}
        kwargs = {
            "additional_attributes_list": [
                "nodeid",
                "coordx1",
                "coordy1",
                "coordx2",
                "coordy2",
                "ratedvoltage",
            ]
        }  # In case there are buses included in the node list with x1, y1, x2, y2 positions
        for line in self.content:
            nodes.update(
                self.parser_helper(
                    line,
                    ["node"],
                    ["nodeid", "coordx", "coordy", "ratedvoltage"],
                    mapp,
                    **kwargs
                )
            )
        # Second pass over the file for the [NODE CONNECTOR] records.
        self.get_file_content("network")
        for line in self.content:
            node_connectors.update(
                self.parser_helper(
                    line, ["node_connector"], ["nodeid", "coordx", "coordy"], mapp
                )
            )
        for ID, node in nodes.items():
            # Create a new DiTTo node object
            api_node = Node(model)
            # Set the name
            try:
                api_node.name = ID
            except:
                pass
            # Set the coordinates
            try:
                if "coordx" in node:
                    # Point-style record: a single (x, y) position.
                    position = Position(model)
                    position.long = float(node["coordx"])
                    position.lat = float(node["coordy"])
                    position.elevation = 0
                    api_node.positions.append(position)
                elif "coordx1" in node:
                    # Bus-style record: two endpoints (x1,y1)-(x2,y2) with
                    # optional intermediate connector points in between.
                    api_node.positions = []
                    position1 = Position(model)
                    position1.long = float(node["coordx1"])
                    position1.lat = float(node["coordy1"])
                    position1.elevation = 0
                    api_node.positions.append(position1)
                    if ID in node_connectors:
                        ID_inc = ID
                        # Duplicate connector records were stored by parser_helper
                        # under ID, ID*, ID**, ...; walk them in that order.
                        while ID_inc in node_connectors:
                            values = node_connectors[ID_inc]
                            position_i = Position(model)
                            position_i.long = float(values["coordx"])
                            position_i.lat = float(values["coordy"])
                            position_i.elevation = 0
                            api_node.positions.append(position_i)
                            ID_inc += "*"
                    position2 = Position(model)
                    position2.long = float(node["coordx2"])
                    position2.lat = float(node["coordy2"])
                    position2.elevation = 0
                    api_node.positions.append(position2)
            except:
                pass
            # Set the nominal voltage
            try:
                api_node.nominal_voltage = float(node["ratedvoltage"])
            except:
                pass
            # Add the node to the list
            self._nodes.append(api_node)
        return 1
def configure_wire(
self,
model,
conductor_data,
spacing_data,
phase,
is_switch,
is_fuse,
is_open,
is_network_protector,
is_breaker,
is_recloser,
is_sectionalizer,
):
"""Helper function that creates a DiTTo wire object and configures it."""
# Instanciate the wire DiTTo object
api_wire = Wire(model)
# Set the phase of the wire
try:
api_wire.phase = phase
except:
pass
try:
api_wire.nameclass = conductor_data["id"]
except:
pass
# Set the flags
api_wire.is_switch = is_switch
api_wire.is_open = is_open
api_wire.is_fuse = is_fuse
api_wire.is_network_protector = is_network_protector
api_wire.is_breaker = is_breaker
api_wire.is_recloser = is_recloser
api_wire.is_sectionalizer = is_sectionalizer
# Set the diameter of the wire
try:
api_wire.diameter = float(conductor_data["diameter"])
except:
pass
# Set the nameclass
try:
api.wire.nameclass = conductor_data["nameclass"]
except:
pass
# Set the GMR of the wire
try:
api_wire.gmr = float(conductor_data["gmr"])
except:
pass
# Set the ampacity of the wire
try:
api_wire.ampacity = float(conductor_data["amps"])
except:
pass
# Set the interupting current of the wire if it is a network protectors, a fuse, a sectionalizer, a breaker, or a recloser
if (
is_network_protector
or is_fuse
or is_sectionalizer
or is_breaker
or is_recloser
):
try:
api_wire.interrupting_rating = float(
conductor_data["interruptingrating"]
)
except:
pass
# Set the emergency ampacity of the wire
try:
api_wire.emergency_ampacity = float(conductor_data["withstandrating"])
except:
pass
# Set the X spacing
x_map = {
"A": "posofcond1_x",
"B": "posofcond2_x",
"C": "posofcond3_x",
"N": "posofneutralcond_x",
"N2": "posofneutralcond_n2_x",
}
try:
api_wire.X = spacing_data[x_map[phase]]
except:
pass
# Set the Y spacing
y_map = {
"A": "posofcond1_y",
"B": "posofcond2_y",
"C": "posofcond3_y",
"N": "posofneutralcond_y",
"N2": "posofneutralcond_n2_y",
}
try:
api_wire.Y = spacing[y_map[phase]]
except:
pass
return api_wire
    def parse_sections(self, model):
        """
        This function is responsible for parsing the sections. It is expecting the following structure:
        ...
        [SECTION]
        FORMAT_section=sectionid,fromnodeid,tonodeid,phase
        FORMAT_Feeder=networkid,headnodeid
        Feeder=feeder_1,head_feeder_1
        section_1_feeder_1,node_1,node_2,ABC
        ...
        ...
        Feeder=feeder_2,head_feeder_2
        section_1_feeder_2,node_1,node_2,ABC
        ...
        ...
        **What is done in this function:**
        - We need to create a clear and fast mapping between feeders and sectionids
        - Same thing, mapping between sectionids and nodes/phases
        - Since we will be using these structures a lot in the reader, we need something fast that does not involve looping like crazy
        **Data structures:**
        1) feeder_section_mapping: dictionary where keys are network_ids and values are lists of section id_s
        2) section_feeder_mapping: dictionary where keys are section ids and values are network_ids
        (to perform the opposite query as 1) without having to look in every lists of section ids until we find the good one...)
        3) section_phase_mapping: dictionary where keys are section ids and values are tuples (node_1, node_2, phase)
        .. warning:: This should be called prior to any other parser because the other parsers rely on these 3 data structures.
        """
        # Fresh lookup tables; any previous parse results are discarded.
        self.feeder_section_mapping = {}
        self.section_feeder_mapping = {}
        self.section_phase_mapping = {}
        self.network_data = {}
        # Column layouts, read from the FORMAT_* lines inside [SECTION].
        format_section = None
        format_feeder = None
        # Network id of the feeder/substation currently being read.
        _netID = None
        job_is_done = False
        # Open the network file
        self.get_file_content("network")
        # Loop over the network file
        for line in self.content:
            # This will stop reading the file if we have already worked on the sections
            if job_is_done:
                break
            # Find the section section
            if "[SECTION]" in line:
                job_is_done = True
                line = next(self.content)
                # Until we meet the next section header, work...
                # NOTE(review): this `or` chain of `!=` comparisons is always
                # True for any character (it cannot equal '[', ' ', '\n' and
                # '\t\n' simultaneously; '\t\n' is two chars and can never
                # equal line[0]). The loop therefore only terminates on
                # len(line) <= 2 — presumably `and` was intended; confirm
                # before changing, since parsing currently relies on this.
                while len(line) > 2 and (
                    line[0] != "["
                    or line[0] != " "
                    or line[0] != "\n"
                    or line[0] != "\t\n"
                ):
                    # First, we grab the format used to define sections
                    if "format_section" in line.lower():
                        # Lower-cased, whitespace-stripped column names.
                        format_section = list(
                            map(
                                lambda x: x.strip(),
                                map(lambda x: x.lower(), line.split("=")[1].split(",")),
                            )
                        )
                    # Then, we grab the format used to define feeders
                    elif (
                        "format_feeder" in line.lower()
                        or "format_substation" in line.lower()
                        or "format_generalnetwork" in line.lower()
                    ):
                        format_feeder = list(
                            map(
                                lambda x: x.strip(),
                                map(lambda x: x.lower(), line.split("=")[1].split(",")),
                            )
                        )
                    # If we have a new feeder declaration
                    # NOTE(review): the 'substation=' test appears twice below;
                    # the duplicate is harmless but likely unintended.
                    elif len(line) >= 7 and (
                        line[:7].lower() == "feeder="
                        or line[:11].lower() == "substation="
                        or line[:11].lower() == "substation="
                        or line[:15].lower() == "generalnetwork="
                    ):
                        if (
                            line[:7].lower() == "feeder="
                            or line[:15].lower() == "generalnetwork="
                        ):
                            self.network_type = "feeder"
                        if line[:11].lower() == "substation=":
                            self.network_type = "substation"
                        # We should have a format for sections and feeders,
                        # otherwise, raise an error...
                        if format_section is None:
                            raise ValueError("No format for sections.")
                        if format_feeder is None:
                            raise ValueError("No format for feeders.")
                        # Get the feeder data (everything after the '=' symbol)
                        feeder_data = line.split("=")[1].split(",")
                        # Check that the data obtained have the same length as the format provided
                        # otherwise, raise an error...
                        if len(feeder_data) != len(format_feeder):
                            raise ValueError(
                                "Feeder/substation data length {a} does not match feeder format length {b}.".format(
                                    a=len(feeder_data), b=len(format_feeder)
                                )
                            )
                        # Check that we have a networkid in the format
                        # otherwise, raise an error...
                        if "networkid" not in format_feeder:
                            raise ValueError(
                                "Cannot find the networkid in format: "
                                + str(format_feeder)
                            )
                        # Check that we have a sectionid in the format
                        # otherwise, raise an error...
                        if "sectionid" not in format_section:
                            raise ValueError(
                                "Cannot find the sectionid in format: "
                                + str(format_section)
                            )
                        # We should be able to get the networkid from the feeder data.
                        _netID = feeder_data[format_feeder.index("networkid")].lower()
                        # First, we store all the feeder data in the network_data structure
                        self.network_data[_netID] = {}
                        for key, value in zip(format_feeder, feeder_data):
                            self.network_data[_netID][key] = value
                        # Then, we create a new entry in feeder_section_mapping
                        self.feeder_section_mapping[_netID] = []
                    # Otherwise, we should have a new section...
                    else:
                        # If we have no networkid at this point, raise an error
                        # Note: If CYME allows sections to be define without
                        # a network, remove this safety check
                        #
                        if _netID is None:
                            raise ValueError(
                                "No network ID available when reading line \n" + line
                            )
                        # Extract the data for this section
                        section_data = list(map(lambda x: x.strip(), line.split(",")))
                        # Check length coherence...
                        if len(section_data) != len(format_section):
                            raise ValueError(
                                "Section data length {a} does not match section format length {b}.".format(
                                    a=len(section_data), b=len(format_section)
                                )
                            )
                        # Grab the sectionid
                        _sectionID = section_data[
                            format_section.index("sectionid")
                        ].lower()
                        # Create a new entry in section_phase_mapping
                        self.section_phase_mapping[_sectionID] = {}
                        # Populate this new entry
                        for key, value in zip(format_section, section_data):
                            self.section_phase_mapping[_sectionID][key] = value
                        # And finally, add a new entry to section_feeder_mapping
                        self.section_feeder_mapping[_sectionID] = _netID
                    # Finally, move on to next line
                    # NOTE(review): if the file ends inside [SECTION], this
                    # next() raises StopIteration which is not caught here —
                    # confirm CYME files always close the section with a
                    # short/blank line.
                    line = next(self.content)
def parse_lines(self, model):
"""
Parse the lines from CYME to DiTTo.
:param model: DiTTo model
:type model: DiTTo model
"""
# Default mapp (positions if all fields are present in the format)
# These numbers come from the CYME documentation (position of the fields)
mapp_overhead = {
"sectionid": 0,
"linecableid": 5,
"length": 6,
"coordx": 8,
"coordy": 9,
}
mapp_overhead_byphase = {
"sectionid": 0,
"devicenumber": 1,
"condid_a": 5,
"condid_b": 6,
"condid_c": 7,
"condid_n1": 8,
"condid_n2": 9,
"spacingid": 10,
"length": 11,
"coordx": 14,
"coordy": 15,
}
mapp_underground = {
"sectionid": 0,
"linecableid": 5,
"length": 6,
"amps": 8,
"coordx": 14,
"coordy": 15,
}
mapp_switch = {
"sectionid": 0,
"eqid": 2,
"coordx": 7,
"coordy": 8,
"closedphase": 9,
}
mapp_sectionalizer = {"sectionid": 0, "eqid": 2, "coordx": 7, "coordy": 8}
mapp_line = {
"id": 0,
"phasecondid": 1,
"neutralcondid": 2,
"spacingid": 3,
"amps": 11,
"r1": 5,
"r0": 6,
"x1": 7,
"x0": 8,
"b1": 9,
"b0": 10,
}
mapp_section = {"sectionid": 0, "fromnodeid": 1, "tonodeid": 2, "phase": 3}
mapp_line_unbalanced = {
"id": 0,
"condid_a": 1,
"condid_b": 2,
"condid_c": 3,
"condid_n1": 4,
"condid_n2": 5,
"spacingid": 6,
"ra": 8,
"rb": 9,
"rc": 10,
"xa": 11,
"xb": 12,
"xc": 13,
"ba": 14,
"bb": 15,
"bc": 16,
"mutualresistanceab": 36,
"mutualresistancebc": 37,
"mutualresistanceca": 38,
"mutualreactanceab": 39,
"mutualreactancebc": 40,
"mutualreactanceca": 41,
}
mapp_spacing = {
"id": 0,
"posofcond1_x": 5,
"posofcond1_y": 6,
"posofcond2_x": 7,
"posofcond2_y": 8,
"posofcond3_x": 9,
"posofcond3_y": 10,
"posofneutralcond_x": 11,
"posofneutralcond_y": 12,
"posofneutralcond_n2_x": 13,
"posofneutralcond_n2_y": 14,
}
mapp_conductor = {
"id": 0,
"diameter": 1,
"gmr": 2,
"amps": 5,
"withstandrating": 15,
}
mapp_cable = {"id": 0, "r1": 1, "r0": 2, "x1": 3, "x0": 4, "amps": 7}
mapp_concentric_neutral_cable = {
"id": 0,
"r1": 1,
"r0": 2,
"x1": 3,
"x0": 4,
"amps": 5,
"phasecondid": 19,
"neutralcondid": 20,
}
mapp_network_protectors = {
"id": 0,
"amps": 1,
"kvll": 6,
"interruptingrating": 8,
}
mapp_sectionalizers = {"id": 0, "amps": 1, "kvll": 6, "interruptingrating": 20}
mapp_switch_eq = {"id": 0, "amps": 1, "kvll": 6}
# Instanciate the lists for storing objects
self.overhead_lines = []
self.underground_lines = []
self.sections = []
# self.lines=[]
self.lines_unbalanced = []
# self.spacings=[]
# self.conductors=[]
self.overhead_by_phase = []
self.balanced_lines = {}
self.unbalanced_lines = {}
self.settings = {}
self.spacings = {}
self.conductors = {}
self.concentric_neutral_cable = {}
self.cables = {}
self.network_protectors = {}
self.breakers = {}
self.fuses = {}
self.reclosers = {}
self.sectionalizers = {}
self.switches = {}
# Instanciate the list in which we store the DiTTo line objects
self._lines = []
self.section_phase = {}
mapp_closed_phase = {
0: "none",
1: "A",
2: "B",
3: "C",
4: "AB",
5: "AC",
6: "BC",
7: "ABC",
"0": "none",
"1": "A",
"2": "B",
"3": "C",
"4": "AB",
"5": "AC",
"6": "BC",
"7": "ABC",
"none": "none",
"NONE": "none",
"A": "A",
"B": "B",
"C": "C",
"AB": "AB",
"AC": "AC",
"BC": "BC",
"ABC": "ABC",
}
#####################################################
# #
# NETWORK FILE #
# #
#####################################################
#
# Open the network file
self.get_file_content("network")
# Loop over the network file
for line in self.content:
#########################################
# #
# OVERHEAD UNBALANCED LINES #
# #
#########################################
#
self.settings = self.update_dict(
self.settings,
self.parser_helper(
line,
["overhead_unbalanced_line_settings"],
["sectionid", "coordx", "coordy", "linecableid", "length"],
mapp_overhead,
{"type": "overhead_unbalanced"},
),
)
#########################################
# #
# OVERHEAD BALANCED LINES #
# #
#########################################
#
self.settings = self.update_dict(
self.settings,
self.parser_helper(
line,
["overhead_line_settings"],
["sectionid", "coordx", "coordy", "linecableid", "length"],
mapp_overhead,
{"type": "overhead_balanced"},
),
)
#########################################
# #
# OVERHEAD BY PHASE SETTINGS #
# #
#########################################
#
self.settings = self.update_dict(
self.settings,
self.parser_helper(
line,
["overhead_byphase_settings"],
[
"sectionid",
"devicenumber",
"condid_a",
"condid_b",
"condid_c",
"condid_n",
"condid_n1",
"condid_n2",
"spacingid",
"length",
"coordx",
"coordy",
],
mapp_overhead_byphase,
{"type": "overhead_unbalanced"},
),
)
#########################################
# #
# UNDERGROUND LINES #
# #
#########################################
#
self.settings = self.update_dict(
self.settings,
self.parser_helper(
line,
["underground_line_settings"],
["sectionid", "coordx", "coordy", "linecableid", "length", "amps"],
mapp_underground,
{"type": "underground"},
),
)
#########################################
# #
# SWITCH. #
# #
#########################################
#
self.settings = self.update_dict(
self.settings,
self.parser_helper(
line,
["switch_settings"],
["sectionid", "coordx", "coordy", "eqid", "closedphase"],
mapp_switch,
{"type": "switch"},
),
)
#########################################
# #
# SECTIONALIZER. #
# #
#########################################
#
self.settings = self.update_dict(
self.settings,
self.parser_helper(
line,
["sectionalizer_settings"],
["sectionid", "coordx", "coordy", "eqid", "closedphase"],
mapp_sectionalizer,
{"type": "sectionalizer"},
),
)
#########################################
# #
# FUSES. #
# #
#########################################
#
self.settings = self.update_dict(
self.settings,
self.parser_helper(
line,
["fuse_settings"],
["sectionid", "coordx", "coordy", "eqid"],
mapp_switch, # Same as switches
{"type": "fuse"},
),
)
#########################################
# #
# RECLOSERS. #
# #
#########################################
#
self.settings = self.update_dict(
self.settings,
self.parser_helper(
line,
["recloser_settings"],
["sectionid", "coordx", "coordy", "eqid"],
mapp_switch, # Same as switches
{"type": "recloser"},
),
)
#########################################
# #
# BREAKER. #
# #
#########################################
#
self.settings = self.update_dict(
self.settings,
self.parser_helper(
line,
["breaker_settings"],
["sectionid", "coordx", "coordy", "eqid", "closedphase"],
mapp_switch, # Same as switches
{"type": "breaker"},
),
)
#########################################
# #
# NETWORK PROTECTORS. #
# #
#########################################
#
self.settings = self.update_dict(
self.settings,
self.parser_helper(
line,
["network_protector_settings"],
["sectionid", "coordx", "coordy", "eqid", "closedphase"],
mapp_switch, # Same as switches
{"type": "network_protector"},
),
)
#########################################
# #
# SECTIONS. #
# #
#########################################
#
self.settings = self.update_dict(
self.settings,
self.parser_helper(
line,
["section"],
["sectionid", "fromnodeid", "tonodeid", "phase"],
mapp_section,
),
)
#####################################################
# #
# EQUIPMENT FILE #
# #
#####################################################
#
# Open the equipment file
self.get_file_content("equipment")
# Loop over the equipment file
for line in self.content:
#########################################
# #
# LINES. #
# #
#########################################
#
self.balanced_lines.update(
self.parser_helper(
line,
["line"],
[
"id",
"phasecondid",
"neutralcondid",
"spacingid",
"amps",
"r1",
"r0",
"x1",
"x0",
"b1",
"b0",
],
mapp_line,
{"type": "balanced_line"},
)
)
#########################################
# #
# UNBALANCED LINES. #
# #
#########################################
#
self.unbalanced_lines.update(
self.parser_helper(
line,
["unbalanced_line"],
[
"id",
"condid_a",
"condid_b",
"condid_c",
"condid_n",
"condid_n1",
"condid_n2",
"spacingid",
"ra",
"rb",
"rc",
"xa",
"xb",
"xc",
"ba",
"bb",
"bc",
"mutualresistanceab",
"mutualresistancebc",
"mutualresistanceca",
"mutualreactanceab",
"mutualreactancebc",
"mutualreactanceca",
],
mapp_line_unbalanced,
{"type": "unbalanced_line"},
)
)
#########################################
# #
# SPACING TABLE #
# #
#########################################
#
self.spacings.update(
self.parser_helper(
line,
["spacing_table"],
[
"id",
"posofcond1_x",
"posofcond1_y",
"posofcond2_x",
"posofcond2_y",
"posofcond3_x",
"posofcond3_y",
"posofneutralcond_x",
"posofneutralcond_y",
"posofneutralcond_n2_x",
"posofneutralcond_n2_y",
],
mapp_spacing,
)
)
#########################################
# #
# CONDUCTOR #
# #
#########################################
#
self.conductors.update(
self.parser_helper(
line,
["conductor"],
["id", "diameter", "gmr", "r25", "amps", "withstandrating"],
mapp_conductor,
)
)
#########################################
# #
# CONCENTRIC NEUTRAL CABLE #
# #
#########################################
#
self.concentric_neutral_cable.update(
self.parser_helper(
line,
["concentric_neutral_cable"],
[
"id",
"r1",
"r0",
"x1",
"x0",
"amps",
"phasecondid",
"neutralcondid",
],
mapp_concentric_neutral_cable,
)
)
#########################################
# #
# CABLE #
# #
#########################################
#
self.cables.update(
self.parser_helper(
line,
["cable"],
["id", "r1", "r0", "x1", "x0", "amps"],
mapp_concentric_neutral_cable,
)
)
#########################################
# #
# SWITCHES #
# #
#########################################
#
self.switches.update(
self.parser_helper(
line, ["switch"], ["id", "amps", "kvll"], mapp_switch_eq
)
)
#########################################
# #
# FUSES #
# #
#########################################
#
self.fuses.update(
self.parser_helper(
line,
["fuse"],
["id", "amps", "kvll", "interruptingrating"],
mapp_network_protectors, # Same as network protectors
)
)
#########################################
# #
# RECLOSERS #
# #
#########################################
#
self.reclosers.update(
self.parser_helper(
line,
["recloser"],
["id", "amps", "kvll", "interruptingrating"],
mapp_network_protectors, # Same as network protectors
)
)
#########################################
# #
# SECTIONALIZERS #
# #
#########################################
#
self.sectionalizers.update(
self.parser_helper(
line,
["sectionalizer"],
["id", "amps", "kvll", "interruptingrating"],
mapp_sectionalizers,
)
)
#########################################
# #
# BREAKERS #
# #
#########################################
#
self.breakers.update(
self.parser_helper(
line,
["breaker"],
["id", "amps", "kvll", "interruptingrating"],
mapp_network_protectors, # Same as network protectors
)
)
#########################################
# #
# NETWORK PROTECTORS #
# #
#########################################
#
self.network_protectors.update(
self.parser_helper(
line,
["network_protector"],
["id", "amps", "kvll", "interruptingrating"],
mapp_network_protectors,
)
)
#####################################################
# #
# JOIN LISTS AND CREATE DITTO OBJECTS #
# #
#####################################################
#
# At this point, we should have all the line data in multiple list of dictionaries.
# We have then to put everything back together using the foreign keys
#
# Loop over the sections
for sectionID, settings in self.settings.items():
sectionID = sectionID.strip("*").lower()
# TODO: CLEAN THIS...
if (
"fromnodeid" in settings and (
"load" in settings["fromnodeid"].lower()
or "load" in settings["tonodeid"].lower()
)
):
continue
new_line = {}
# Set the name
try:
new_line["name"] = sectionID
except:
pass
# Set the from_element (info is in the section)
try:
new_line["from_element"] = self.section_phase_mapping[sectionID][
"fromnodeid"
]
except:
pass
# Set the to_element (info is in the section)
try:
new_line["to_element"] = self.section_phase_mapping[sectionID][
"tonodeid"
]
except:
pass
# Set the connection index for the from_element (info is in the section)
try:
new_line["from_element_connection_index"] = int(
self.section_phase_mapping[sectionID]["fromnodeindex"]
)
except:
pass
# Set the connection index for the from_element (info is in the section)
try:
new_line["to_element_connection_index"] = int(
self.section_phase_mapping[sectionID]["tonodeindex"]
)
except:
pass
try:
phases = list(self.section_phase_mapping[sectionID]["phase"])
except:
pass
# Set the length
try:
new_line["length"] = float(settings["length"])
except:
pass
new_line["feeder_name"] = self.section_feeder_mapping[sectionID]
# Set the position
try:
position = Position(model)
position.long = float(settings["coordx"])
position.lat = float(settings["coordy"])
position.elevation = 0
new_line["position"] = position
except:
pass
# Set the line type
new_line["is_switch"] = False
new_line["is_fuse"] = False
new_line["is_recloser"] = False
new_line["is_breaker"] = False
new_line["is_sectionalizer"] = False
new_line["is_network_protector"] = False
# Set the nameclass of the line as the equipment ID
if "eqid" in settings:
new_line["nameclass"] = settings["eqid"]
if "type" in settings:
# Overhead lines
if "overhead" in settings["type"]:
new_line["line_type"] = "overhead"
# Underground lines
elif "underground" in settings["type"]:
new_line["line_type"] = "underground"
# Switch
elif "switch" in settings["type"]:
new_line["is_switch"] = True
new_line["wires"] = []
total_closed = 0
# Get and map the closed phases
if "closedphase" in settings:
closedphase = mapp_closed_phase[settings["closedphase"]]
else:
closedphase = (
"ABC" # If no info, then everything is closed by default...
)
# Get the sectionalizer equipment data
if "eqid" in settings and settings["eqid"] in self.switches:
switch_data = self.switches[settings["eqid"]]
else:
switch_data = {}
# Pass the nameclass to the wires
if "nameclass" in new_line:
switch_data["nameclass"] = new_line["nameclass"]
try:
new_line["nominal_voltage"] = float(switch_data["kvll"]) * 1000
except:
pass
# Create the wires
for p in phases + ["N"]:
if p in closedphase and closedphase.lower() != "none":
total_closed += 1
api_wire = self.configure_wire(
model,
switch_data,
{},
p,
True,
False,
False,
False,
False,
False,
False,
)
elif p == "N" and total_closed >= 1:
api_wire = self.configure_wire(
model,
switch_data,
{},
p,
True,
False,
False,
False,
False,
False,
False,
)
else:
api_wire = self.configure_wire(
model,
switch_data,
{},
p,
True,
False,
True,
False,
False,
False,
False,
)
new_line["wires"].append(api_wire)
api_line = Line(model)
for k, v in new_line.items():
setattr(api_line, k, v)
if not sectionID in self.section_duplicates:
self.section_duplicates[sectionID] = []
self.section_duplicates[sectionID].append(api_line)
continue
# Sectionalizer
elif "sectionalizer" in settings["type"]:
new_line["is_sectionalizer"] = True
new_line["wires"] = []
total_closed = 0
# Get and map the closed phases
if "closedphase" in settings:
closedphase = mapp_closed_phase[settings["closedphase"]]
else:
closedphase = (
"ABC" # If no info, then everything is closed by default...
)
# Get the sectionalizer equipment data
if "eqid" in settings and settings["eqid"] in self.sectionalizers:
sectionalizer_data = self.sectionalizers[settings["eqid"]]
else:
sectionalizer_data = {}
# Pass the nameclass to the wires
if "nameclass" in new_line:
sectionalizer_data["nameclass"] = new_line["nameclass"]
try:
new_line["nominal_voltage"] = (
float(sectionalizer_data["kvll"]) * 1000
)
except:
pass
# Create the wires
for p in phases + ["N"]:
if p in closedphase and closedphase.lower() != "none":
total_closed += 1
api_wire = self.configure_wire(
model,
sectionalizer_data,
{},
p,
False,
False,
False,
False,
False,
False,
True,
)
elif p == "N" and total_closed >= 1:
api_wire = self.configure_wire(
model,
sectionalizer_data,
{},
p,
False,
False,
False,
False,
False,
False,
True,
)
else:
api_wire = self.configure_wire(
model,
sectionalizer_data,
{},
p,
False,
False,
True,
False,
False,
False,
True,
)
new_line["wires"].append(api_wire)
api_line = Line(model)
for k, v in new_line.items():
setattr(api_line, k, v)
if not sectionID in self.section_duplicates:
self.section_duplicates[sectionID] = []
self.section_duplicates[sectionID].append(api_line)
continue
# Fuse
elif "fuse" in settings["type"]:
new_line["is_fuse"] = True
new_line["wires"] = []
total_closed = 0
# Get and map the closed phases
if "closedphase" in settings:
closedphase = mapp_closed_phase[settings["closedphase"]]
else:
closedphase = (
"ABC" # If no info, then everything is closed by default...
)
# Get the fuse equipment data
if "eqid" in settings and settings["eqid"] in self.fuses:
fuse_data = self.fuses[settings["eqid"]]
else:
fuse_data = {}
# Pass the nameclass to the wires
if "nameclass" in new_line:
fuse_data["nameclass"] = new_line["nameclass"]
try:
new_line["nominal_voltage"] = float(fuse_data["kvll"]) * 1000
except:
pass
# Create the wires
for p in phases + ["N"]:
if p in closedphase and closedphase.lower() != "none":
total_closed += 1
api_wire = self.configure_wire(
model,
fuse_data,
{},
p,
False,
True,
False,
False,
False,
False,
False,
)
elif p == "N" and total_closed >= 1:
api_wire = self.configure_wire(
model,
fuse_data,
{},
p,
False,
True,
False,
False,
False,
False,
False,
)
else:
api_wire = self.configure_wire(
model,
fuse_data,
{},
p,
False,
True,
True,
False,
False,
False,
False,
)
new_line["wires"].append(api_wire)
api_line = Line(model)
for k, v in new_line.items():
setattr(api_line, k, v)
if not sectionID in self.section_duplicates:
self.section_duplicates[sectionID] = []
self.section_duplicates[sectionID].append(api_line)
continue
# recloser
elif "recloser" in settings["type"]:
new_line["is_recloser"] = True
new_line["wires"] = []
total_closed = 0
# Get and map the closed phases
if "closedphase" in settings:
closedphase = mapp_closed_phase[settings["closedphase"]]
else:
closedphase = (
"ABC" # If no info, then everything is closed by default...
)
# Get the recloser equipment data
if "eqid" in settings and settings["eqid"] in self.reclosers:
recloser_data = self.reclosers[settings["eqid"]]
else:
recloser_data = {}
# Pass the nameclass to the wires
if "nameclass" in new_line:
recloser_data["nameclass"] = new_line["nameclass"]
try:
new_line["nominal_voltage"] = (
float(recloser_data["kvll"]) * 1000
)
except:
pass
# Create the wires
for p in phases + ["N"]:
if p in closedphase and closedphase.lower() != "none":
total_closed += 1
api_wire = self.configure_wire(
model,
recloser_data,
{},
p,
False,
False,
False,
False,
False,
True,
False,
)
elif p == "N" and total_closed >= 1:
api_wire = self.configure_wire(
model,
recloser_data,
{},
p,
False,
False,
False,
False,
False,
True,
False,
)
else:
api_wire = self.configure_wire(
model,
recloser_data,
{},
p,
False,
False,
True,
False,
False,
True,
False,
)
new_line["wires"].append(api_wire)
api_line = Line(model)
for k, v in new_line.items():
setattr(api_line, k, v)
if not sectionID in self.section_duplicates:
self.section_duplicates[sectionID] = []
self.section_duplicates[sectionID].append(api_line)
continue
# breaker
elif "breaker" in settings["type"]:
new_line["is_breaker"] = True
new_line["wires"] = []
total_closed = 0
# Get and map the closed phases
if "closedphase" in settings:
closedphase = mapp_closed_phase[settings["closedphase"]]
else:
closedphase = (
"ABC" # If no info, then everything is closed by default...
)
# Get the breaker equipment data
if "eqid" in settings and settings["eqid"] in self.breakers:
breaker_data = self.breakers[settings["eqid"]]
else:
breaker_data = {}
# Pass the nameclass to the wires
if "nameclass" in new_line:
breaker_data["nameclass"] = new_line["nameclass"]
try:
new_line["nominal_voltage"] = float(breaker_data["kvll"]) * 1000
except:
pass
# Create the wires
for p in phases + ["N"]:
if p in closedphase and closedphase.lower() != "none":
total_closed += 1
api_wire = self.configure_wire(
model,
breaker_data,
{},
p,
False,
False,
False,
False,
True,
False,
False,
)
elif p == "N" and total_closed >= 1:
api_wire = self.configure_wire(
model,
breaker_data,
{},
p,
False,
False,
False,
False,
True,
False,
False,
)
else:
api_wire = self.configure_wire(
model,
breaker_data,
{},
p,
False,
False,
True,
False,
True,
False,
False,
)
new_line["wires"].append(api_wire)
api_line = Line(model)
for k, v in new_line.items():
setattr(api_line, k, v)
if not sectionID in self.section_duplicates:
self.section_duplicates[sectionID] = []
self.section_duplicates[sectionID].append(api_line)
continue
# Network Protectors
elif "network_protector" in settings["type"]:
new_line["is_network_protector"] = True
new_line["wires"] = []
total_closed = 0
# Get and map the closed phases
if "closedphase" in settings:
closedphase = mapp_closed_phase[settings["closedphase"]]
else:
closedphase = (
"ABC" # If no info, then everything is closed by default...
)
# Get the network protector equipment data
if (
"eqid" in settings
and settings["eqid"] in self.network_protectors
):
network_protector_data = self.network_protectors[
settings["eqid"]
]
else:
network_protector_data = {}
# Pass the nameclass to the wires
if "nameclass" in new_line:
network_protector_data["nameclass"] = new_line["nameclass"]
try:
new_line["nominal_voltage"] = (
float(network_protector_data["kvll"]) * 1000
)
except:
pass
# Create the wires
for p in phases + ["N"]:
if p in closedphase and closedphase.lower() != "none":
total_closed += 1
api_wire = self.configure_wire(
model,
network_protector_data,
{},
p,
False,
False,
False,
True,
False,
False,
False,
)
elif p == "N" and total_closed >= 1:
api_wire = self.configure_wire(
model,
network_protector_data,
{},
p,
False,
False,
False,
True,
False,
False,
False,
)
else:
api_wire = self.configure_wire(
model,
network_protector_data,
{},
p,
False,
False,
True,
True,
False,
False,
False,
)
new_line["wires"].append(api_wire)
# Create the line object
api_line = Line(model)
for k, v in new_line.items():
setattr(api_line, k, v)
if not sectionID in self.section_duplicates:
self.section_duplicates[sectionID] = []
self.section_duplicates[sectionID].append(api_line)
continue
line_data = None
# If we have a linecableid for the current section
if "linecableid" in settings:
# And if we have line data with the matching ID
if settings["linecableid"] in self.balanced_lines:
# Cache the line data
line_data = self.balanced_lines[settings["linecableid"]]
if settings["linecableid"] in self.unbalanced_lines:
# Cache the line data
line_data = self.unbalanced_lines[settings["linecableid"]]
if settings["linecableid"] in self.concentric_neutral_cable:
# Cache the line data
line_data = self.concentric_neutral_cable[settings["linecableid"]]
line_data["type"] = "balanced_line"
if settings["linecableid"] in self.cables:
logger.debug("cables {}".format(sectionID))
line_data = self.cables[settings["linecableid"]]
line_data["type"] = "balanced_line"
# We might have a device number instead if we are dealing with BY PHASE settings
#
# TODO: Decide if I should remove this or not...
#
elif "devicenumber" in settings:
# if self.balanced_lines.has_key(settings['devicenumber']):
# #Cache the line data
# line_data=self.balanced_lines[settings['devicenumber']]
# elif self.unbalanced_lines.has_key(settings['devicenumber']):
# #Cache the line data
# line_data=self.unbalanced_lines[settings['devicenumber']]
if settings["devicenumber"] in self.concentric_neutral_cable:
line_data = self.concentric_neutral_cable[settings["devicenumber"]]
line_data["type"] = "balanced_line"
elif (
"condid_a" in settings
and "condid_b" in settings
and "condid_c" in settings
and "spacingid" in settings
):
if "condid_n" in settings or "condid_n1" in settings:
line_data = {"type": "unbalanced_spacing_conf"}
if line_data is None:
if not "phase" in settings.keys():
logger.warning("WARNING:: Skipping Line {} !".format(sectionID))
continue
else:
impedance_matrix = None
# We now face two different case:
#
# Case 1: The line is balanced
#
if line_data["type"] == "balanced_line":
# In this case, we build the impedance matrix from Z+ and Z0 in the following way:
# __________________________
# | Z0+2*Z+ Z0-Z+ Z0-Z+ |
# Z= 1/3 | Z0-Z+ Z0+2*Z+ Z0-Z+ |
# | Z0-Z+ Z0-Z+ Z0+2*Z+ |
# --------------------------
try:
coeff = 10 ** -3
# One phase line
if len(phases) == 1:
impedance_matrix = [
[
1.0
/ 3.0
* coeff
* complex(
float(line_data["r0"]), float(line_data["x0"])
)
]
]
# Two phase line
elif len(phases) == 2:
a = (
1.0
/ 3.0
* coeff
* complex(
2 * float(line_data["r1"]) + float(line_data["r0"]),
2 * float(line_data["x1"]) + float(line_data["x0"]),
)
)
b = (
1.0
/ 3.0
* coeff
* complex(
float(line_data["r0"]) - float(line_data["r1"]),
float(line_data["x0"]) - float(line_data["x1"]),
)
)
impedance_matrix = [[a, b], [b, a]]
# Three phase line
else:
a = (
1.0
/ 3.0
* coeff
* complex(
2 * float(line_data["r1"]) + float(line_data["r0"]),
2 * float(line_data["x1"]) + float(line_data["x0"]),
)
)
b = (
1.0
/ 3.0
* coeff
* complex(
float(line_data["r0"]) - float(line_data["r1"]),
float(line_data["x0"]) - float(line_data["x1"]),
)
)
impedance_matrix = [[a, b, b], [b, a, b], [b, b, a]]
except:
pass
# In the balanced case, we should have two conductor IDs: One for the phases and one for the neutral
# Handle the Phase conductors first:
if (
"phasecondid" in line_data
and line_data["phasecondid"] in self.conductors
):
conductor_data = self.conductors[line_data["phasecondid"]]
else:
conductor_data = {}
# In addition, we might have some information on the spacings
if (
"spacingid" in line_data
and line_data["spacingid"] in self.spacings
):
spacing_data = self.spacings[line_data["spacingid"]]
else:
spacing_data = {}
if conductor_data == {} and "linecableid" in line_data:
conductor_data = self.conductors[line_data["linecableid"]]
# Loop over the phases and create the wires
new_line["wires"] = []
for phase in phases:
api_wire = self.configure_wire(
model,
conductor_data,
spacing_data,
phase,
False,
False,
False,
False,
False,
False,
False,
)
new_line["wires"].append(api_wire)
# Handle the neutral conductor
if (
"neutralcondid" in line_data
and line_data["neutralcondid"] in self.conductors
):
conductor_data = self.conductors[line_data["neutralcondid"]]
else:
conductor_data = {}
# In addition, we might have some information on the spacings
if (
"spacingid" in line_data
and line_data["spacingid"] in self.spacings
):
spacing_data = self.spacings[line_data["spacingid"]]
else:
spacing_data = {}
api_wire = self.configure_wire(
model,
conductor_data,
spacing_data,
"N",
False,
False,
False,
False,
False,
False,
False,
)
new_line["wires"].append(api_wire)
# Case 2: The line is unbalanced
#
elif line_data["type"] == "unbalanced_line":
coeff = 10 ** -3
# In this case, we should have all the information for the impedance matrix (mutual terms)
#
try:
# One phase line
if len(phases) == 1:
p = phases[0].lower()
impedance_matrix = [
[
coeff
* complex(
float(line_data["r{}".format(p)]),
coeff * float(line_data["x{}".format(p)]),
)
]
]
# Two phase line
elif len(phases) == 2:
p1 = phases[0].lower()
p2 = phases[1].lower()
p1, p2 = sorted([p1, p2])
if p1 == "a" and p2 == "c":
impedance_matrix = [
[
coeff
* complex(
float(line_data["ra"]),
float(line_data["xa"]),
),
coeff
* complex(
float(line_data["mutualresistanceca"]),
float(line_data["mutualreactanceca"]),
),
],
[
coeff
* complex(
float(line_data["mutualresistanceca"]),
float(line_data["mutualreactanceca"]),
),
coeff
* complex(
float(line_data["rc"]),
float(line_data["xc"]),
),
],
]
else:
impedance_matrix = [
[
coeff
* complex(
float(line_data["r{}".format(p1)]),
float(line_data["x{}".format(p1)]),
),
coeff
* complex(
float(
line_data[
"mutualresistance{p1}{p2}".format(
p1=p1, p2=p2
)
]
),
float(
line_data[
"mutualreactance{p1}{p2}".format(
p1=p1, p2=p2
)
]
),
),
],
[
coeff
* complex(
float(
line_data[
"mutualresistance{p1}{p2}".format(
p1=p1, p2=p2
)
]
),
float(
line_data[
"mutualreactance{p1}{p2}".format(
p1=p1, p2=p2
)
]
),
),
coeff
* complex(
float(line_data["r{}".format(p2)]),
float(line_data["x{}".format(p2)]),
),
],
]
# Three phase line
else:
impedance_matrix = [
[
coeff
* complex(
float(line_data["ra"]), float(line_data["xa"])
),
coeff
* complex(
float(line_data["mutualresistanceab"]),
float(line_data["mutualreactanceab"]),
),
coeff
* complex(
float(line_data["mutualresistanceca"]),
float(line_data["mutualreactanceca"]),
),
],
[
coeff
* complex(
float(line_data["mutualresistanceab"]),
float(line_data["mutualreactanceab"]),
),
coeff
* complex(
float(line_data["rb"]), float(line_data["xb"])
),
coeff
* complex(
float(line_data["mutualresistancebc"]),
float(line_data["mutualreactancebc"]),
),
],
[
coeff
* complex(
float(line_data["mutualresistanceca"]),
float(line_data["mutualreactanceca"]),
),
coeff
* complex(
float(line_data["mutualresistancebc"]),
float(line_data["mutualreactancebc"]),
),
coeff
* complex(
float(line_data["rc"]), float(line_data["xc"])
),
],
]
except:
pass
# In the unbalanced case, we should have conductor IDs for the phases and neutral
# Handle the Phase conductors first:
# Loop over the phases and create the wires
new_line["wires"] = []
for phase in phases:
if (
"condid_{}".format(phase.lower()) in line_data
and line_data["condid_{}".format(phase.lower())].lower()
!= "none"
and line_data["condid_{}".format(phase.lower())]
in self.conductors
):
conductor_data = self.conductors[
line_data["condid_{}".format(phase.lower())]
]
else:
conductor_data = {}
# In addition, we might have some information on the spacings
if (
"spacingid" in line_data
and line_data["spacingid"].lower() != "none"
and line_data["spacingid"] in self.spacings
):
spacing_data = self.spacings[line_data["spacingid"]]
else:
spacing_data = {}
api_wire = self.configure_wire(
model,
conductor_data,
spacing_data,
phase,
False,
False,
False,
False,
False,
False,
False,
)
new_line["wires"].append(api_wire)
# Handle the neutral conductors
# We might have one or two neutral conductors
# If we have valid condid_n1 and condid_n2 ==> create 2 wires
# If we have only condid_n1 or condid_n alone ==> create 1 wire only
#
# In addition, we might have some information on the spacings
if (
"spacingid" in line_data
and line_data["spacingid"].lower() != "none"
and line_data["spacingid"] in self.spacings
):
spacing_data = self.spacings[line_data["spacingid"]]
else:
spacing_data = {}
if (
"condid_n1" in line_data
and line_data["condid_n1"].lower() != "none"
and line_data["condid_n1"] in self.conductors
and "condid_n2" in line_data
and line_data["condid_n2"].lower() != "none"
and line_data["condid_n2"] in self.conductors
):
conductor_n1_data = self.conductors[line_data["condid_n1"]]
conductor_n2_data = self.conductors[line_data["condid_n2"]]
api_wire_n1 = self.configure_wire(
model,
conductor_n1_data,
spacing_data,
"N1",
False,
False,
False,
False,
False,
False,
False,
)
api_wire_n2 = self.configure_wire(
model,
conductor_n2_data,
spacing_data,
"N2",
False,
False,
False,
False,
False,
False,
False,
)
new_line["wires"].append(api_wire_n1)
new_line["wires"].append(api_wire_n2)
elif (
"condid_n" in line_data
and line_data["condid_n"].lower() != "none"
and line_data["condid_n"] in self.conductors
):
conductor_data = self.conductors[line_data["condid_n"]]
api_wire = self.configure_wire(
model,
conductor_data,
spacing_data,
"N",
False,
False,
False,
False,
False,
False,
False,
)
new_line["wires"].append(api_wire)
else:
if (
"condid_n1" in line_data
and line_data["condid_n1"].lower() != "none"
and line_data["condid_n1"] in self.conductors
):
conductor_data = self.conductors[line_data["condid_n1"]]
api_wire = self.configure_wire(
model,
conductor_data,
spacing_data,
"N",
False,
False,
False,
False,
False,
False,
False,
)
new_line["wires"].append(api_wire)
elif line_data["type"] == "unbalanced_spacing_conf":
# IMPEDANCE MATRIX FROM SPACINGS
#
# First, we have to get the wires' positions:
if settings["spacingid"] in self.spacings:
# Get the spacing data
spacing_data = self.spacings[settings["spacingid"]]
pos = []
for i, p in enumerate(phases):
pos.append([None, None])
for j, k in enumerate(["x", "y"]):
if (
"posofcond{i}_{k}".format(i=i + 1, k=k)
in spacing_data
):
try:
pos[-1][j] = float(
spacing_data[
"posofcond{i}_{k}".format(i=i + 1, k=k)
]
)
except:
pass
pos.append([None, None])
if (
"posofneutralcond_x" in spacing_data
and "posofneutralcond_y" in spacing_data
):
try:
pos[-1][0] = float(spacing_data["posofneutralcond_x"])
pos[-1][1] = float(spacing_data["posofneutralcond_y"])
except:
pass
pos.append([None, None])
if (
"posofneutralcond_n2_x" in spacing_data
and spacing_data["posofneutralcond_n2_x"] != ""
and "posofneutralcond_n2_y" in spacing_data
and spacing_data["posofneutralcond_n2_y"] != ""
):
try:
pos[-1][0] = float(
spacing_data["posofneutralcond_n2_x"]
)
pos[-1][1] = float(
spacing_data["posofneutralcond_n2_y"]
)
except:
pass
valid_cond = []
ph_list = ["a", "b", "c", "n1", "n2"]
for idd, po in enumerate(pos):
if po != [None, None]:
valid_cond.append(idd)
distance_matrix = []
for i, ii in enumerate(valid_cond):
distance_matrix.append([])
for j, jj in enumerate(valid_cond):
distance_matrix[-1].append(
3.28084 * self.distance(pos[ii], pos[jj])
) # 0.0328084
distance_matrix = np.array(distance_matrix)
gmr_list = []
resistance_list = []
perform_kron_reduction = False
# Get GMR and resistance of valid conductor
for idx, p in enumerate(phases):
if (
"condid_{}".format(p.lower()) in settings
and settings["condid_{}".format(p.lower())]
in self.conductors
):
gmr_list.append(
0.0328084
* float(
self.conductors[
settings["condid_{}".format(p.lower())]
]["gmr"]
)
)
resistance_list.append(
1.0
/ 0.621371
* float(
self.conductors[
settings["condid_{}".format(p.lower())]
]["r25"]
)
)
else:
logger.warning(
"Could not find conductor {name}. Using DEFAULT...".format(
name="condid_{}".format(p.lower())
)
)
gmr_list.append(
0.0328084 * float(self.conductors["DEFAULT"]["gmr"])
)
resistance_list.append(
1.0
/ 0.621371
* float(self.conductors["DEFAULT"]["r25"])
)
# gmr_list.append(None)
# resistance_list.append(None)
if "condid_n" in settings:
if settings["condid_n"] in self.conductors:
gmr_list.append(
0.0328084
* float(
self.conductors[settings["condid_n"]]["gmr"]
)
)
resistance_list.append(
1.0
/ 0.621371
* float(
self.conductors[settings["condid_n"]]["r25"]
)
)
else:
logger.warning(
"Could not find neutral conductor {name}. Using DEFAULT...".format(
name=settings["condid_n"]
)
)
gmr_list.append(
0.0328084 * float(self.conductors["DEFAULT"]["gmr"])
)
resistance_list.append(
1.0
/ 0.621371
* float(self.conductors["DEFAULT"]["r25"])
)
elif (
"condid_n1" in settings
and settings["condid_n1"] is not None
and settings["condid_n1"].lower() != "none"
):
if settings["condid_n1"] in self.conductors:
gmr_list.append(
0.0328084
* float(
self.conductors[settings["condid_n1"]]["gmr"]
)
)
resistance_list.append(
1.0
/ 0.621371
* float(
self.conductors[settings["condid_n1"]]["r25"]
)
)
else:
logger.warning(
"Could not find neutral conductor {name}. Using DEFAULT...".format(
name=settings["condid_n1"]
)
)
gmr_list.append(
0.0328084 * float(self.conductors["DEFAULT"]["gmr"])
)
resistance_list.append(
1.0
/ 0.621371
* float(self.conductors["DEFAULT"]["r25"])
)
else:
gmr_list.append(None)
resistance_list.append(None)
perform_kron_reduction = False
gmr_list = np.array(gmr_list)
resistance_list = np.array(resistance_list)
idx_to_remove = np.argwhere(gmr_list == None).flatten()
idx_to_keep = [
idx
for idx in range(len(distance_matrix))
if idx not in idx_to_remove
]
try:
distance_matrix = distance_matrix[idx_to_keep, :][
:, idx_to_keep
]
except IndexError:
# It can happen that a one phase line is defined with a spacing table where no position are defined.
# This is uncommon but raises an IndexError here.
# To avoid that, use a dummy distance matrix
distance_matrix = np.array([[1]])
pass
primitive_imp_matrix = self.get_primitive_impedance_matrix(
distance_matrix, gmr_list, resistance_list
)
if perform_kron_reduction:
phase_imp_matrix = (
1.0
/ 1609.34
* self.kron_reduction(primitive_imp_matrix)
)
else:
phase_imp_matrix = 1.0 / 1609.34 * primitive_imp_matrix
impedance_matrix = phase_imp_matrix.tolist()
new_line["wires"] = []
for phase in phases:
if (
"condid_{}".format(phase.lower()) in settings
and settings["condid_{}".format(phase.lower())]
in self.conductors
):
conductor_data = self.conductors[
settings["condid_{}".format(phase.lower())]
]
else:
conductor_data = {}
# In addition, we might have some information on the spacings
if (
"spacingid" in settings
and settings["spacingid"] in self.spacings
):
spacing_data = self.spacings[settings["spacingid"]]
else:
spacing_data = {}
api_wire = self.configure_wire(
model,
conductor_data,
spacing_data,
phase,
False,
False,
False,
False,
False,
False,
False,
)
new_line["wires"].append(api_wire)
# Handle the neutral conductors
if (
"condid_n" in settings
and settings["condid_n"] is not None
and settings["condid_n"] != ""
and settings["condid_n"] != "NONE"
and settings["condid_n"] in self.conductors
):
conductor_data = self.conductors[settings["condid_n"]]
elif (
"condid_n1" in settings
and settings["condid_n1"] is not None
and settings["condid_n1"] != ""
and settings["condid_n1"] != "NONE"
and settings["condid_n1"] in self.conductors
):
conductor_data = self.conductors[settings["condid_n1"]]
else:
conductor_data = {}
# In addition, we might have some information on the spacings
if (
"spacingid" in settings
and settings["spacingid"] in self.spacings
):
spacing_data = self.spacings[settings["spacingid"]]
else:
spacing_data = {}
if len(conductor_data) != 0:
api_wire = self.configure_wire(
model,
conductor_data,
spacing_data,
"N",
False,
False,
False,
False,
False,
False,
False,
)
new_line["wires"].append(api_wire)
try:
new_line["impedance_matrix"] = impedance_matrix
except:
pass
api_line = Line(model)
for k, v in new_line.items():
setattr(api_line, k, v)
# Append the line DiTTo object to the list of DiTTo lines
self._lines.append(api_line)
if not sectionID in self.section_duplicates:
self.section_duplicates[sectionID] = []
self.section_duplicates[sectionID].append(api_line)
return 1
def parse_capacitors(self, model):
    """Parse the capacitors from CYME to DiTTo.

    Reads capacitor *settings* (per-section placement data) from the CYME
    network file and capacitor *equipment* data (ratings) from the CYME
    equipment file, then builds one DiTTo ``Capacitor`` object per section,
    with one ``PhaseCapacitor`` per phase of the section.

    :param model: DiTTo model the Capacitor objects are attached to.
    :returns: 1 on completion.
    """
    # Instantiate the list in which we store the DiTTo capacitor objects
    self._capacitors = []
    # Column-index mappings for the network-file settings sections
    mapp_serie_capacitor_settings = {
        "sectionid": 0,
        "eqid": 2,
        "coordx": 7,
        "coordy": 8,
    }
    mapp_shunt_capacitor_settings = {
        "sectionid": 0,
        "shuntcapacitorid": 39,
        "connection": 6,
        "fixedkvara": 7,
        "fixedkvarb": 8,
        "fixedkvarc": 9,
        "switchedkvara": 13,
        "switchedkvarb": 14,
        "switchedkvarc": 15,
        "kv": 24,
        "controllingphase": 35,
    }
    # Column-index mappings for the equipment-file sections
    mapp_serie_capacitor = {"id": 0, "reactance": 6}
    mapp_shunt_capacitor = {"id": 0, "kvar": 1, "kv": 2, "type": 6}
    self.settings = {}
    self.capacitors = {}
    #####################################################
    #                                                   #
    #                   NETWORK FILE                    #
    #                                                   #
    #####################################################
    #
    # Open the network file
    self.get_file_content("network")
    # Loop over the network file
    for line in self.content:
        #########################################
        #                                       #
        #            SERIE CAPACITOR            #
        #                                       #
        #########################################
        #
        self.settings.update(
            self.parser_helper(
                line,
                ["serie_capacitor_settings"],
                ["sectionid", "eqid", "coordx", "coordy"],
                mapp_serie_capacitor_settings,
                {"type": "serie"},
            )
        )
        #########################################
        #                                       #
        #            SHUNT CAPACITOR            #
        #                                       #
        #########################################
        #
        self.settings.update(
            self.parser_helper(
                line,
                ["shunt_capacitor_settings"],
                [
                    "sectionid",
                    "shuntcapacitorid",
                    "connection",
                    "fixedkvara",
                    "fixedkvarb",
                    "fixedkvarc",
                    "switchedkvara",
                    "switchedkvarb",
                    "switchedkvarc",
                    "kv",
                    "controllingphase",
                ],
                mapp_shunt_capacitor_settings,
                {"type": "shunt"},
            )
        )
    #####################################################
    #                                                   #
    #                  EQUIPMENT FILE                   #
    #                                                   #
    #####################################################
    #
    # Open the equipment file
    self.get_file_content("equipment")
    # Loop over the equipment file
    for line in self.content:
        #########################################
        #                                       #
        #            SERIE CAPACITOR            #
        #                                       #
        #########################################
        #
        self.capacitors.update(
            self.parser_helper(
                line, ["serie_capacitor"], ["id", "reactance"], mapp_serie_capacitor
            )
        )
        #########################################
        #                                       #
        #            SHUNT CAPACITOR            #
        #                                       #
        #########################################
        #
        self.capacitors.update(
            self.parser_helper(
                line,
                ["shunt_capacitor"],
                ["id", "kvar", "kv", "type"],
                mapp_shunt_capacitor,
            )
        )
    # Build one DiTTo Capacitor per parsed settings section
    for sectionID, settings in self.settings.items():
        sectionID = sectionID.strip("*").lower()
        # Instantiate Capacitor DiTTo objects
        try:
            api_capacitor = Capacitor(model)
        except:
            # BUGFIX: previously referenced the undefined name `scap`,
            # which raised NameError instead of the intended ValueError.
            raise ValueError(
                "Unable to instanciate capacitor {id}".format(id=sectionID)
            )
        # Set the name
        try:
            api_capacitor.name = "Cap_" + sectionID
        except:
            pass
        # Set the connecting element (info is in the section)
        try:
            api_capacitor.connecting_element = self.section_phase_mapping[
                sectionID
            ]["fromnodeid"]
        except:
            pass
        # PT phase
        # (Only works with shunt capacitors)
        try:
            api_capacitor.pt_phase = self.phase_mapping(
                settings["controllingphase"]
            )
        except:
            pass
        api_capacitor.feeder_name = self.section_feeder_mapping[sectionID]
        # Connection_type
        # (Only works with shunt capacitors)
        try:
            api_capacitor.connection_type = self.capacitors_connection_mapping(
                settings["connection"]
            )
        except:
            pass
        # Position
        try:
            position = Position(model)
            position.long = float(settings["coordx"])
            position.lat = float(settings["coordy"])
            position.elevation = 0
            api_capacitor.position.append(position)
        except:
            pass
        # Get the device number (key into self.capacitors equipment data)
        if "eqid" in settings:
            dev_num = settings["eqid"]
        elif "shuntcapacitorid" in settings:
            dev_num = settings["shuntcapacitorid"]
        else:
            dev_num = None
        capacitor_data = None
        if dev_num is not None:
            if dev_num in self.capacitors:
                capacitor_data = self.capacitors[dev_num]
                # Reactance
                try:
                    api_capacitor.reactance = float(capacitor_data["reactance"])
                except:
                    pass
                # KV
                try:
                    api_capacitor.nominal_voltage = (
                        float(capacitor_data["kv"]) * 10 ** 3
                    )  # DiTTo in volt
                except:
                    pass
        # Map the phases to DiTTo phase format
        phases = self.section_phase_mapping[sectionID]["phase"]
        # Rated KV
        #
        # Note: Rated KV is line-to-neutral for Wye-grounded configuration,
        # and line-to-line for delta configuration
        #
        # If the capacitor is one phase, we have a line-to-neutral,
        # and line-to-line if it is 3 phase
        #
        if "kv" in settings:
            try:
                if api_capacitor.connection_type == "Y" or len(phases) == 1:
                    api_capacitor.nominal_voltage = (
                        float(settings["kv"]) * 10 ** 3
                    )  # DiTTo in var
                if api_capacitor.connection_type == "D" or len(phases) == 3:
                    api_capacitor.nominal_voltage = (
                        float(settings["kv"]) * 10 ** 3 * math.sqrt(3)
                    )  # DiTTo in var
            except:
                pass
        if (
            api_capacitor.pt_phase is not None
            and api_capacitor.pt_phase not in phases
        ):
            # BUGFIX: previously formatted with the undefined name `scap`
            # (would have raised NameError); use the section id in scope.
            raise ValueError(
                "Capacitor {name} is monitoring phase {p} which is not in the section {id} phase list {lis}.".format(
                    name=api_capacitor.name,
                    p=api_capacitor.pt_phase,
                    id=sectionID,
                    lis=phases,
                )
            )
        # For each phase...
        for p in phases:
            # Instantiate a PhaseCapacitor DiTTo object
            try:
                api_phaseCapacitor = PhaseCapacitor(model)
            except:
                raise ValueError(
                    "Unable to instanciate PhaseCapacitor DiTTo object."
                )
            # Set the phase
            try:
                api_phaseCapacitor.phase = p
            except:
                pass
            # Set var value: prefer fixed kvar, then switched kvar, and
            # finally the equipment-file rating as a fallback.
            if (
                "fixedkvara" in settings
                and "fixedkvarb" in settings
                and "fixedkvarc" in settings
                and max(
                    float(settings["fixedkvara"]),
                    max(
                        float(settings["fixedkvarb"]), float(settings["fixedkvarc"])
                    ),
                )
                > 0
            ):
                try:
                    if p == "A":
                        api_phaseCapacitor.var = (
                            float(settings["fixedkvara"]) * 10 ** 3
                        )  # Ditto in var
                    if p == "B":
                        api_phaseCapacitor.var = (
                            float(settings["fixedkvarb"]) * 10 ** 3
                        )  # Ditto in var
                    if p == "C":
                        api_phaseCapacitor.var = (
                            float(settings["fixedkvarc"]) * 10 ** 3
                        )  # Ditto in var
                except:
                    pass
            elif (
                "switchedkvara" in settings
                and "switchedkvarb" in settings
                and "switchedkvarc" in settings
                and max(
                    float(settings["switchedkvara"]),
                    max(
                        float(settings["switchedkvarb"]),
                        float(settings["switchedkvarc"]),
                    ),
                )
                > 0
            ):
                try:
                    if p == "A":
                        api_phaseCapacitor.var = (
                            float(settings["switchedkvara"]) * 10 ** 3
                        )  # Ditto in var
                    if p == "B":
                        api_phaseCapacitor.var = (
                            float(settings["switchedkvarb"]) * 10 ** 3
                        )  # Ditto in var
                    if p == "C":
                        api_phaseCapacitor.var = (
                            float(settings["switchedkvarc"]) * 10 ** 3
                        )  # Ditto in var
                except:
                    pass
            elif capacitor_data is not None:
                try:
                    api_phaseCapacitor.var = (
                        float(capacitor_data["kvar"]) * 10 ** 3
                    )  # DiTTo in var
                except:
                    pass
            # Append the phase capacitor object to the capacitor
            api_capacitor.phase_capacitors.append(api_phaseCapacitor)
        self._capacitors.append(api_capacitor)
        # Track duplicate objects attached to the same section
        if not sectionID in self.section_duplicates:
            self.section_duplicates[sectionID] = []
        self.section_duplicates[sectionID].append(api_capacitor)
    return 1
def parse_transformers(self, model):
"""Parse the transformers from CYME to DiTTo. Since substation transformer can have LTCs attached, when parsing a transformer, we may also create a regulator. LTCs are represented as regulators."""
# Instanciate the list in which we store the DiTTo transformer objects
self._transformers = []
mapp_auto_transformer_settings = {
"sectionid": 0,
"eqid": 2,
"coordx": 7,
"coordy": 8,
"connection_configuration": 9,
"tap": 25,
}
mapp_auto_transformer = {
"id": 0,
"kva": 3,
"connection_configuration": 18,
"noloadlosses": 32,
"isltc": 21,
"taps": 22,
"lowerbandwidth": 23,
"upperbandwidth": 24,
}
mapp_grounding_transformer_settings = {
"sectionid": 0,
"equipmentid": 6,
"connectionconfiguration": 10,
"phase": 13,
}
mapp_grounding_transformer = {
"id": 0,
"connectionconfiguration": 7,
"ratedvoltage": 5,
"ratedcapacity": 6,
}
mapp_three_winding_auto_transformer_settings = {
"sectionid": 0,
"eqid": 2,
"coordx": 7,
"coordy": 8,
"primaryfixedtapsetting": 10,
"secondaryfixedtapsetting": 11,
"tertiaryfixedtapsetting": 12,
"primarybasevoltage": 13,
"secondarybasevoltage": 14,
"tertiarybasevoltage": 15,
}
mapp_three_winding_auto_transformer = {
"id": 0,
"primaryratedcapacity": 1,
"primaryvoltage": 6,
"secondaryratedcapacity": 22,
"secondaryvoltage": 27,
"tertiaryratedcapacity": 30,
"tertiaryvoltage": 35,
"noloadlosses": 50,
}
mapp_three_winding_transformer_settings = {
"sectionid": 0,
"eqid": 2,
"coordx": 7,
"coordy": 8,
"primaryfixedtapsetting": 10,
"secondaryfixedtapsetting": 11,
"tertiaryfixedtapsetting": 12,
"primarybasevoltage": 13,
"secondarybasevoltage": 14,
"tertiarybasevoltage": 15,
}
mapp_three_winding_transformer = {
"id": 0,
"primaryratedcapacity": 1,
"primaryvoltage": 6,
"secondaryratedcapacity": 24,
"secondaryvoltage": 29,
"tertiaryratedcapacity": 33,
"tertiaryvoltage": 38,
"noloadlosses": 53,
}
mapp_transformer_settings = {
"sectionid": 0,
"eqid": 2,
"coordx": 7,
"coordy": 8,
"conn": 9,
"primtap": 10,
"secondarytap": 11,
"primarybasevoltage": 17,
"secondarybasevoltage": 18,
"setpoint": 21,
"maxbuck": 29,
"maxboost": 30,
"ct": 31,
"pt": 32,
"phaseon": 37,
}
mapp_transformer = {
"id": 0,
"type": 1,
"kva": 3,
"kvllprim": 5,
"kvllsec": 6,
"z1": 7,
"z0": 8,
"xr": 12,
"xr0": 13,
"conn": 18,
"noloadlosses": 34,
"isltc": 23,
"taps": 24,
"lowerbandwidth": 25,
"upperbandwidth": 26,
"phaseshift": 41,
}
mapp_phase_shifter_transformer_settings = {
"sectionid": 0,
"eqid": 2,
"coordx": 10,
"coordy": 11,
}
self.auto_transformers = {}
self.grounding_transformers = {}
self.three_winding_auto_transformers = {}
self.three_winding_transformers = {}
self.settings = {}
self.transformers = {}
#####################################################
# #
# NETWORK FILE #
# #
#####################################################
#
# Open the network file
self.get_file_content("network")
# Loop over the network file
for line in self.content:
#########################################
# #
# AUTO TRANSFORMER #
# #
#########################################
#
self.settings.update(
self.parser_helper(
line,
["auto_transformer_settings"],
[
"sectionid",
"eqid",
"coordx",
"coordy",
"connection_configuration",
"tap",
],
mapp_auto_transformer_settings,
{"type": "auto_transformer"},
)
)
#########################################
# #
# GROUNDING TRANSFORMER #
# #
#########################################
#
self.settings.update(
self.parser_helper(
line,
["grounding_transformer_settings"],
["sectionid", "equipmentid", "connectionconfiguration", "phase"],
mapp_grounding_transformer_settings,
{"type": "grounding_transformer"},
)
)
#########################################
# #
# THREE WINDING AUTO TRANSFORMER #
# #
#########################################
#
self.settings.update(
self.parser_helper(
line,
["three_winding_auto_transformer_settings"],
[
"sectionid",
"eqid",
"coordx",
"coordy",
"primaryfixedtapsetting",
"secondaryfixedtapsetting",
"tertiaryfixedtapsetting",
"primarybasevoltage",
"secondarybasevoltage",
"tertiarybasevoltage",
],
mapp_three_winding_auto_transformer_settings,
{"type": "three_winding_auto_transformer"},
)
)
#########################################
# #
# THREE WINDING TRANSFORMER #
# #
#########################################
#
self.settings.update(
self.parser_helper(
line,
["three_winding_transformer_settings"],
[
"sectionid",
"eqid",
"coordx",
"coordy",
"primaryfixedtapsetting",
"secondaryfixedtapsetting",
"tertiaryfixedtapsetting",
"primarybasevoltage",
"secondarybasevoltage",
"tertiarybasevoltage",
],
mapp_three_winding_transformer_settings,
{"type": "three_winding_transformer"},
)
)
#########################################
# #
# TRANSFORMER #
# #
#########################################
#
self.settings.update(
self.parser_helper(
line,
["transformer_settings"],
[
"sectionid",
"eqid",
"coordx",
"coordy",
"primaryfixedtapsetting",
"secondaryfixedtapsetting",
"tertiaryfixedtapsetting",
"primarybasevoltage",
"secondarybasevoltage",
"tertiarybasevoltage",
"setpoint",
"maxbuck",
"maxboost",
"ct",
"pt",
],
mapp_transformer_settings,
{"type": "transformer"},
)
)
#########################################
# #
# PHASE SHIFTER TRANSFORMER #
# #
#########################################
#
self.settings.update(
self.parser_helper(
line,
["phase_shifter_transformer_settings"],
["sectionid", "eqid", "coordx", "coordy"],
mapp_phase_shifter_transformer_settings,
{"type": "phase_shifter_transformer"},
)
)
#####################################################
# #
# EQUIPMENT FILE #
# #
#####################################################
#
# Open the equipment file
self.get_file_content("equipment")
# Loop over the equipment file
for line in self.content:
#########################################
# #
# AUTO TRANSFORMER #
# #
#########################################
#
self.auto_transformers.update(
self.parser_helper(
line,
["auto_transformer"],
[
"id",
"kva",
"connection_configuration",
"noloadlosses",
"isltc",
"taps",
"lowerbandwidth",
"upperbandwidth",
],
mapp_auto_transformer,
)
)
#########################################
# #
# GROUNDING TRANSFORMER #
# #
#########################################
#
self.grounding_transformers.update(
self.parser_helper(
line,
["grounding_transformer"],
["id", "ratedcapacity", "ratedvoltage", "connection_configuration"],
mapp_grounding_transformer,
)
)
#########################################
# #
# THREE WINDING AUTO TRANSFORMER #
# #
#########################################
#
# LTC controls not yet supported for three-winding transformers
self.three_winding_auto_transformers.update(
self.parser_helper(
line,
["three_winding_auto_transformer"],
[
"id",
"primaryratedcapacity",
"primaryvoltage",
"secondaryratedcapacity",
"secondaryvoltage",
"tertiaryratedcapacity",
"tertiaryvoltage",
"noloadlosses",
],
mapp_three_winding_auto_transformer,
)
)
#########################################
# #
# THREE WINDING TRANSFORMER #
# #
#########################################
#
# LTC controls not yet supported for three-winding transformers
self.three_winding_transformers.update(
self.parser_helper(
line,
["three_winding_transformer"],
[
"id",
"primaryratedcapacity",
"primaryvoltage",
"secondaryratedcapacity",
"secondaryvoltage",
"tertiaryratedcapacity",
"tertiaryvoltage",
"noloadlosses",
],
mapp_three_winding_transformer,
)
)
#########################################
# #
# TRANSFORMER #
# #
#########################################
#
self.transformers.update(
self.parser_helper(
line,
["transformer"],
[
"id",
"type",
"kva",
"kvllprim",
"kvllsec",
"z1",
"z0",
"xr",
"xr0",
"conn",
"noloadlosses",
"phaseshift",
"isltc",
"taps",
"lowerbandwidth",
"upperbandwidth",
],
mapp_transformer,
)
)
for sectionID, settings in self.settings.items():
sectionID = sectionID.strip("*").lower()
# Instanciate a PowerTransformer DiTTo object
try:
api_transformer = PowerTransformer(model)
except:
raise ValueError("Unable to instanciate PowerTransformer DiTTo object.")
# Set the name
try:
api_transformer.name = "Trans_" + settings["sectionid"]
except:
pass
api_transformer.feeder_name = self.section_feeder_mapping[sectionID]
try:
phases = self.section_phase_mapping[sectionID]["phase"]
except:
raise ValueError("Empty phases for transformer {}.".format(sectionID))
# Set from_element
try:
api_transformer.from_element = self.section_phase_mapping[sectionID][
"fromnodeid"
]
except:
pass
# Set to_element
try:
api_transformer.to_element = self.section_phase_mapping[sectionID][
"tonodeid"
]
except:
pass
# Set the connection index for the from_element (info is in the section)
try:
api_transformer["from_element_connection_index"] = int(
self.section_phase_mapping[sectionID]["fromnodeindex"]
)
except:
pass
# Set the connection index for the from_element (info is in the section)
try:
api_transformer["to_element_connection_index"] = int(
self.section_phase_mapping[sectionID]["tonodeindex"]
)
except:
pass
# Set the position
try:
position = Position(model)
position.long = float(settings["coordx"])
position.lat = float(settings["coordy"])
position.elevation = 0
api_transformer.positions.append(position)
except:
pass
# Handle the three winding transformers
if settings["type"] in [
"three_winding_transformer",
"three_winding_auto_transformer",
]:
# Here we know that we have three windings...
for w in range(3):
# Instanciate a DiTTo Winding object
try:
api_winding = Winding(model)
except:
raise ValueError("Unable to instanciate Winding DiTTo object.")
# Set the base voltage
# We assume that 1st winding is primary, 2nd secondary, and third tertiary
try:
if w == 0:
api_winding.nominal_voltage = (
float(settings["primarybasevoltage"]) * 10 ** 3
) # DiTTo in volt
if w == 1:
api_winding.nominal_voltage = (
float(settings["secondarybasevoltage"]) * 10 ** 3
) # DiTTo in volt
if w == 2:
api_winding.nominal_voltage = (
float(settings["tertiarybasevoltage"]) * 10 ** 3
) # DiTTo in volt
except:
pass
# Set the rated power
try:
if w == 0:
api_winding.rated_power = (
float(settings["primaryratedcapacity"]) * 10 ** 3
) # DiTTo in volt ampere
if w == 1:
api_winding.rated_power = (
float(settings["secondaryratedcapacity"]) * 10 ** 3
) # DiTTo in volt ampere
if w == 2:
api_winding.rated_power = (
float(settings["tertiaryratedcapacity"]) * 10 ** 3
) # DiTTo in volt ampere
except:
pass
# Create the phase windings
for p in phases:
# Instanciate a PhaseWinding DiTTo object
try:
api_phase_winding = PhaseWinding(model)
except:
raise ValueError(
"Unable to instanciate PhaseWinding DiTTo object."
)
# Set the phase
try:
api_phase_winding.phase = p
except:
pass
# Set the tap position
try:
if w == 0:
api_phase_winding.tap_position = int(
settings["primaryfixedtapsetting"]
)
if w == 1:
api_phase_winding.tap_position = int(
settings["secondaryfixedtapsetting"]
)
if w == 2:
api_phase_winding.tap_position = int(
settings["tertiaryfixedtapsetting"]
)
except:
pass
# Add the phase winding object to the winding
api_winding.phase_windings.append(api_phase_winding)
# Add the winding object to the transformer
api_transformer.windings.append(api_winding)
# Handle two windings transformers
if settings["type"] == "transformer":
if settings["eqid"] in self.transformers:
transformer_data = self.transformers[settings["eqid"]]
else:
transformer_data = self.transformers["DEFAULT"]
# Resistance
#
# Note: Imported from Julietta's code
#
Z1 = float(transformer_data["z1"])
Z0 = float(transformer_data["z0"])
XR = float(transformer_data["xr"])
XR0 = float(transformer_data["xr0"])
if XR == 0:
R1 = 0
X1 = 0
else:
R1 = Z1 / math.sqrt(1 + XR * XR)
X1 = Z1 / math.sqrt(1 + 1 / (XR * XR))
if XR0 == 0:
R0 = 0
X0 = 0
else:
R0 = Z0 / math.sqrt(1 + XR0 * XR0)
X0 = Z0 / math.sqrt(1 + 1 / (XR0 * XR0))
complex0 = complex(R0, X0)
complex1 = complex(R1, X1)
matrix = np.array(
[[complex0, 0, 0], [0, complex1, 0], [0, 0, complex1]]
)
a = 1 * cmath.exp(2 * math.pi * 1j / 3)
T = np.array([[1.0, 1.0, 1.0], [1.0, a * a, a], [1.0, a, a * a]])
T_inv = np.linalg.inv(T)
Zabc = T * matrix * T_inv
Z_perc = Zabc.item((0, 0))
R_perc = Z_perc.real / 2.0
xhl = Z_perc.imag
# Check if it's an LTC
#
if "isltc" in transformer_data and transformer_data["isltc"]:
# Instanciate a Regulator DiTTo object
try:
api_regulator = Regulator(model)
except:
raise ValueError(
"Unable to instanciate Regulator DiTTo object."
)
try:
api_regulator.name = "Reg_" + settings["sectionid"]
except:
pass
api_regulator.feeder_name = self.section_feeder_mapping[sectionID]
try:
api_regulator.connected_transformer = api_transformer.name
except:
raise ValueError("Unable to connect LTC to transformer")
taps = float(transformer_data["taps"])
lowerbandwidth = float(transformer_data["lowerbandwidth"])
upperbandwidth = float(transformer_data["upperbandwidth"])
minreg_range = int(float(settings["maxbuck"]))
maxreg_range = int(float(settings["maxboost"]))
setpoint = float(settings["setpoint"])
ct = int(float(settings["ct"]))
pt = int(float(settings["pt"]))
center_bandwidth = upperbandwidth - lowerbandwidth
api_regulator.ltc = 1
api_regulator.highstep = minreg_range
api_regulator.lowstep = maxreg_range
api_regulator.pt_ratio = pt
api_regulator.ct_ratio = ct
api_regulator.setpoint = setpoint
api_regulator.center_bandwidth = center_bandwidth
api_regulator.bandwidth = (
upperbandwidth + lowerbandwidth
) # ie. use the average bandwidth. The upper and lower are typically the same
# TODO: Add unit checking. These units are in percentages. Need to be updated to be in Volts for consistency (BUG in cyme writer too)
# TODO: Decide whether or not to put parameters in for the regulator range, and what units they should be.
try:
api_transformer.reactances = [float(xhl)]
except:
pass
# Here we know that we have two windings...
for w in range(2):
# Instanciate a Winding DiTTo object
try:
api_winding = Winding(model)
except:
raise ValueError("Unable to instanciate Winding DiTTo object.")
# Set the rated power
try:
if w == 0:
api_winding.rated_power = (
float(transformer_data["kva"]) * 10 ** 3
) # DiTTo in volt ampere
if w == 1:
api_winding.rated_power = (
float(transformer_data["kva"]) * 10 ** 3
) # DiTTo in volt ampere
except:
pass
# Set the nominal voltage
try:
if w == 0:
api_winding.nominal_voltage = (
float(transformer_data["kvllprim"]) * 10 ** 3
) # DiTTo in volt
if w == 1:
api_winding.nominal_voltage = (
float(transformer_data["kvllsec"]) * 10 ** 3
) # DiTTo in volt
except:
pass
# Connection configuration
try:
api_winding.connection_type = self.transformer_connection_configuration_mapping(
transformer_data["conn"], w
)
except:
pass
# Resistance
try:
api_winding.resistance = R_perc
except:
pass
# For each phase...
for p in phases:
# Instanciate a PhaseWinding DiTTo object
try:
api_phase_winding = PhaseWinding(model)
except:
raise ValueError(
"Unable to instanciate PhaseWinding DiTTo object."
)
# Set the phase
try:
api_phase_winding.phase = p
except:
pass
# Add the phase winding object to the winding
api_winding.phase_windings.append(api_phase_winding)
# Add the winding object to the transformer
api_transformer.windings.append(api_winding)
# Handle Grounding transformers
if settings["type"] == "grounding_transformer":
if settings["equipmentid"] in self.grounding_transformers:
transformer_data = self.grounding_transformers[
settings["equipmentid"]
]
else:
transformer_data = {}
# Here we know that we have two windings...
for w in range(2):
# Instanciate a Winding DiTTo object
try:
api_winding = Winding(model)
except:
raise ValueError("Unable to instanciate Winding DiTTo object.")
# Set the rated power
try:
if w == 0:
api_winding.rated_power = (
float(transformer_data["ratedcapacity"]) * 10 ** 3
) # DiTTo in volt ampere
if w == 1:
api_winding.rated_power = (
float(transformer_data["ratedcapacity"]) * 10 ** 3
) # DiTTo in volt ampere
except:
pass
# Set the nominal voltage
try:
if w == 0:
api_winding.nominal_voltage = (
float(transformer_data["ratedvoltage"]) * 10 ** 3
) # DiTTo in volt
if w == 1:
api_winding.nominal_voltage = (
float(transformer_data["ratedvoltage"]) * 10 ** 3
) # DiTTo in volt
except:
pass
# Set the connection configuration
try:
api_winding.connection_type = self.connection_configuration_mapping(
transformer_data["conn"]
)
except:
pass
# For each phase...
for p in phases:
# Instanciate a PhaseWinding DiTTo object
try:
api_phase_winding = PhaseWinding(model)
except:
raise ValueError(
"Unable to instanciate PhaseWinding DiTTo object."
)
# Set the phase
try:
api_phase_winding.phase = p
except:
pass
# Add the phase winding object to the winding
api_winding.phase_windings.append(api_phase_winding)
# Add the winding object to the transformer
api_transformer.windings.append(api_winding)
# Add the transformer object to the list of transformers
self._transformers.append(api_transformer)
if not sectionID in self.section_duplicates:
self.section_duplicates[sectionID] = []
self.section_duplicates[sectionID].append(api_transformer)
return 1
def parse_regulators(self, model):
"""Parse the regulators from CYME to DiTTo.
.. note::
In CYME a regulator does not have to be associated with a transformer (as it is the case for OpenDSS for example).
In addition, a regulator can monitor multiple phases.
The parser should create the transformers and create separate regulator objects for different phases.
"""
# Instanciate the list in which we store the DiTTo regulator objects
self._regulators = []
mapp_regulators = {
"id": 0,
"type": 1,
"kva": 2,
"kva_1": 3,
"kva_2": 4,
"kva_3": 5,
"kva_4": 6,
"kvln": 7,
"forwardbandwidth": 11,
"bandwidth": 11, # For old CYME version 'forwardbandwidth' is just 'bandwidth'
"ct": 13,
"pt": 14,
}
mapp_regulator_settings = {
"sectionid": 0,
"eqid": 2,
"coordx": 7,
"coordy": 8,
"phaseon": 9,
"ct": 12,
"pt": 13,
"vseta": 16,
"vsetb": 17,
"vsetc": 18,
"bandwidtha": 25,
"bandwidthb": 26,
"bandwidthc": 27,
"tapa": 28,
"tapb": 29,
"tapc": 30,
"conn": 31,
}
self.settings = {}
self.regulators = {}
#####################################################
# #
# NETWORK FILE #
# #
#####################################################
#
# Open the network file
self.get_file_content("network")
# Loop over the network file
for line in self.content:
self.settings.update(
self.parser_helper(
line,
["regulator_settings"],
[
"sectionid",
"eqid",
"coordx",
"coordy",
"phaseon",
"ct",
"pt",
"vseta",
"vsetb",
"vsetc",
"bandwidtha",
"bandwidthb",
"bandwidthc",
"tapa",
"tapb",
"tapc",
"conn",
],
mapp_regulator_settings,
)
)
#####################################################
# #
# EQUIPMENT FILE #
# #
#####################################################
#
# Open the network file
self.get_file_content("equipment")
# Loop over the network file
for line in self.content:
self.regulators.update(
self.parser_helper(
line,
["regulator"],
[
"id",
"type",
"kva",
"kva_1",
"kva_2",
"kva_3",
"kva_4",
"kvln",
"forwardbandwidth",
"bandwidth",
"ct",
"pt",
],
mapp_regulators,
)
)
for sectionID, settings in self.settings.items():
sectionID = sectionID.strip("*").lower()
try:
phases = self.section_phase_mapping[sectionID]["phase"]
except:
raise ValueError("No phase for section {}".format(sectionID))
try:
phases_on = self.phase_mapping(settings["phaseon"])
except:
raise ValueError(
"Unable to get phases for regulator {}".format(sectionID)
)
if "eqid" in settings and settings["eqid"] in self.regulators:
regulator_data = self.regulators[settings["eqid"]]
else:
regulator_data = {}
for p in phases_on:
if p not in phases:
logger.warning(
"Regulator {id} monitors phase {p} which is not in the section phases {pp}".format(
id=sectionID, p=p, pp=phases
)
)
# Instanciate a Regulator DiTTo object
try:
api_regulator = Regulator(model)
except:
raise ValueError("Unable to instanciate Regulator DiTTo object.")
try:
api_regulator.name = "Reg_" + sectionID + "_" + p
except:
pass
api_regulator.feeder_name = self.section_feeder_mapping[sectionID]
try:
api_regulator.from_element = self.section_phase_mapping[sectionID][
"fromnodeid"
]
except:
pass
try:
api_regulator.to_element = self.section_phase_mapping[sectionID][
"tonodeid"
]
except:
pass
try:
api_regulator.pt_phase = p
except:
pass
try:
position = Position(model)
position.long = float(reg_set["coordx"])
position.lat = float(reg_set["coordy"])
position.elevation = 0
api_regulator.positions.append(position)
except:
pass
try:
api_regulator.pt_ratio = float(settings["pt"])
except:
pass
try:
api_regulator.ct_prim = float(settings["ct"])
except:
pass
try:
if p == "A":
api_regulator.bandcenter = float(settings["vseta"])
if p == "B":
api_regulator.bandcenter = float(settings["vsetb"])
if p == "C":
api_regulator.bandcenter = float(settings["vsetc"])
except:
pass
try:
if (
p == "A"
and "bandwidtha" in settings
and settings["bandwidtha"] is not None
):
api_regulator.bandwidth = float(settings["bandwidtha"])
elif "forwardbandwidth" in regulator_data:
api_regulator.bandwidth = float(
regulator_data["forwardbandwidth"]
)
else:
api_regulator.bandwidth = float(
regulator_data["bandwidth"]
) # For old CYME versions
if (
p == "B"
and "bandwidthb" in settings
and settings["bandwidthb"] is not None
):
api_regulator.bandwidth = float(settings["bandwidthb"])
elif "forwardbandwidth" in regulator_data:
api_regulator.bandwidth = float(
regulator_data["forwardbandwidth"]
)
else:
api_regulator.bandwidth = float(
regulator_data["bandwidth"]
) # For old CYME versions
if (
p == "C"
and "bandwidthc" in settings
and settings["bandwidthc"] is not None
):
api_regulator.bandwidth = float(settings["bandwidthc"])
elif "forwardbandwidth" in regulator_data:
api_regulator.bandwidth = float(
regulator_data["forwardbandwidth"]
)
else:
api_regulator.bandwidth = float(
regulator_data["bandwidth"]
) # For old CYME versions
except:
pass
for w in range(2):
# Instanciate a Winding DiTTo object
try:
api_winding = Winding(model)
except:
raise ValueError("Unable to instanciate Winding DiTTo object.")
# Set the rated power
try:
api_winding.rated_power = (
float(regulator_data["kva"]) * 10 ** 3
) # DiTTo in volt ampere
except:
pass
# Set the connection type
try:
api_winding.connection_type = self.connection_configuration_mapping(
settings["conn"]
)
except:
pass
# Set the nominal voltage
try:
api_winding.nominal_voltage = float(regulator_data["kvln"])
except:
pass
# Instanciate a PhaseWinding DiTTo object
try:
api_phase_winding = PhaseWinding(model)
except:
raise ValueError("Unable to instanciate PhaseWinding object.")
# Set the phase
try:
api_phase_winding.phase = p
except:
pass
# Append the phaseWinding object to the winding
api_winding.phase_windings.append(api_phase_winding)
# api_transformer.windings.append(api_winding)
# Add the winding object to the regulator
api_regulator.windings.append(api_winding)
self._regulators.append(api_regulator)
if not sectionID in self.section_duplicates:
self.section_duplicates[sectionID] = []
self.section_duplicates[sectionID].append(api_regulator)
return 1
def parse_network_equivalent(self, model):
"""Parse Network equivalent objects from CYME into DiTTo.
Network Equivalent objects are elements which have been reduced by cyme (using its network reduction capabilities)
They encapsulate multiple loads, lines and transformers into one equivalent object
This function creates load and line objects to represent the network equivalent objects from CYME
"""
self._network_equivalents = {}
mapp_section = {"sectionid": 0, "fromnodeid": 1, "tonodeid": 2, "phase": 3}
mapp_network_equivalents = {
"sectionid":0,
"devicenumber": 2,
"coordx": 6,
"coordy": 7,
'zraa':9,
'zrab':10,
'zrac':11,
'zrba': 12,
'zrbb':13,
'zrbc':14,
'zrca':15,
'zrcb':16,
'zrcc':17,
'zxaa':18,
'zxab':19,
'zxac':20,
'zxba':21,
'zxbb': 22,
'zxbc': 23,
'zxca':24,
'zxcb':25,
'zxcc':26,
'loadfromkwa': 27,
'loadfromkwb': 28,
'loadfromkwc':29,
'loadfromkvara': 30,
'loadfromkvarb':31,
'loadfromkvarc':32,
'loadtokwa':33,
'loadtokwb': 34,
'loadtokwc':35,
'loadtokvara':36,
'loadtokvarb':37,
'loadtokvarc':38,
'totallengtha':39,
'totallengthb': 40,
'totallengthc': 41,
}
# Open the network file
self.get_file_content("network")
# Loop over the network file
for line in self.content:
#########################################
# #
# NETWORK EQUIVALENTS. #
# #
#########################################
#
self.settings = self.update_dict(
self.settings,
self.parser_helper(
line,
["network_equivalent_setting"],
['sectionid', 'devicenumber', 'coordx', 'coordy', 'zraa', 'zrab', 'zrac', 'zrba', 'zrbb', 'zrbc', 'zrca', 'zrcb', 'zrcc', 'zxaa', 'zxab', 'zxac', 'zxba', 'zxbb', 'zxbc', 'zxca', 'zxcb', 'zxcc', 'loadfromkwa', 'loadfromkwb', 'loadfromkwc', 'loadfromkvara', 'loadfromkvarb', 'loadfromkvarc', 'loadtokwa', 'loadtokwb', 'loadtokwc', 'loadtokvara', 'loadtokvarb', 'loadtokvarc', 'totallengtha', 'totallengthb', 'totallengthc'],
mapp_network_equivalents,
{"type": "network_equivalent"},
),
)
#########################################
# #
# SECTIONS. #
# #
#########################################
#
self.settings = self.update_dict(
self.settings,
self.parser_helper(
line,
["section"],
["sectionid", "fromnodeid", "tonodeid", "phase"],
mapp_section,
),
)
for sectionID, settings in self.settings.items():
sectionID = sectionID.strip("*").lower()
if 'type' in settings and settings['type'] == 'network_equivalent':
#### Create Line for Network equivalent ###
api_line = Line(model)
api_line.from_element = self.section_phase_mapping[sectionID][ "fromnodeid" ]
api_line.to_element = self.section_phase_mapping[sectionID][ "tonodeid" ]
api_line.name = sectionID+'_ne_line'
# Set the connection index for the from_element (info is in the section)
try:
api_line.from_element_connection_index = int( self.section_phase_mapping[sectionID]["fromnodeindex"])
except:
pass
# Set the connection index for the from_element (info is in the section)
try:
api_line.to_element_connection_index = int( self.section_phase_mapping[sectionID]["tonodeindex"])
except:
pass
api_line.feeder_name = self.section_feeder_mapping[sectionID]
# Set the position
try:
position = Position(model)
position.long = float(settings["coordx"])
position.lat = float(settings["coordy"])
position.elevation = 0
api_line.position = position
except:
pass
# Set the line type
api_line.is_switch = False
api_line.is_fuse = False
api_line.is_recloser = False
api_line.is_breaker = False
api_line.is_sectionalizer = False
api_line.is_network_protector = False
if "eqid" in settings:
api_line.nameclass = settings["eqid"]
api_line.line_type = 'overhead' #set all network equivalent lines to be overhead by default.
phases = []
if float(settings['zraa']) !=0:
phases.append('A')
if float(settings['zrbb']) !=0:
phases.append('B')
if float(settings['zrcc']) !=0:
phases.append('C')
api_line.wires = []
for phase in phases:
api_wire = Wire(model)
api_wire.phase = phase
api_line.wires.append(api_wire)
impedance_matrix = []
for phase1 in phases:
impedance_matrix_inner = []
for phase2 in phases:
resistance_name = 'zr'+phase1.lower() +phase2.lower()
reactance_name = 'zx'+phase1.lower() +phase2.lower()
impedance_element = complex(float(settings[resistance_name]),float(settings[reactance_name]))
impedance_matrix_inner.append(impedance_element)
impedance_matrix.append(impedance_matrix_inner)
api_line.impedance_matrix = impedance_matrix
max_distance = max(float(settings['totallengtha']), float(settings['totallengthb']),float(settings['totallengthc']))
api_line.length = max_distance # outputs in SI units
### Create load at from node
total_load_from = 0
total_load_from_kvar = 0
for ph in phases:
total_load_from += float(settings['loadfromkw'+ph.lower()])
total_load_from_kvar += float(settings['loadfromkvar'+ph.lower()])
if total_load_from > 0 and total_load_from_kvar > 0:
api_load_from = Load(model)
api_load_from.model=1
api_load_from.connecting_element = self.section_phase_mapping[sectionID][ "fromnodeid" ]
api_load_from.name = sectionID+'_ne_from_load'
# Set the connection index for the from_element (info is in the section)
api_load_from.phase_loads = []
api_load_from.feeder_name = self.section_feeder_mapping[sectionID]
for ph in phases:
try:
api_phase_load_from = PhaseLoad(model)
except:
raise ValueError(
"Unable to instanciate PhaseLoad DiTTo object."
)
try:
api_phase_load_from.phase = ph
except:
pass
try:
api_phase_load_from.p, api_phase_load_from.q = (
10 ** 3 * float(settings['loadfromkw'+ph.lower()]),
10 ** 3 * float(settings['loadfromkvar'+ph.lower()]),
)
except:
pass
# TODO : use load_type_data
api_phase_load_from.ppercentcurrent = 0
api_phase_load_from.qpercentcurrent = 0
api_phase_load_from.ppercentpower = 1
api_phase_load_from.qpercentpower = 1
api_phase_load_from.ppercentimpedance = 0
api_phase_load_from.qpercentimpedance = 0
api_load_from.phase_loads.append(api_phase_load_from)
### Create load at from node
total_load_to = 0
total_load_to_kvar = 0
for ph in phases:
total_load_to += float(settings['loadtokw'+ph.lower()])
total_load_to_kvar += float(settings['loadfromkvar'+ph.lower()])
if total_load_to > 0 and total_load_to_kvar > 0:
api_load_to = Load(model)
api_load_to.model=1
api_load_to.connecting_element = self.section_phase_mapping[sectionID][ "tonodeid" ]
api_load_to.name = sectionID+'_ne_to_load'
# Set the connection index for the from_element (info is in the section)
api_load_to.phase_loads = []
api_load_to.feeder_name = self.section_feeder_mapping[sectionID]
for ph in phases:
try:
api_phase_load_to = PhaseLoad(model)
except:
raise ValueError(
"Unable to instanciate PhaseLoad DiTTo object."
)
try:
api_phase_load_to.phase = ph
except:
pass
try:
api_phase_load_to.p, api_phase_load_to.q = (
10 ** 3 * float(settings['loadtokw'+ph.lower()]),
10 ** 3 * float(settings['loadtokvar'+ph.lower()]),
)
except:
pass
# TODO : use load_type_data
api_phase_load_to.ppercentcurrent = 0
api_phase_load_to.qpercentcurrent = 0
api_phase_load_to.ppercentpower = 1
api_phase_load_to.qpercentpower = 1
api_phase_load_to.ppercentimpedance = 0
api_phase_load_to.qpercentimpedance = 0
api_load_to.phase_loads.append(api_phase_load_to)
    def parse_loads(self, model):
        """Parse the loads from CYME to DiTTo.

        Reads the [LOADS], [CUSTOMER LOADS] and [CUSTOMER CLASS] sections of
        the CYME load file and builds one DiTTo Load (with per-phase PhaseLoad
        children) per customer-load section. Sections whose ID carries a '*'
        suffix appear multiple times and are fused into a single Load.

        :param model: DiTTo model the Load objects are attached to.
        :returns: 1 on completion.
        :raises ValueError: if a PhaseLoad DiTTo object cannot be instanciated.
        """
        # Instanciate the list in which we store the DiTTo load objects
        self._loads = {}
        # Column mappings: field name -> column index in the CYME file.
        mapp_loads = {"sectionid": 0, "devicenumber": 1, "loadtype": 4, "connection": 5}
        mapp_customer_loads = {
            "sectionid": 0,
            "devicenumber": 1,
            "loadtype": 2,
            "customernumber": 3,
            "customertype": 4,
            "loadmodelid": 8,
            "valuetype": 11,
            "loadphase": 12,
            "value1": 13,
            "value2": 14,
            "connectedkva": 15,
            "numberofcustomer": 17,
        }
        mapp_customer_class = {
            "id": 0,
            "constantpower": 4,
            "constantcurrent": 5,
            "constantimpedance": 6,
            "powerfactor": 8,
            "constantimpedancezp": 17,
            "constantimpedancezq": 18,
            "constantcurrentip": 19,
            "constantcurrentiq": 20,
            "constantpowerpp": 21,
            "constantpowerpq": 22,
        }
        self.loads = {}
        self.customer_loads = {}
        self.customer_class = {}
        #####################################################
        #                                                   #
        #                     LOAD FILE                     #
        #                                                   #
        #####################################################
        #
        # Open the load file
        self.get_file_content("load")
        # Loop over the load file
        for line in self.content:
            #########################################
            #                                       #
            #                 LOADS                 #
            #                                       #
            #########################################
            #
            self.loads.update(
                self.parser_helper(
                    line,
                    ["loads"],
                    ["sectionid", "devicenumber", "loadtype", "connection"],
                    mapp_loads,
                )
            )
            #########################################
            #                                       #
            #            CUSTOMER LOADS             #
            #                                       #
            #########################################
            #
            self.customer_loads.update(
                self.parser_helper(
                    line,
                    ["customer_loads"],
                    [
                        "sectionid",
                        "devicenumber",
                        "loadtype",
                        "customernumber",
                        "customertype",
                        "loadmodelid",
                        "valuetype",
                        "loadphase",
                        "value1",
                        "value2",
                        "connectedkva",
                        "numberofcustomer",
                    ],
                    mapp_customer_loads,
                )
            )
            #########################################
            #                                       #
            #            CUSTOMER CLASS             #
            #                                       #
            #########################################
            #
            self.customer_class.update(
                self.parser_helper(
                    line,
                    ["customer_class"],
                    [
                        "id",
                        "constantpower",
                        "constantcurrent",
                        "constantimpedance",
                        "powerfactor",
                        "constantimpedancezp",
                        "constantimpedancezq",
                        "constantcurrentip",
                        "constantcurrentiq",
                        "constantpowerpp",
                        "constantpowerpq",
                    ],
                    mapp_customer_class,
                )
            )
        # Section IDs suffixed with '*' occur more than once: their loads must
        # be merged into a single DiTTo Load object below.
        duplicate_loads = set()
        for sectionID in self.customer_loads.keys():
            if sectionID.endswith("*"):
                duplicate_loads.add(sectionID.lower().strip("*"))
        for sectionID, settings in self.customer_loads.items():
            sectionID = sectionID.strip("*").lower()
            if sectionID in self.loads:
                load_data = self.loads[sectionID]
            else:
                load_data = {}
            if "connectedkva" in settings:
                connectedkva = float(settings["connectedkva"])
            else:
                connectedkva = None
            # 'valuetype' selects how (value1, value2) encode the load:
            #   0 -> (P, Q); 1 -> (KVA, PF); 2 -> (P, PF); 3 -> (AMP, PF)
            if "valuetype" in settings:
                value_type = int(settings["valuetype"])
                if "value1" in settings and "value2" in settings:
                    if (
                        float(settings["value1"]) == 0.0
                        and float(settings["value2"]) == 0.0
                    ):
                        p = 0
                        q = 0
                    elif value_type == 0:  # P and Q are given
                        try:
                            p, q = float(settings["value1"]), float(settings["value2"])
                        except:
                            logger.warning(
                                "WARNING:: Skipping load on section {}".format(sectionID)
                            )
                            continue
                    elif value_type == 1:  # KVA and PF are given
                        try:
                            # PF arrives as a percentage, hence the 0.01 factor
                            kva, PF = (
                                float(settings["value1"]),
                                float(settings["value2"]) * 0.01,
                            )
                            if kva == 0 and "connectedkva" in settings:
                                kva = float(settings["connectedkva"])
                            p = kva * PF
                            q = math.sqrt(kva ** 2 - p ** 2)
                        except:
                            logger.warning(
                                "WARNING:: Skipping load on section {}".format(sectionID)
                            )
                            continue
                    elif value_type == 2:  # P and PF are given
                        try:
                            p, PF = float(settings["value1"]), float(settings["value2"])
                            # PF may be given as a fraction or as a percentage
                            if 0 <= PF <= 1:
                                q = p * math.sqrt((1 - PF ** 2) / PF ** 2)
                            elif 1 < PF <= 100:
                                PF /= 100.0
                                q = p * math.sqrt((1 - PF ** 2) / PF ** 2)
                            else:
                                logger.warning("problem with PF")
                                logger.warning(PF)
                        except:
                            logger.warning("Skipping load on section {}".format(sectionID))
                            continue
                    elif value_type == 3:  # AMP and PF are given
                        # TODO
                        logger.warning(
                            "WARNING:: Skipping load on section {}".format(sectionID)
                        )
                        continue
                    if p >= 0 or q >= 0:
                        if "loadphase" in settings:
                            phases = settings["loadphase"]
                        else:
                            phases = []
                        # fused: this section's Load already exists and the
                        # current entry is being merged into it.
                        fused = False
                        if sectionID in duplicate_loads:
                            fusion = True
                            if sectionID in self._loads:
                                api_load = self._loads[sectionID]
                                fused = True
                            elif p != 0:
                                api_load = Load(model)
                        else:
                            fusion = False
                            api_load = Load(model)
                        # Skip zero-power duplicate entries entirely (also
                        # guards against api_load being unbound above).
                        if fusion and p == 0:
                            # logger.warning(
                            #     "WARNING:: Skipping duplicate load on section {} with p=0".format(sectionID)
                            # )
                            continue
                        try:
                            if fusion and sectionID in self._loads:
                                # Merging: append this entry's phases to the name
                                api_load.name += "_" + reduce(
                                    lambda x, y: x + "_" + y, phases
                                )
                            else:
                                api_load.name = (
                                    "Load_"
                                    + sectionID
                                    + "_"
                                    + reduce(lambda x, y: x + "_" + y, phases)
                                )
                        except:
                            pass
                        try:
                            # First entry sets the connected kVA; merged entries
                            # accumulate it (DiTTo stores volt ampere).
                            if not (fusion and sectionID in self._loads):
                                if connectedkva is not None:
                                    api_load.transformer_connected_kva = (
                                        connectedkva * 10 ** 3
                                    )  # DiTTo in var
                            elif connectedkva is not None:
                                if api_load.transformer_connected_kva is None:
                                    api_load.transformer_connected_kva = (
                                        connectedkva * 10 ** 3
                                    )  # DiTTo in var
                                else:
                                    api_load.transformer_connected_kva += (
                                        connectedkva * 10 ** 3
                                    )  # DiTTo in var
                        except:
                            pass
                        try:
                            if not (fusion and sectionID in self._loads):
                                api_load.connection_type = self.connection_configuration_mapping(
                                    load_data["connection"]
                                )
                        except:
                            pass
                        # NOTE(review): on the merged path load_type_data is not
                        # reassigned here; it keeps its value from a previous
                        # iteration (or raises NameError in the ZIP try below,
                        # which is swallowed) — confirm this is intended.
                        if not (fusion and sectionID in self._loads):
                            if (
                                "loadtype" in settings
                                and settings["loadtype"] in self.customer_class
                            ):
                                load_type_data = self.customer_class[settings["loadtype"]]
                            else:
                                load_type_data = {}
                        try:
                            if not (fusion and sectionID in self._loads):
                                api_load.connecting_element = self.section_phase_mapping[
                                    sectionID
                                ]["fromnodeid"]
                        except:
                            pass
                        api_load.feeder_name = self.section_feeder_mapping[sectionID]
                        api_load.num_users = float(settings["numberofcustomer"])
                        for ph in phases:
                            # Instanciate a PhaseLoad DiTTo object
                            try:
                                api_phase_load = PhaseLoad(model)
                            except:
                                raise ValueError(
                                    "Unable to instanciate PhaseLoad DiTTo object."
                                )
                            # Set the phase
                            try:
                                api_phase_load.phase = ph
                            except:
                                pass
                            # Split P/Q evenly across phases; DiTTo stores
                            # volt ampere, hence the 10 ** 3 factor.
                            try:
                                api_phase_load.p, api_phase_load.q = (
                                    10 ** 3 * p / len(phases),
                                    10 ** 3 * q / len(phases),
                                )
                            except:
                                pass
                            # ZIP load parameters (percentages in CYME -> fractions)
                            try:
                                api_phase_load.ppercentcurrent = (
                                    float(load_type_data["constantcurrentip"]) / 100.0
                                )
                                api_phase_load.qpercentcurrent = (
                                    float(load_type_data["constantcurrentiq"]) / 100.0
                                )
                                api_phase_load.ppercentpower = (
                                    float(load_type_data["constantpowerpp"]) / 100.0
                                )
                                api_phase_load.qpercentpower = (
                                    float(load_type_data["constantpowerpq"]) / 100.0
                                )
                                api_phase_load.ppercentimpedance = (
                                    float(load_type_data["constantimpedancezp"]) / 100.0
                                )
                                api_phase_load.qpercentimpedance = (
                                    float(load_type_data["constantimpedancezq"]) / 100.0
                                )
                                # api_phase_load.use_zip=1
                                # api_phase_load.model=8
                            except:
                                pass
                            # CYME store phase loads with P=0 and Q=0.
                            # Do not add them to DiTTo (otherwise it will make the validation
                            # on the number of objects fail since we will have many more loads than there actually are...)
                            # if api_phase_load.p!=0 or api_phase_load.q!=0:
                            api_load.phase_loads.append(api_phase_load)
                        self._loads[sectionID] = api_load
                        if not sectionID in self.section_duplicates:
                            self.section_duplicates[sectionID] = []
                        if not fused:  # Because multiple loads on different phases are joined into a single one
                            self.section_duplicates[sectionID].append(api_load)
        return 1
def parse_dg(self, model):
""" Parse the Distributed Generation from CYME to DiTTo. May be respresented as ECGs or PVs.
This reads the objets [CONVERTER], [CONVERTER CONTROL SETTING], [LONG TERM DYNAMICS CURVE EXT] [DGGENERATIONMODEL] and in the case when PV is included [PHOTOVOLTAIC SETTINGS]"""
self._dgs = []
self.converter = {}
self.converter_settings = {}
self.long_term_dynamics = {}
self.photovoltaic_settings = {}
self.bess = {}
self.bess_settings = {}
self.dg_generation = {}
mapp_converter = {
"devicenumber": 0,
"devicetype": 1,
"converterrating": 2,
"activepowerrating": 3,
"reactivepowerrating": 4,
"minimumpowerfactor": 5,
"powerfalllimit": 23,
"powerriselimit": 24,
"risefallunit": 25,
}
mapp_converter_settings = {
"devicenumber": 0,
"devicetype": 1,
"controlindex": 2,
"timetriggerindex": 3,
"controltype": 4,
"fixedvarinjection": 5,
"injectionreference": 6,
"convertercontrolid": 7,
"powerreference": 8,
"powerfactor": 9,
}
mapp_photovoltaic_settings = {
"sectionid": 0,
"location": 1,
"devicenumber": 2,
"equipmentid": 6,
"eqphase": 7,
"ambienttemperature": 11,
}
mapp_bess = {
"id": 0,
"ratedstorageenergy": 1,
"maxchargingpower": 2,
"maxdischargingpower": 3,
"chargeefficiency": 4,
"dischargeefficiency": 5,
}
mapp_bess_settings = {
"sectionid": 0,
"devicenumber": 2,
"equipmentid": 6,
"phase": 7,
"maximumsoc": 10,
"minimumsoc": 11,
"initialsoc": 16,
}
mapp_bess = {
"id": 0,
"ratedstorageenergy": 1,
"maxchargingpower": 2,
"maxdischargingpower": 3,
"chargeefficiency": 4,
"dischargeefficiency": 5,
}
mapp_bess_settings = {
"sectionid": 0,
"devicenumber": 2,
"equipmentid": 6,
"phase": 7,
"maximumsoc": 10,
"minimumsoc": 11,
"initialsoc": 16,
}
mapp_long_term_dynamics = {
"devicenumber": 0,
"devicetype": 1,
"adjustmentsettings": 2,
"powercurvemodel": 3,
}
mapp_dg_generation_model = {
"devicenumber": 0,
"devicetype": 1,
"loadmodelname": 2,
"activegeneration": 3,
"powerfactor": 4,
}
#####################################################
# #
# NETWORK FILE #
# #
#####################################################
#
# Open the network file
self.get_file_content("network")
# Loop over the network file
for line in self.content:
#########################################
# #
# CONVERTER #
# #
#########################################
self.converter.update(
self.parser_helper(
line,
["converter"],
[
"devicenumber",
"devicetype",
"converterrating",
"activepowerrating",
"reactivepowerrating",
"minimumpowerfactor",
"powerfalllimit",
"powerriselimit",
"risefallunit",
],
mapp_converter,
{"type": "converter"},
)
)
#########################################
# #
# CONVERTER CONTROL SETTINGS #
# #
#########################################
self.converter_settings.update(
self.parser_helper(
line,
["converter_control_settings"],
[
"devicenumber",
"devicetype",
"controltype",
"fixedvarinjection",
"injectionreference",
"convertercontrolid",
"powerreference",
"powerfactor",
],
mapp_converter_settings,
{"type": "converter_settings"},
)
)
#########################################
# #
# PHOTOVOLTAIC SETTINGS #
# #
#########################################
self.photovoltaic_settings.update(
self.parser_helper(
line,
["photovoltaic_settings"],
["sectionid", "devicenumber", "eqphase", "ambienttemperature"],
mapp_photovoltaic_settings,
{"type": "photovoltaic_settings"},
)
)
#########################################
# #
# BESS SETTINGS #
# #
#########################################
self.bess_settings.update(
self.parser_helper(
line,
["bess_settings"],
[
"sectionid",
"devicenumber",
"equipmentid",
"phase",
"maximumsoc",
"minimumsoc",
"initialsoc",
],
mapp_bess_settings,
{"type": "bess_settings"},
)
)
#########################################
# #
# LONG TERM DYNAMICS CURVE EXT #
# #
#########################################
self.long_term_dynamics.update(
self.parser_helper(
line,
["long_term_dynamics_curve_ext"],
[
"devicenumber",
"devicetype",
"adjustmentsettings",
"powercurvemodel",
],
mapp_long_term_dynamics,
{"type": "long_term_dynamics"},
)
)
#########################################
# #
# DGGENERATIONMODEL #
# #
#########################################
self.dg_generation.update(
self.parser_helper(
line,
["dggenerationmodel"],
[
"devicenumber",
"devicetype",
"activegeneration",
"powerfactor",
"loadmodelname",
],
mapp_dg_generation_model,
{"type": "dg_generation_model"},
)
)
#####################################################
# #
# EQUIPMENT FILE #
# #
#####################################################
#
# Open the equipment file
self.get_file_content("equipment")
# Loop over the equipment file
for line in self.content:
#########################################
# #
# BESS #
# #
#########################################
#
self.bess.update(
self.parser_helper(
line,
["bess"],
[
"id",
"ratedstorageenergy",
"maxchargingpower",
"maxdischargingpower",
"chargeefficiency",
"dischargeefficiency",
],
mapp_bess,
)
)
api_photovoltaics = {}
api_bessi = {}
for sectionID, settings in self.photovoltaic_settings.items():
try:
api_photovoltaic = Photovoltaic(model)
except:
raise ValueError(
"Unable to instanciate photovoltaic {id}".format(id=sectionID)
)
try:
api_photovoltaic.name = "PV_" + settings["devicenumber"].lower()
api_photovoltaic.feeder_name = self.section_feeder_mapping[
sectionID.lower()
]
api_photovoltaics[settings["devicenumber"].lower()] = api_photovoltaic
except:
raise ValueError(
"Unable to set photovoltaic name for {id}".format(id=sectionID)
)
try:
api_photovoltaic.temperature = float(
settings["ambienttemperature"]
) # Not included in ECG SETTINGS
except:
pass
try:
api_photovoltaic.phases = [
Unicode(k) for k in list(settings["eqphase"])
]
except:
pass
try:
api_photovoltaic.connecting_element = self.section_phase_mapping[
sectionID.lower()
]["fromnodeid"]
except:
pass
if not sectionID in self.section_duplicates:
self.section_duplicates[sectionID] = []
self.section_duplicates[sectionID].append(api_photovoltaic)
for sectionID, settings in self.bess_settings.items():
try:
api_bess = Storage(model)
except:
raise ValueError("Unable to instanciate bess {id}".format(id=sectionID))
try:
api_bess.name = "BESS_" + settings["devicenumber"].lower()
api_bess.feeder_name = self.section_feeder_mapping[sectionID.lower()]
api_bessi[settings["devicenumber"].lower()] = api_bess
except:
raise ValueError(
"Unable to set bess name for {id}".format(id=sectionID)
)
phase_storages = []
if "phase" in settings:
phases = self.phase_mapping(settings["phase"])
else:
phases = ["A", "B", "C"]
for phase in phases:
phase_storage = PhaseStorage(model)
phase_storage.phase = phase
phase_storages.append(phase_storage)
api_bess.phase_storages = phase_storages
if "equipmentid" in settings:
dev_num = settings["equipmentid"]
else:
dev_num = None
if dev_num is not None and dev_num in self.bess:
bess_data = self.bess[dev_num]
try:
api_bess.rated_kWh = float(bess_data["ratedstorageenergy"])
except:
pass
try:
api_bess.chargeefficiency = float(bess_data["chargingefficiency"])
except:
pass
try:
api_bess.dischargeefficiency = float(
bess_data["dischargeefficiency"]
)
except:
pass
try:
charging = float("inf")
discharging = float("inf")
if "maxchargingpower" in bess_data:
charging = float(bess_data["maxchargingpower"])
if "maxdischargingpower" in bess_data:
discharging = float(bess_data["maxdischargingpower"])
power = min(charging, discharging) * 1000
if power < float("inf"):
average_power = power / float(len(phase_storages))
for ps in phase_storages:
ps.p = average_power
except:
pass
try:
api_bess.reserve = float(settings["maximumsoc"])
except:
pass
try:
api_bess.stored_kWh = (
float(settings["initialsoc"]) * api_bess.rated_kWh / 100.0
)
except:
pass
try:
api_bess.connecting_element = self.section_phase_mapping[
sectionID.lower()
]["fromnodeid"]
except:
pass
if not sectionID in self.section_duplicates:
self.section_duplicates[sectionID] = []
self.section_duplicates[sectionID].append(api_bess)
for deviceID, settings in self.dg_generation.items():
deviceID = deviceID.strip(
"*"
).lower() # TODO: Deal with multiple configurations for the same location
api_photovoltaic = api_photovoltaics[deviceID]
# Use the default setting if available
if (
"loadmodelname" in settings
and settings["loadmodelname"].lower() == "default"
):
try:
api_photovoltaic.active_rating = (
float(settings["activegeneration"]) * 1000
)
except:
pass
try:
api_photovoltaic.power_factor = (
float(settings["powerfactor"]) / 100.0
)
except:
pass
for deviceID, settings in self.converter.items():
deviceID = deviceID.strip(
"*"
).lower() # TODO: Deal with multiple configurations for the same location
if deviceID in api_photovoltaics:
api_photovoltaic = api_photovoltaics[deviceID]
try:
api_photovoltaic.rated_power = (
float(settings["activepowerrating"]) * 1000
)
except:
pass
try:
api_photovoltaic.reactive_rating = (
float(settings["reactivepowerrating"]) * 1000
)
except:
pass
try:
api_photovoltaic.min_powerfactor = (
float(settings["minimumpowerfactor"]) / 100.0
)
except:
pass
try:
api_photovoltaic.fall_limit = float(settings["powerfalllimit"])
except:
pass
try:
api_photovoltaic.rise_limit = float(settings["powerriselimit"])
except:
pass
# TODO: check the units being used
elif deviceID in api_bessi:
api_bess = api_bessi[deviceID]
try:
api_bess.rated_power = float(settings["activepowerrating"]) * 1000
except:
pass
try:
api_bess.reactive_rating = (
float(settings["reactivepowerrating"]) * 1000
)
except:
pass
try:
api_bess.min_powerfactor = (
float(settings["minimumpowerfactor"]) / 100.0
)
except:
pass
for deviceID, settings in self.converter_settings.items():
deviceID = deviceID.strip(
"*"
).lower() # TODO: Deal with multiple configurations for the same location
if deviceID in api_photovoltaics:
api_photovoltaic = api_photovoltaics[deviceID]
try:
control_type = str(settings["controltype"])
if control_type == "1":
api_photovoltaic.control_type = "voltvar_vars_over_watts"
if control_type == "0":
api_photovoltaic.control_type = "voltvar_watts_over_vars"
if control_type == "2":
api_photovoltaic.control_type = "voltvar_fixedvars"
if control_type == "3":
api_photovoltaic.control_type = "voltvar_novars"
if control_type == "5":
api_photovoltaic.control_type = "voltwatt"
if control_type == "6":
api_photovoltaic.control_type = "watt_powerfactor"
if control_type == "10":
api_photovoltaic.control_type = "powerfactor"
except:
pass
try:
api_photovoltaic.var_injection = float(
settings["fixedvarinjection"]
)
except:
pass
try:
curve = float(settings["convertercontrolid"])
if (
api_photovoltaic.control_type == "voltvar_watts_over_vars"
or api_photovoltaic.control_type == "voltvar_vars_over_watts"
):
api_photovoltaic.voltvar_curve = curve
if api_photovoltaic.control_type == "voltwatt":
api_photovoltaic.voltwatt_curve = curve
if api_photovoltaic.control_type == "watt_powerfactor":
api_photovoltaic.watt_powerfactor_curve = curve
except:
pass
try:
pf = float(settings["powerfactor"]) / 100.0
api_photovoltaic.power_factor = pf
except:
pass
def fix_section_overlaps(self, model, **kwargs):
"""
Some sections will have multiple components included in them (e.g. a line, transformer and capacitor).
This function identifies the sections that have multiple components in them and creates intermediate nodes between them
so that they are not connected in parallel
Place components in series:
Regulator -> Transformer -> Line -> (Loads, PV, BESS, Capacitors)
:param model: DiTTo model
:type model: DiTTo model
:param verbose: Set the verbose mode. Optional. Default=True
:type verbose: bool
"""
model.set_names()
multiple_elements = {}
for i,j in self.section_duplicates.items():
if len(j)>1:
multiple_elements[i] = j
for sectionID in multiple_elements:
connectors = []
regulators = []
transformers = []
lines = [] #Warning - if multiple lines are used the names will be the same
loads = []
pvs = []
bess = []
capacitors = []
from_element = None
to_element = None
connector_count = 0
for element in multiple_elements[sectionID]:
if isinstance(element,Regulator):
regulators.append(element)
from_element = element.from_element
to_element = element.to_element
connector_count+=1
if isinstance(element,PowerTransformer):
transformers.append(element)
from_element = element.from_element
to_element = element.to_element
connector_count+=1
if isinstance(element,Line):
lines.append(element)
from_element = element.from_element
to_element = element.to_element
connector_count+=1
if isinstance(element,Load):
loads.append(element)
if isinstance(element,Storage):
bess.append(element)
if isinstance(element,Capacitor):
capacitors.append(element)
connectors = [regulators,transformers,lines]
non_connectors = [loads,bess,pvs,capacitors]
if from_element is None or to_element is None: # i.e. just loads, pvs and caps so no problem
continue
original_from_element = from_element
original_from_node = model[from_element]
intermediate_count = 0
for connector in connectors:
for element in connector:
if from_element != original_from_element:
element.from_element = from_element
# Regulators go between the same two nodes
if isinstance(element,Regulator):
from_element = original_from_element+'_reg'
else:
from_element = original_from_element+'_sec_'+str(intermediate_count)
intermediate_count +=1
if intermediate_count != connector_count:
element.to_element = from_element
api_node = Node(model)
api_node.name = from_element
if original_from_node.positions is not None:
api_positions = []
for position in original_from_node.positions:
api_position = Position(model)
api_position.long = position.long
api_position.lat = position.lat
api_positions.append(api_position)
api_node.positions = api_positions #set the positions to be the same as in the original
# Assumes we have had at least one connecting element added
# Connect these all to the final to-node
for non_connector in non_connectors:
for element in non_connector:
element.connecting_element = to_element
| [
"logging.getLogger",
"numpy.sqrt",
"ditto.models.wire.Wire",
"ditto.models.feeder_metadata.Feeder_metadata",
"ditto.models.base.Unicode",
"ditto.models.photovoltaic.Photovoltaic",
"math.sqrt",
"ditto.models.winding.Winding",
"ditto.models.storage.Storage",
"ditto.modify.system_structure.system_str... | [((1152, 1179), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (1169, 1179), False, 'import logging\n'), ((35145, 35200), 'numpy.any', 'np.any', (['[(x in line) for x in self.header_mapping[obj]]'], {}), '([(x in line) for x in self.header_mapping[obj]])\n', (35151, 35200), True, 'import numpy as np\n'), ((41728, 41760), 'ditto.modify.system_structure.system_structure_modifier', 'system_structure_modifier', (['model'], {}), '(model)\n', (41753, 41760), False, 'from ditto.modify.system_structure import system_structure_modifier\n'), ((61675, 61686), 'ditto.models.wire.Wire', 'Wire', (['model'], {}), '(model)\n', (61679, 61686), False, 'from ditto.models.wire import Wire\n'), ((19336, 19394), 'os.path.join', 'os.path.join', (['self.data_folder_path', 'self.network_filename'], {}), '(self.data_folder_path, self.network_filename)\n', (19348, 19394), False, 'import os\n'), ((35794, 35818), 'numpy.array', 'np.array', (['attribute_list'], {}), '(attribute_list)\n', (35802, 35818), True, 'import numpy as np\n'), ((45370, 45392), 'ditto.models.feeder_metadata.Feeder_metadata', 'Feeder_metadata', (['model'], {}), '(model)\n', (45385, 45392), False, 'from ditto.models.feeder_metadata import Feeder_metadata\n'), ((59290, 59301), 'ditto.models.node.Node', 'Node', (['model'], {}), '(model)\n', (59294, 59301), False, 'from ditto.models.node import Node\n'), ((157670, 157681), 'ditto.models.line.Line', 'Line', (['model'], {}), '(model)\n', (157674, 157681), False, 'from ditto.models.line import Line\n'), ((19456, 19516), 'os.path.join', 'os.path.join', (['self.data_folder_path', 'self.equipment_filename'], {}), '(self.data_folder_path, self.equipment_filename)\n', (19468, 19516), False, 'import os\n'), ((97561, 97576), 'ditto.models.position.Position', 'Position', (['model'], {}), '(model)\n', (97569, 97576), False, 'from ditto.models.position import Position\n'), ((162822, 162838), 
'ditto.models.capacitor.Capacitor', 'Capacitor', (['model'], {}), '(model)\n', (162831, 162838), False, 'from ditto.models.capacitor import Capacitor\n'), ((164097, 164112), 'ditto.models.position.Position', 'Position', (['model'], {}), '(model)\n', (164105, 164112), False, 'from ditto.models.position import Position\n'), ((185729, 185752), 'ditto.models.powertransformer.PowerTransformer', 'PowerTransformer', (['model'], {}), '(model)\n', (185745, 185752), False, 'from ditto.models.powertransformer import PowerTransformer\n'), ((187437, 187452), 'ditto.models.position.Position', 'Position', (['model'], {}), '(model)\n', (187445, 187452), False, 'from ditto.models.position import Position\n'), ((192848, 192912), 'numpy.array', 'np.array', (['[[complex0, 0, 0], [0, complex1, 0], [0, 0, complex1]]'], {}), '([[complex0, 0, 0], [0, complex1, 0], [0, 0, complex1]])\n', (192856, 192912), True, 'import numpy as np\n'), ((193027, 193088), 'numpy.array', 'np.array', (['[[1.0, 1.0, 1.0], [1.0, a * a, a], [1.0, a, a * a]]'], {}), '([[1.0, 1.0, 1.0], [1.0, a * a, a], [1.0, a, a * a]])\n', (193035, 193088), True, 'import numpy as np\n'), ((193113, 193129), 'numpy.linalg.inv', 'np.linalg.inv', (['T'], {}), '(T)\n', (193126, 193129), True, 'import numpy as np\n'), ((217114, 217125), 'ditto.models.line.Line', 'Line', (['model'], {}), '(model)\n', (217118, 217125), False, 'from ditto.models.line import Line\n'), ((248330, 248349), 'ditto.models.photovoltaic.Photovoltaic', 'Photovoltaic', (['model'], {}), '(model)\n', (248342, 248349), False, 'from ditto.models.photovoltaic import Photovoltaic\n'), ((249883, 249897), 'ditto.models.storage.Storage', 'Storage', (['model'], {}), '(model)\n', (249890, 249897), False, 'from ditto.models.storage import Storage\n'), ((250661, 250680), 'ditto.models.phase_storage.PhaseStorage', 'PhaseStorage', (['model'], {}), '(model)\n', (250673, 250680), False, 'from ditto.models.phase_storage import PhaseStorage\n'), ((19573, 19628), 'os.path.join', 
'os.path.join', (['self.data_folder_path', 'self.load_filename'], {}), '(self.data_folder_path, self.load_filename)\n', (19585, 19628), False, 'import os\n'), ((48939, 48957), 'ditto.models.power_source.PowerSource', 'PowerSource', (['model'], {}), '(model)\n', (48950, 48957), False, 'from ditto.models.power_source import PowerSource\n'), ((59543, 59558), 'ditto.models.position.Position', 'Position', (['model'], {}), '(model)\n', (59551, 59558), False, 'from ditto.models.position import Position\n'), ((167031, 167052), 'ditto.models.phase_capacitor.PhaseCapacitor', 'PhaseCapacitor', (['model'], {}), '(model)\n', (167045, 167052), False, 'from ditto.models.phase_capacitor import PhaseCapacitor\n'), ((192975, 193008), 'cmath.exp', 'cmath.exp', (['(2 * math.pi * 1.0j / 3)'], {}), '(2 * math.pi * 1.0j / 3)\n', (192984, 193008), False, 'import cmath\n'), ((207122, 207138), 'ditto.models.regulator.Regulator', 'Regulator', (['model'], {}), '(model)\n', (207131, 207138), False, 'from ditto.models.regulator import Regulator\n'), ((208077, 208092), 'ditto.models.position.Position', 'Position', (['model'], {}), '(model)\n', (208085, 208092), False, 'from ditto.models.position import Position\n'), ((218108, 218123), 'ditto.models.position.Position', 'Position', (['model'], {}), '(model)\n', (218116, 218123), False, 'from ditto.models.position import Position\n'), ((219304, 219315), 'ditto.models.wire.Wire', 'Wire', (['model'], {}), '(model)\n', (219308, 219315), False, 'from ditto.models.wire import Wire\n'), ((220687, 220698), 'ditto.models.load.Load', 'Load', (['model'], {}), '(model)\n', (220691, 220698), False, 'from ditto.models.load import Load\n'), ((222964, 222975), 'ditto.models.load.Load', 'Load', (['model'], {}), '(model)\n', (222968, 222975), False, 'from ditto.models.load import Load\n'), ((249274, 249284), 'ditto.models.base.Unicode', 'Unicode', (['k'], {}), '(k)\n', (249281, 249284), False, 'from ditto.models.base import Unicode\n'), ((24669, 24693), 
'numpy.sqrt', 'np.sqrt', (['(1 - value2 ** 2)'], {}), '(1 - value2 ** 2)\n', (24676, 24693), True, 'import numpy as np\n'), ((24808, 24832), 'numpy.sqrt', 'np.sqrt', (['(1 - value2 ** 2)'], {}), '(1 - value2 ** 2)\n', (24815, 24832), True, 'import numpy as np\n'), ((52288, 52306), 'ditto.models.power_source.PowerSource', 'PowerSource', (['model'], {}), '(model)\n', (52299, 52306), False, 'from ditto.models.power_source import PowerSource\n'), ((59889, 59904), 'ditto.models.position.Position', 'Position', (['model'], {}), '(model)\n', (59897, 59904), False, 'from ditto.models.position import Position\n'), ((60714, 60729), 'ditto.models.position.Position', 'Position', (['model'], {}), '(model)\n', (60722, 60729), False, 'from ditto.models.position import Position\n'), ((188125, 188139), 'ditto.models.winding.Winding', 'Winding', (['model'], {}), '(model)\n', (188132, 188139), False, 'from ditto.models.winding import Winding\n'), ((192434, 192456), 'math.sqrt', 'math.sqrt', (['(1 + XR * XR)'], {}), '(1 + XR * XR)\n', (192443, 192456), False, 'import math\n'), ((192487, 192515), 'math.sqrt', 'math.sqrt', (['(1 + 1 / (XR * XR))'], {}), '(1 + 1 / (XR * XR))\n', (192496, 192515), False, 'import math\n'), ((192651, 192675), 'math.sqrt', 'math.sqrt', (['(1 + XR0 * XR0)'], {}), '(1 + XR0 * XR0)\n', (192660, 192675), False, 'import math\n'), ((192706, 192736), 'math.sqrt', 'math.sqrt', (['(1 + 1 / (XR0 * XR0))'], {}), '(1 + 1 / (XR0 * XR0))\n', (192715, 192736), False, 'import math\n'), ((193552, 193568), 'ditto.models.regulator.Regulator', 'Regulator', (['model'], {}), '(model)\n', (193561, 193568), False, 'from ditto.models.regulator import Regulator\n'), ((196004, 196018), 'ditto.models.winding.Winding', 'Winding', (['model'], {}), '(model)\n', (196011, 196018), False, 'from ditto.models.winding import Winding\n'), ((199255, 199269), 'ditto.models.winding.Winding', 'Winding', (['model'], {}), '(model)\n', (199262, 199269), False, 'from ditto.models.winding import 
Winding\n'), ((211298, 211312), 'ditto.models.winding.Winding', 'Winding', (['model'], {}), '(model)\n', (211305, 211312), False, 'from ditto.models.winding import Winding\n'), ((212361, 212380), 'ditto.models.phase_winding.PhaseWinding', 'PhaseWinding', (['model'], {}), '(model)\n', (212373, 212380), False, 'from ditto.models.phase_winding import PhaseWinding\n'), ((232733, 232744), 'ditto.models.load.Load', 'Load', (['model'], {}), '(model)\n', (232737, 232744), False, 'from ditto.models.load import Load\n'), ((261654, 261665), 'ditto.models.node.Node', 'Node', (['model'], {}), '(model)\n', (261658, 261665), False, 'from ditto.models.node import Node\n'), ((25173, 25197), 'numpy.sqrt', 'np.sqrt', (['(1 - value2 ** 2)'], {}), '(1 - value2 ** 2)\n', (25180, 25197), True, 'import numpy as np\n'), ((25278, 25302), 'numpy.sqrt', 'np.sqrt', (['(1 - value2 ** 2)'], {}), '(1 - value2 ** 2)\n', (25285, 25302), True, 'import numpy as np\n'), ((101745, 101756), 'ditto.models.line.Line', 'Line', (['model'], {}), '(model)\n', (101749, 101756), False, 'from ditto.models.line import Line\n'), ((166224, 166236), 'math.sqrt', 'math.sqrt', (['(3)'], {}), '(3)\n', (166233, 166236), False, 'import math\n'), ((190185, 190204), 'ditto.models.phase_winding.PhaseWinding', 'PhaseWinding', (['model'], {}), '(model)\n', (190197, 190204), False, 'from ditto.models.phase_winding import PhaseWinding\n'), ((197975, 197994), 'ditto.models.phase_winding.PhaseWinding', 'PhaseWinding', (['model'], {}), '(model)\n', (197987, 197994), False, 'from ditto.models.phase_winding import PhaseWinding\n'), ((201076, 201095), 'ditto.models.phase_winding.PhaseWinding', 'PhaseWinding', (['model'], {}), '(model)\n', (201088, 201095), False, 'from ditto.models.phase_winding import PhaseWinding\n'), ((221287, 221303), 'ditto.models.phase_load.PhaseLoad', 'PhaseLoad', (['model'], {}), '(model)\n', (221296, 221303), False, 'from ditto.models.phase_load import PhaseLoad\n'), ((223553, 223569), 
'ditto.models.phase_load.PhaseLoad', 'PhaseLoad', (['model'], {}), '(model)\n', (223562, 223569), False, 'from ditto.models.phase_load import PhaseLoad\n'), ((235863, 235879), 'ditto.models.phase_load.PhaseLoad', 'PhaseLoad', (['model'], {}), '(model)\n', (235872, 235879), False, 'from ditto.models.phase_load import PhaseLoad\n'), ((37474, 37508), 'numpy.argwhere', 'np.argwhere', (['(arg == attribute_list)'], {}), '(arg == attribute_list)\n', (37485, 37508), True, 'import numpy as np\n'), ((39220, 39251), 'numpy.array', 'np.array', (['additional_attributes'], {}), '(additional_attributes)\n', (39228, 39251), True, 'import numpy as np\n'), ((60366, 60381), 'ditto.models.position.Position', 'Position', (['model'], {}), '(model)\n', (60374, 60381), False, 'from ditto.models.position import Position\n'), ((105360, 105371), 'ditto.models.line.Line', 'Line', (['model'], {}), '(model)\n', (105364, 105371), False, 'from ditto.models.line import Line\n'), ((146622, 146647), 'numpy.array', 'np.array', (['distance_matrix'], {}), '(distance_matrix)\n', (146630, 146647), True, 'import numpy as np\n'), ((152370, 152388), 'numpy.array', 'np.array', (['gmr_list'], {}), '(gmr_list)\n', (152378, 152388), True, 'import numpy as np\n'), ((152431, 152456), 'numpy.array', 'np.array', (['resistance_list'], {}), '(resistance_list)\n', (152439, 152456), True, 'import numpy as np\n'), ((230831, 230859), 'math.sqrt', 'math.sqrt', (['(kva ** 2 - p ** 2)'], {}), '(kva ** 2 - p ** 2)\n', (230840, 230859), False, 'import math\n'), ((232621, 232632), 'ditto.models.load.Load', 'Load', (['model'], {}), '(model)\n', (232625, 232632), False, 'from ditto.models.load import Load\n'), ((233138, 233178), 'functools.reduce', 'reduce', (["(lambda x, y: x + '_' + y)", 'phases'], {}), "(lambda x, y: x + '_' + y, phases)\n", (233144, 233178), False, 'from functools import reduce\n'), ((233473, 233513), 'functools.reduce', 'reduce', (["(lambda x, y: x + '_' + y)", 'phases'], {}), "(lambda x, y: x + '_' + y, 
phases)\n", (233479, 233513), False, 'from functools import reduce\n'), ((261956, 261971), 'ditto.models.position.Position', 'Position', (['model'], {}), '(model)\n', (261964, 261971), False, 'from ditto.models.position import Position\n'), ((108805, 108816), 'ditto.models.line.Line', 'Line', (['model'], {}), '(model)\n', (108809, 108816), False, 'from ditto.models.line import Line\n'), ((39655, 39696), 'numpy.argwhere', 'np.argwhere', (['(arg == additional_attributes)'], {}), '(arg == additional_attributes)\n', (39666, 39696), True, 'import numpy as np\n'), ((112355, 112366), 'ditto.models.line.Line', 'Line', (['model'], {}), '(model)\n', (112359, 112366), False, 'from ditto.models.line import Line\n'), ((152497, 152526), 'numpy.argwhere', 'np.argwhere', (['(gmr_list == None)'], {}), '(gmr_list == None)\n', (152508, 152526), True, 'import numpy as np\n'), ((153312, 153327), 'numpy.array', 'np.array', (['[[1]]'], {}), '([[1]])\n', (153320, 153327), True, 'import numpy as np\n'), ((115838, 115849), 'ditto.models.line.Line', 'Line', (['model'], {}), '(model)\n', (115842, 115849), False, 'from ditto.models.line import Line\n'), ((231321, 231355), 'math.sqrt', 'math.sqrt', (['((1 - PF ** 2) / PF ** 2)'], {}), '((1 - PF ** 2) / PF ** 2)\n', (231330, 231355), False, 'import math\n'), ((119679, 119690), 'ditto.models.line.Line', 'Line', (['model'], {}), '(model)\n', (119683, 119690), False, 'from ditto.models.line import Line\n'), ((231476, 231510), 'math.sqrt', 'math.sqrt', (['((1 - PF ** 2) / PF ** 2)'], {}), '((1 - PF ** 2) / PF ** 2)\n', (231485, 231510), False, 'import math\n')] |
import math
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
import tensorflow_hub as hub
import tensorflow_datasets as tfds
from tensorflow.keras import layers
import logging
# Silence TF's per-op INFO chatter; only errors are shown.
logger = tf.get_logger()
logger.setLevel(logging.ERROR)
# %%
# 70/30 split of the single "train" split shipped with tf_flowers.
splits = ["train[:70%]", "train[70%:]"]
# as_supervised=True yields (image, label) pairs; info carries dataset metadata.
(training_set, validation_set), info = tfds.load(
    "tf_flowers", with_info=True, as_supervised=True, split=splits
)
# %%
# some info about dataset
num_classes = info.features["label"].num_classes
# Split sizes derived from the full example count (ceil/floor mirror the 70/30 slicing).
num_training_examples = math.ceil(info.splits["train"].num_examples * 0.7)
num_validation_examples = math.floor(info.splits["train"].num_examples * 0.3)
print(f"Total number of classes: {num_classes}")
print(f"Total number of training images: {num_training_examples}")
print(f"Total number of validation images: {num_validation_examples}")
# %%
# Input resolution expected by the EfficientNet-B0 feature-vector module.
IMG_RES = 224
def format_image(image, label):
    """Resize *image* to IMG_RES x IMG_RES and scale pixel values to [0, 1]."""
    resized = tf.image.resize(image, (IMG_RES, IMG_RES))
    return resized / 255, label
BATCH_SIZE = 32
# Shuffle buffer of a quarter of the training set keeps memory bounded.
train_batches = (
    training_set.shuffle(num_training_examples // 4)
    .map(format_image)
    .batch(BATCH_SIZE)
    .prefetch(1)
)
validation_batches = validation_set.map(format_image).batch(BATCH_SIZE).prefetch(1)
# %%
# feature extractor (efficientnet)
URL = "https://tfhub.dev/tensorflow/efficientnet/b0/feature-vector/1"
feature_extractor = hub.KerasLayer(URL, input_shape=(IMG_RES, IMG_RES, 3))
feature_extractor.trainable = False  # freeze the pretrained weights; only the head trains
# attach to a classification head
model = tf.keras.Sequential([feature_extractor, layers.Dense(num_classes)])
model.summary()
# %%
# train model
EPOCHS = 3
# The Dense head has no softmax, so the loss operates on raw logits.
model.compile(
    loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
    optimizer="adam",
    metrics=["accuracy"],
)
# %%
history = model.fit(train_batches, epochs=EPOCHS, validation_data=validation_batches)
# %% plot some metrics
acc = history.history["accuracy"]
val_acc = history.history["val_accuracy"]
loss = history.history["loss"]
val_loss = history.history["val_loss"]
epochs_range = range(EPOCHS)
# Side-by-side accuracy / loss curves for train vs. validation.
plt.figure(figsize=(8, 8))
plt.subplot(1, 2, 1)
plt.plot(epochs_range, acc, label="Training Accuracy")
plt.plot(epochs_range, val_acc, label="Validation Accuracy")
plt.legend(loc="lower right")
plt.title("Training and Validation Accuracy")
plt.subplot(1, 2, 2)
plt.plot(epochs_range, loss, label="Training Loss")
plt.plot(epochs_range, val_loss, label="Validation Loss")
plt.legend(loc="upper right")
plt.title("Training and Validation Loss")
plt.savefig("flowers_tl_loss.png")
# %%
# Human-readable label names indexed by integer class id.
class_names = np.array(info.features["label"].names)
class_names
# %% make predictions on an image batch
# NOTE(review): next(iter(validation_batches)) is the portable spelling;
# .next() relies on TF's iterator alias — confirm on the TF version in use.
image_batch, label_batch = iter(validation_batches).next()
predicted_batch = model.predict(image_batch)
predicted_batch = tf.squeeze(predicted_batch).numpy()
# argmax over the logits gives the predicted integer class per image.
predicted_ids = np.argmax(predicted_batch, axis=-1)
predicted_class_names = class_names[predicted_ids]
# %%
# Map integer ids back to label names for display.
lc = lambda x: class_names[x]
print(f"True labels: \n{lc(label_batch)}, \n\n Predcited labels: \n{lc(predicted_ids)}")
# %% plot model preds
# 4 x (BATCH_SIZE // 4) grid: green title = correct prediction, red = wrong.
fig = plt.figure(figsize=(25, 12))
for idx in range(BATCH_SIZE):
    # add_subplot requires integer grid dimensions; `/` produced a float (8.0),
    # which modern matplotlib rejects — use floor division instead.
    ax = fig.add_subplot(4, BATCH_SIZE // 4, idx + 1, xticks=[], yticks=[])
    # np.transpose(x, (0, 1, 2)) was an identity permutation — dropped.
    plt.imshow(image_batch[idx])
    color = "green" if predicted_ids[idx] == label_batch[idx] else "red"
    ax.set_title(
        f"{predicted_class_names[idx].title(), class_names[label_batch[idx]]}",
        color=color,
    )
plt.savefig("flowers_tl.png")
# %% save model as pb, load model back as keras model
import time

t = int(time.time())
# tf.saved_model.save / load_model expect a string path, not a raw int.
export_path = f"./{t}"
tf.saved_model.save(model, export_path)
# load (KerasLayer must be registered so the hub layer deserializes)
rm = tf.keras.models.load_model(export_path, custom_objects={"KerasLayer": hub.KerasLayer})
| [
"math.floor",
"numpy.array",
"tensorflow.keras.layers.Dense",
"tensorflow.keras.models.load_model",
"matplotlib.pyplot.plot",
"tensorflow_hub.KerasLayer",
"matplotlib.pyplot.savefig",
"tensorflow.keras.losses.SparseCategoricalCrossentropy",
"numpy.argmax",
"tensorflow.saved_model.save",
"tensorf... | [((214, 229), 'tensorflow.get_logger', 'tf.get_logger', ([], {}), '()\n', (227, 229), True, 'import tensorflow as tf\n'), ((347, 420), 'tensorflow_datasets.load', 'tfds.load', (['"""tf_flowers"""'], {'with_info': '(True)', 'as_supervised': '(True)', 'split': 'splits'}), "('tf_flowers', with_info=True, as_supervised=True, split=splits)\n", (356, 420), True, 'import tensorflow_datasets as tfds\n'), ((531, 581), 'math.ceil', 'math.ceil', (["(info.splits['train'].num_examples * 0.7)"], {}), "(info.splits['train'].num_examples * 0.7)\n", (540, 581), False, 'import math\n'), ((608, 659), 'math.floor', 'math.floor', (["(info.splits['train'].num_examples * 0.3)"], {}), "(info.splits['train'].num_examples * 0.3)\n", (618, 659), False, 'import math\n'), ((1337, 1391), 'tensorflow_hub.KerasLayer', 'hub.KerasLayer', (['URL'], {'input_shape': '(IMG_RES, IMG_RES, 3)'}), '(URL, input_shape=(IMG_RES, IMG_RES, 3))\n', (1351, 1391), True, 'import tensorflow_hub as hub\n'), ((2028, 2054), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(8, 8)'}), '(figsize=(8, 8))\n', (2038, 2054), True, 'import matplotlib.pyplot as plt\n'), ((2055, 2075), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', '(2)', '(1)'], {}), '(1, 2, 1)\n', (2066, 2075), True, 'import matplotlib.pyplot as plt\n'), ((2076, 2130), 'matplotlib.pyplot.plot', 'plt.plot', (['epochs_range', 'acc'], {'label': '"""Training Accuracy"""'}), "(epochs_range, acc, label='Training Accuracy')\n", (2084, 2130), True, 'import matplotlib.pyplot as plt\n'), ((2131, 2191), 'matplotlib.pyplot.plot', 'plt.plot', (['epochs_range', 'val_acc'], {'label': '"""Validation Accuracy"""'}), "(epochs_range, val_acc, label='Validation Accuracy')\n", (2139, 2191), True, 'import matplotlib.pyplot as plt\n'), ((2192, 2221), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""lower right"""'}), "(loc='lower right')\n", (2202, 2221), True, 'import matplotlib.pyplot as plt\n'), ((2222, 2267), 'matplotlib.pyplot.title', 
'plt.title', (['"""Training and Validation Accuracy"""'], {}), "('Training and Validation Accuracy')\n", (2231, 2267), True, 'import matplotlib.pyplot as plt\n'), ((2269, 2289), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', '(2)', '(2)'], {}), '(1, 2, 2)\n', (2280, 2289), True, 'import matplotlib.pyplot as plt\n'), ((2290, 2341), 'matplotlib.pyplot.plot', 'plt.plot', (['epochs_range', 'loss'], {'label': '"""Training Loss"""'}), "(epochs_range, loss, label='Training Loss')\n", (2298, 2341), True, 'import matplotlib.pyplot as plt\n'), ((2342, 2399), 'matplotlib.pyplot.plot', 'plt.plot', (['epochs_range', 'val_loss'], {'label': '"""Validation Loss"""'}), "(epochs_range, val_loss, label='Validation Loss')\n", (2350, 2399), True, 'import matplotlib.pyplot as plt\n'), ((2400, 2429), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""upper right"""'}), "(loc='upper right')\n", (2410, 2429), True, 'import matplotlib.pyplot as plt\n'), ((2430, 2471), 'matplotlib.pyplot.title', 'plt.title', (['"""Training and Validation Loss"""'], {}), "('Training and Validation Loss')\n", (2439, 2471), True, 'import matplotlib.pyplot as plt\n'), ((2473, 2507), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""flowers_tl_loss.png"""'], {}), "('flowers_tl_loss.png')\n", (2484, 2507), True, 'import matplotlib.pyplot as plt\n'), ((2527, 2565), 'numpy.array', 'np.array', (["info.features['label'].names"], {}), "(info.features['label'].names)\n", (2535, 2565), True, 'import numpy as np\n'), ((2794, 2829), 'numpy.argmax', 'np.argmax', (['predicted_batch'], {'axis': '(-1)'}), '(predicted_batch, axis=-1)\n', (2803, 2829), True, 'import numpy as np\n'), ((3034, 3062), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(25, 12)'}), '(figsize=(25, 12))\n', (3044, 3062), True, 'import matplotlib.pyplot as plt\n'), ((3546, 3575), 'tensorflow.saved_model.save', 'tf.saved_model.save', (['model', 't'], {}), '(model, t)\n', (3565, 3575), True, 'import tensorflow as tf\n'), ((3588, 
3664), 'tensorflow.keras.models.load_model', 'tf.keras.models.load_model', (['t'], {'custom_objects': "{'KerasLayer': hub.KerasLayer}"}), "(t, custom_objects={'KerasLayer': hub.KerasLayer})\n", (3614, 3664), True, 'import tensorflow as tf\n'), ((3428, 3457), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""flowers_tl.png"""'], {}), "('flowers_tl.png')\n", (3439, 3457), True, 'import matplotlib.pyplot as plt\n'), ((3533, 3544), 'time.time', 'time.time', ([], {}), '()\n', (3542, 3544), False, 'import time\n'), ((1521, 1546), 'tensorflow.keras.layers.Dense', 'layers.Dense', (['num_classes'], {}), '(num_classes)\n', (1533, 1546), False, 'from tensorflow.keras import layers\n'), ((1620, 1683), 'tensorflow.keras.losses.SparseCategoricalCrossentropy', 'tf.keras.losses.SparseCategoricalCrossentropy', ([], {'from_logits': '(True)'}), '(from_logits=True)\n', (1665, 1683), True, 'import tensorflow as tf\n'), ((2741, 2768), 'tensorflow.squeeze', 'tf.squeeze', (['predicted_batch'], {}), '(predicted_batch)\n', (2751, 2768), True, 'import tensorflow as tf\n'), ((3183, 3224), 'numpy.transpose', 'np.transpose', (['image_batch[idx]', '(0, 1, 2)'], {}), '(image_batch[idx], (0, 1, 2))\n', (3195, 3224), True, 'import numpy as np\n'), ((912, 954), 'tensorflow.image.resize', 'tf.image.resize', (['image', '(IMG_RES, IMG_RES)'], {}), '(image, (IMG_RES, IMG_RES))\n', (927, 954), True, 'import tensorflow as tf\n')] |
import numpy as np
from sklearn.model_selection import train_test_split
from utilities_test import get_data, \
    get_feature_vector_from_mfcc

# Root folder holding the emotion audio dataset (one sub-folder per class).
_DATA_PATH = '../korean_dataset'
# Class labels in the order used to encode the integer targets.
_CLASS_LABELS = ("angry", "disappoint", "fear", "neutral", "sad", "surrender")
def extract_data(flatten):
    """Load the emotion dataset and split it 80/20 into train/test arrays.

    Returns (x_train, x_test, y_train, y_test, num_classes) with the data
    converted to numpy arrays.
    """
    data, labels = get_data(_DATA_PATH, class_labels=_CLASS_LABELS,
                             flatten=flatten)
    # Fixed random_state keeps the split reproducible across runs.
    x_train, x_test, y_train, y_test = train_test_split(
        data,
        labels,
        test_size=0.2,
        random_state=42)
    return (np.array(x_train), np.array(x_test), np.array(y_train),
            np.array(y_test), len(_CLASS_LABELS))
def get_feature_vector(file_path, flatten):
    """Return the 39-coefficient MFCC feature vector for one audio file."""
    mfcc_features = get_feature_vector_from_mfcc(file_path, flatten, mfcc_len=39)
    return mfcc_features
| [
"sklearn.model_selection.train_test_split",
"numpy.array",
"utilities_test.get_feature_vector_from_mfcc",
"utilities_test.get_data"
] | [((306, 371), 'utilities_test.get_data', 'get_data', (['_DATA_PATH'], {'class_labels': '_CLASS_LABELS', 'flatten': 'flatten'}), '(_DATA_PATH, class_labels=_CLASS_LABELS, flatten=flatten)\n', (314, 371), False, 'from utilities_test import get_data, get_feature_vector_from_mfcc\n'), ((439, 501), 'sklearn.model_selection.train_test_split', 'train_test_split', (['data', 'labels'], {'test_size': '(0.2)', 'random_state': '(42)'}), '(data, labels, test_size=0.2, random_state=42)\n', (455, 501), False, 'from sklearn.model_selection import train_test_split\n'), ((705, 766), 'utilities_test.get_feature_vector_from_mfcc', 'get_feature_vector_from_mfcc', (['file_path', 'flatten'], {'mfcc_len': '(39)'}), '(file_path, flatten, mfcc_len=39)\n', (733, 766), False, 'from utilities_test import get_data, get_feature_vector_from_mfcc\n'), ((546, 563), 'numpy.array', 'np.array', (['x_train'], {}), '(x_train)\n', (554, 563), True, 'import numpy as np\n'), ((565, 581), 'numpy.array', 'np.array', (['x_test'], {}), '(x_test)\n', (573, 581), True, 'import numpy as np\n'), ((583, 600), 'numpy.array', 'np.array', (['y_train'], {}), '(y_train)\n', (591, 600), True, 'import numpy as np\n'), ((602, 618), 'numpy.array', 'np.array', (['y_test'], {}), '(y_test)\n', (610, 618), True, 'import numpy as np\n')] |
import os
import pandas as p
import numpy as np
from PIL import ImageEnhance
from PIL import Image, ImageChops, ImageOps
import keras
from keras import optimizers
from keras.models import Sequential
from keras.layers import Dense, Activation, Flatten, LeakyReLU, Conv2D, MaxPooling2D, Dropout, Lambda
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"  # see issue #152
os.environ["CUDA_VISIBLE_DEVICES"] = "2, 3"
# Ground-truth labels: one row per image id with its severity level.
# (single-argument os.path.join was a no-op — dropped)
train_labels = p.read_csv('/mnt/lab_data2/amr1/diabetic_retinopathy/trainLabels.csv')
# IDs held out for validation; file has one "<id>.<ext>" entry per line.
# `with` closes the file handle (the bare open() leaked it).
valid_ids = []
with open("valid_ids.txt") as valid_file:
    for line in valid_file:
        valid_ids.append(line.rstrip().split('.')[0])
image_to_label = dict(zip(train_labels.image, train_labels.level))
# Set lookup avoids an O(n) scan of valid_ids for every candidate id.
_valid_id_set = set(valid_ids)
train_ids = [patient_id for patient_id in image_to_label
             if patient_id not in _valid_id_set]
def load_and_resize_image(image_path, output_shape,
                          zmuv_mean, zmuv_std,
                          transfo_params=None):
    """Load one image, apply random augmentations, resize and ZMUV-normalise it.

    Parameters
    ----------
    image_path : str
        Path to the image file opened with PIL.
    output_shape : tuple
        Target (width, height) in pixels for the returned image.
    zmuv_mean, zmuv_std : float
        Mean and std used for zero-mean-unit-variance scaling at the end.
    transfo_params : dict
        Augmentation switches and ranges (e.g. 'crop', 'rotation', 'contrast',
        'flip', 'zoom', ...). NOTE: despite the None default, the function
        dereferences it unconditionally, so callers must pass a dict.

    Returns
    -------
    (ndarray, list, dict)
        The float32 normalised image, the [h/700, w/700] size ratios, and a
        dict of the random values chosen for each applied transformation.
    """
    im = Image.open(image_path, mode='r')
    # dim_dst records the image dimensions relative to a 700px reference
    # (sorted so index 0 is the larger side ratio).
    sort_dim = list(np.sort(im.size))
    dim_dst = [0, 0]
    dim_dst[0] = sort_dim[1] / 700.0
    dim_dst[1] = sort_dim[0] / 700.0
    im_new = im
    # Dict to keep track of random values.
    chosen_values = {}
    # Crop away black side borders of very wide fundus photographs: keep only
    # columns whose max channel value anywhere exceeds 35 (i.e. non-black).
    if transfo_params.get('extra_width_crop', False):
        w, h = im_new.size
        if w / float(h) >= 1.3:
            cols_thres = np.where(
                np.max(
                    np.max(
                        np.asarray(im_new),
                        axis=2),
                    axis=0) > 35)[0]
            # Extra cond compared to orig crop.
            if len(cols_thres) > output_shape[0] // 2:
                min_x, max_x = cols_thres[0], cols_thres[-1]
            else:
                min_x, max_x = 0, -1
            im_new = im_new.crop((min_x, 0,
                                  max_x, h))
    # For roughly square images, trim 5% off top and bottom.
    if transfo_params.get('crop_height', False):
        w, h = im_new.size
        if w > 1 and 0.98 <= h / float(w) <= 1.02:
            # "Normal" without height crop, do height crop.
            im_new = im_new.crop((0, int(0.05 * h),
                                  w, int(0.95 * h)))
    # Random edge crop BEFORE rotation (mutually exclusive with the
    # 'crop_after_rotation' variant below).
    if transfo_params.get('crop', False) and not \
            transfo_params.get('crop_after_rotation', False):
        do_crop = transfo_params['crop_prob'] > np.random.rand()
        chosen_values['do_crop'] = do_crop
        if do_crop:
            out_w, out_h = im_new.size
            w_dev = int(transfo_params['crop_w'] * out_w)
            h_dev = int(transfo_params['crop_h'] * out_h)
            w0 = np.random.randint(0, w_dev + 1)
            w1 = np.random.randint(0, w_dev + 1)
            h0 = np.random.randint(0, h_dev + 1)
            h1 = np.random.randint(0, h_dev + 1)
            # Add params to dict.
            chosen_values['w0'] = w0
            chosen_values['w1'] = w1
            chosen_values['h0'] = h0
            chosen_values['h1'] = h1
            im_new = im_new.crop((0 + w0, 0 + h0,
                                  out_w - w1, out_h - h1))
    # Optional random rotation before the resize step.
    if transfo_params.get('rotation_before_resize', False):
        rotation_param = np.random.randint(
            transfo_params['rotation_range'][0],
            transfo_params['rotation_range'][1])
        chosen_values['rotation_param'] = rotation_param
        im_new = im_new.rotate(rotation_param, resample=Image.BILINEAR,
                               expand=transfo_params.get('rotation_expand',
                                                         False))
        if transfo_params.get('rotation_expand',
                              False):
            im_new = im_new.crop(im_new.getbbox())
    # Random edge crop AFTER rotation (same mechanics as the pre-rotation one).
    if transfo_params.get('crop_after_rotation', False):
        do_crop = transfo_params['crop_prob'] > np.random.rand()
        chosen_values['do_crop'] = do_crop
        if do_crop:
            out_w, out_h = im_new.size
            w_dev = int(transfo_params['crop_w'] * out_w)
            h_dev = int(transfo_params['crop_h'] * out_h)
            w0 = np.random.randint(0, w_dev + 1)
            w1 = np.random.randint(0, w_dev + 1)
            h0 = np.random.randint(0, h_dev + 1)
            h1 = np.random.randint(0, h_dev + 1)
            # Add params to dict.
            chosen_values['w0'] = w0
            chosen_values['w1'] = w1
            chosen_values['h0'] = h0
            chosen_values['h1'] = h1
            im_new = im_new.crop((0 + w0, 0 + h0,
                                  out_w - w1, out_h - h1))
    # im_new = im_new.thumbnail(output_shape, resample=Image.BILINEAR)
    # NOTE(review): make_thumb is not defined in this file — the
    # 'keep_aspect_ratio' path would raise NameError. Confirm the helper
    # exists elsewhere; the params used below set keep_aspect_ratio=False.
    if transfo_params.get('keep_aspect_ratio', False):
        im_new = make_thumb(im_new, size=output_shape,
                            pad=transfo_params['resize_pad'])
    else:
        im_new = im_new.resize(output_shape, resample=Image.BILINEAR)
    # im_new = im_new.resize(output_shape, resample=Image.BICUBIC)
    # im_new = im_new.resize(map(lambda x: int(x * 1.2), output_shape),
    #                        resample=Image.BICUBIC)
    # im_new = im_new.crop(im_new.getbbox())
    # Rotation after resize (the default path, since rotation_before_resize
    # is False in the params used by the generator below).
    if transfo_params.get('rotation', False) \
            and not transfo_params.get('rotation_before_resize', False):
        rotation_param = np.random.randint(
            transfo_params['rotation_range'][0],
            transfo_params['rotation_range'][1])
        chosen_values['rotation_param'] = rotation_param
        im_new = im_new.rotate(rotation_param, resample=Image.BILINEAR,
                               expand=transfo_params.get('rotation_expand',
                                                         False))
        if transfo_params.get('rotation_expand',
                              False):
            im_new = im_new.crop(im_new.getbbox())
    # im_new = im_new.resize(output_shape, resample=Image.BICUBIC)
    # Photometric jitter: contrast, brightness and colour, each scaled by a
    # uniformly drawn factor from its configured range.
    if transfo_params.get('contrast', False):
        contrast_min, contrast_max = transfo_params['contrast_range']
        contrast_param = np.random.uniform(contrast_min, contrast_max)
        chosen_values['contrast_param'] = contrast_param
        im_new = ImageEnhance.Contrast(im_new).enhance(contrast_param)
    if transfo_params.get('brightness', False):
        brightness_min, brightness_max = transfo_params['brightness_range']
        brightness_param = np.random.uniform(brightness_min,
                                              brightness_max)
        chosen_values['brightness_param'] = brightness_param
        im_new = ImageEnhance.Brightness(im_new).enhance(brightness_param)
    if transfo_params.get('color', False):
        color_min, color_max = transfo_params['color_range']
        color_param = np.random.uniform(color_min, color_max)
        chosen_values['color_param'] = color_param
        im_new = ImageEnhance.Color(im_new).enhance(color_param)
    # Random horizontal flip.
    if transfo_params.get('flip', False):
        do_flip = transfo_params['flip_prob'] > np.random.rand()
        chosen_values['do_flip'] = do_flip
        if do_flip:
            im_new = im_new.transpose(Image.FLIP_LEFT_RIGHT)
    # Dead branch: `and False` makes this second rotation unreachable.
    if output_shape[0] < 200 and False:
        # Otherwise too slow.
        # TODO: Disabled for now
        if 'rotation' in transfo_params and transfo_params['rotation']:
            rotation_param = np.random.randint(
                transfo_params['rotation_range'][0],
                transfo_params['rotation_range'][1])
            im_new = im_new.rotate(rotation_param, resample=Image.BILINEAR,
                                   expand=False)
            # im_new = im_new.crop(im_new.getbbox())
            chosen_values['rotation_param2'] = rotation_param
    # Random symmetric zoom implemented as a centre crop by w_dev pixels
    # on every side (same margin is used for width and height).
    if transfo_params.get('zoom', False):
        do_zoom = transfo_params['zoom_prob'] > np.random.rand()
        chosen_values['do_zoom'] = do_zoom
        if do_zoom:
            zoom_min, zoom_max = transfo_params['zoom_range']
            out_w, out_h = im_new.size
            w_dev = int(np.random.uniform(zoom_min, zoom_max) / 2 * out_w)
            chosen_values['w_dev'] = w_dev
            im_new = im_new.crop((0 + w_dev,
                                  0 + w_dev,
                                  out_w - w_dev,
                                  out_h - w_dev))
    # Crops/zooms may have changed the size — force the final output shape.
    if im_new.size != output_shape:
        im_new = im_new.resize(output_shape, resample=Image.BILINEAR)
    # Scale to [0, 1] then apply ZMUV normalisation; the 0.05 in the
    # denominator guards against a near-zero std.
    im_new = np.asarray(im_new).astype('float32')/255
    im.close()
    im_new = (im_new - zmuv_mean)/(0.05 + zmuv_std)
    return im_new, dim_dst, chosen_values
def get_image_batch_generator(image_paths, labels, batch_size, output_shape, zmuv_mean, zmuv_std):
    """Endlessly yield augmented image batches as [images, ordinal_labels].

    Each yielded element is a two-item list: a float array of augmented,
    resized, ZMUV-normalised images and an array of 4-dim ordinal label
    vectors (grade k maps to k leading ones). The final batch of an epoch
    may be smaller than batch_size; the cursor then wraps back to the start.
    """
    # Fixed augmentation configuration for every image drawn by this generator.
    aug_params = {'rotation': True, 'rotation_range': (0, 360),
                  'contrast': True, 'contrast_range': (0.7, 1.3),
                  'brightness': True, 'brightness_range': (0.7, 1.3),
                  'color': True, 'color_range': (0.7, 1.3),
                  'flip': True, 'flip_prob': 0.5,
                  'crop': True, 'crop_prob': 0.4,
                  'crop_w': 0.03, 'crop_h': 0.04,
                  'keep_aspect_ratio': False,
                  'resize_pad': False,
                  'zoom': True, 'zoom_prob': 0.5,
                  'zoom_range': (0.00, 0.05),
                  'paired_transfos': False,
                  'rotation_expand': False,
                  'crop_height': False,
                  'extra_width_crop': True,
                  'rotation_before_resize': False,
                  'crop_after_rotation': True}
    cursor = 0
    while True:
        # Wrap around once the cursor has walked past the dataset.
        if cursor * batch_size >= len(image_paths):
            cursor = 0
        images = []
        targets = []
        start = cursor * batch_size
        for offset in range(batch_size):
            pos = start + offset
            if pos >= len(image_paths):
                continue
            img, _, _ = load_and_resize_image(image_path=image_paths[pos],
                                              output_shape=output_shape,
                                              zmuv_mean=zmuv_mean,
                                              zmuv_std=zmuv_std,
                                              transfo_params=aug_params)
            images.append(img)
            # Ordinal encoding: grade k -> first k entries set to 1.
            encoded = np.zeros(4)
            encoded[:labels[pos]] = 1
            targets.append(encoded)
        yield [np.array(images), np.array(targets)]
        cursor += 1
# Build absolute JPEG paths and matching grade labels for each split.
image_dir = "/mnt/lab_data2/amr1/diabetic_retinopathy/unzipped_train_ds2_crop/"
valid_image_paths = []
valid_image_labels = []
for patient_id in valid_ids:
    valid_image_paths.append(image_dir+str(patient_id)+".jpeg")
    valid_image_labels.append(image_to_label[str(patient_id)])
train_image_paths = []
train_image_labels = []
for patient_id in train_ids:
    train_image_paths.append(image_dir+str(patient_id)+".jpeg")
    train_image_labels.append(image_to_label[str(patient_id)])
# Training hyperparameters.
batch_size = 64
maxepoches = 250
learning_rate = 3e-4
# ZMUV statistics (0.042 / 0.204) are hard-coded dataset-wide values —
# presumably precomputed over the training images; confirm against the
# preprocessing pipeline.
batch_generator = get_image_batch_generator(image_paths=train_image_paths,
                                          labels=train_image_labels,
                                          batch_size=batch_size,
                                          output_shape=(512,512),
                                          zmuv_mean=0.042,
                                          zmuv_std=0.204)
validation_generator = get_image_batch_generator(image_paths=valid_image_paths,
                                                 labels=valid_image_labels,
                                                 batch_size=batch_size,
                                                 output_shape=(512,512),
                                                 zmuv_mean=0.042,
                                                 zmuv_std=0.204)
print("building model...")
# VGG-like stack of 3x3 convs with LeakyReLU, pooled in stages, followed by
# two dropout-regularised dense layers and a 4-unit sigmoid head matching
# the 4-dim ordinal label encoding produced by the generator.
model = Sequential([
    Conv2D(32, (7, 7), strides=(2, 2), input_shape=(512,512,3)),
    LeakyReLU(alpha=0.5),
    MaxPooling2D(pool_size=(3, 3), strides=(2, 2)),
    Conv2D(32, (3, 3), strides=(1, 1)),
    LeakyReLU(alpha=0.5),
    Conv2D(32, (3, 3), strides=(1, 1)),
    LeakyReLU(alpha=0.5),
    MaxPooling2D((2, 2)),
    Conv2D(32, (3, 3), strides=(1, 1)),
    LeakyReLU(alpha=0.5),
    Conv2D(32, (3, 3), strides=(1, 1)),
    LeakyReLU(alpha=0.5),
    MaxPooling2D((2, 2)),
    Conv2D(32, (3, 3), strides=(1, 1)),
    LeakyReLU(alpha=0.5),
    Conv2D(32, (3, 3), strides=(1, 1)),
    LeakyReLU(alpha=0.5),
    Conv2D(32, (3, 3), strides=(1, 1)),
    LeakyReLU(alpha=0.5),
    Conv2D(32, (3, 3), strides=(1, 1)),
    LeakyReLU(alpha=0.5),
    MaxPooling2D((2, 2)),
    Conv2D(32, (3, 3), strides=(1, 1)),
    LeakyReLU(alpha=0.5),
    Conv2D(32, (3, 3), strides=(1, 1)),
    LeakyReLU(alpha=0.5),
    Conv2D(32, (3, 3), strides=(1, 1)),
    LeakyReLU(alpha=0.5),
    Conv2D(32, (3, 3), strides=(1, 1)),
    LeakyReLU(alpha=0.5),
    MaxPooling2D((2, 2)),
    Dropout(0.5),
    Flatten(),
    Dense(512),#Lambda(Maxout, name='maxout1'),# MaxoutDense(512),
    Activation('relu'),
    Dropout(0.5),
    Dense(512),#Lambda(Maxout, name='maxout2'),#MaxoutDense(512),
    Activation('relu'),
    Dropout(0.5),
    Dense(10),
    Dense(4),
    Activation('sigmoid'),
    ])
print("compiling model...")
# NOTE(review): the variable is named `sgd` but holds an Adam optimizer.
sgd = optimizers.Adam(lr=learning_rate)
model.compile(loss='binary_crossentropy', optimizer=sgd,metrics=['accuracy'])
print("training model...")
early_stopping_callback = keras.callbacks.EarlyStopping(
    patience=25, restore_best_weights=True)
history = model.fit_generator(generator=batch_generator,
                    validation_data=validation_generator,
                    steps_per_epoch=len(train_image_labels) // batch_size,
                    validation_steps=len(valid_image_labels) // batch_size,
                    epochs=maxepoches,
                    callbacks=[early_stopping_callback])
print("saving model...")
# NOTE(review): restore_best_weights=True already restores the best weights
# after fit; this extra set_weights reads a non-public attribute — verify it
# is needed for the installed Keras version.
model.set_weights(early_stopping_callback.best_weights)
model.save('model_1.h5')
print("done.")
| [
"keras.layers.Conv2D",
"numpy.random.rand",
"PIL.ImageEnhance.Contrast",
"numpy.array",
"keras.layers.Activation",
"keras.layers.Dense",
"numpy.sort",
"numpy.asarray",
"PIL.ImageEnhance.Color",
"keras.callbacks.EarlyStopping",
"keras.optimizers.Adam",
"keras.layers.Flatten",
"keras.layers.Ma... | [((13293, 13326), 'keras.optimizers.Adam', 'optimizers.Adam', ([], {'lr': 'learning_rate'}), '(lr=learning_rate)\n', (13308, 13326), False, 'from keras import optimizers\n'), ((13459, 13528), 'keras.callbacks.EarlyStopping', 'keras.callbacks.EarlyStopping', ([], {'patience': '(25)', 'restore_best_weights': '(True)'}), '(patience=25, restore_best_weights=True)\n', (13488, 13528), False, 'import keras\n'), ((434, 506), 'os.path.join', 'os.path.join', (['"""/mnt/lab_data2/amr1/diabetic_retinopathy/trainLabels.csv"""'], {}), "('/mnt/lab_data2/amr1/diabetic_retinopathy/trainLabels.csv')\n", (446, 506), False, 'import os\n'), ((964, 996), 'PIL.Image.open', 'Image.open', (['image_path'], {'mode': '"""r"""'}), "(image_path, mode='r')\n", (974, 996), False, 'from PIL import Image, ImageChops, ImageOps\n'), ((1017, 1033), 'numpy.sort', 'np.sort', (['im.size'], {}), '(im.size)\n', (1024, 1033), True, 'import numpy as np\n'), ((3110, 3206), 'numpy.random.randint', 'np.random.randint', (["transfo_params['rotation_range'][0]", "transfo_params['rotation_range'][1]"], {}), "(transfo_params['rotation_range'][0], transfo_params[\n 'rotation_range'][1])\n", (3127, 3206), True, 'import numpy as np\n'), ((5159, 5255), 'numpy.random.randint', 'np.random.randint', (["transfo_params['rotation_range'][0]", "transfo_params['rotation_range'][1]"], {}), "(transfo_params['rotation_range'][0], transfo_params[\n 'rotation_range'][1])\n", (5176, 5255), True, 'import numpy as np\n'), ((5902, 5947), 'numpy.random.uniform', 'np.random.uniform', (['contrast_min', 'contrast_max'], {}), '(contrast_min, contrast_max)\n', (5919, 5947), True, 'import numpy as np\n'), ((6229, 6278), 'numpy.random.uniform', 'np.random.uniform', (['brightness_min', 'brightness_max'], {}), '(brightness_min, brightness_max)\n', (6246, 6278), True, 'import numpy as np\n'), ((6592, 6631), 'numpy.random.uniform', 'np.random.uniform', (['color_min', 'color_max'], {}), '(color_min, color_max)\n', (6609, 6631), 
True, 'import numpy as np\n'), ((11914, 11975), 'keras.layers.Conv2D', 'Conv2D', (['(32)', '(7, 7)'], {'strides': '(2, 2)', 'input_shape': '(512, 512, 3)'}), '(32, (7, 7), strides=(2, 2), input_shape=(512, 512, 3))\n', (11920, 11975), False, 'from keras.layers import Dense, Activation, Flatten, LeakyReLU, Conv2D, MaxPooling2D, Dropout, Lambda\n'), ((11979, 11999), 'keras.layers.LeakyReLU', 'LeakyReLU', ([], {'alpha': '(0.5)'}), '(alpha=0.5)\n', (11988, 11999), False, 'from keras.layers import Dense, Activation, Flatten, LeakyReLU, Conv2D, MaxPooling2D, Dropout, Lambda\n'), ((12005, 12051), 'keras.layers.MaxPooling2D', 'MaxPooling2D', ([], {'pool_size': '(3, 3)', 'strides': '(2, 2)'}), '(pool_size=(3, 3), strides=(2, 2))\n', (12017, 12051), False, 'from keras.layers import Dense, Activation, Flatten, LeakyReLU, Conv2D, MaxPooling2D, Dropout, Lambda\n'), ((12057, 12091), 'keras.layers.Conv2D', 'Conv2D', (['(32)', '(3, 3)'], {'strides': '(1, 1)'}), '(32, (3, 3), strides=(1, 1))\n', (12063, 12091), False, 'from keras.layers import Dense, Activation, Flatten, LeakyReLU, Conv2D, MaxPooling2D, Dropout, Lambda\n'), ((12097, 12117), 'keras.layers.LeakyReLU', 'LeakyReLU', ([], {'alpha': '(0.5)'}), '(alpha=0.5)\n', (12106, 12117), False, 'from keras.layers import Dense, Activation, Flatten, LeakyReLU, Conv2D, MaxPooling2D, Dropout, Lambda\n'), ((12123, 12157), 'keras.layers.Conv2D', 'Conv2D', (['(32)', '(3, 3)'], {'strides': '(1, 1)'}), '(32, (3, 3), strides=(1, 1))\n', (12129, 12157), False, 'from keras.layers import Dense, Activation, Flatten, LeakyReLU, Conv2D, MaxPooling2D, Dropout, Lambda\n'), ((12163, 12183), 'keras.layers.LeakyReLU', 'LeakyReLU', ([], {'alpha': '(0.5)'}), '(alpha=0.5)\n', (12172, 12183), False, 'from keras.layers import Dense, Activation, Flatten, LeakyReLU, Conv2D, MaxPooling2D, Dropout, Lambda\n'), ((12189, 12209), 'keras.layers.MaxPooling2D', 'MaxPooling2D', (['(2, 2)'], {}), '((2, 2))\n', (12201, 12209), False, 'from keras.layers import Dense, 
Activation, Flatten, LeakyReLU, Conv2D, MaxPooling2D, Dropout, Lambda\n'), ((12215, 12249), 'keras.layers.Conv2D', 'Conv2D', (['(32)', '(3, 3)'], {'strides': '(1, 1)'}), '(32, (3, 3), strides=(1, 1))\n', (12221, 12249), False, 'from keras.layers import Dense, Activation, Flatten, LeakyReLU, Conv2D, MaxPooling2D, Dropout, Lambda\n'), ((12255, 12275), 'keras.layers.LeakyReLU', 'LeakyReLU', ([], {'alpha': '(0.5)'}), '(alpha=0.5)\n', (12264, 12275), False, 'from keras.layers import Dense, Activation, Flatten, LeakyReLU, Conv2D, MaxPooling2D, Dropout, Lambda\n'), ((12281, 12315), 'keras.layers.Conv2D', 'Conv2D', (['(32)', '(3, 3)'], {'strides': '(1, 1)'}), '(32, (3, 3), strides=(1, 1))\n', (12287, 12315), False, 'from keras.layers import Dense, Activation, Flatten, LeakyReLU, Conv2D, MaxPooling2D, Dropout, Lambda\n'), ((12321, 12341), 'keras.layers.LeakyReLU', 'LeakyReLU', ([], {'alpha': '(0.5)'}), '(alpha=0.5)\n', (12330, 12341), False, 'from keras.layers import Dense, Activation, Flatten, LeakyReLU, Conv2D, MaxPooling2D, Dropout, Lambda\n'), ((12347, 12367), 'keras.layers.MaxPooling2D', 'MaxPooling2D', (['(2, 2)'], {}), '((2, 2))\n', (12359, 12367), False, 'from keras.layers import Dense, Activation, Flatten, LeakyReLU, Conv2D, MaxPooling2D, Dropout, Lambda\n'), ((12373, 12407), 'keras.layers.Conv2D', 'Conv2D', (['(32)', '(3, 3)'], {'strides': '(1, 1)'}), '(32, (3, 3), strides=(1, 1))\n', (12379, 12407), False, 'from keras.layers import Dense, Activation, Flatten, LeakyReLU, Conv2D, MaxPooling2D, Dropout, Lambda\n'), ((12413, 12433), 'keras.layers.LeakyReLU', 'LeakyReLU', ([], {'alpha': '(0.5)'}), '(alpha=0.5)\n', (12422, 12433), False, 'from keras.layers import Dense, Activation, Flatten, LeakyReLU, Conv2D, MaxPooling2D, Dropout, Lambda\n'), ((12439, 12473), 'keras.layers.Conv2D', 'Conv2D', (['(32)', '(3, 3)'], {'strides': '(1, 1)'}), '(32, (3, 3), strides=(1, 1))\n', (12445, 12473), False, 'from keras.layers import Dense, Activation, Flatten, LeakyReLU, Conv2D, 
MaxPooling2D, Dropout, Lambda\n'), ((12479, 12499), 'keras.layers.LeakyReLU', 'LeakyReLU', ([], {'alpha': '(0.5)'}), '(alpha=0.5)\n', (12488, 12499), False, 'from keras.layers import Dense, Activation, Flatten, LeakyReLU, Conv2D, MaxPooling2D, Dropout, Lambda\n'), ((12505, 12539), 'keras.layers.Conv2D', 'Conv2D', (['(32)', '(3, 3)'], {'strides': '(1, 1)'}), '(32, (3, 3), strides=(1, 1))\n', (12511, 12539), False, 'from keras.layers import Dense, Activation, Flatten, LeakyReLU, Conv2D, MaxPooling2D, Dropout, Lambda\n'), ((12545, 12565), 'keras.layers.LeakyReLU', 'LeakyReLU', ([], {'alpha': '(0.5)'}), '(alpha=0.5)\n', (12554, 12565), False, 'from keras.layers import Dense, Activation, Flatten, LeakyReLU, Conv2D, MaxPooling2D, Dropout, Lambda\n'), ((12571, 12605), 'keras.layers.Conv2D', 'Conv2D', (['(32)', '(3, 3)'], {'strides': '(1, 1)'}), '(32, (3, 3), strides=(1, 1))\n', (12577, 12605), False, 'from keras.layers import Dense, Activation, Flatten, LeakyReLU, Conv2D, MaxPooling2D, Dropout, Lambda\n'), ((12611, 12631), 'keras.layers.LeakyReLU', 'LeakyReLU', ([], {'alpha': '(0.5)'}), '(alpha=0.5)\n', (12620, 12631), False, 'from keras.layers import Dense, Activation, Flatten, LeakyReLU, Conv2D, MaxPooling2D, Dropout, Lambda\n'), ((12637, 12657), 'keras.layers.MaxPooling2D', 'MaxPooling2D', (['(2, 2)'], {}), '((2, 2))\n', (12649, 12657), False, 'from keras.layers import Dense, Activation, Flatten, LeakyReLU, Conv2D, MaxPooling2D, Dropout, Lambda\n'), ((12663, 12697), 'keras.layers.Conv2D', 'Conv2D', (['(32)', '(3, 3)'], {'strides': '(1, 1)'}), '(32, (3, 3), strides=(1, 1))\n', (12669, 12697), False, 'from keras.layers import Dense, Activation, Flatten, LeakyReLU, Conv2D, MaxPooling2D, Dropout, Lambda\n'), ((12703, 12723), 'keras.layers.LeakyReLU', 'LeakyReLU', ([], {'alpha': '(0.5)'}), '(alpha=0.5)\n', (12712, 12723), False, 'from keras.layers import Dense, Activation, Flatten, LeakyReLU, Conv2D, MaxPooling2D, Dropout, Lambda\n'), ((12729, 12763), 'keras.layers.Conv2D', 
'Conv2D', (['(32)', '(3, 3)'], {'strides': '(1, 1)'}), '(32, (3, 3), strides=(1, 1))\n', (12735, 12763), False, 'from keras.layers import Dense, Activation, Flatten, LeakyReLU, Conv2D, MaxPooling2D, Dropout, Lambda\n'), ((12769, 12789), 'keras.layers.LeakyReLU', 'LeakyReLU', ([], {'alpha': '(0.5)'}), '(alpha=0.5)\n', (12778, 12789), False, 'from keras.layers import Dense, Activation, Flatten, LeakyReLU, Conv2D, MaxPooling2D, Dropout, Lambda\n'), ((12795, 12829), 'keras.layers.Conv2D', 'Conv2D', (['(32)', '(3, 3)'], {'strides': '(1, 1)'}), '(32, (3, 3), strides=(1, 1))\n', (12801, 12829), False, 'from keras.layers import Dense, Activation, Flatten, LeakyReLU, Conv2D, MaxPooling2D, Dropout, Lambda\n'), ((12835, 12855), 'keras.layers.LeakyReLU', 'LeakyReLU', ([], {'alpha': '(0.5)'}), '(alpha=0.5)\n', (12844, 12855), False, 'from keras.layers import Dense, Activation, Flatten, LeakyReLU, Conv2D, MaxPooling2D, Dropout, Lambda\n'), ((12861, 12895), 'keras.layers.Conv2D', 'Conv2D', (['(32)', '(3, 3)'], {'strides': '(1, 1)'}), '(32, (3, 3), strides=(1, 1))\n', (12867, 12895), False, 'from keras.layers import Dense, Activation, Flatten, LeakyReLU, Conv2D, MaxPooling2D, Dropout, Lambda\n'), ((12901, 12921), 'keras.layers.LeakyReLU', 'LeakyReLU', ([], {'alpha': '(0.5)'}), '(alpha=0.5)\n', (12910, 12921), False, 'from keras.layers import Dense, Activation, Flatten, LeakyReLU, Conv2D, MaxPooling2D, Dropout, Lambda\n'), ((12927, 12947), 'keras.layers.MaxPooling2D', 'MaxPooling2D', (['(2, 2)'], {}), '((2, 2))\n', (12939, 12947), False, 'from keras.layers import Dense, Activation, Flatten, LeakyReLU, Conv2D, MaxPooling2D, Dropout, Lambda\n'), ((12953, 12965), 'keras.layers.Dropout', 'Dropout', (['(0.5)'], {}), '(0.5)\n', (12960, 12965), False, 'from keras.layers import Dense, Activation, Flatten, LeakyReLU, Conv2D, MaxPooling2D, Dropout, Lambda\n'), ((12971, 12980), 'keras.layers.Flatten', 'Flatten', ([], {}), '()\n', (12978, 12980), False, 'from keras.layers import Dense, 
Activation, Flatten, LeakyReLU, Conv2D, MaxPooling2D, Dropout, Lambda\n'), ((12986, 12996), 'keras.layers.Dense', 'Dense', (['(512)'], {}), '(512)\n', (12991, 12996), False, 'from keras.layers import Dense, Activation, Flatten, LeakyReLU, Conv2D, MaxPooling2D, Dropout, Lambda\n'), ((13053, 13071), 'keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (13063, 13071), False, 'from keras.layers import Dense, Activation, Flatten, LeakyReLU, Conv2D, MaxPooling2D, Dropout, Lambda\n'), ((13077, 13089), 'keras.layers.Dropout', 'Dropout', (['(0.5)'], {}), '(0.5)\n', (13084, 13089), False, 'from keras.layers import Dense, Activation, Flatten, LeakyReLU, Conv2D, MaxPooling2D, Dropout, Lambda\n'), ((13095, 13105), 'keras.layers.Dense', 'Dense', (['(512)'], {}), '(512)\n', (13100, 13105), False, 'from keras.layers import Dense, Activation, Flatten, LeakyReLU, Conv2D, MaxPooling2D, Dropout, Lambda\n'), ((13161, 13179), 'keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (13171, 13179), False, 'from keras.layers import Dense, Activation, Flatten, LeakyReLU, Conv2D, MaxPooling2D, Dropout, Lambda\n'), ((13185, 13197), 'keras.layers.Dropout', 'Dropout', (['(0.5)'], {}), '(0.5)\n', (13192, 13197), False, 'from keras.layers import Dense, Activation, Flatten, LeakyReLU, Conv2D, MaxPooling2D, Dropout, Lambda\n'), ((13203, 13212), 'keras.layers.Dense', 'Dense', (['(10)'], {}), '(10)\n', (13208, 13212), False, 'from keras.layers import Dense, Activation, Flatten, LeakyReLU, Conv2D, MaxPooling2D, Dropout, Lambda\n'), ((13218, 13226), 'keras.layers.Dense', 'Dense', (['(4)'], {}), '(4)\n', (13223, 13226), False, 'from keras.layers import Dense, Activation, Flatten, LeakyReLU, Conv2D, MaxPooling2D, Dropout, Lambda\n'), ((13232, 13253), 'keras.layers.Activation', 'Activation', (['"""sigmoid"""'], {}), "('sigmoid')\n", (13242, 13253), False, 'from keras.layers import Dense, Activation, Flatten, LeakyReLU, Conv2D, MaxPooling2D, Dropout, Lambda\n'), 
((2299, 2315), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (2313, 2315), True, 'import numpy as np\n'), ((2552, 2583), 'numpy.random.randint', 'np.random.randint', (['(0)', '(w_dev + 1)'], {}), '(0, w_dev + 1)\n', (2569, 2583), True, 'import numpy as np\n'), ((2601, 2632), 'numpy.random.randint', 'np.random.randint', (['(0)', '(w_dev + 1)'], {}), '(0, w_dev + 1)\n', (2618, 2632), True, 'import numpy as np\n'), ((2650, 2681), 'numpy.random.randint', 'np.random.randint', (['(0)', '(h_dev + 1)'], {}), '(0, h_dev + 1)\n', (2667, 2681), True, 'import numpy as np\n'), ((2699, 2730), 'numpy.random.randint', 'np.random.randint', (['(0)', '(h_dev + 1)'], {}), '(0, h_dev + 1)\n', (2716, 2730), True, 'import numpy as np\n'), ((3750, 3766), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (3764, 3766), True, 'import numpy as np\n'), ((4004, 4035), 'numpy.random.randint', 'np.random.randint', (['(0)', '(w_dev + 1)'], {}), '(0, w_dev + 1)\n', (4021, 4035), True, 'import numpy as np\n'), ((4053, 4084), 'numpy.random.randint', 'np.random.randint', (['(0)', '(w_dev + 1)'], {}), '(0, w_dev + 1)\n', (4070, 4084), True, 'import numpy as np\n'), ((4102, 4133), 'numpy.random.randint', 'np.random.randint', (['(0)', '(h_dev + 1)'], {}), '(0, h_dev + 1)\n', (4119, 4133), True, 'import numpy as np\n'), ((4151, 4182), 'numpy.random.randint', 'np.random.randint', (['(0)', '(h_dev + 1)'], {}), '(0, h_dev + 1)\n', (4168, 4182), True, 'import numpy as np\n'), ((6840, 6856), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (6854, 6856), True, 'import numpy as np\n'), ((7187, 7283), 'numpy.random.randint', 'np.random.randint', (["transfo_params['rotation_range'][0]", "transfo_params['rotation_range'][1]"], {}), "(transfo_params['rotation_range'][0], transfo_params[\n 'rotation_range'][1])\n", (7204, 7283), True, 'import numpy as np\n'), ((7652, 7668), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (7666, 7668), True, 'import numpy as np\n'), ((6023, 6052), 
'PIL.ImageEnhance.Contrast', 'ImageEnhance.Contrast', (['im_new'], {}), '(im_new)\n', (6044, 6052), False, 'from PIL import ImageEnhance\n'), ((6407, 6438), 'PIL.ImageEnhance.Brightness', 'ImageEnhance.Brightness', (['im_new'], {}), '(im_new)\n', (6430, 6438), False, 'from PIL import ImageEnhance\n'), ((6701, 6727), 'PIL.ImageEnhance.Color', 'ImageEnhance.Color', (['im_new'], {}), '(im_new)\n', (6719, 6727), False, 'from PIL import ImageEnhance\n'), ((8271, 8289), 'numpy.asarray', 'np.asarray', (['im_new'], {}), '(im_new)\n', (8281, 8289), True, 'import numpy as np\n'), ((10257, 10268), 'numpy.zeros', 'np.zeros', (['(4)'], {}), '(4)\n', (10265, 10268), True, 'import numpy as np\n'), ((10427, 10448), 'numpy.array', 'np.array', (['image_batch'], {}), '(image_batch)\n', (10435, 10448), True, 'import numpy as np\n'), ((10450, 10471), 'numpy.array', 'np.array', (['label_batch'], {}), '(label_batch)\n', (10458, 10471), True, 'import numpy as np\n'), ((7858, 7895), 'numpy.random.uniform', 'np.random.uniform', (['zoom_min', 'zoom_max'], {}), '(zoom_min, zoom_max)\n', (7875, 7895), True, 'import numpy as np\n'), ((1443, 1461), 'numpy.asarray', 'np.asarray', (['im_new'], {}), '(im_new)\n', (1453, 1461), True, 'import numpy as np\n')] |
#!/usr/bin/env python
import sys
import os
import numpy as np
import sqlite3
import pandas as pd
import astropy.table as at
from astroquery.irsa_dust import IrsaDust
import astropy.coordinates as coord
import astropy.units as u
def main():
    """Build a SNANA-style SIMLIB file from the ZTF MSIP observing log.

    Reads the observation summary from a local SQLite schedule database,
    selects the 'all_sky' (MSIP) subprogram, and for every observed field
    writes one LIBID entry: field position, Milky Way E(B-V) from the IRSA
    dust service, and one 'S:' row per exposure with sky noise, PSF and
    zero point derived from the logged 5-sigma depth and sky brightness.

    Side effects: queries IRSA over the network for each field and writes
    ztf_msip_simlib_<release_date>.dat in the current directory.
    """
    db = sqlite3.connect('test_schedule_v8_msip.db')
    table = pd.read_sql_query("SELECT * from SUMMARY", db)
    # MSIP public survey observations only.
    ind = table['subprogram'] == 'all_sky'
    msip = table[ind]

    release_date = '20180622'
    survey = 'ZTF_MSIP'
    filters = ''.join(np.unique(msip['filter']))
    user = 'gnarayan'
    host = 'grimnir.stsci.edu'
    comment = 'Based on ZTF observing log DB from <NAME>, <NAME> on {}'.format(release_date)
    pixsize = 1.

    fields = np.unique(msip['fieldID'])
    nlibid = len(fields)

    # SIMLIB global header.
    outlines = []
    outlines.append('SURVEY: {}'.format(survey))
    outlines.append('FILTERS: {}'.format(filters))
    outlines.append('TELESCOPE: ZTF')
    outlines.append('USER: {}'.format(user))
    outlines.append('HOST: {}'.format(host))
    outlines.append('SKYSIG_UNIT: ADU_PER_SQARCSEC')
    outlines.append('PIXSIZE: {:0.1f}'.format(pixsize))
    outlines.append('NLIBID: {}'.format(nlibid))
    outlines.append('COMMENT: {}'.format(comment))
    outlines.append('BEGIN LIBGEN')

    for field in fields:
        outlines.append('# --------------------------------------------')
        # select from table, not MSIP in case some of the other programs
        # observe the same field this may not be useful since we don't have
        # access to non-MSIP data but in principle these observations have been
        # taken and could be used to classify the data
        outlines.append('LIBID: {}'.format(field))
        indf = (table['fieldID'] == field)

        # all the positions appear to be identical, so there's no way to
        # account for dithers or overlaps
        ra = np.unique(table[indf]['fieldRA'])[0]
        dec = np.unique(table[indf]['fieldDec'])[0]

        # Milky Way reddening at the field centre (Schlafly & Finkbeiner mean).
        coo = coord.SkyCoord(ra*u.deg, dec*u.deg, frame='icrs')
        dust = IrsaDust.get_query_table(coo, section='ebv')
        mwebv = dust['ext SandF mean'][0]

        nobs = len(table[indf])
        outlines.append('RA: {} DEC: {} NOBS: {} PIXSIZE: {} MWEBV: {} FIELD: {}'.format(ra, dec, nobs, pixsize, mwebv, field))
        outlines.append('#                          CCD  CCD         PSF1 PSF2 PSF2/1')
        outlines.append('#     MJD      ID*NEXPOSE  FLT GAIN NOISE SKYSIG (pixels)  RATIO  ZPTAVG ZPTERR  MAG')

        entries = at.Table.from_pandas(table[indf])
        for entry in entries:
            # get some quantities
            flt = entry['filter']
            skymag = entry['filtSkyBright']
            depth = entry['fiveSigmaDepth']
            snr = 5.
            fwhm = entry['FWHMeff']
            term1 = 2.0 * depth - skymag
            term2 = - (depth - skymag)

            # convert FWHM from arcsec to sigma_gaussian in pixels
            sigma_pixel = fwhm /2.35 /pixsize
            # Effective noise-equivalent area of the PSF in pixels.
            pixel_area = (1.51 * fwhm)**2
            arg = pixel_area * snr * snr

            # Background dominated limit assuming counts with system transmission only
            # is approximately equal to counts with total transmission
            zpt_approx = term1 + 2.5 * np.log10(arg)
            # Correction for the source's own Poisson contribution.
            tmp = 10. **(-0.4 * term2)
            zpt_cor = 2.5 * np.log10(1. + 1. / (pixel_area * tmp))
            simlib_zptavg = zpt_approx + zpt_cor

            # Sky noise per square arcsec at the derived zero point.
            npix_asec = 1. / pixsize**2.
            skysig = np.sqrt((1.0 / npix_asec) * 10.**(-0.4 * (skymag - simlib_zptavg)))

            lst = ['S:',
                   "{0:5.4f}".format(entry['expMJD']),
                   "{0:10d}*2".format(entry['obsHistID']),
                   entry['filter'],
                   "{0:5.2f}".format(1.),          # CCD Gain
                   "{0:5.2f}".format(0.25),        # CCD Noise
                   "{0:6.2f}".format(skysig),      # SKYSIG
                   "{0:4.2f}".format(sigma_pixel), # PSF1
                   "{0:4.2f}".format(0.),          # PSF2
                   "{0:4.3f}".format(0.),          # PSFRatio
                   "{0:6.2f}".format(simlib_zptavg), # ZPTAVG
                   "{0:6.3f}".format(0.005),       # ZPTNoise
                   "{0:+7.3f}".format(-99.)]       # MAG
            out = ' '.join(lst)
            outlines.append(out)
        outlines.append('END_LIBID: {}'.format(field))

    outlines = '\n'.join(outlines)
    with open('ztf_msip_simlib_{}.dat'.format(release_date), 'w') as f:
        f.write(outlines)


if __name__ == '__main__':
    sys.exit(main())
| [
"pandas.read_sql_query",
"numpy.log10",
"numpy.sqrt",
"numpy.unique",
"sqlite3.connect",
"astropy.table.Table.from_pandas",
"astropy.coordinates.SkyCoord",
"astroquery.irsa_dust.IrsaDust.get_query_table"
] | [((250, 293), 'sqlite3.connect', 'sqlite3.connect', (['"""test_schedule_v8_msip.db"""'], {}), "('test_schedule_v8_msip.db')\n", (265, 293), False, 'import sqlite3\n'), ((306, 352), 'pandas.read_sql_query', 'pd.read_sql_query', (['"""SELECT * from SUMMARY"""', 'db'], {}), "('SELECT * from SUMMARY', db)\n", (323, 352), True, 'import pandas as pd\n'), ((698, 724), 'numpy.unique', 'np.unique', (["msip['fieldID']"], {}), "(msip['fieldID'])\n", (707, 724), True, 'import numpy as np\n'), ((495, 520), 'numpy.unique', 'np.unique', (["msip['filter']"], {}), "(msip['filter'])\n", (504, 520), True, 'import numpy as np\n'), ((1955, 2008), 'astropy.coordinates.SkyCoord', 'coord.SkyCoord', (['(ra * u.deg)', '(dec * u.deg)'], {'frame': '"""icrs"""'}), "(ra * u.deg, dec * u.deg, frame='icrs')\n", (1969, 2008), True, 'import astropy.coordinates as coord\n'), ((2020, 2064), 'astroquery.irsa_dust.IrsaDust.get_query_table', 'IrsaDust.get_query_table', (['coo'], {'section': '"""ebv"""'}), "(coo, section='ebv')\n", (2044, 2064), False, 'from astroquery.irsa_dust import IrsaDust\n'), ((2504, 2537), 'astropy.table.Table.from_pandas', 'at.Table.from_pandas', (['table[indf]'], {}), '(table[indf])\n', (2524, 2537), True, 'import astropy.table as at\n'), ((1851, 1884), 'numpy.unique', 'np.unique', (["table[indf]['fieldRA']"], {}), "(table[indf]['fieldRA'])\n", (1860, 1884), True, 'import numpy as np\n'), ((1902, 1936), 'numpy.unique', 'np.unique', (["table[indf]['fieldDec']"], {}), "(table[indf]['fieldDec'])\n", (1911, 1936), True, 'import numpy as np\n'), ((3501, 3569), 'numpy.sqrt', 'np.sqrt', (['(1.0 / npix_asec * 10.0 ** (-0.4 * (skymag - simlib_zptavg)))'], {}), '(1.0 / npix_asec * 10.0 ** (-0.4 * (skymag - simlib_zptavg)))\n', (3508, 3569), True, 'import numpy as np\n'), ((3350, 3390), 'numpy.log10', 'np.log10', (['(1.0 + 1.0 / (pixel_area * tmp))'], {}), '(1.0 + 1.0 / (pixel_area * tmp))\n', (3358, 3390), True, 'import numpy as np\n'), ((3268, 3281), 'numpy.log10', 'np.log10', 
(['arg'], {}), '(arg)\n', (3276, 3281), True, 'import numpy as np\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
CIE xyY Colourspace
===================
Defines the *CIE xyY* colourspace transformations:
- :func:`XYZ_to_xyY`
- :func:`xyY_to_XYZ`
- :func:`xy_to_XYZ`
- :func:`XYZ_to_xy`
See Also
--------
`CIE xyY Colourspace IPython Notebook
<http://nbviewer.ipython.org/github/colour-science/colour-ipython/blob/master/notebooks/models/cie_xyy.ipynb>`_ # noqa
References
----------
.. [1] http://en.wikipedia.org/wiki/CIE_1931_color_space
(Last accessed 24 February 2014)
"""
from __future__ import division, unicode_literals
import numpy as np
from colour.colorimetry import ILLUMINANTS
__author__ = 'Colour Developers'
__copyright__ = 'Copyright (C) 2013 - 2014 - Colour Developers'
__license__ = 'New BSD License - http://opensource.org/licenses/BSD-3-Clause'
__maintainer__ = 'Colour Developers'
__email__ = '<EMAIL>'
__status__ = 'Production'
__all__ = ['XYZ_to_xyY',
'xyY_to_XYZ',
'xy_to_XYZ',
'XYZ_to_xy']
def XYZ_to_xyY(XYZ,
               illuminant=ILLUMINANTS.get(
                   'CIE 1931 2 Degree Standard Observer').get('D50')):
    """
    Converts from *CIE XYZ* colourspace to *CIE xyY* colourspace.

    For a pure black stimulus the chromaticity is undefined, so the
    chromaticity coordinates of the reference *illuminant* are used instead.

    Parameters
    ----------
    XYZ : array_like, (3,)
        *CIE XYZ* colourspace matrix in domain [0, 1].
    illuminant : array_like, optional
        Reference *illuminant* chromaticity coordinates.

    Returns
    -------
    ndarray, (3,)
        *CIE xyY* colourspace matrix in domain [0, 1].

    References
    ----------
    .. [2] http://www.brucelindbloom.com/Eqn_XYZ_to_xyY.html
            (Last accessed 24 February 2014)

    Examples
    --------
    >>> XYZ_to_xyY(np.array([0.1180583421, 0.1034, 0.0515089229]))
    array([ 0.4325,  0.3788,  0.1034])
    """
    X, Y, Z = np.ravel(XYZ)
    if X == 0 and Y == 0 and Z == 0:
        # Black: chromaticity is undefined, fall back to the illuminant.
        return np.array([illuminant[0], illuminant[1], Y])
    denominator = X + Y + Z
    return np.array([X / denominator, Y / denominator, Y])
def xyY_to_XYZ(xyY):
    """
    Converts from *CIE xyY* colourspace to *CIE XYZ* colourspace.

    Parameters
    ----------
    xyY : array_like, (3,)
        *CIE xyY* colourspace matrix in domain [0, 1].

    Returns
    -------
    ndarray, (3,)
        *CIE XYZ* colourspace matrix in domain [0, 1].

    References
    ----------
    .. [3] http://www.brucelindbloom.com/Eqn_xyY_to_XYZ.html
            (Last accessed 24 February 2014)

    Examples
    --------
    >>> xyY_to_XYZ(np.array([0.4325, 0.3788, 0.1034]))  # doctest: +ELLIPSIS
    array([ 0.1180583...,  0.1034    ,  0.0515089...])
    """
    x, y, Y = np.ravel(xyY)
    if y == 0:
        # Degenerate chromaticity: no luminance can be distributed.
        return np.array([0, 0, 0])
    X = x * Y / y
    Z = (1 - x - y) * Y / y
    return np.array([X, Y, Z])
def xy_to_XYZ(xy):
    """
    Returns the *CIE XYZ* colourspace matrix from given *xy* chromaticity
    coordinates, assuming a unit luminance (Y = 1).

    Parameters
    ----------
    xy : array_like
        *xy* chromaticity coordinates in domain [0, 1].

    Returns
    -------
    ndarray, (3,)
        *CIE XYZ* colourspace matrix in domain [0, 1].

    Examples
    --------
    >>> xy_to_XYZ((0.25, 0.25))
    array([ 1.,  1.,  2.])
    """
    # Delegate to the xyY transform with luminance fixed at 1.
    x, y = xy[0], xy[1]
    return xyY_to_XYZ(np.array([x, y, 1]))
def XYZ_to_xy(XYZ,
              illuminant=ILLUMINANTS.get(
                  'CIE 1931 2 Degree Standard Observer').get('D50')):
    """
    Returns the *xy* chromaticity coordinates from given *CIE XYZ* colourspace
    matrix.

    Parameters
    ----------
    XYZ : array_like, (3,)
        *CIE XYZ* colourspace matrix in domain [0, 1].
    illuminant : array_like, optional
        Reference *illuminant* chromaticity coordinates.

    Returns
    -------
    tuple
        *xy* chromaticity coordinates in domain [0, 1].

    Examples
    --------
    >>> XYZ_to_xy(np.array([0.97137399, 1, 1.04462134]))  # doctest: +ELLIPSIS
    (0.3220741..., 0.3315655...)
    >>> XYZ_to_xy((0.97137399, 1, 1.04462134))  # doctest: +ELLIPSIS
    (0.3220741..., 0.3315655...)
    """
    # Reuse the full xyY conversion and drop the luminance component.
    x, y, _Y = np.ravel(XYZ_to_xyY(XYZ, illuminant))
    return x, y
| [
"numpy.ravel",
"numpy.array",
"colour.colorimetry.ILLUMINANTS.get"
] | [((1969, 1982), 'numpy.ravel', 'np.ravel', (['XYZ'], {}), '(XYZ)\n', (1977, 1982), True, 'import numpy as np\n'), ((2897, 2910), 'numpy.ravel', 'np.ravel', (['xyY'], {}), '(xyY)\n', (2905, 2910), True, 'import numpy as np\n'), ((2036, 2079), 'numpy.array', 'np.array', (['[illuminant[0], illuminant[1], Y]'], {}), '([illuminant[0], illuminant[1], Y])\n', (2044, 2079), True, 'import numpy as np\n'), ((2105, 2152), 'numpy.array', 'np.array', (['[X / (X + Y + Z), Y / (X + Y + Z), Y]'], {}), '([X / (X + Y + Z), Y / (X + Y + Z), Y])\n', (2113, 2152), True, 'import numpy as np\n'), ((2942, 2961), 'numpy.array', 'np.array', (['[0, 0, 0]'], {}), '([0, 0, 0])\n', (2950, 2961), True, 'import numpy as np\n'), ((2987, 3032), 'numpy.array', 'np.array', (['[x * Y / y, Y, (1 - x - y) * Y / y]'], {}), '([x * Y / y, Y, (1 - x - y) * Y / y])\n', (2995, 3032), True, 'import numpy as np\n'), ((3593, 3620), 'numpy.array', 'np.array', (['[xy[0], xy[1], 1]'], {}), '([xy[0], xy[1], 1])\n', (3601, 3620), True, 'import numpy as np\n'), ((1060, 1114), 'colour.colorimetry.ILLUMINANTS.get', 'ILLUMINANTS.get', (['"""CIE 1931 2 Degree Standard Observer"""'], {}), "('CIE 1931 2 Degree Standard Observer')\n", (1075, 1114), False, 'from colour.colorimetry import ILLUMINANTS\n'), ((3668, 3722), 'colour.colorimetry.ILLUMINANTS.get', 'ILLUMINANTS.get', (['"""CIE 1931 2 Degree Standard Observer"""'], {}), "('CIE 1931 2 Degree Standard Observer')\n", (3683, 3722), False, 'from colour.colorimetry import ILLUMINANTS\n')] |
import argparse
import os
import numpy as np
from tqdm import tqdm
import torch.backends.cudnn as cudnn
# from mypath import Path
from common import config
from data import make_data_loader
from model.sync_batchnorm.replicate import patch_replication_callback
from model.deeplab import *
from utils.loss import SegmentationLosses, SimilarityLosses
from utils.lr_scheduler import LR_Scheduler
from utils.metrics import Evaluator
import json
import visdom
import torch
class Trainer(object):
    """Builds the DeepLab segmentation model together with its losses,
    optimizer and lr scheduler, and runs the fine-tuning / validation
    loops while plotting curves to visdom."""

    def __init__(self, config, args):
        """Set up dataloaders, network, criteria, optimizer, evaluator and
        optionally restore weights from ``args.resume``.

        :param config: project-wide configuration object (``common.config``).
        :param args: parsed command-line arguments.
        """
        self.args = args
        self.config = config
        # Use the repository directory name as the visdom environment name.
        self.vis = visdom.Visdom(env=os.getcwd().split('/')[-1])
        # Define Dataloader
        self.train_loader, self.finetune_loader, self.val_loader, self.test_loader, self.nclass = make_data_loader(
            config)
        # Define network
        model = DeepLab(num_classes=self.nclass,
                        backbone=config.backbone,
                        output_stride=config.out_stride,
                        sync_bn=config.sync_bn,
                        freeze_bn=config.freeze_bn)
        # Backbone parameters train at the base lr, decoder at 10x lr.
        train_params = [{'params': model.get_1x_lr_params(), 'lr': config.lr},
                        {'params': model.get_10x_lr_params(), 'lr': config.lr * 10}]
        # Define Optimizer
        optimizer = torch.optim.SGD(train_params, momentum=config.momentum,
                                    weight_decay=config.weight_decay)
        # Define Criterion
        # whether to use class balanced weights
        self.criterion_seg = SegmentationLosses(weight=None, cuda=args.cuda).build_loss(mode=config.loss)
        self.criterion_sim = SimilarityLosses(cuda=args.cuda)
        self.model, self.optimizer = model, optimizer
        # Define Evaluator
        self.evaluator = Evaluator(self.nclass)
        # Define lr scheduler
        self.scheduler = LR_Scheduler(config.lr_scheduler, config.lr,
                                      config.epochs, len(self.finetune_loader),
                                      config.lr_step, config.warmup_epochs)
        # Using cuda
        if args.cuda:
            self.model = self.model.cuda()
        # Resuming checkpoint
        self.best_pred = 0.0
        if args.resume is not None:
            if not os.path.isfile(args.resume):
                raise RuntimeError("=> no checkpoint found at '{}'".format(args.resume))
            # BUGFIX: ``map_location`` is a keyword of ``torch.load``, not of
            # ``load_state_dict`` -- the previous code raised a TypeError when
            # resuming on a CPU-only machine.  Remap tensors to CPU at load
            # time instead.
            if args.cuda:
                checkpoint = torch.load(args.resume)
            else:
                checkpoint = torch.load(args.resume, map_location=torch.device('cpu'))
            self.model.load_state_dict(checkpoint)
            print("=> loaded checkpoint '{}' (epoch {})"
                  .format(args.resume, args.start_epoch))

    def training(self, epoch):
        """Run one fine-tuning epoch over ``self.finetune_loader``.

        The total loss combines the similarity loss on the encoder neck with
        the segmentation loss: ``config.beta * loss_sim + loss_seg``; only the
        segmentation part is reported as "train loss".

        :param epoch: zero-based epoch index (used by the lr schedule/plots).
        """
        train_loss = 0.0
        self.model.train()
        tbar = tqdm(self.finetune_loader)
        for i, sample in enumerate(tbar):
            # Global step, used as x-axis for the lr plot.
            global_step = epoch * len(self.finetune_loader) + i
            self.vis.line(X=torch.tensor([global_step]), Y=torch.tensor([self.optimizer.param_groups[0]['lr']]),
                          win='lr', opts=dict(title='lr', xlabel='iter', ylabel='lr'),
                          update='append' if global_step > 0 else None)
            image, target = sample['image'], sample['label']
            if self.args.cuda:
                image, target = image.cuda(), target.cuda()
            self.scheduler(self.optimizer, i, epoch, self.best_pred)
            self.optimizer.zero_grad()
            # Finetune batches are pairs; the similarity target is the IoU of
            # the two ground-truth masks.
            gt1, gt2 = target[0, :, :], target[1, :, :]
            gt_similarity = self.evaluator.Building_GT_IoU(gt1, gt2)
            neck, low_level_feat, size = self.model.get_neck(image)
            output = self.model.decode(neck, low_level_feat, size)
            loss1 = self.criterion_sim.get_loss(neck[0, :, :, :], neck[1, :, :, :], gt_similarity)
            loss2 = self.criterion_seg(output, target)
            loss = config.beta * loss1 + loss2
            loss.backward()
            self.optimizer.step()
            train_loss += loss2.item()
            tbar.set_description('Train loss: %.3f' % (train_loss / (i + 1)))
        print('[Epoch: %d, numImages: %5d]' % (epoch, i * self.config.batch_size + image.data.shape[0]))
        print('Loss: %.3f' % train_loss)
        self.vis.line(X=torch.tensor([epoch]), Y=torch.tensor([train_loss]), win='loss', name='train',
                      opts=dict(title='loss', xlabel='epoch', ylabel='loss'),
                      update='append' if epoch > 0 else None)

    def validation(self, epoch):
        """Evaluate on ``self.val_loader``; checkpoint the model and update
        ``self.best_pred`` when mIoU improves.  Metrics are always dumped to
        ``<save_folder>/eval/epoch<epoch>.json``.

        :param epoch: zero-based epoch index.
        """
        self.model.eval()
        self.evaluator.reset()
        tbar = tqdm(self.val_loader, desc='\r')
        test_loss = 0.0
        for i, sample in enumerate(tbar):
            image, target = sample['image'], sample['label']
            if self.args.cuda:
                image, target = image.cuda(), target.cuda()
            with torch.no_grad():
                output = self.model(image)
            loss = self.criterion_seg(output, target)
            test_loss += loss.item()
            tbar.set_description('Test loss: %.3f' % (test_loss / (i + 1)))
            pred = output.data.cpu().numpy()
            target = target.cpu().numpy()
            pred = np.argmax(pred, axis=1)
            # Add batch sample into evaluator
            self.evaluator.add_batch(target, pred)
        # Fast test during the training
        Acc = self.evaluator.Building_Acc()
        IoU = self.evaluator.Building_IoU()
        mIoU = self.evaluator.Mean_Intersection_over_Union()
        print('Validation:')
        print('[Epoch: %d, numImages: %5d]' % (epoch, i * self.config.batch_size + image.data.shape[0]))
        print("Acc:{}, IoU:{}, mIoU:{}".format(Acc, IoU, mIoU))
        print('Loss: %.3f' % test_loss)
        self.vis.line(X=torch.tensor([epoch]), Y=torch.tensor([test_loss]), win='loss', name='val',
                      update='append')
        self.vis.line(X=torch.tensor([epoch]), Y=torch.tensor([Acc]), win='metrics', name='acc',
                      opts=dict(title='metrics', xlabel='epoch', ylabel='performance'),
                      update='append' if epoch > 0 else None)
        self.vis.line(X=torch.tensor([epoch]), Y=torch.tensor([IoU]), win='metrics', name='IoU',
                      update='append')
        self.vis.line(X=torch.tensor([epoch]), Y=torch.tensor([mIoU]), win='metrics', name='mIoU',
                      update='append')
        new_pred = mIoU
        if new_pred > self.best_pred:
            self.best_pred = new_pred
            print('Saving state, epoch:', epoch)
            torch.save(self.model.state_dict(), self.args.save_folder + 'models/'
                       + 'epoch' + str(epoch) + '.pth')
        loss_file = {'Acc': Acc, 'IoU': IoU, 'mIoU': mIoU}
        with open(os.path.join(self.args.save_folder, 'eval', 'epoch' + str(epoch) + '.json'), 'w') as f:
            json.dump(loss_file, f)
def main():
    """Parse command-line options, prepare the output folders (including a
    symlink into the shared model store) and run training followed by
    validation for every epoch in ``config.epochs``.
    """
    parser = argparse.ArgumentParser(description="PyTorch DeeplabV3Plus Training")
    # training hyper params
    parser.add_argument('--start_epoch', type=int, default=0,
                        metavar='N', help='start epochs (default:0)')
    # cuda, seed and logging
    parser.add_argument('--no-cuda', action='store_true', default=False, help='disables CUDA training')
    # NOTE(review): the backslash continuation below embeds the next physical
    # line's leading whitespace into the help string; kept as-is on purpose.
    parser.add_argument('--gpu', type=str, default='0',
                        help='use which gpu to train, must be a \
                        comma-separated list of integers only (default=0)')
    parser.add_argument('--seed', type=int, default=1, metavar='S',
                        help='random seed (default: 1)')
    # checking point
    # '/usr/xtmp/satellite/train_models/xh.deeplab.mobilenet.shanghai/epoch432.pth'
    parser.add_argument('--resume', type=str,
                        default='epoch41.pth',
                        help='put the path to resuming file if needed')
    parser.add_argument('--checkname', type=str, default=None)
    parser.add_argument('--save_folder', default='train_log/',
                        help='Directory for saving checkpoint models')
    args = parser.parse_args()
    # Create the local log / eval directories on first run.
    if not os.path.exists(args.save_folder):
        os.mkdir(args.save_folder)
    if not os.path.exists(args.save_folder + 'eval/'):
        os.mkdir(args.save_folder + 'eval/')
    # if not os.path.exists(args.save_folder + 'models/'):
    #     os.mkdir(args.save_folder + 'models/')
    # Checkpoints live on a shared volume; expose them locally through a
    # symlink named after the current repository directory.
    if not os.path.exists('/usr/xtmp/satellite/train_models/' + os.getcwd().split('/')[-1]):
        os.mkdir('/usr/xtmp/satellite/train_models/' + os.getcwd().split('/')[-1])
        os.symlink('/usr/xtmp/satellite/train_models/' + os.getcwd().split('/')[-1], args.save_folder + 'models')
        print('Create soft link!')
    args.cuda = not args.no_cuda and torch.cuda.is_available()
    if args.cuda:
        print('Using cuda device:', args.gpu)
        os.environ['CUDA_VISIBLE_DEVICES'] = str(args.gpu)
    print(args)
    torch.manual_seed(args.seed)
    trainer = Trainer(config, args)
    print('Starting Epoch:', trainer.args.start_epoch)
    print('Total Epoches:', trainer.config.epochs)
    for epoch in range(trainer.args.start_epoch, trainer.config.epochs):
        trainer.training(epoch)
        # if not trainer.args.no_val and epoch % args.eval_interval == (args.eval_interval - 1):
        trainer.validation(epoch)


if __name__ == "__main__":
    main()
| [
"torch.cuda.is_available",
"os.path.exists",
"argparse.ArgumentParser",
"os.mkdir",
"utils.metrics.Evaluator",
"torch.optim.SGD",
"data.make_data_loader",
"numpy.argmax",
"os.path.isfile",
"torch.device",
"torch.manual_seed",
"utils.loss.SimilarityLosses",
"torch.load",
"tqdm.tqdm",
"os.... | [((7575, 7644), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""PyTorch DeeplabV3Plus Training"""'}), "(description='PyTorch DeeplabV3Plus Training')\n", (7598, 7644), False, 'import argparse\n'), ((9581, 9609), 'torch.manual_seed', 'torch.manual_seed', (['args.seed'], {}), '(args.seed)\n', (9598, 9609), False, 'import torch\n'), ((775, 799), 'data.make_data_loader', 'make_data_loader', (['config'], {}), '(config)\n', (791, 799), False, 'from data import make_data_loader\n'), ((1308, 1402), 'torch.optim.SGD', 'torch.optim.SGD', (['train_params'], {'momentum': 'config.momentum', 'weight_decay': 'config.weight_decay'}), '(train_params, momentum=config.momentum, weight_decay=config\n .weight_decay)\n', (1323, 1402), False, 'import torch\n'), ((1645, 1677), 'utils.loss.SimilarityLosses', 'SimilarityLosses', ([], {'cuda': 'args.cuda'}), '(cuda=args.cuda)\n', (1661, 1677), False, 'from utils.loss import SegmentationLosses, SimilarityLosses\n'), ((1785, 1807), 'utils.metrics.Evaluator', 'Evaluator', (['self.nclass'], {}), '(self.nclass)\n', (1794, 1807), False, 'from utils.metrics import Evaluator\n'), ((3050, 3076), 'tqdm.tqdm', 'tqdm', (['self.finetune_loader'], {}), '(self.finetune_loader)\n', (3054, 3076), False, 'from tqdm import tqdm\n'), ((4922, 4954), 'tqdm.tqdm', 'tqdm', (['self.val_loader'], {'desc': "'\\r'"}), "(self.val_loader, desc='\\r')\n", (4926, 4954), False, 'from tqdm import tqdm\n'), ((8771, 8803), 'os.path.exists', 'os.path.exists', (['args.save_folder'], {}), '(args.save_folder)\n', (8785, 8803), False, 'import os\n'), ((8813, 8839), 'os.mkdir', 'os.mkdir', (['args.save_folder'], {}), '(args.save_folder)\n', (8821, 8839), False, 'import os\n'), ((8851, 8893), 'os.path.exists', 'os.path.exists', (["(args.save_folder + 'eval/')"], {}), "(args.save_folder + 'eval/')\n", (8865, 8893), False, 'import os\n'), ((8903, 8939), 'os.mkdir', 'os.mkdir', (["(args.save_folder + 'eval/')"], {}), "(args.save_folder + 
'eval/')\n", (8911, 8939), False, 'import os\n'), ((9411, 9436), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (9434, 9436), False, 'import torch\n'), ((2560, 2583), 'torch.load', 'torch.load', (['args.resume'], {}), '(args.resume)\n', (2570, 2583), False, 'import torch\n'), ((5523, 5546), 'numpy.argmax', 'np.argmax', (['pred'], {'axis': '(1)'}), '(pred, axis=1)\n', (5532, 5546), True, 'import numpy as np\n'), ((1539, 1586), 'utils.loss.SegmentationLosses', 'SegmentationLosses', ([], {'weight': 'None', 'cuda': 'args.cuda'}), '(weight=None, cuda=args.cuda)\n', (1557, 1586), False, 'from utils.loss import SegmentationLosses, SimilarityLosses\n'), ((2417, 2444), 'os.path.isfile', 'os.path.isfile', (['args.resume'], {}), '(args.resume)\n', (2431, 2444), False, 'import os\n'), ((4597, 4618), 'torch.tensor', 'torch.tensor', (['[epoch]'], {}), '([epoch])\n', (4609, 4618), False, 'import torch\n'), ((4622, 4648), 'torch.tensor', 'torch.tensor', (['[train_loss]'], {}), '([train_loss])\n', (4634, 4648), False, 'import torch\n'), ((5190, 5205), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (5203, 5205), False, 'import torch\n'), ((6235, 6256), 'torch.tensor', 'torch.tensor', (['[epoch]'], {}), '([epoch])\n', (6247, 6256), False, 'import torch\n'), ((6260, 6285), 'torch.tensor', 'torch.tensor', (['[test_loss]'], {}), '([test_loss])\n', (6272, 6285), False, 'import torch\n'), ((6374, 6395), 'torch.tensor', 'torch.tensor', (['[epoch]'], {}), '([epoch])\n', (6386, 6395), False, 'import torch\n'), ((6399, 6418), 'torch.tensor', 'torch.tensor', (['[Acc]'], {}), '([Acc])\n', (6411, 6418), False, 'import torch\n'), ((6621, 6642), 'torch.tensor', 'torch.tensor', (['[epoch]'], {}), '([epoch])\n', (6633, 6642), False, 'import torch\n'), ((6646, 6665), 'torch.tensor', 'torch.tensor', (['[IoU]'], {}), '([IoU])\n', (6658, 6665), False, 'import torch\n'), ((6757, 6778), 'torch.tensor', 'torch.tensor', (['[epoch]'], {}), '([epoch])\n', (6769, 6778), False, 
'import torch\n'), ((6782, 6802), 'torch.tensor', 'torch.tensor', (['[mIoU]'], {}), '([mIoU])\n', (6794, 6802), False, 'import torch\n'), ((7524, 7547), 'json.dump', 'json.dump', (['loss_file', 'f'], {}), '(loss_file, f)\n', (7533, 7547), False, 'import json\n'), ((3251, 3271), 'torch.tensor', 'torch.tensor', (['[iter]'], {}), '([iter])\n', (3263, 3271), False, 'import torch\n'), ((3275, 3327), 'torch.tensor', 'torch.tensor', (["[self.optimizer.param_groups[0]['lr']]"], {}), "([self.optimizer.param_groups[0]['lr']])\n", (3287, 3327), False, 'import torch\n'), ((2815, 2834), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (2827, 2834), False, 'import torch\n'), ((621, 632), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (630, 632), False, 'import os\n'), ((9112, 9123), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (9121, 9123), False, 'import os\n'), ((9196, 9207), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (9205, 9207), False, 'import os\n'), ((9281, 9292), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (9290, 9292), False, 'import os\n')] |
from __future__ import absolute_import, print_function
import glob
import json
import os
import pickle
from typing import Dict
import numpy as np
from loguru import logger
from tqdm import tqdm
_VALID_SUBSETS = ['train', 'test']
class LaSOT(object):
    r"""`LaSOT <https://cis.temple.edu/lasot/>`_ Datasets.

    Publication:
        ``LaSOT: A High-quality Benchmark for Large-scale Single Object Tracking``,
        CVPR 2019.

    Args:
        root_dir (string): Root directory of dataset where sequence
            folders exist.
        subset (string, optional): ``train``, ``test`` or several subsets
            joined by ``_`` (e.g. ``train_test``).
        return_meta (bool, optional): If True, ``__getitem__`` also returns
            the per-sequence meta information (occlusion / out-of-view flags
            and the natural language description).
        check_integrity (bool, optional): Kept for backward compatibility;
            the integrity check is currently disabled.
        cache_path (Dict[str, str], optional): Mapping from subset name to a
            pickle file caching the parsed annotations.
        ignore_cache (bool, optional): If True, rebuild caches even when a
            cache file already exists.
    """
    # Class-level cache shared by all instances: subset -> {seq_name: record}.
    data_dict = {subset: dict() for subset in _VALID_SUBSETS}

    def __init__(self,
                 root_dir,
                 subset='test',
                 return_meta=False,
                 check_integrity=True,
                 cache_path=None,
                 ignore_cache=False):
        super(LaSOT, self).__init__()
        subset = subset.split('_')
        assert set(subset).issubset({'train', 'test'}), 'Unknown subset.'
        self.root_dir = root_dir
        self.subset = subset
        self.return_meta = return_meta
        # check seems useless, disabled
        # if check_integrity:
        #     self._check_integrity(root_dir)
        self.cache_path = cache_path
        self.ignore_cache = ignore_cache
        self.anno_files = sorted(
            glob.glob(os.path.join(root_dir, '*/*/groundtruth.txt')))
        self.seq_dirs = [
            os.path.join(os.path.dirname(f), 'img') for f in self.anno_files
        ]
        self.seq_names = [
            os.path.basename(os.path.dirname(f)) for f in self.anno_files
        ]
        # load subset sequence names
        split_file = os.path.join(os.path.dirname(__file__), 'lasot.json')
        with open(split_file, 'r') as f:
            splits = json.load(f)
        self.splits = splits
        self.seq_names = []
        for s in subset:
            self.seq_names.extend(splits[s])
        # Former seq_dirs/anno_files have been replaced by caching mechanism.
        # See _ensure_cache for detail.
        self._ensure_cache()
        self.seq_names = [
            k for subset in self.subset
            for k, _ in LaSOT.data_dict[subset].items()
        ]
        self.seq_names = sorted(self.seq_names)
        self.seq_datas = {
            k: v
            for subset in self.subset
            for k, v in LaSOT.data_dict[subset].items()
        }

    def __getitem__(self, index):
        r"""
        Args:
            index (integer or string): Index or name of a sequence.

        Returns:
            tuple: (img_files, anno) if ``return_meta`` is False, otherwise
                (img_files, anno, meta), where ``img_files`` is a list of
                file names, ``anno`` is a N x 4 (rectangles) numpy array, while
                ``meta`` is a dict contains meta information about the sequence.
        """
        if isinstance(index, int):
            index = self.seq_names[index]
        seq_data = self.seq_datas[index]
        img_files = seq_data["img_files"]
        anno = seq_data["anno"]
        meta = seq_data["meta"]
        if self.return_meta:
            # BUGFIX: the old code indexed the list ``self.seq_dirs`` with the
            # sequence *name* (a string), which raised a TypeError.  Use the
            # cached meta and only re-fetch it when the cache was built with
            # ``return_meta=False`` (load_single_sequence stores None then).
            if meta is None:
                seq_dir = os.path.join(self.root_dir,
                                       index[:index.rfind('-')], index, 'img')
                meta = self._fetch_meta(seq_dir)
            return img_files, anno, meta
        else:
            return img_files, anno

    def __len__(self):
        return len(self.seq_names)

    def _check_integrity(self, root_dir):
        """Warn about missing sequence folders; raise if root_dir is empty."""
        seq_names = os.listdir(root_dir)
        seq_names = [n for n in seq_names if not n[0] == '.']
        if os.path.isdir(root_dir) and len(seq_names) > 0:
            # check each sequence folder
            for seq_name in seq_names:
                seq_dir = os.path.join(root_dir, seq_name)
                if not os.path.isdir(seq_dir):
                    print('Warning: sequence %s not exists.' % seq_name)
        else:
            # dataset not exists
            raise Exception('Dataset not found or corrupted.')

    def _fetch_meta(self, seq_dir):
        """Load occlusion / out-of-view flags and the NLP description for the
        sequence whose ``img`` directory is ``seq_dir``."""
        seq_dir = os.path.dirname(seq_dir)
        meta = {}
        # attributes
        for att in ['full_occlusion', 'out_of_view']:
            att_file = os.path.join(seq_dir, att + '.txt')
            meta[att] = np.loadtxt(att_file, delimiter=',')
        # nlp
        nlp_file = os.path.join(seq_dir, 'nlp.txt')
        with open(nlp_file, 'r') as f:
            meta['nlp'] = f.read().strip()
        return meta

    def _ensure_cache(self):
        """Perform all overheads related to cache (building/loading/check)
        """
        # check if subset cache already exists in LaSOT.data_dict and is valid w.r.t. list.txt
        if self._check_cache_for_current_subset():
            return
        # load subset cache into LaSOT.data_dict
        cache_path = self._get_cache_path(cache_path=self.cache_path)
        self.cache_path = cache_path
        if all([os.path.isfile(p)
                for p in self.cache_path.values()]) and not self.ignore_cache:
            logger.info("{}: cache file exists: {} ".format(
                LaSOT.__name__, cache_path))
            self._load_cache_for_current_subset(cache_path)
            if self._check_cache_for_current_subset():
                logger.info(
                    "{}: record check has been processed and validity is confirmed for cache file: {} "
                    .format(LaSOT.__name__, cache_path))
                return
            else:
                logger.info(
                    "{}: cache file {} not valid, rebuilding cache...".format(
                        LaSOT.__name__, cache_path))
        # build subset cache in LaSOT.data_dict and cache to storage
        self._build_cache_for_current_subset()
        logger.info("{}: current cache file: {} ".format(
            LaSOT.__name__, self.cache_path))
        logger.info(
            "{}: need to clean this cache file if you move dataset directory".
            format(LaSOT.__name__))
        logger.info(
            "{}: consider cleaning this cache file in case of erros such as FileNotFoundError or IOError"
            .format(LaSOT.__name__))

    def _get_cache_path(self, cache_path: Dict[str, str] = None):
        r"""Ensure cache_path.
        If cache_path does not exist, turn to default set: root_dir/subset.pkl.
        """
        # BUGFIX: the old code called ``os.path.isfile(cache_path)`` on the
        # dict itself (a TypeError) instead of testing each file path ``p``.
        if (cache_path is None) or any(
                [not os.path.isfile(p) for p in cache_path.values()]):
            logger.info(
                "{}: passed cache file {} invalid, change to default cache path"
                .format(LaSOT.__name__, cache_path))
            cache_path = {
                subset: os.path.join(self.root_dir, subset + ".pkl")
                for subset in self.subset
            }
        return cache_path

    def _check_cache_for_current_subset(self) -> bool:
        r""" check if LaSOT.data_dict[subset] exists and contains all record in seq_names
        """
        is_valid_data_dict = all([subset in LaSOT.data_dict for subset in self.subset]) and \
                             (set([seq_name for subset in self.subset for seq_name in LaSOT.data_dict[subset].keys()]) == set(self.seq_names))
        return is_valid_data_dict

    def _build_cache_for_current_subset(self):
        r"""Build cache for current subset (self.subset)
        """
        root_dir = self.root_dir
        subset = self.subset
        for s in subset:
            logger.info("{}: start loading {}".format(LaSOT.__name__, s))
            seq_names = self.splits[s]
            for seq_name in tqdm(seq_names):
                seq_dir = os.path.join(root_dir, seq_name[:seq_name.rfind('-')],
                                       seq_name)
                img_files, anno, meta = self.load_single_sequence(seq_dir)
                LaSOT.data_dict[s][seq_name] = dict(img_files=img_files,
                                                    anno=anno,
                                                    meta=meta)
            with open(self.cache_path[s], "wb") as f:
                pickle.dump(LaSOT.data_dict[s], f)
            logger.info("{}: dump cache file to {}".format(
                LaSOT.__name__, self.cache_path[s]))

    def _load_cache_for_current_subset(self, cache_path: Dict[str, str]):
        """Populate ``LaSOT.data_dict`` from the pickled per-subset caches."""
        for subset in self.subset:
            assert os.path.exists(
                cache_path[subset]
            ), "cache_path does not exist: %s " % cache_path[subset]
            with open(cache_path[subset], "rb") as f:
                LaSOT.data_dict[subset] = pickle.load(f)
            logger.info("{}: loaded cache file {}".format(
                LaSOT.__name__, cache_path[subset]))

    def load_single_sequence(self, seq_dir):
        """Read image list and groundtruth for one sequence directory; meta is
        fetched only when ``self.return_meta`` is True (None otherwise)."""
        img_files = sorted(glob.glob(os.path.join(seq_dir, 'img/*.jpg')))
        anno = np.loadtxt(os.path.join(seq_dir, "groundtruth.txt"),
                          delimiter=',')
        assert len(img_files) == len(anno)
        if self.return_meta:
            meta = self._fetch_meta(seq_dir)
            return img_files, anno, meta
        else:
            return img_files, anno, None
| [
"os.path.exists",
"os.listdir",
"pickle.dump",
"tqdm.tqdm",
"os.path.join",
"pickle.load",
"os.path.isfile",
"os.path.dirname",
"os.path.isdir",
"json.load",
"numpy.loadtxt"
] | [((4299, 4319), 'os.listdir', 'os.listdir', (['root_dir'], {}), '(root_dir)\n', (4309, 4319), False, 'import os\n'), ((4866, 4890), 'os.path.dirname', 'os.path.dirname', (['seq_dir'], {}), '(seq_dir)\n', (4881, 4890), False, 'import os\n'), ((5138, 5170), 'os.path.join', 'os.path.join', (['seq_dir', '"""nlp.txt"""'], {}), "(seq_dir, 'nlp.txt')\n", (5150, 5170), False, 'import os\n'), ((1876, 1901), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (1891, 1901), False, 'import os\n'), ((1979, 1991), 'json.load', 'json.load', (['f'], {}), '(f)\n', (1988, 1991), False, 'import json\n'), ((4394, 4417), 'os.path.isdir', 'os.path.isdir', (['root_dir'], {}), '(root_dir)\n', (4407, 4417), False, 'import os\n'), ((5008, 5043), 'os.path.join', 'os.path.join', (['seq_dir', "(att + '.txt')"], {}), "(seq_dir, att + '.txt')\n", (5020, 5043), False, 'import os\n'), ((5068, 5103), 'numpy.loadtxt', 'np.loadtxt', (['att_file'], {'delimiter': '""","""'}), "(att_file, delimiter=',')\n", (5078, 5103), True, 'import numpy as np\n'), ((8380, 8395), 'tqdm.tqdm', 'tqdm', (['seq_names'], {}), '(seq_names)\n', (8384, 8395), False, 'from tqdm import tqdm\n'), ((9148, 9182), 'os.path.exists', 'os.path.exists', (['cache_path[subset]'], {}), '(cache_path[subset])\n', (9162, 9182), False, 'import os\n'), ((9637, 9677), 'os.path.join', 'os.path.join', (['seq_dir', '"""groundtruth.txt"""'], {}), "(seq_dir, 'groundtruth.txt')\n", (9649, 9677), False, 'import os\n'), ((1532, 1577), 'os.path.join', 'os.path.join', (['root_dir', '"""*/*/groundtruth.txt"""'], {}), "(root_dir, '*/*/groundtruth.txt')\n", (1544, 1577), False, 'import os\n'), ((1631, 1649), 'os.path.dirname', 'os.path.dirname', (['f'], {}), '(f)\n', (1646, 1649), False, 'import os\n'), ((1749, 1767), 'os.path.dirname', 'os.path.dirname', (['f'], {}), '(f)\n', (1764, 1767), False, 'import os\n'), ((4548, 4580), 'os.path.join', 'os.path.join', (['root_dir', 'seq_name'], {}), '(root_dir, seq_name)\n', (4560, 4580), 
False, 'import os\n'), ((7479, 7523), 'os.path.join', 'os.path.join', (['self.root_dir', "(subset + '.pkl')"], {}), "(self.root_dir, subset + '.pkl')\n", (7491, 7523), False, 'import os\n'), ((8871, 8905), 'pickle.dump', 'pickle.dump', (['LaSOT.data_dict[s]', 'f'], {}), '(LaSOT.data_dict[s], f)\n', (8882, 8905), False, 'import pickle\n'), ((9364, 9378), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (9375, 9378), False, 'import pickle\n'), ((9574, 9608), 'os.path.join', 'os.path.join', (['seq_dir', '"""img/*.jpg"""'], {}), "(seq_dir, 'img/*.jpg')\n", (9586, 9608), False, 'import os\n'), ((4604, 4626), 'os.path.isdir', 'os.path.isdir', (['seq_dir'], {}), '(seq_dir)\n', (4617, 4626), False, 'import os\n'), ((5729, 5746), 'os.path.isfile', 'os.path.isfile', (['p'], {}), '(p)\n', (5743, 5746), False, 'import os\n'), ((7210, 7236), 'os.path.isfile', 'os.path.isfile', (['cache_path'], {}), '(cache_path)\n', (7224, 7236), False, 'import os\n')] |
""" Layer common utilities """
import pickle
import numpy as np
import tensorflow as tf
from absl import logging
def init_word_embedding(vocab_size, num_units, we_trainable, we_file=None, name_prefix="w"):
    """Create the word-embedding variable.

    When ``we_file`` is empty/None the embedding is randomly initialized,
    otherwise it is loaded from a pickled matrix whose shape must be
    ``[vocab_size, num_units]``.
    """
    if we_file:
        # Initialize from a pretrained, pickled embedding matrix.
        embedding_name = "{}_pretrained_embedding".format(name_prefix)
        we = pickle.load(tf.io.gfile.GFile(we_file, 'rb'))
        assert vocab_size == we.shape[0] and num_units == we.shape[1]
        embedding = tf.compat.v1.get_variable(name=embedding_name,
                                              shape=[vocab_size, num_units],
                                              dtype=tf.float32,
                                              initializer=tf.compat.v1.constant_initializer(we),
                                              trainable=we_trainable)
        logging.info(f'Loading pretrained embedding {embedding_name} from {we_file}')
    else:
        # Random initialization.
        embedding_name = "{}_embedding".format(name_prefix)
        embedding = tf.compat.v1.get_variable(
            embedding_name, [vocab_size, num_units], dtype=tf.float32, trainable=we_trainable)
        logging.info(f'Initializing embedding {embedding_name}')
    return embedding
def get_sorted_dict(dct: dict):
    """Return a new dict whose items are ordered by ascending key."""
    return {key: dct[key] for key in sorted(dct)}
def inf(dtype):
    """Returns a value close to infinity, but is still finite in `dtype`.

    This is useful to get a very large value that is still zero when
    multiplied by zero. The floating-point "Inf" value is NaN when multiplied
    by zero.

    :param dtype: A dtype. The returned value will be finite when casted to this dtype.
    :return A very large value.
    """
    if dtype in ("float32", "bfloat16"):
        return 1e7
    if dtype == "float16":
        # Largest finite float16, backed off by one.
        return np.finfo(np.float16).max - 1  # pylint: disable=no-member
    raise AssertionError("Invalid dtype: %s" % dtype)
def expand_to_same_rank(tensor, target):
    """Appends size-1 trailing axes to ``tensor`` until it matches the rank of
    ``target``, making the two broadcast compatible.

    :param tensor: input tensor to tile. Shape: [b, d1, ..., da]
    :param target: target tensor. Shape: [b, d1, ..., da, ..., dn]
    :return ``tensor`` of shape [b, d1, ..., da, 1, ..., 1] with the same rank
        as ``target``.
    :raise ValueError, if the static rank of tensor/target is None.
    """
    if tensor.shape.rank is None:
        raise ValueError("Expect rank for tensor shape, but got None.")
    if target.shape.rank is None:
        raise ValueError("Expect rank for target shape, but got None.")
    with tf.name_scope("expand_rank"):
        rank_gap = target.shape.rank - tensor.shape.rank
        for _ in range(rank_gap):
            tensor = tf.expand_dims(tensor, -1)
    return tensor
def get_last_valid_elements(x, batch_size, seq_len):
    """Gathers, for every sentence in the batch, the element sitting at its
    last valid position, i.e. ``x[b, seq_len[b] - 1]``.

    :param x: input sequences. Shape=[batch_size, max_seq_len]
    :param batch_size: batch size
    :param seq_len: length of sequences for each sentence
    :return tensor with the last valid element of each row.
    """
    gather_indices = tf.stack([tf.range(batch_size), seq_len - 1], axis=1)
    return tf.gather_nd(x, gather_indices)  # [batch_size, num_units]
def _tile_batch(t, multiplier):
    """Core single-tensor implementation of tile_batch.

    Inserts a new axis after the batch dimension, tiles it ``multiplier``
    times, then folds it back into the batch dimension, so each batch entry
    is repeated consecutively.
    """
    t = tf.convert_to_tensor(t, name="t")  # shape=[d0, d1, ..., dn]
    shape_t = tf.shape(t)
    if t.shape.ndims is None or t.shape.ndims < 1:
        raise ValueError("t must have statically known rank")
    tiling = [1] * (t.shape.ndims + 1)  # value=[1, 1, ..., 1] n+1 1s
    tiling[1] = multiplier  # value=[1, multiplier, 1, 1]
    # Preserve the static batch size (when known) for downstream shape inference.
    tiled_static_batch_size = (
        t.shape[0] * multiplier if t.shape[0] is not None else None
    )
    tiled = tf.tile(tf.expand_dims(t, 1), tiling)  # shape=[d0, multiplier, d1, ..., dn]
    tiled = tf.reshape(tiled, tf.concat(([shape_t[0] * multiplier], shape_t[1:]), 0))  # shape=[d0*multiplier, d1, ..., dn]
    tiled.set_shape(tf.TensorShape([tiled_static_batch_size]).concatenate(t.shape[1:]))
    return tiled
def tile_batch(t, multiplier: int, name=None) -> tf.Tensor:
    """Tile the batch dimension of a (possibly nested structure of) tensor(s).

    Each tensor shaped ``[batch_size, s0, s1, ...]`` with minibatch entries
    ``t[0], ..., t[batch_size - 1]`` becomes a tensor shaped
    ``[batch_size * multiplier, s0, s1, ...]`` with entries
    ``t[0], t[0], ..., t[1], t[1], ...``, i.e. each entry repeated
    ``multiplier`` times.

    :param t: `Tensor` shaped `[batch_size, ...]`.
    :param multiplier: Python int.
    :param name: Name scope for any created operations.
    :return A (possibly nested structure of) `Tensor` shaped `[batch_size * multiplier, ...]`.
    :raise ValueError: if tensor(s) `t` do not have a statically known rank or the rank is < 1.
    """
    scope_name = name or "tile_batch"
    with tf.name_scope(scope_name):
        def _tile_one(element):
            return _tile_batch(element, multiplier)
        return tf.nest.map_structure(_tile_one, t)
def _shape_list(tensor):
    """Return a list of the tensor's shape, and ensure no None values in list.

    Statically known dimensions are returned as Python ints; unknown (None)
    dimensions are replaced by the corresponding slice of the dynamic
    ``tf.shape`` so every entry is usable at graph-construction time.
    """
    # Statically known shape (may contain None for unknown dimensions).
    static_shape = tensor.get_shape().as_list()
    dynamic_shape = tf.shape(tensor)
    # Substitute the dynamic dimension wherever the static one is unknown.
    return [dim if dim is not None else dynamic_shape[i]
            for i, dim in enumerate(static_shape)]
def _get_shape_keep_last_dim(tensor):
    """Return ``tensor``'s shape with every dimension but the last set to None.

    The last dimension is also set to None when it is only dynamically known
    (i.e. a ``tf.Tensor`` rather than a Python int).
    """
    dims = _shape_list(tensor)
    # Erase all leading dimensions; only the last one may stay static.
    dims[:-1] = [None] * (len(dims) - 1)
    if isinstance(dims[-1], tf.Tensor):
        dims[-1] = None
    return tf.TensorShape(dims)
def _log_prob_from_logits(logits):
    """Normalize logits into log-probabilities over the vocabulary axis.

    :param logits: tf.Tensor(dtype=tf.float32) Shape=[batch_size, beam_width, vocab_size]
    """
    log_normalizer = tf.reduce_logsumexp(logits, axis=2, keepdims=True)
    return logits - log_normalizer
def is_tf_function(func):
    """Return True when *func* looks like a ``tf.function``-decorated callable.

    Detection is duck-typed: tf.functions expose ``get_concrete_function``.
    """
    marker_attr = 'get_concrete_function'
    return hasattr(func, marker_attr)
def get_tf_function_names(clz):
    """Return the names of callable tf.function attributes of ``clz``.

    Single pass over ``dir(clz)`` with one ``getattr`` per attribute
    (the original built a full callable list and then re-fetched each
    attribute in a second ``filter`` pass).
    """
    names = []
    for method_name in dir(clz):
        attr = getattr(clz, method_name)
        if callable(attr) and is_tf_function(attr):
            names.append(method_name)
    return names
| [
"tensorflow.shape",
"tensorflow.compat.v1.get_variable",
"tensorflow.io.gfile.GFile",
"tensorflow.reduce_logsumexp",
"absl.logging.info",
"tensorflow.range",
"tensorflow.concat",
"tensorflow.compat.v1.constant_initializer",
"tensorflow.name_scope",
"numpy.finfo",
"tensorflow.convert_to_tensor",
... | [((3384, 3408), 'tensorflow.gather_nd', 'tf.gather_nd', (['x', 'indices'], {}), '(x, indices)\n', (3396, 3408), True, 'import tensorflow as tf\n'), ((3562, 3595), 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['t'], {'name': '"""t"""'}), "(t, name='t')\n", (3582, 3595), True, 'import tensorflow as tf\n'), ((3637, 3648), 'tensorflow.shape', 'tf.shape', (['t'], {}), '(t)\n', (3645, 3648), True, 'import tensorflow as tf\n'), ((5617, 5633), 'tensorflow.shape', 'tf.shape', (['tensor'], {}), '(tensor)\n', (5625, 5633), True, 'import tensorflow as tf\n'), ((6052, 6078), 'tensorflow.TensorShape', 'tf.TensorShape', (['shape_list'], {}), '(shape_list)\n', (6066, 6078), True, 'import tensorflow as tf\n'), ((436, 549), 'tensorflow.compat.v1.get_variable', 'tf.compat.v1.get_variable', (['embedding_name', '[vocab_size, num_units]'], {'dtype': 'tf.float32', 'trainable': 'we_trainable'}), '(embedding_name, [vocab_size, num_units], dtype=tf\n .float32, trainable=we_trainable)\n', (461, 549), True, 'import tensorflow as tf\n'), ((566, 622), 'absl.logging.info', 'logging.info', (['f"""Initializing embedding {embedding_name}"""'], {}), "(f'Initializing embedding {embedding_name}')\n", (578, 622), False, 'from absl import logging\n'), ((1266, 1343), 'absl.logging.info', 'logging.info', (['f"""Loading pretrained embedding {embedding_name} from {we_file}"""'], {}), "(f'Loading pretrained embedding {embedding_name} from {we_file}')\n", (1278, 1343), False, 'from absl import logging\n'), ((2841, 2869), 'tensorflow.name_scope', 'tf.name_scope', (['"""expand_rank"""'], {}), "('expand_rank')\n", (2854, 2869), True, 'import tensorflow as tf\n'), ((4016, 4036), 'tensorflow.expand_dims', 'tf.expand_dims', (['t', '(1)'], {}), '(t, 1)\n', (4030, 4036), True, 'import tensorflow as tf\n'), ((4115, 4169), 'tensorflow.concat', 'tf.concat', (['([shape_t[0] * multiplier], shape_t[1:])', '(0)'], {}), '(([shape_t[0] * multiplier], shape_t[1:]), 0)\n', (4124, 4169), True, 'import 
tensorflow as tf\n'), ((5204, 5239), 'tensorflow.name_scope', 'tf.name_scope', (["(name or 'tile_batch')"], {}), "(name or 'tile_batch')\n", (5217, 5239), True, 'import tensorflow as tf\n'), ((6271, 6321), 'tensorflow.reduce_logsumexp', 'tf.reduce_logsumexp', (['logits'], {'axis': '(2)', 'keepdims': '(True)'}), '(logits, axis=2, keepdims=True)\n', (6290, 6321), True, 'import tensorflow as tf\n'), ((779, 811), 'tensorflow.io.gfile.GFile', 'tf.io.gfile.GFile', (['we_file', '"""rb"""'], {}), "(we_file, 'rb')\n", (796, 811), True, 'import tensorflow as tf\n'), ((2985, 3011), 'tensorflow.expand_dims', 'tf.expand_dims', (['tensor', '(-1)'], {}), '(tensor, -1)\n', (2999, 3011), True, 'import tensorflow as tf\n'), ((3320, 3340), 'tensorflow.range', 'tf.range', (['batch_size'], {}), '(batch_size)\n', (3328, 3340), True, 'import tensorflow as tf\n'), ((1149, 1186), 'tensorflow.compat.v1.constant_initializer', 'tf.compat.v1.constant_initializer', (['we'], {}), '(we)\n', (1182, 1186), True, 'import tensorflow as tf\n'), ((4229, 4270), 'tensorflow.TensorShape', 'tf.TensorShape', (['[tiled_static_batch_size]'], {}), '([tiled_static_batch_size])\n', (4243, 4270), True, 'import tensorflow as tf\n'), ((2081, 2101), 'numpy.finfo', 'np.finfo', (['np.float16'], {}), '(np.float16)\n', (2089, 2101), True, 'import numpy as np\n')] |
import tensorflow as tf
import numpy as np
import glob
import random
import os
import cv2
import csv
class DataGenerator(tf.keras.utils.Sequence):
    # 'Generates data for tf.keras'
    """Keras ``Sequence`` yielding (X, Y) batches loaded from paired ``.npy`` files.

    X batches have shape (batch_size, 4, 2) and Y batches shape
    (batch_size, 52, 3). Input/output samples are paired by sorted filename
    order from the hard-coded ``Normalized_Data`` directories.
    """
    def __init__(self, args, shuffle=True,):
        # :param args: object exposing ``batch_size`` (argparse namespace or similar).
        # :param shuffle: reshuffle the sample order at the end of every epoch.
        self.shuffle = shuffle
        # NOTE(review): the args-based directories are disabled in favor of
        # hard-coded paths below — confirm this is intentional.
        # self.input_dir = os.path.join(args.base_data_dir,args.input_data_dir + "*")
        # self.output_dir = os.path.join(args.base_data_dir,args.output_data_dir + "*")
        self.input_dir = "Normalized_Data/x_data/*"
        self.output_dir = "Normalized_Data/y_data/*"
        self.batch_size = args.batch_size
        self.input_files = sorted(glob.glob(self.input_dir))
        # NOTE(review): output_dir is reassigned here from a glob *pattern*
        # to the sorted *file list* — confusing but relied upon below.
        self.output_dir = sorted(glob.glob(self.output_dir))
        self.all_files = list(zip(self.input_files,self.output_dir))
        self.on_epoch_end()
        self.count = self.__len__()
        print("number of all samples = ", len(self.input_files))
    def __len__(self):
        'Denotes the number of batches per epoch'
        # Incomplete trailing batches are dropped (floor division).
        self.num_batches = int(np.floor(len(self.all_files) / self.batch_size))
        return self.num_batches
    def __getitem__(self, index):
        # Return one (X, Y) batch by index.
        X,Y = self.__data_generation(index)
        return X,Y
    def on_epoch_end(self):
        # Shuffle the (input, output) pairs together so they stay aligned.
        if self.shuffle == True:
            np.random.shuffle(self.all_files)
    def __data_generation(self, idx):
        'Generates data containing batch_size samples'
        batch_files = self.all_files[idx*self.batch_size:idx*self.batch_size+self.batch_size]
        X = np.empty((self.batch_size,4,2))
        Y = np.empty((self.batch_size,52,3))
        # read image
        for i, batch_file in enumerate(batch_files):
            # Each pair is (input .npy path, target .npy path).
            X[i] = np.load(batch_file[0])
            Y[i] = np.load(batch_file[1])
            # with open(batch_file[0]) as file:
            #     csv_reader = csv.reader(file, delimiter=',')
            #     for j,row in enumerate(csv_reader):
            #         for k, val in enumerate(row[1:3]):
            #             X[i,j,k] = float(val)/1024
            # with open(batch_file[1]) as file:
            #     csv_reader = csv.reader(file, delimiter=',')
            #     for j,row in enumerate(csv_reader):
            #         for k, val in enumerate(row[3:6]):
            #             Y[i,j,k] = float(val)/1024
            #         # for k, val in enumerate(row[4:7]):
            #         #     Y[i,j,k] = float(val)/360
        return X,Y
class ImageDataGenerator(tf.keras.utils.Sequence):
    # 'Generates data for tf.keras'
    """Keras ``Sequence`` yielding image (X, Y) batches loaded from ``.npy`` files.

    Identical to ``DataGenerator`` except for the input batch shape:
    X batches are (batch_size, 128, 128, 3) images, Y batches (batch_size, 52, 3).
    """
    def __init__(self, args, shuffle=True,):
        # :param args: object exposing ``batch_size``.
        # :param shuffle: reshuffle the sample order at the end of every epoch.
        self.shuffle = shuffle
        # NOTE(review): the args-based directories are disabled in favor of
        # hard-coded paths below — confirm this is intentional.
        # self.input_dir = os.path.join(args.base_data_dir,args.input_data_dir + "*")
        # self.output_dir = os.path.join(args.base_data_dir,args.output_data_dir + "*")
        self.input_dir = "Normalized_Data/x_data/*"
        self.output_dir = "Normalized_Data/y_data/*"
        self.batch_size = args.batch_size
        self.input_files = sorted(glob.glob(self.input_dir))
        # NOTE(review): output_dir is reassigned from a glob pattern to the
        # sorted file list — same quirk as in DataGenerator.
        self.output_dir = sorted(glob.glob(self.output_dir))
        self.all_files = list(zip(self.input_files,self.output_dir))
        self.on_epoch_end()
        self.count = self.__len__()
        print("number of all samples = ", len(self.input_files))
    def __len__(self):
        'Denotes the number of batches per epoch'
        # Incomplete trailing batches are dropped (floor division).
        self.num_batches = int(np.floor(len(self.all_files) / self.batch_size))
        return self.num_batches
    def __getitem__(self, index):
        # Return one (X, Y) batch by index.
        X,Y = self.__data_generation(index)
        return X,Y
    def on_epoch_end(self):
        # Shuffle the (input, output) pairs together so they stay aligned.
        if self.shuffle == True:
            np.random.shuffle(self.all_files)
    def __data_generation(self, idx):
        'Generates data containing batch_size samples'
        batch_files = self.all_files[idx*self.batch_size:idx*self.batch_size+self.batch_size]
        X = np.empty((self.batch_size,128,128,3))
        Y = np.empty((self.batch_size,52,3))
        # read image
        for i, batch_file in enumerate(batch_files):
            # Each pair is (image .npy path, target .npy path).
            X[i] = np.load(batch_file[0])
            Y[i] = np.load(batch_file[1])
        return X,Y
| [
"numpy.load",
"numpy.empty",
"glob.glob",
"numpy.random.shuffle"
] | [((1517, 1550), 'numpy.empty', 'np.empty', (['(self.batch_size, 4, 2)'], {}), '((self.batch_size, 4, 2))\n', (1525, 1550), True, 'import numpy as np\n'), ((1561, 1595), 'numpy.empty', 'np.empty', (['(self.batch_size, 52, 3)'], {}), '((self.batch_size, 52, 3))\n', (1569, 1595), True, 'import numpy as np\n'), ((3864, 3904), 'numpy.empty', 'np.empty', (['(self.batch_size, 128, 128, 3)'], {}), '((self.batch_size, 128, 128, 3))\n', (3872, 3904), True, 'import numpy as np\n'), ((3914, 3948), 'numpy.empty', 'np.empty', (['(self.batch_size, 52, 3)'], {}), '((self.batch_size, 52, 3))\n', (3922, 3948), True, 'import numpy as np\n'), ((617, 642), 'glob.glob', 'glob.glob', (['self.input_dir'], {}), '(self.input_dir)\n', (626, 642), False, 'import glob\n'), ((678, 704), 'glob.glob', 'glob.glob', (['self.output_dir'], {}), '(self.output_dir)\n', (687, 704), False, 'import glob\n'), ((1271, 1304), 'numpy.random.shuffle', 'np.random.shuffle', (['self.all_files'], {}), '(self.all_files)\n', (1288, 1304), True, 'import numpy as np\n'), ((1689, 1711), 'numpy.load', 'np.load', (['batch_file[0]'], {}), '(batch_file[0])\n', (1696, 1711), True, 'import numpy as np\n'), ((1731, 1753), 'numpy.load', 'np.load', (['batch_file[1]'], {}), '(batch_file[1])\n', (1738, 1753), True, 'import numpy as np\n'), ((2964, 2989), 'glob.glob', 'glob.glob', (['self.input_dir'], {}), '(self.input_dir)\n', (2973, 2989), False, 'import glob\n'), ((3025, 3051), 'glob.glob', 'glob.glob', (['self.output_dir'], {}), '(self.output_dir)\n', (3034, 3051), False, 'import glob\n'), ((3618, 3651), 'numpy.random.shuffle', 'np.random.shuffle', (['self.all_files'], {}), '(self.all_files)\n', (3635, 3651), True, 'import numpy as np\n'), ((4041, 4063), 'numpy.load', 'np.load', (['batch_file[0]'], {}), '(batch_file[0])\n', (4048, 4063), True, 'import numpy as np\n'), ((4083, 4105), 'numpy.load', 'np.load', (['batch_file[1]'], {}), '(batch_file[1])\n', (4090, 4105), True, 'import numpy as np\n')] |
"""Author: <NAME>, Copyright 2019.
Functions to help serialize a caption dataset.
"""
import nltk
import numpy as np
import collections
stemmer = nltk.stem.snowball.SnowballStemmer("english")
def sentence_to_ngrams(sentence, n):
    """Count the stemmed n-grams of *sentence*.

    The sentence is lowercased, tokenized with nltk and snowball-stemmed;
    returns a ``defaultdict(int)`` mapping each n-gram tuple to its count.
    """
    tokens = nltk.tokenize.word_tokenize(sentence.strip().lower())
    lemmas = tuple(stemmer.stem(token) for token in tokens)
    counts = collections.defaultdict(int)
    for start in range(len(lemmas) - n + 1):
        counts[lemmas[start:(start + n)]] += 1
    return counts
def load_ngrams(id_to_captions, n):
    """Precompute per-caption n-gram counts and corpus document frequencies.

    :param id_to_captions: dict mapping image id -> list of reference captions.
    :param n: the n-gram order.
    :return (id_to_ngrams, document_frequencies) where id_to_ngrams maps each
        image id to a list of per-caption n-gram count dicts, and
        document_frequencies maps each n-gram to the number of images whose
        captions contain it (insertion-ordered).
    """
    id_to_ngrams = {}
    document_frequencies = collections.OrderedDict()
    for image_id, captions in id_to_captions.items():
        per_caption_grams = [sentence_to_ngrams(caption, n) for caption in captions]
        id_to_ngrams[image_id] = per_caption_grams
        seen_for_image = set()
        for ngrams in per_caption_grams:
            for gram in ngrams:
                document_frequencies.setdefault(gram, 0)
                # Count each image at most once per gram (document frequency).
                if gram not in seen_for_image:
                    seen_for_image.add(gram)
                    document_frequencies[gram] += 1
    return id_to_ngrams, document_frequencies
def tf_idf(id_to_ngrams, document_frequencies, n, image_id, candidate):
    """Compute the tf-idf weight vector of *candidate* over the corpus n-grams.

    Term frequencies come from the candidate's own n-gram counts; inverse
    document frequencies from ``document_frequencies`` (one component per
    corpus n-gram, in ``document_frequencies`` key order). Returns a zero
    vector when the candidate has no n-grams.
    """
    candidate_ngrams = sentence_to_ngrams(candidate, n)
    total_frequency = sum(candidate_ngrams.values())
    if total_frequency == 0:
        return np.zeros(len(document_frequencies))
    num_examples = len(id_to_ngrams)
    weights = [
        candidate_ngrams[gram] / total_frequency * np.log(num_examples / df)
        for gram, df in document_frequencies.items()
    ]
    return np.array(weights)
def tf_idf_known(id_to_ngrams, document_frequencies, image_id, reference_ngrams):
    """Compute the tf-idf weight vector from precomputed n-gram counts.

    Like ``tf_idf`` but takes the reference's n-gram counts directly instead
    of re-tokenizing a sentence. Returns one component per corpus n-gram in
    ``document_frequencies`` key order; a zero vector if the counts are empty.
    """
    total_frequency = sum(reference_ngrams.values())
    if total_frequency == 0:
        return np.zeros(len(document_frequencies))
    num_examples = len(id_to_ngrams)
    weights = [
        reference_ngrams[gram] / total_frequency * np.log(num_examples / df)
        for gram, df in document_frequencies.items()
    ]
    return np.array(weights)
def cider_n(id_to_ngrams, document_frequencies, n, image_id, candidate):
    """Average cosine similarity between the candidate's tf-idf n-gram vector
    and those of every reference caption of ``image_id``."""
    def _unit(vec):
        # Scale to unit length; leave zero vectors untouched.
        norm = np.linalg.norm(vec)
        return vec / norm if norm > 0 else vec
    candidate_vec = _unit(
        tf_idf(id_to_ngrams, document_frequencies, n, image_id, candidate))
    references = id_to_ngrams[image_id]
    total = 0.0
    for reference_ngrams in references:
        reference_vec = _unit(
            tf_idf_known(id_to_ngrams, document_frequencies, image_id, reference_ngrams))
        total += candidate_vec.dot(reference_vec)
    return total / len(references)
def cider(list_of_id_to_ngrams, list_of_document_frequencies, list_of_n,
          image_id, candidate):
    """CIDEr score of *candidate*: the mean of the per-order CIDEr-n scores."""
    per_order_scores = [
        cider_n(id_to_ngrams, document_frequencies, n, image_id, candidate)
        for id_to_ngrams, document_frequencies, n in zip(
            list_of_id_to_ngrams, list_of_document_frequencies, list_of_n)
    ]
    return sum(per_order_scores) / len(list_of_n)
def build_cider_scorer(id_to_captions, max_n, image_id=None):
    """Build a CIDEr scoring closure over *id_to_captions*.

    Bug fix: the original returned closure referenced a free variable
    ``image_id`` that only resolved through a *global* of that name (it
    happened to exist in ``__main__``), raising NameError otherwise. The id
    can now be supplied at build time or at call time; the global lookup is
    kept only as a last-resort fallback for backward compatibility.

    :param id_to_captions: dict mapping image id -> list of reference captions.
    :param max_n: largest n-gram order to use (orders 1..max_n).
    :param image_id: optional default image id the scorer compares against.
    :return: ``score_function(candidate, image_id=None) -> float``.
    """
    list_of_n = list(range(1, max_n + 1))
    list_of_id_to_ngrams = []
    list_of_document_frequencies = []
    for n in list_of_n:
        id_to_ngrams, document_frequencies = load_ngrams(id_to_captions, n)
        list_of_id_to_ngrams.append(id_to_ngrams)
        list_of_document_frequencies.append(document_frequencies)
    default_image_id = image_id
    def score_function(candidate, image_id=None):
        # Resolve the target image id: explicit call-time argument first,
        # then the build-time default, then (legacy behavior) a global.
        if image_id is None:
            image_id = default_image_id
        if image_id is None:
            # Original code silently relied on a global named ``image_id``;
            # keep that as a last resort so existing callers still work.
            image_id = globals()['image_id']
        return cider(list_of_id_to_ngrams, list_of_document_frequencies,
                     list_of_n, image_id, candidate)
    return score_function
if __name__ == "__main__":
    # Smoke test: score one candidate caption against a toy reference corpus.
    id_to_captions = {
        0: ["a women riding a black motor cycle.", "a biker on the road."],
        1: ["an apple sitting on a table.", "a red piece of fruit on a wooden table."],
        2: ["a young boy swinging a bat.", "a picture of a baseball game."],
        3: ["a computer at a desk with a mug.", "someone was working in their office."],
        4: ["a forest with a trail.", "hikers exploring the outdoors."],
    }
    # NOTE(review): ``image_id`` is a module-level global here; the closure
    # returned by ``build_cider_scorer`` reads it by name at call time.
    image_id = 1
    candidate = "a red apple sitting on a wooden table."
    scorer = build_cider_scorer(id_to_captions, 4)
print("CIDEr score for \"{}\" was {}".format(candidate, scorer(candidate))) | [
"collections.OrderedDict",
"numpy.log",
"numpy.array",
"nltk.stem.snowball.SnowballStemmer",
"collections.defaultdict",
"numpy.linalg.norm"
] | [((160, 205), 'nltk.stem.snowball.SnowballStemmer', 'nltk.stem.snowball.SnowballStemmer', (['"""english"""'], {}), "('english')\n", (194, 205), False, 'import nltk\n'), ((269, 297), 'collections.defaultdict', 'collections.defaultdict', (['int'], {}), '(int)\n', (292, 297), False, 'import collections\n'), ((615, 640), 'collections.OrderedDict', 'collections.OrderedDict', ([], {}), '()\n', (638, 640), False, 'import collections\n'), ((1761, 1784), 'numpy.array', 'np.array', (['tf_idf_weight'], {}), '(tf_idf_weight)\n', (1769, 1784), True, 'import numpy as np\n'), ((2266, 2289), 'numpy.array', 'np.array', (['tf_idf_weight'], {}), '(tf_idf_weight)\n', (2274, 2289), True, 'import numpy as np\n'), ((2498, 2537), 'numpy.linalg.norm', 'np.linalg.norm', (['candidate_tf_idf_weight'], {}), '(candidate_tf_idf_weight)\n', (2512, 2537), True, 'import numpy as np\n'), ((2907, 2946), 'numpy.linalg.norm', 'np.linalg.norm', (['reference_tf_idf_weight'], {}), '(reference_tf_idf_weight)\n', (2921, 2946), True, 'import numpy as np\n'), ((1684, 1733), 'numpy.log', 'np.log', (['(num_examples / document_frequencies[gram])'], {}), '(num_examples / document_frequencies[gram])\n', (1690, 1733), True, 'import numpy as np\n'), ((2189, 2238), 'numpy.log', 'np.log', (['(num_examples / document_frequencies[gram])'], {}), '(num_examples / document_frequencies[gram])\n', (2195, 2238), True, 'import numpy as np\n')] |
# This file is part of the pyMOR project (https://www.pymor.org).
# Copyright pyMOR developers and contributors. All rights reserved.
# License: BSD 2-Clause License (https://opensource.org/licenses/BSD-2-Clause)
"""Visualization of grid data using OpenGL.
This module provides a widget for displaying patch plots of
scalar data assigned to 2D-grids using OpenGL. This widget is not
intended to be used directly. Instead, use
:meth:`~pymor.discretizers.builtin.gui.qt.visualize_patch` or
:class:`~pymor.discretizers.builtin.gui.qt.PatchVisualizer`.
"""
from pymor.core.config import config
config.require('QT')
config.require('QTOPENGL')
config.require('GL')
import math as m
import numpy as np
import OpenGL.GL as gl
from qtpy.QtWidgets import QSizePolicy, QOpenGLWidget
from qtpy.QtGui import QPainter, QFontMetrics
from ctypes import c_void_p
from pymor.core.defaults import defaults
from pymor.discretizers.builtin.grids.constructions import flatten_grid
from pymor.discretizers.builtin.grids.referenceelements import triangle, square
def compile_shader(source, vertex=True):
    """Compile an OpenGL shader from source and return its handle.

    :param source: GLSL source string.
    :param vertex: compile as vertex shader if True, else as fragment shader.
    :raise RuntimeError: when compilation fails (with the GL info log).
    """
    kind = gl.GL_VERTEX_SHADER if vertex else gl.GL_FRAGMENT_SHADER
    handle = gl.glCreateShader(kind)
    gl.glShaderSource(handle, source)
    gl.glCompileShader(handle)
    # check compilation error
    if not gl.glGetShaderiv(handle, gl.GL_COMPILE_STATUS):
        raise RuntimeError(gl.glGetShaderInfoLog(handle))
    return handle
def link_shader_program(shaders):
    """Link the given compiled shaders into a new shader program.

    :param shaders: iterable of compiled shader handles.
    :raise RuntimeError: when linking fails (with the GL info log).
    """
    program = gl.glCreateProgram()
    for compiled in shaders:
        gl.glAttachShader(program, compiled)
    gl.glLinkProgram(program)
    # check linking error
    if not gl.glGetProgramiv(program, gl.GL_LINK_STATUS):
        raise RuntimeError(gl.glGetProgramInfoLog(program))
    return program
# Vertex shader: passes the 2D position through and forwards the z component
# (which carries the scalar data) to the fragment stage as ``value``.
VS = """
#version 120
// Attribute variable that contains coordinates of the vertices.
attribute vec3 position;
varying float value;
void main()
{
gl_Position.xy = position.xy;
gl_Position.z = 0.;
gl_Position.w = 1.;
value = position.z;
}
"""
# Fragment shader: maps the interpolated scalar ``value`` to a color by
# sampling the 1D colormap texture.
FS = """
#version 120
uniform sampler1D colormap;
varying float value;
void main()
{
gl_FragColor = texture1D(colormap, value);
}
"""
@defaults('name')
def colormap_texture(name='viridis'):
    """Upload the matplotlib colormap *name* as a 1D RGBA OpenGL texture.

    :param name: Name of a matplotlib colormap.
    :return: The OpenGL texture id of the uploaded colormap.
    """
    resolution = min(gl.GL_MAX_TEXTURE_SIZE, 1024)
    colormap_id = gl.glGenTextures(1)
    gl.glBindTexture(gl.GL_TEXTURE_1D, colormap_id)
    # Nearest filtering and edge clamping: sample exact colormap entries.
    gl.glTexParameteri(gl.GL_TEXTURE_1D, gl.GL_TEXTURE_MAG_FILTER, gl.GL_NEAREST)
    gl.glTexParameteri(gl.GL_TEXTURE_1D, gl.GL_TEXTURE_MIN_FILTER, gl.GL_NEAREST)
    gl.glTexParameteri(gl.GL_TEXTURE_1D, gl.GL_TEXTURE_WRAP_S, gl.GL_CLAMP_TO_EDGE)
    colormap = np.empty((resolution, 4), dtype='f4')
    from matplotlib.pyplot import get_cmap
    try:
        cmap = get_cmap(name)
    except ValueError:
        from pymor.core.logger import getLogger
        # this is our default which might not exist for older matplotlib
        # so a warning would be annoying
        if name != 'viridis':
            msg = f'Unknown colormap {name}, using default colormap'
            getLogger('pymor.discretizers.builtin.gui.gl.colormap_texture').warning(msg)
        cmap = get_cmap()
    # Sample the colormap at `resolution` evenly spaced points in [0, 1].
    colormap[:] = cmap(np.linspace(0., 1., resolution))
    gl.glTexImage1D(gl.GL_TEXTURE_1D, 0, gl.GL_RGBA, resolution, 0, gl.GL_RGBA, gl.GL_FLOAT, colormap)
    gl.glBindTexture(gl.GL_TEXTURE_1D, 0)
    return colormap_id
class GLPatchWidget(QOpenGLWidget):
    """OpenGL widget showing a patch (pseudocolor) plot of scalar data on a
    2D grid of triangles or squares.

    The scalar value of each vertex is stored in the z component of its
    position attribute and mapped to a color by the 1D colormap texture in
    the fragment shader (see ``VS``/``FS``).
    """
    def __init__(self, parent, grid, vmin=None, vmax=None, bounding_box=([0, 0], [1, 1]), codim=2):
        """Set up vertex/index data for the given grid.

        :param parent: Parent Qt widget.
        :param grid: The 2D grid to visualize (triangle or square reference element).
        :param vmin: Lower bound for color scaling (None: derive from data in `set`).
        :param vmax: Upper bound for color scaling (None: derive from data in `set`).
        :param bounding_box: Lower-left/upper-right corners used to map grid
            coordinates onto the OpenGL [-1, 1] clip square.
        :param codim: Codimension of the data (0: cell data, 2: vertex data).
        """
        assert grid.reference_element in (triangle, square)
        assert grid.dim == 2
        assert codim in (0, 2)
        super().__init__(parent)
        self.setMinimumSize(300, 300)
        self.setSizePolicy(QSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding))
        subentities, coordinates, entity_map = flatten_grid(grid)
        self.subentities = subentities
        self.entity_map = entity_map
        self.reference_element = grid.reference_element
        self.vmin = vmin
        self.vmax = vmax
        self.bounding_box = bounding_box
        self.codim = codim
        self.update_vbo = False  # set when vertex data must be re-uploaded to the GPU
        bb = self.bounding_box
        # Affine transform mapping the bounding box onto the [-1, 1]^2 clip square.
        self.size = np.array([bb[1][0] - bb[0][0], bb[1][1] - bb[0][1]])
        self.scale = 2 / self.size
        self.shift = - np.array(bb[0]) - self.size / 2
        # setup buffers
        # Vertex layout: 2D position plus the scalar used for coloring.
        buffer_dtype = [('position', 'f4', 2), ('color', 'f4')]
        if self.reference_element == triangle:
            if codim == 2:
                # Vertex data: one buffer entry per grid vertex.
                self.vertex_data = np.empty(len(coordinates), dtype=buffer_dtype)
                self.indices = subentities
            else:
                # Cell data: duplicate vertices per triangle so each cell can
                # carry its own constant value.
                self.vertex_data = np.empty(len(subentities) * 3, dtype=buffer_dtype)
                self.indices = np.arange(len(subentities) * 3, dtype=np.uint32)
        else:
            if codim == 2:
                # Squares are split into two triangles sharing vertices.
                self.vertex_data = np.empty(len(coordinates), dtype=buffer_dtype)
                self.indices = np.vstack((subentities[:, 0:3], subentities[:, [0, 2, 3]]))
            else:
                # Cell data on squares: six duplicated vertices (two triangles) per cell.
                self.vertex_data = np.empty(len(subentities) * 6, dtype=buffer_dtype)
                self.indices = np.arange(len(subentities) * 6, dtype=np.uint32)
        self.indices = np.ascontiguousarray(self.indices)
        self.vertex_data['color'] = 1
        self.set_coordinates(coordinates)
        self.set(np.zeros(grid.size(codim)))
    def resizeGL(self, w, h):
        """Adjust the GL viewport to the new widget size and repaint."""
        gl.glViewport(0, 0, w, h)
        gl.glLoadIdentity()
        self.update()
    def initializeGL(self):
        """Compile shaders and create vertex, index and colormap GL objects."""
        gl.glClearColor(1.0, 1.0, 1.0, 1.0)
        self.shaders_program = link_shader_program([compile_shader(VS, vertex=True),
                                                    compile_shader(FS, vertex=False)])
        gl.glUseProgram(self.shaders_program)
        # Vertex buffer: dynamic, re-uploaded whenever the data changes.
        self.vertices_id = gl.glGenBuffers(1)
        gl.glBindBuffer(gl.GL_ARRAY_BUFFER, self.vertices_id)
        gl.glBufferData(gl.GL_ARRAY_BUFFER, self.vertex_data, gl.GL_DYNAMIC_DRAW)
        gl.glBindBuffer(gl.GL_ARRAY_BUFFER, 0)
        # Index buffer: static, connectivity never changes.
        self.indices_id = gl.glGenBuffers(1)
        gl.glBindBuffer(gl.GL_ELEMENT_ARRAY_BUFFER, self.indices_id)
        gl.glBufferData(gl.GL_ELEMENT_ARRAY_BUFFER, self.indices, gl.GL_STATIC_DRAW)
        gl.glBindBuffer(gl.GL_ELEMENT_ARRAY_BUFFER, 0)
        self.colormap_id = colormap_texture()
        self.colormap_location = gl.glGetUniformLocation(self.shaders_program, 'colormap')
    def paintGL(self):
        """Upload pending vertex data (if any) and draw all triangles."""
        if self.update_vbo:
            # Re-upload vertex data changed by `set`/`set_coordinates`.
            gl.glBindBuffer(gl.GL_ARRAY_BUFFER, self.vertices_id)
            gl.glBufferData(gl.GL_ARRAY_BUFFER, self.vertex_data, gl.GL_DYNAMIC_DRAW)
            gl.glBindBuffer(gl.GL_ARRAY_BUFFER, 0)
            self.update_vbo = False
        gl.glClear(gl.GL_COLOR_BUFFER_BIT | gl.GL_DEPTH_BUFFER_BIT)
        gl.glUniform1i(self.colormap_location, 0)
        gl.glActiveTexture(gl.GL_TEXTURE0 + 0)
        gl.glBindTexture(gl.GL_TEXTURE_1D, self.colormap_id)
        gl.glPushClientAttrib(gl.GL_CLIENT_VERTEX_ARRAY_BIT)
        gl.glBindBuffer(gl.GL_ARRAY_BUFFER, self.vertices_id)
        gl.glBindBuffer(gl.GL_ELEMENT_ARRAY_BUFFER, self.indices_id)
        # Three floats per vertex: x, y and the scalar value (see buffer_dtype).
        gl.glVertexPointer(3, gl.GL_FLOAT, 0, c_void_p(None))
        gl.glEnableClientState(gl.GL_VERTEX_ARRAY)
        gl.glDrawElements(gl.GL_TRIANGLES, self.indices.size, gl.GL_UNSIGNED_INT, None)
        gl.glBindBuffer(gl.GL_ELEMENT_ARRAY_BUFFER, 0)
        gl.glBindBuffer(gl.GL_ARRAY_BUFFER, 0)
        gl.glPopClientAttrib()
    def set_coordinates(self, coordinates):
        """Write (shifted and scaled) vertex coordinates into the vertex data.

        Coordinates are mapped into the [-1, 1] clip square via the
        shift/scale computed from the bounding box in `__init__`.
        """
        if self.codim == 2:
            self.vertex_data['position'][:, 0:2] = coordinates
            self.vertex_data['position'][:, 0:2] += self.shift
            self.vertex_data['position'][:, 0:2] *= self.scale
        elif self.reference_element == triangle:
            # Cell data on triangles: expand to the duplicated per-cell vertices.
            VERTEX_POS = coordinates[self.subentities]
            VERTEX_POS += self.shift
            VERTEX_POS *= self.scale
            self.vertex_data['position'][:, 0:2] = VERTEX_POS.reshape((-1, 2))
        else:
            # Cell data on squares: first three vertices per cell, then the
            # second triangle's corners [0, 2, 3].
            num_entities = len(self.subentities)
            VERTEX_POS = coordinates[self.subentities]
            VERTEX_POS += self.shift
            VERTEX_POS *= self.scale
            self.vertex_data['position'][0:num_entities * 3, 0:2] = VERTEX_POS[:, 0:3, :].reshape((-1, 2))
            self.vertex_data['position'][num_entities * 3:, 0:2] = VERTEX_POS[:, [0, 2, 3], :].reshape((-1, 2))
        self.update_vbo = True
        self.update()
    def set(self, U, vmin=None, vmax=None):
        """Set the scalar data to display and trigger a repaint.

        :param U: Scalar data, one value per codim-`codim` grid entity.
        :param vmin: New lower color bound (None: keep the current one).
        :param vmax: New upper color bound (None: keep the current one).
        """
        self.vmin = self.vmin if vmin is None else vmin
        self.vmax = self.vmax if vmax is None else vmax
        U_buffer = self.vertex_data['color']
        if self.codim == 2:
            U_buffer[:] = U[self.entity_map]
        elif self.reference_element == triangle:
            # Duplicate each cell value for its three vertices.
            U_buffer[:] = np.repeat(U, 3)
        else:
            # Squares: three duplicated vertices per triangle, two triangles.
            U_buffer[:] = np.tile(np.repeat(U, 3), 2)
        # normalize
        # NOTE: the locals below deliberately shadow the parameters; they are
        # the effective bounds (falling back to the data range).
        vmin = np.min(U) if self.vmin is None else self.vmin
        vmax = np.max(U) if self.vmax is None else self.vmax
        U_buffer -= vmin
        if (vmax - vmin) > 0:
            U_buffer /= float(vmax - vmin)
        self.update_vbo = True
        self.update()
class ColorBarWidget(QOpenGLWidget):
    """OpenGL widget drawing a vertical color bar with min/max labels."""
    def __init__(self, parent, U=None, vmin=None, vmax=None):
        """:param parent: Parent Qt widget.
        :param U: Optional data array used to derive the bounds.
        :param vmin: Explicit lower bound (overrides ``U``).
        :param vmax: Explicit upper bound (overrides ``U``).
        """
        super().__init__(parent)
        self.setSizePolicy(QSizePolicy(QSizePolicy.Fixed, QSizePolicy.Expanding))
        self.set(U, vmin, vmax)
    def resizeGL(self, w, h):
        """Adjust the GL viewport to the new widget size and repaint."""
        gl.glViewport(0, 0, w, h)
        gl.glLoadIdentity()
        self.update()
    def initializeGL(self):
        """Compile shaders and create the colormap texture."""
        gl.glClearColor(1.0, 1.0, 1.0, 1.0)
        self.shaders_program = link_shader_program([compile_shader(VS, vertex=True),
                                                    compile_shader(FS, vertex=False)])
        gl.glUseProgram(self.shaders_program)
        self.colormap_id = colormap_texture()
        self.colormap_location = gl.glGetUniformLocation(self.shaders_program, 'colormap')
    def set(self, U=None, vmin=None, vmax=None):
        """Update the displayed bounds and precompute the label geometry.

        Bounds default to min/max of ``U`` when given, else to [0, 1]. Label
        precision is chosen from the relative magnitude of the bounds
        (clamped to 3..8 significant digits).
        """
        # normalize U
        fm = QFontMetrics(self.font())
        self.vmin = vmin if vmin is not None else (np.min(U) if U is not None else 0.)
        self.vmax = vmax if vmax is not None else (np.max(U) if U is not None else 1.)
        difference = abs(self.vmin - self.vmax)
        if difference == 0:
            precision = 3
        else:
            precision = m.log(max(abs(self.vmin), abs(self.vmax)) / difference, 10) + 1
            precision = int(min(max(precision, 3), 8))
        self.vmin_str = format(('{:.' + str(precision) + '}').format(self.vmin))
        self.vmax_str = format(('{:.' + str(precision) + '}').format(self.vmax))
        # Cache label metrics so the widget can reserve enough space.
        self.vmin_width = fm.width(self.vmin_str)
        self.vmax_width = fm.width(self.vmax_str)
        self.text_height = fm.height() * 1.5
        self.text_ascent = fm.ascent() * 1.5
        self.text_descent = fm.descent() * 1.5
        self.setMinimumSize(max(self.vmin_width, self.vmax_width) + 20, 300)
        self.update()
    def paintGL(self):
        """Draw the color gradient with GL, then the labels with QPainter."""
        p = QPainter(self)
        p.beginNativePainting()
        gl.glClear(gl.GL_COLOR_BUFFER_BIT | gl.GL_DEPTH_BUFFER_BIT)
        gl.glUseProgram(self.shaders_program)
        gl.glUniform1i(self.colormap_location, 0)
        gl.glActiveTexture(gl.GL_TEXTURE0 + 0)
        gl.glBindTexture(gl.GL_TEXTURE_1D, self.colormap_id)
        gl.glBegin(gl.GL_QUAD_STRIP)
        # Leave room for the labels above and below the bar.
        bar_start = -1 + self.text_height / self.height() * 2
        bar_height = (1 - 2 * self.text_height / self.height()) * 2
        steps = 40
        for i in range(steps + 1):
            y = i * (1 / steps)
            # gl.glColor(y, 0, 0)
            # z component carries the colormap coordinate (see the shaders).
            gl.glVertex(-0.5, (bar_height*y + bar_start), y)
            gl.glVertex(0.5, (bar_height*y + bar_start), y)
        gl.glEnd()
        p.endNativePainting()
        p.drawText(round((self.width() - self.vmax_width)/2), self.text_ascent, self.vmax_str)
        p.drawText(round((self.width() - self.vmin_width)/2), self.height() - self.text_height + self.text_ascent,
                   self.vmin_str)
        p.end()
| [
"OpenGL.GL.glGetProgramiv",
"numpy.ascontiguousarray",
"numpy.array",
"ctypes.c_void_p",
"OpenGL.GL.glCreateShader",
"OpenGL.GL.glAttachShader",
"OpenGL.GL.glEnableClientState",
"pymor.core.logger.getLogger",
"OpenGL.GL.glViewport",
"OpenGL.GL.glGetProgramInfoLog",
"OpenGL.GL.glDrawElements",
... | [((593, 613), 'pymor.core.config.config.require', 'config.require', (['"""QT"""'], {}), "('QT')\n", (607, 613), False, 'from pymor.core.config import config\n'), ((614, 640), 'pymor.core.config.config.require', 'config.require', (['"""QTOPENGL"""'], {}), "('QTOPENGL')\n", (628, 640), False, 'from pymor.core.config import config\n'), ((641, 661), 'pymor.core.config.config.require', 'config.require', (['"""GL"""'], {}), "('GL')\n", (655, 661), False, 'from pymor.core.config import config\n'), ((2331, 2347), 'pymor.core.defaults.defaults', 'defaults', (['"""name"""'], {}), "('name')\n", (2339, 2347), False, 'from pymor.core.defaults import defaults\n'), ((1223, 1253), 'OpenGL.GL.glCreateShader', 'gl.glCreateShader', (['shader_type'], {}), '(shader_type)\n', (1240, 1253), True, 'import OpenGL.GL as gl\n'), ((1258, 1291), 'OpenGL.GL.glShaderSource', 'gl.glShaderSource', (['shader', 'source'], {}), '(shader, source)\n', (1275, 1291), True, 'import OpenGL.GL as gl\n'), ((1296, 1322), 'OpenGL.GL.glCompileShader', 'gl.glCompileShader', (['shader'], {}), '(shader)\n', (1314, 1322), True, 'import OpenGL.GL as gl\n'), ((1366, 1412), 'OpenGL.GL.glGetShaderiv', 'gl.glGetShaderiv', (['shader', 'gl.GL_COMPILE_STATUS'], {}), '(shader, gl.GL_COMPILE_STATUS)\n', (1382, 1412), True, 'import OpenGL.GL as gl\n'), ((1620, 1640), 'OpenGL.GL.glCreateProgram', 'gl.glCreateProgram', ([], {}), '()\n', (1638, 1640), True, 'import OpenGL.GL as gl\n'), ((1715, 1740), 'OpenGL.GL.glLinkProgram', 'gl.glLinkProgram', (['program'], {}), '(program)\n', (1731, 1740), True, 'import OpenGL.GL as gl\n'), ((1780, 1825), 'OpenGL.GL.glGetProgramiv', 'gl.glGetProgramiv', (['program', 'gl.GL_LINK_STATUS'], {}), '(program, gl.GL_LINK_STATUS)\n', (1797, 1825), True, 'import OpenGL.GL as gl\n'), ((2455, 2474), 'OpenGL.GL.glGenTextures', 'gl.glGenTextures', (['(1)'], {}), '(1)\n', (2471, 2474), True, 'import OpenGL.GL as gl\n'), ((2479, 2526), 'OpenGL.GL.glBindTexture', 'gl.glBindTexture', 
(['gl.GL_TEXTURE_1D', 'colormap_id'], {}), '(gl.GL_TEXTURE_1D, colormap_id)\n', (2495, 2526), True, 'import OpenGL.GL as gl\n'), ((2531, 2608), 'OpenGL.GL.glTexParameteri', 'gl.glTexParameteri', (['gl.GL_TEXTURE_1D', 'gl.GL_TEXTURE_MAG_FILTER', 'gl.GL_NEAREST'], {}), '(gl.GL_TEXTURE_1D, gl.GL_TEXTURE_MAG_FILTER, gl.GL_NEAREST)\n', (2549, 2608), True, 'import OpenGL.GL as gl\n'), ((2613, 2690), 'OpenGL.GL.glTexParameteri', 'gl.glTexParameteri', (['gl.GL_TEXTURE_1D', 'gl.GL_TEXTURE_MIN_FILTER', 'gl.GL_NEAREST'], {}), '(gl.GL_TEXTURE_1D, gl.GL_TEXTURE_MIN_FILTER, gl.GL_NEAREST)\n', (2631, 2690), True, 'import OpenGL.GL as gl\n'), ((2695, 2774), 'OpenGL.GL.glTexParameteri', 'gl.glTexParameteri', (['gl.GL_TEXTURE_1D', 'gl.GL_TEXTURE_WRAP_S', 'gl.GL_CLAMP_TO_EDGE'], {}), '(gl.GL_TEXTURE_1D, gl.GL_TEXTURE_WRAP_S, gl.GL_CLAMP_TO_EDGE)\n', (2713, 2774), True, 'import OpenGL.GL as gl\n'), ((2790, 2827), 'numpy.empty', 'np.empty', (['(resolution, 4)'], {'dtype': '"""f4"""'}), "((resolution, 4), dtype='f4')\n", (2798, 2827), True, 'import numpy as np\n'), ((3369, 3471), 'OpenGL.GL.glTexImage1D', 'gl.glTexImage1D', (['gl.GL_TEXTURE_1D', '(0)', 'gl.GL_RGBA', 'resolution', '(0)', 'gl.GL_RGBA', 'gl.GL_FLOAT', 'colormap'], {}), '(gl.GL_TEXTURE_1D, 0, gl.GL_RGBA, resolution, 0, gl.GL_RGBA,\n gl.GL_FLOAT, colormap)\n', (3384, 3471), True, 'import OpenGL.GL as gl\n'), ((3472, 3509), 'OpenGL.GL.glBindTexture', 'gl.glBindTexture', (['gl.GL_TEXTURE_1D', '(0)'], {}), '(gl.GL_TEXTURE_1D, 0)\n', (3488, 3509), True, 'import OpenGL.GL as gl\n'), ((1676, 1710), 'OpenGL.GL.glAttachShader', 'gl.glAttachShader', (['program', 'shader'], {}), '(program, shader)\n', (1693, 1710), True, 'import OpenGL.GL as gl\n'), ((2895, 2909), 'matplotlib.pyplot.get_cmap', 'get_cmap', (['name'], {}), '(name)\n', (2903, 2909), False, 'from matplotlib.pyplot import get_cmap\n'), ((3332, 3365), 'numpy.linspace', 'np.linspace', (['(0.0)', '(1.0)', 'resolution'], {}), '(0.0, 1.0, resolution)\n', (3343, 3365), True, 
'import numpy as np\n'), ((3997, 4015), 'pymor.discretizers.builtin.grids.constructions.flatten_grid', 'flatten_grid', (['grid'], {}), '(grid)\n', (4009, 4015), False, 'from pymor.discretizers.builtin.grids.constructions import flatten_grid\n'), ((4350, 4402), 'numpy.array', 'np.array', (['[bb[1][0] - bb[0][0], bb[1][1] - bb[0][1]]'], {}), '([bb[1][0] - bb[0][0], bb[1][1] - bb[0][1]])\n', (4358, 4402), True, 'import numpy as np\n'), ((5386, 5420), 'numpy.ascontiguousarray', 'np.ascontiguousarray', (['self.indices'], {}), '(self.indices)\n', (5406, 5420), True, 'import numpy as np\n'), ((5587, 5612), 'OpenGL.GL.glViewport', 'gl.glViewport', (['(0)', '(0)', 'w', 'h'], {}), '(0, 0, w, h)\n', (5600, 5612), True, 'import OpenGL.GL as gl\n'), ((5621, 5640), 'OpenGL.GL.glLoadIdentity', 'gl.glLoadIdentity', ([], {}), '()\n', (5638, 5640), True, 'import OpenGL.GL as gl\n'), ((5700, 5735), 'OpenGL.GL.glClearColor', 'gl.glClearColor', (['(1.0)', '(1.0)', '(1.0)', '(1.0)'], {}), '(1.0, 1.0, 1.0, 1.0)\n', (5715, 5735), True, 'import OpenGL.GL as gl\n'), ((5917, 5954), 'OpenGL.GL.glUseProgram', 'gl.glUseProgram', (['self.shaders_program'], {}), '(self.shaders_program)\n', (5932, 5954), True, 'import OpenGL.GL as gl\n'), ((5983, 6001), 'OpenGL.GL.glGenBuffers', 'gl.glGenBuffers', (['(1)'], {}), '(1)\n', (5998, 6001), True, 'import OpenGL.GL as gl\n'), ((6010, 6063), 'OpenGL.GL.glBindBuffer', 'gl.glBindBuffer', (['gl.GL_ARRAY_BUFFER', 'self.vertices_id'], {}), '(gl.GL_ARRAY_BUFFER, self.vertices_id)\n', (6025, 6063), True, 'import OpenGL.GL as gl\n'), ((6072, 6145), 'OpenGL.GL.glBufferData', 'gl.glBufferData', (['gl.GL_ARRAY_BUFFER', 'self.vertex_data', 'gl.GL_DYNAMIC_DRAW'], {}), '(gl.GL_ARRAY_BUFFER, self.vertex_data, gl.GL_DYNAMIC_DRAW)\n', (6087, 6145), True, 'import OpenGL.GL as gl\n'), ((6154, 6192), 'OpenGL.GL.glBindBuffer', 'gl.glBindBuffer', (['gl.GL_ARRAY_BUFFER', '(0)'], {}), '(gl.GL_ARRAY_BUFFER, 0)\n', (6169, 6192), True, 'import OpenGL.GL as gl\n'), ((6220, 6238), 
'OpenGL.GL.glGenBuffers', 'gl.glGenBuffers', (['(1)'], {}), '(1)\n', (6235, 6238), True, 'import OpenGL.GL as gl\n'), ((6247, 6307), 'OpenGL.GL.glBindBuffer', 'gl.glBindBuffer', (['gl.GL_ELEMENT_ARRAY_BUFFER', 'self.indices_id'], {}), '(gl.GL_ELEMENT_ARRAY_BUFFER, self.indices_id)\n', (6262, 6307), True, 'import OpenGL.GL as gl\n'), ((6316, 6392), 'OpenGL.GL.glBufferData', 'gl.glBufferData', (['gl.GL_ELEMENT_ARRAY_BUFFER', 'self.indices', 'gl.GL_STATIC_DRAW'], {}), '(gl.GL_ELEMENT_ARRAY_BUFFER, self.indices, gl.GL_STATIC_DRAW)\n', (6331, 6392), True, 'import OpenGL.GL as gl\n'), ((6401, 6447), 'OpenGL.GL.glBindBuffer', 'gl.glBindBuffer', (['gl.GL_ELEMENT_ARRAY_BUFFER', '(0)'], {}), '(gl.GL_ELEMENT_ARRAY_BUFFER, 0)\n', (6416, 6447), True, 'import OpenGL.GL as gl\n'), ((6528, 6585), 'OpenGL.GL.glGetUniformLocation', 'gl.glGetUniformLocation', (['self.shaders_program', '"""colormap"""'], {}), "(self.shaders_program, 'colormap')\n", (6551, 6585), True, 'import OpenGL.GL as gl\n'), ((6886, 6945), 'OpenGL.GL.glClear', 'gl.glClear', (['(gl.GL_COLOR_BUFFER_BIT | gl.GL_DEPTH_BUFFER_BIT)'], {}), '(gl.GL_COLOR_BUFFER_BIT | gl.GL_DEPTH_BUFFER_BIT)\n', (6896, 6945), True, 'import OpenGL.GL as gl\n'), ((6955, 6996), 'OpenGL.GL.glUniform1i', 'gl.glUniform1i', (['self.colormap_location', '(0)'], {}), '(self.colormap_location, 0)\n', (6969, 6996), True, 'import OpenGL.GL as gl\n'), ((7005, 7043), 'OpenGL.GL.glActiveTexture', 'gl.glActiveTexture', (['(gl.GL_TEXTURE0 + 0)'], {}), '(gl.GL_TEXTURE0 + 0)\n', (7023, 7043), True, 'import OpenGL.GL as gl\n'), ((7052, 7104), 'OpenGL.GL.glBindTexture', 'gl.glBindTexture', (['gl.GL_TEXTURE_1D', 'self.colormap_id'], {}), '(gl.GL_TEXTURE_1D, self.colormap_id)\n', (7068, 7104), True, 'import OpenGL.GL as gl\n'), ((7114, 7166), 'OpenGL.GL.glPushClientAttrib', 'gl.glPushClientAttrib', (['gl.GL_CLIENT_VERTEX_ARRAY_BIT'], {}), '(gl.GL_CLIENT_VERTEX_ARRAY_BIT)\n', (7135, 7166), True, 'import OpenGL.GL as gl\n'), ((7175, 7228), 
'OpenGL.GL.glBindBuffer', 'gl.glBindBuffer', (['gl.GL_ARRAY_BUFFER', 'self.vertices_id'], {}), '(gl.GL_ARRAY_BUFFER, self.vertices_id)\n', (7190, 7228), True, 'import OpenGL.GL as gl\n'), ((7237, 7297), 'OpenGL.GL.glBindBuffer', 'gl.glBindBuffer', (['gl.GL_ELEMENT_ARRAY_BUFFER', 'self.indices_id'], {}), '(gl.GL_ELEMENT_ARRAY_BUFFER, self.indices_id)\n', (7252, 7297), True, 'import OpenGL.GL as gl\n'), ((7369, 7411), 'OpenGL.GL.glEnableClientState', 'gl.glEnableClientState', (['gl.GL_VERTEX_ARRAY'], {}), '(gl.GL_VERTEX_ARRAY)\n', (7391, 7411), True, 'import OpenGL.GL as gl\n'), ((7420, 7499), 'OpenGL.GL.glDrawElements', 'gl.glDrawElements', (['gl.GL_TRIANGLES', 'self.indices.size', 'gl.GL_UNSIGNED_INT', 'None'], {}), '(gl.GL_TRIANGLES, self.indices.size, gl.GL_UNSIGNED_INT, None)\n', (7437, 7499), True, 'import OpenGL.GL as gl\n'), ((7508, 7554), 'OpenGL.GL.glBindBuffer', 'gl.glBindBuffer', (['gl.GL_ELEMENT_ARRAY_BUFFER', '(0)'], {}), '(gl.GL_ELEMENT_ARRAY_BUFFER, 0)\n', (7523, 7554), True, 'import OpenGL.GL as gl\n'), ((7563, 7601), 'OpenGL.GL.glBindBuffer', 'gl.glBindBuffer', (['gl.GL_ARRAY_BUFFER', '(0)'], {}), '(gl.GL_ARRAY_BUFFER, 0)\n', (7578, 7601), True, 'import OpenGL.GL as gl\n'), ((7610, 7632), 'OpenGL.GL.glPopClientAttrib', 'gl.glPopClientAttrib', ([], {}), '()\n', (7630, 7632), True, 'import OpenGL.GL as gl\n'), ((9634, 9659), 'OpenGL.GL.glViewport', 'gl.glViewport', (['(0)', '(0)', 'w', 'h'], {}), '(0, 0, w, h)\n', (9647, 9659), True, 'import OpenGL.GL as gl\n'), ((9668, 9687), 'OpenGL.GL.glLoadIdentity', 'gl.glLoadIdentity', ([], {}), '()\n', (9685, 9687), True, 'import OpenGL.GL as gl\n'), ((9747, 9782), 'OpenGL.GL.glClearColor', 'gl.glClearColor', (['(1.0)', '(1.0)', '(1.0)', '(1.0)'], {}), '(1.0, 1.0, 1.0, 1.0)\n', (9762, 9782), True, 'import OpenGL.GL as gl\n'), ((9963, 10000), 'OpenGL.GL.glUseProgram', 'gl.glUseProgram', (['self.shaders_program'], {}), '(self.shaders_program)\n', (9978, 10000), True, 'import OpenGL.GL as gl\n'), ((10081, 10138), 
'OpenGL.GL.glGetUniformLocation', 'gl.glGetUniformLocation', (['self.shaders_program', '"""colormap"""'], {}), "(self.shaders_program, 'colormap')\n", (10104, 10138), True, 'import OpenGL.GL as gl\n'), ((11217, 11231), 'qtpy.QtGui.QPainter', 'QPainter', (['self'], {}), '(self)\n', (11225, 11231), False, 'from qtpy.QtGui import QPainter, QFontMetrics\n'), ((11272, 11331), 'OpenGL.GL.glClear', 'gl.glClear', (['(gl.GL_COLOR_BUFFER_BIT | gl.GL_DEPTH_BUFFER_BIT)'], {}), '(gl.GL_COLOR_BUFFER_BIT | gl.GL_DEPTH_BUFFER_BIT)\n', (11282, 11331), True, 'import OpenGL.GL as gl\n'), ((11340, 11377), 'OpenGL.GL.glUseProgram', 'gl.glUseProgram', (['self.shaders_program'], {}), '(self.shaders_program)\n', (11355, 11377), True, 'import OpenGL.GL as gl\n'), ((11386, 11427), 'OpenGL.GL.glUniform1i', 'gl.glUniform1i', (['self.colormap_location', '(0)'], {}), '(self.colormap_location, 0)\n', (11400, 11427), True, 'import OpenGL.GL as gl\n'), ((11436, 11474), 'OpenGL.GL.glActiveTexture', 'gl.glActiveTexture', (['(gl.GL_TEXTURE0 + 0)'], {}), '(gl.GL_TEXTURE0 + 0)\n', (11454, 11474), True, 'import OpenGL.GL as gl\n'), ((11483, 11535), 'OpenGL.GL.glBindTexture', 'gl.glBindTexture', (['gl.GL_TEXTURE_1D', 'self.colormap_id'], {}), '(gl.GL_TEXTURE_1D, self.colormap_id)\n', (11499, 11535), True, 'import OpenGL.GL as gl\n'), ((11545, 11573), 'OpenGL.GL.glBegin', 'gl.glBegin', (['gl.GL_QUAD_STRIP'], {}), '(gl.GL_QUAD_STRIP)\n', (11555, 11573), True, 'import OpenGL.GL as gl\n'), ((11953, 11963), 'OpenGL.GL.glEnd', 'gl.glEnd', ([], {}), '()\n', (11961, 11963), True, 'import OpenGL.GL as gl\n'), ((1459, 1488), 'OpenGL.GL.glGetShaderInfoLog', 'gl.glGetShaderInfoLog', (['shader'], {}), '(shader)\n', (1480, 1488), True, 'import OpenGL.GL as gl\n'), ((1872, 1903), 'OpenGL.GL.glGetProgramInfoLog', 'gl.glGetProgramInfoLog', (['program'], {}), '(program)\n', (1894, 1903), True, 'import OpenGL.GL as gl\n'), ((3298, 3308), 'matplotlib.pyplot.get_cmap', 'get_cmap', ([], {}), '()\n', (3306, 3308), False, 'from 
matplotlib.pyplot import get_cmap\n'), ((3890, 3947), 'qtpy.QtWidgets.QSizePolicy', 'QSizePolicy', (['QSizePolicy.Expanding', 'QSizePolicy.Expanding'], {}), '(QSizePolicy.Expanding, QSizePolicy.Expanding)\n', (3901, 3947), False, 'from qtpy.QtWidgets import QSizePolicy, QOpenGLWidget\n'), ((6650, 6703), 'OpenGL.GL.glBindBuffer', 'gl.glBindBuffer', (['gl.GL_ARRAY_BUFFER', 'self.vertices_id'], {}), '(gl.GL_ARRAY_BUFFER, self.vertices_id)\n', (6665, 6703), True, 'import OpenGL.GL as gl\n'), ((6716, 6789), 'OpenGL.GL.glBufferData', 'gl.glBufferData', (['gl.GL_ARRAY_BUFFER', 'self.vertex_data', 'gl.GL_DYNAMIC_DRAW'], {}), '(gl.GL_ARRAY_BUFFER, self.vertex_data, gl.GL_DYNAMIC_DRAW)\n', (6731, 6789), True, 'import OpenGL.GL as gl\n'), ((6802, 6840), 'OpenGL.GL.glBindBuffer', 'gl.glBindBuffer', (['gl.GL_ARRAY_BUFFER', '(0)'], {}), '(gl.GL_ARRAY_BUFFER, 0)\n', (6817, 6840), True, 'import OpenGL.GL as gl\n'), ((7345, 7359), 'ctypes.c_void_p', 'c_void_p', (['None'], {}), '(None)\n', (7353, 7359), False, 'from ctypes import c_void_p\n'), ((9087, 9096), 'numpy.min', 'np.min', (['U'], {}), '(U)\n', (9093, 9096), True, 'import numpy as np\n'), ((9148, 9157), 'numpy.max', 'np.max', (['U'], {}), '(U)\n', (9154, 9157), True, 'import numpy as np\n'), ((9508, 9561), 'qtpy.QtWidgets.QSizePolicy', 'QSizePolicy', (['QSizePolicy.Fixed', 'QSizePolicy.Expanding'], {}), '(QSizePolicy.Fixed, QSizePolicy.Expanding)\n', (9519, 9561), False, 'from qtpy.QtWidgets import QSizePolicy, QOpenGLWidget\n'), ((11836, 11884), 'OpenGL.GL.glVertex', 'gl.glVertex', (['(-0.5)', '(bar_height * y + bar_start)', 'y'], {}), '(-0.5, bar_height * y + bar_start, y)\n', (11847, 11884), True, 'import OpenGL.GL as gl\n'), ((11897, 11944), 'OpenGL.GL.glVertex', 'gl.glVertex', (['(0.5)', '(bar_height * y + bar_start)', 'y'], {}), '(0.5, bar_height * y + bar_start, y)\n', (11908, 11944), True, 'import OpenGL.GL as gl\n'), ((4461, 4476), 'numpy.array', 'np.array', (['bb[0]'], {}), '(bb[0])\n', (4469, 4476), True, 'import 
numpy as np\n'), ((5119, 5178), 'numpy.vstack', 'np.vstack', (['(subentities[:, 0:3], subentities[:, [0, 2, 3]])'], {}), '((subentities[:, 0:3], subentities[:, [0, 2, 3]]))\n', (5128, 5178), True, 'import numpy as np\n'), ((8967, 8982), 'numpy.repeat', 'np.repeat', (['U', '(3)'], {}), '(U, 3)\n', (8976, 8982), True, 'import numpy as np\n'), ((10301, 10310), 'numpy.min', 'np.min', (['U'], {}), '(U)\n', (10307, 10310), True, 'import numpy as np\n'), ((10388, 10397), 'numpy.max', 'np.max', (['U'], {}), '(U)\n', (10394, 10397), True, 'import numpy as np\n'), ((9031, 9046), 'numpy.repeat', 'np.repeat', (['U', '(3)'], {}), '(U, 3)\n', (9040, 9046), True, 'import numpy as np\n'), ((3206, 3269), 'pymor.core.logger.getLogger', 'getLogger', (['"""pymor.discretizers.builtin.gui.gl.colormap_texture"""'], {}), "('pymor.discretizers.builtin.gui.gl.colormap_texture')\n", (3215, 3269), False, 'from pymor.core.logger import getLogger\n')] |
import copy
from pathlib import Path
import numpy as np
import torch
import torch.nn.functional as F
from torch.utils.data import Dataset
from .builder import DATASETS
from .pipelines import Compose
@DATASETS.register_module()
class HemaAutoRegDataset(Dataset):
    """Hema Auto regression dataset.

    Loads sample names from ``ann_file``, pairs each name with an image under
    ``<root_dir>/images/<name>.png`` and a per-pixel regression target under
    ``<root_dir>/annotations/<name>.npy``, normalizes the targets, and feeds
    samples through an ``mmcls`` pipeline.

    Args:
        root_dir (str): the root of the data dir; must contain ``images/``
            and ``annotations/`` subdirectories.
        data_prefix (str): the prefix of the data path.
        ann_file (str): annotation file with one sample name per line.
        pipeline (list): a list of dicts, where each element represents
            an operation defined in ``mmcls.datasets.pipelines``.
        max_total (int): normalization constant for channel 0 of the label
            map. Defaults to 150.
        test_mode (bool): in train mode or test mode.
    """
    CLASSES = None

    def __init__(self,
                 root_dir,
                 data_prefix,
                 ann_file,
                 pipeline,
                 max_total=150,
                 test_mode=False):
        # Bug fix: the original called ``super(Dataset, self).__init__()``,
        # which skips ``Dataset`` itself in the MRO. Plain ``super()``
        # resolves the chain correctly.
        super().__init__()
        self.root_dir = root_dir
        self.data_prefix = data_prefix
        self.ann_file = ann_file
        self.max_total = max_total
        self.pipeline = Compose(pipeline)
        self.data_infos = self.load_annotations()
        self.test_mode = test_mode

    def load_annotations(self):
        """Read the annotation file and build the per-sample info dicts.

        Returns:
            list[dict]: one dict per sample with keys ``img_prefix``,
            ``img_info`` and ``gt_label`` (float32 label map).
        """
        with open(self.ann_file) as f:
            data_list = [x.strip() for x in f.readlines()]
        self.data_list = data_list

        data_infos = []
        for filename in self.data_list:
            img_path = str(Path(self.root_dir) / ('images/' + filename + '.png'))
            label_path = str(Path(self.root_dir) / ('annotations/' + filename + '.npy'))
            label = np.load(label_path)
            info = {'img_prefix': self.data_prefix}
            info['img_info'] = {'filename': img_path}
            # Channel 0: scale by max_total and clamp to [0, 1].
            label[..., 0] = np.clip(label[..., 0] / self.max_total, a_min=0, a_max=1)
            # NOTE(review): channel 1 is divided by the *already normalized*
            # channel 0 (plus epsilon), not by the raw count — confirm this
            # ordering is intended.
            label[..., 1] = label[..., 1] / (label[..., 0] + 1e-8)
            info['gt_label'] = label.astype(np.float32)
            data_infos.append(info)
        return data_infos

    def get_gt_labels(self):
        """Get all ground-truth labels (categories).

        Returns:
            np.ndarray: categories for all images.
        """
        gt_labels = np.array([data['gt_label'] for data in self.data_infos])
        return gt_labels

    def prepare_data(self, idx):
        # Deep-copy so pipeline transforms cannot mutate the cached infos.
        results = copy.deepcopy(self.data_infos[idx])
        return self.pipeline(results)

    def __len__(self):
        return len(self.data_infos)

    def __getitem__(self, idx):
        return self.prepare_data(idx)

    def evaluate(self,
                 results,
                 metric='mae',
                 indices=None,
                 logger=None):
        """Evaluate the dataset.

        Args:
            results (list): Testing results of the dataset (NCHW arrays).
            metric (str | list[str]): Metrics to be evaluated.
                Default value is `mae`.
            indices (list, optional): The indices of samples corresponding to
                the results. Defaults to None.
            logger (logging.Logger | str, optional): Logger used for printing
                related information during evaluation. Defaults to None.

        Returns:
            dict: evaluation results
        """
        if isinstance(metric, str):
            metrics = [metric]
        else:
            metrics = metric
        allowed_metrics = [
            'mse', 'mae'
        ]
        invalid_metrics = set(metrics) - set(allowed_metrics)
        if len(invalid_metrics) != 0:
            raise ValueError(f'metric {invalid_metrics} is not supported.')

        # Results arrive channel-first; transpose to NHWC to match gt_label.
        results = torch.from_numpy(np.array(results).transpose(0, 2, 3, 1))
        gt_labels = torch.from_numpy(self.get_gt_labels())
        if indices is not None:
            gt_labels = gt_labels[indices]
        num_imgs = len(results)
        assert len(gt_labels) == num_imgs, 'dataset testing results should '\
            'be of the same length as gt_labels.'

        eval_results = {}
        # Bug fix: the original stored each metric into the same temporary
        # ``eval_results_`` and updated once afterwards, so requesting both
        # metrics reported only 'mae'. Accumulate each metric separately.
        if 'mse' in metrics:
            mse = F.mse_loss(results, gt_labels)
            eval_results['mse'] = mse.cpu().detach().item()
        if 'mae' in metrics:
            mae = F.l1_loss(results, gt_labels)
            eval_results['mae'] = mae.cpu().detach().item()
        return eval_results
| [
"numpy.clip",
"torch.nn.functional.mse_loss",
"torch.nn.functional.l1_loss",
"pathlib.Path",
"numpy.array",
"copy.deepcopy",
"numpy.load"
] | [((2256, 2312), 'numpy.array', 'np.array', (["[data['gt_label'] for data in self.data_infos]"], {}), "([data['gt_label'] for data in self.data_infos])\n", (2264, 2312), True, 'import numpy as np\n'), ((2394, 2429), 'copy.deepcopy', 'copy.deepcopy', (['self.data_infos[idx]'], {}), '(self.data_infos[idx])\n', (2407, 2429), False, 'import copy\n'), ((1672, 1691), 'numpy.load', 'np.load', (['label_path'], {}), '(label_path)\n', (1679, 1691), True, 'import numpy as np\n'), ((1827, 1884), 'numpy.clip', 'np.clip', (['(label[..., 0] / self.max_total)'], {'a_min': '(0)', 'a_max': '(1)'}), '(label[..., 0] / self.max_total, a_min=0, a_max=1)\n', (1834, 1884), True, 'import numpy as np\n'), ((4103, 4133), 'torch.nn.functional.mse_loss', 'F.mse_loss', (['results', 'gt_labels'], {}), '(results, gt_labels)\n', (4113, 4133), True, 'import torch.nn.functional as F\n'), ((4260, 4289), 'torch.nn.functional.l1_loss', 'F.l1_loss', (['results', 'gt_labels'], {}), '(results, gt_labels)\n', (4269, 4289), True, 'import torch.nn.functional as F\n'), ((1508, 1527), 'pathlib.Path', 'Path', (['self.root_dir'], {}), '(self.root_dir)\n', (1512, 1527), False, 'from pathlib import Path\n'), ((1592, 1611), 'pathlib.Path', 'Path', (['self.root_dir'], {}), '(self.root_dir)\n', (1596, 1611), False, 'from pathlib import Path\n'), ((3534, 3551), 'numpy.array', 'np.array', (['results'], {}), '(results)\n', (3542, 3551), True, 'import numpy as np\n')] |
import sys
import h5py
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from matplotlib import animation
class WingPlot3d:
    """Animated 3-D plot of wing leading/trailing-edge segments from an HDF5 file.

    The HDF5 file (path given as the first command-line argument) must contain
    ``ax_array``, ``le_array`` and ``te_array`` datasets, each shaped
    ``(3, num_elem, num_t)``: xyz coordinates per element per time step.
    """

    def __init__(self):
        # When True, data axes are permuted so z is drawn on the figure's
        # x axis, x on its y axis, and y on its z axis.
        self.xyz_to_yzx = True
        # Plot every 4th element to keep the animation light.
        self.elem_step = 4
        filename = sys.argv[1]
        # Bug fix: read everything into memory and close the file right away;
        # the original never closed the HDF5 handle.
        with h5py.File(filename, 'r') as h5f:
            self.ax_array = np.array(h5f['ax_array'])
            self.le_array = np.array(h5f['le_array'])
            self.te_array = np.array(h5f['te_array'])
        self.fig_ax_lim = 0.003
        _, self.num_elem, self.num_t = self.ax_array.shape
        self.fig = plt.figure()
        self.fig_ax = self.fig.gca(projection='3d')
        self.t_ind = 0
        # One Line3D per plotted element, keyed by element index, so
        # ``update`` can mutate them in place.
        self.le_line_dict = {}
        self.te_line_dict = {}
        for elem_ind in range(0, self.num_elem, self.elem_step):
            x, y, z = self.get_le_xyz(elem_ind, self.t_ind)
            line, = self.fig_ax.plot(x, y, z, 'b')
            self.le_line_dict[elem_ind] = line
            x, y, z = self.get_te_xyz(elem_ind, self.t_ind)
            line, = self.fig_ax.plot(x, y, z, 'b')
            self.te_line_dict[elem_ind] = line
        if self.xyz_to_yzx:
            self.fig_ax.set_xlabel('z')
            self.fig_ax.set_ylabel('x')
            self.fig_ax.set_zlabel('y')
        else:
            self.fig_ax.set_xlabel('x')
            self.fig_ax.set_ylabel('y')
            self.fig_ax.set_zlabel('z')
        self.fig_ax.set_xlim(-self.fig_ax_lim, self.fig_ax_lim)
        self.fig_ax.set_ylim(-self.fig_ax_lim, self.fig_ax_lim)
        self.fig_ax.set_zlim(-self.fig_ax_lim, self.fig_ax_lim)

    def run(self):
        """Start the animation loop (blocks until the window is closed)."""
        print('run')
        # Keep a reference on self so the animation is not garbage-collected.
        self.animation = animation.FuncAnimation(self.fig, self.update, interval=0.001)
        plt.show()

    def update(self, dummy):
        """Advance one time step (wrapping around) and redraw every segment."""
        self.t_ind += 1
        if self.t_ind >= self.num_t:
            self.t_ind = 0
        print(self.t_ind)
        for elem_ind in range(0, self.num_elem, self.elem_step):
            x, y, z = self.get_le_xyz(elem_ind, self.t_ind)
            self.le_line_dict[elem_ind].set_data_3d(x, y, z)
            x, y, z = self.get_te_xyz(elem_ind, self.t_ind)
            self.te_line_dict[elem_ind].set_data_3d(x, y, z)

    def _segment_xyz(self, elem_ind, t_ind, tip_array):
        # Shared implementation for get_le_xyz/get_te_xyz: the originals were
        # near-duplicates, each with an unused local (te_vect / le_vect).
        ax_vect = self.ax_array[:, elem_ind, t_ind]
        tip_vect = tip_array[:, elem_ind, t_ind]
        if self.xyz_to_yzx:
            y = [ax_vect[0], tip_vect[0]]
            z = [ax_vect[1], tip_vect[1]]
            x = [ax_vect[2], tip_vect[2]]
        else:
            x = [ax_vect[0], tip_vect[0]]
            y = [ax_vect[1], tip_vect[1]]
            z = [ax_vect[2], tip_vect[2]]
        return x, y, z

    def get_le_xyz(self, elem_ind, t_ind):
        """Line coordinates from the element's axis point to its leading edge."""
        return self._segment_xyz(elem_ind, t_ind, self.le_array)

    def get_te_xyz(self, elem_ind, t_ind):
        """Line coordinates from the element's axis point to its trailing edge."""
        return self._segment_xyz(elem_ind, t_ind, self.te_array)
# ---------------------------------------------------------------------------------------
if __name__ == '__main__':
    # Script entry point: build the plot and start the animation loop.
    WingPlot3d().run()
| [
"matplotlib.animation.FuncAnimation",
"h5py.File",
"numpy.array",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.show"
] | [((297, 321), 'h5py.File', 'h5py.File', (['filename', '"""r"""'], {}), "(filename, 'r')\n", (306, 321), False, 'import h5py\n'), ((386, 411), 'numpy.array', 'np.array', (["h5f['ax_array']"], {}), "(h5f['ax_array'])\n", (394, 411), True, 'import numpy as np\n'), ((436, 461), 'numpy.array', 'np.array', (["h5f['le_array']"], {}), "(h5f['le_array'])\n", (444, 461), True, 'import numpy as np\n'), ((486, 511), 'numpy.array', 'np.array', (["h5f['te_array']"], {}), "(h5f['te_array'])\n", (494, 511), True, 'import numpy as np\n'), ((608, 620), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (618, 620), True, 'import matplotlib.pyplot as plt\n'), ((1702, 1764), 'matplotlib.animation.FuncAnimation', 'animation.FuncAnimation', (['self.fig', 'self.update'], {'interval': '(0.001)'}), '(self.fig, self.update, interval=0.001)\n', (1725, 1764), False, 'from matplotlib import animation\n'), ((1773, 1783), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1781, 1783), True, 'import matplotlib.pyplot as plt\n')] |
import functools
import caffe2.python.hypothesis_test_util as hu
import caffe2.python.serialized_test.serialized_test_util as serial
import hypothesis.strategies as st
import numpy as np
from caffe2.python import core
from caffe2.python.operator_test.adagrad_test_helper import (
adagrad_sparse_test_helper,
ref_adagrad,
)
from hypothesis import HealthCheck, given, settings
class TestAdagrad(serial.SerializedTestCase):
    """Property-based tests for the caffe2 Adagrad operator family.

    Each test draws random tensors / hyper-parameters with hypothesis and
    checks the operator output against the Python reference implementation
    ``ref_adagrad`` via ``assertReferenceChecks`` (dense variants) or
    ``adagrad_sparse_test_helper`` (sparse variants). ``gc``/``dc`` are the
    device option and device-check list supplied by ``hu.gcs`` strategies.
    """

    @given(
        inputs=hu.tensors(n=3),
        lr=st.floats(
            min_value=0.01, max_value=0.99, allow_nan=False, allow_infinity=False
        ),
        epsilon=st.floats(
            min_value=0.01, max_value=0.99, allow_nan=False, allow_infinity=False
        ),
        weight_decay=st.sampled_from([0.0, 0.1]),
        **hu.gcs
    )
    @settings(deadline=10000)
    def test_adagrad(self, inputs, lr, epsilon, weight_decay, gc, dc):
        """Dense Adagrad: outputs (param, momentum) match the reference."""
        param, momentum, grad = inputs
        # The momentum (accumulated squared-gradient state) must be non-negative.
        momentum = np.abs(momentum)
        # Learning rate is passed to the operator as a 1-element float32 blob.
        lr = np.array([lr], dtype=np.float32)

        op = core.CreateOperator(
            "Adagrad",
            ["param", "momentum", "grad", "lr"],
            ["param", "momentum"],
            epsilon=epsilon,
            weight_decay=weight_decay,
            device_option=gc,
        )

        self.assertReferenceChecks(
            gc,
            op,
            [param, momentum, grad, lr],
            functools.partial(ref_adagrad, epsilon=epsilon, weight_decay=weight_decay),
        )

    @given(
        inputs=hu.tensors(n=3),
        lr=st.floats(
            min_value=0.01, max_value=0.99, allow_nan=False, allow_infinity=False
        ),
        epsilon=st.floats(
            min_value=0.01, max_value=0.99, allow_nan=False, allow_infinity=False
        ),
        weight_decay=st.sampled_from([0.0, 0.1]),
        **hu.gcs_cpu_only
    )
    @settings(deadline=10000)
    def test_adagrad_output_effective_lr(
        self, inputs, lr, epsilon, weight_decay, gc, dc
    ):
        """Dense Adagrad with the optional third output ``effective_lr`` (CPU only)."""
        param, momentum, grad = inputs
        momentum = np.abs(momentum)
        lr = np.array([lr], dtype=np.float32)

        op = core.CreateOperator(
            "Adagrad",
            ["param", "momentum", "grad", "lr"],
            ["param", "momentum", "effective_lr"],
            epsilon=epsilon,
            weight_decay=weight_decay,
            device_option=gc,
        )

        self.assertReferenceChecks(
            gc,
            op,
            [param, momentum, grad, lr],
            functools.partial(
                ref_adagrad,
                epsilon=epsilon,
                output_effective_lr=True,
                weight_decay=weight_decay,
            ),
        )

    @given(
        inputs=hu.tensors(n=3),
        lr=st.floats(
            min_value=0.01, max_value=0.99, allow_nan=False, allow_infinity=False
        ),
        epsilon=st.floats(
            min_value=0.01, max_value=0.99, allow_nan=False, allow_infinity=False
        ),
        **hu.gcs_cpu_only
    )
    @settings(deadline=10000)
    def test_adagrad_output_effective_lr_and_update(self, inputs, lr, epsilon, gc, dc):
        """Dense Adagrad with both optional outputs ``effective_lr`` and ``update``."""
        param, momentum, grad = inputs
        momentum = np.abs(momentum)
        lr = np.array([lr], dtype=np.float32)

        op = core.CreateOperator(
            "Adagrad",
            ["param", "momentum", "grad", "lr"],
            ["param", "momentum", "effective_lr", "update"],
            epsilon=epsilon,
            device_option=gc,
        )

        self.assertReferenceChecks(
            gc,
            op,
            [param, momentum, grad, lr],
            functools.partial(
                ref_adagrad, epsilon=epsilon, output_effective_lr_and_update=True
            ),
        )

    # Suppress filter_too_much health check.
    # Likely caused by `assume` call falling through too often.
    @settings(suppress_health_check=[HealthCheck.filter_too_much], deadline=10000)
    @given(
        inputs=hu.tensors(n=3),
        lr=st.floats(
            min_value=0.01, max_value=0.99, allow_nan=False, allow_infinity=False
        ),
        epsilon=st.floats(
            min_value=0.01, max_value=0.99, allow_nan=False, allow_infinity=False
        ),
        weight_decay=st.sampled_from([0.0, 0.1]),
        **hu.gcs
    )
    def test_sparse_adagrad(self, inputs, lr, epsilon, weight_decay, gc, dc):
        """SparseAdagrad: delegates setup and checking to the shared helper."""
        adagrad_sparse_test_helper(
            self,
            inputs,
            lr,
            epsilon,
            None,
            ref_adagrad,
            gc,
            dc,
            weight_decay=weight_decay,
        )

    @given(
        inputs=hu.tensors(n=2),
        lr=st.floats(
            min_value=0.01, max_value=0.99, allow_nan=False, allow_infinity=False
        ),
        epsilon=st.floats(
            min_value=0.01, max_value=0.99, allow_nan=False, allow_infinity=False
        ),
        **hu.gcs
    )
    @settings(deadline=10000)
    def test_sparse_adagrad_empty(self, inputs, lr, epsilon, gc, dc):
        """SparseAdagrad with an empty gradient (zero indices) must be a no-op."""
        param, momentum = inputs
        # Zero-row gradient with the same trailing shape as param.
        grad = np.empty(shape=(0,) + param.shape[1:], dtype=np.float32)

        ref_using_fp16_values = [False]
        # On GPU, also exercise the half-precision embedding path.
        if gc == hu.gpu_do:
            ref_using_fp16_values.append(True)

        for ref_using_fp16 in ref_using_fp16_values:
            if ref_using_fp16:
                print("test_sparse_adagrad_empty with half precision embedding")
                momentum_i = momentum.astype(np.float16)
                param_i = param.astype(np.float16)
            else:
                print("test_sparse_adagrad_empty with full precision embedding")
                momentum_i = momentum.astype(np.float32)
                param_i = param.astype(np.float32)

            adagrad_sparse_test_helper(
                self,
                [param_i, momentum_i, grad],
                lr,
                epsilon,
                None,
                ref_adagrad,
                gc,
                dc,
            )

    # Suppress filter_too_much health check.
    # Likely caused by `assume` call falling through too often.
    @settings(suppress_health_check=[HealthCheck.filter_too_much], deadline=10000)
    @given(
        inputs=hu.tensors(n=3),
        lr=st.sampled_from([0.01, 0.99]),
        epsilon=st.sampled_from([0.01, 0.99]),
        weight_decay=st.sampled_from([0.0, 0.1]),
        counter_halflife=st.sampled_from([-1, 5]),
        **hu.gcs
    )
    def test_row_wise_sparse_adagrad(
        self, inputs, lr, epsilon, weight_decay, counter_halflife, gc, dc
    ):
        """RowWiseSparseAdagrad: one momentum value per row instead of per element."""
        adagrad_sparse_test_helper(
            self,
            inputs,
            lr,
            epsilon,
            None,
            functools.partial(ref_adagrad, row_wise=True),
            gc,
            dc,
            row_wise=True,
            weight_decay=weight_decay,
            counter_halflife=counter_halflife,
        )

    @given(
        inputs=hu.tensors(n=2),
        lr=st.floats(
            min_value=0.01, max_value=0.99, allow_nan=False, allow_infinity=False
        ),
        epsilon=st.floats(
            min_value=0.01, max_value=0.99, allow_nan=False, allow_infinity=False
        ),
        **hu.gcs
    )
    @settings(deadline=None)
    def test_row_wise_sparse_adagrad_empty(self, inputs, lr, epsilon, gc, dc):
        """RowWiseSparseAdagrad with an empty gradient must be a no-op."""
        param, momentum = inputs
        # Zero-row gradient with the same trailing shape as param.
        grad = np.empty(shape=(0,) + param.shape[1:], dtype=np.float32)

        adagrad_sparse_test_helper(
            self,
            [param, momentum, grad],
            lr,
            epsilon,
            None,
            ref_adagrad,
            gc,
            dc,
            row_wise=True,
        )
| [
"numpy.abs",
"hypothesis.strategies.sampled_from",
"caffe2.python.hypothesis_test_util.tensors",
"caffe2.python.operator_test.adagrad_test_helper.adagrad_sparse_test_helper",
"hypothesis.strategies.floats",
"numpy.array",
"numpy.empty",
"functools.partial",
"hypothesis.settings",
"caffe2.python.co... | [((815, 839), 'hypothesis.settings', 'settings', ([], {'deadline': '(10000)'}), '(deadline=10000)\n', (823, 839), False, 'from hypothesis import HealthCheck, given, settings\n'), ((1890, 1914), 'hypothesis.settings', 'settings', ([], {'deadline': '(10000)'}), '(deadline=10000)\n', (1898, 1914), False, 'from hypothesis import HealthCheck, given, settings\n'), ((3076, 3100), 'hypothesis.settings', 'settings', ([], {'deadline': '(10000)'}), '(deadline=10000)\n', (3084, 3100), False, 'from hypothesis import HealthCheck, given, settings\n'), ((3935, 4012), 'hypothesis.settings', 'settings', ([], {'suppress_health_check': '[HealthCheck.filter_too_much]', 'deadline': '(10000)'}), '(suppress_health_check=[HealthCheck.filter_too_much], deadline=10000)\n', (3943, 4012), False, 'from hypothesis import HealthCheck, given, settings\n'), ((5021, 5045), 'hypothesis.settings', 'settings', ([], {'deadline': '(10000)'}), '(deadline=10000)\n', (5029, 5045), False, 'from hypothesis import HealthCheck, given, settings\n'), ((6223, 6300), 'hypothesis.settings', 'settings', ([], {'suppress_health_check': '[HealthCheck.filter_too_much]', 'deadline': '(10000)'}), '(suppress_health_check=[HealthCheck.filter_too_much], deadline=10000)\n', (6231, 6300), False, 'from hypothesis import HealthCheck, given, settings\n'), ((7364, 7387), 'hypothesis.settings', 'settings', ([], {'deadline': 'None'}), '(deadline=None)\n', (7372, 7387), False, 'from hypothesis import HealthCheck, given, settings\n'), ((972, 988), 'numpy.abs', 'np.abs', (['momentum'], {}), '(momentum)\n', (978, 988), True, 'import numpy as np\n'), ((1003, 1035), 'numpy.array', 'np.array', (['[lr]'], {'dtype': 'np.float32'}), '([lr], dtype=np.float32)\n', (1011, 1035), True, 'import numpy as np\n'), ((1052, 1213), 'caffe2.python.core.CreateOperator', 'core.CreateOperator', (['"""Adagrad"""', "['param', 'momentum', 'grad', 'lr']", "['param', 'momentum']"], {'epsilon': 'epsilon', 'weight_decay': 'weight_decay', 
'device_option': 'gc'}), "('Adagrad', ['param', 'momentum', 'grad', 'lr'], [\n 'param', 'momentum'], epsilon=epsilon, weight_decay=weight_decay,\n device_option=gc)\n", (1071, 1213), False, 'from caffe2.python import core\n'), ((2083, 2099), 'numpy.abs', 'np.abs', (['momentum'], {}), '(momentum)\n', (2089, 2099), True, 'import numpy as np\n'), ((2114, 2146), 'numpy.array', 'np.array', (['[lr]'], {'dtype': 'np.float32'}), '([lr], dtype=np.float32)\n', (2122, 2146), True, 'import numpy as np\n'), ((2163, 2341), 'caffe2.python.core.CreateOperator', 'core.CreateOperator', (['"""Adagrad"""', "['param', 'momentum', 'grad', 'lr']", "['param', 'momentum', 'effective_lr']"], {'epsilon': 'epsilon', 'weight_decay': 'weight_decay', 'device_option': 'gc'}), "('Adagrad', ['param', 'momentum', 'grad', 'lr'], [\n 'param', 'momentum', 'effective_lr'], epsilon=epsilon, weight_decay=\n weight_decay, device_option=gc)\n", (2182, 2341), False, 'from caffe2.python import core\n'), ((3250, 3266), 'numpy.abs', 'np.abs', (['momentum'], {}), '(momentum)\n', (3256, 3266), True, 'import numpy as np\n'), ((3281, 3313), 'numpy.array', 'np.array', (['[lr]'], {'dtype': 'np.float32'}), '([lr], dtype=np.float32)\n', (3289, 3313), True, 'import numpy as np\n'), ((3330, 3490), 'caffe2.python.core.CreateOperator', 'core.CreateOperator', (['"""Adagrad"""', "['param', 'momentum', 'grad', 'lr']", "['param', 'momentum', 'effective_lr', 'update']"], {'epsilon': 'epsilon', 'device_option': 'gc'}), "('Adagrad', ['param', 'momentum', 'grad', 'lr'], [\n 'param', 'momentum', 'effective_lr', 'update'], epsilon=epsilon,\n device_option=gc)\n", (3349, 3490), False, 'from caffe2.python import core\n'), ((4464, 4575), 'caffe2.python.operator_test.adagrad_test_helper.adagrad_sparse_test_helper', 'adagrad_sparse_test_helper', (['self', 'inputs', 'lr', 'epsilon', 'None', 'ref_adagrad', 'gc', 'dc'], {'weight_decay': 'weight_decay'}), '(self, inputs, lr, epsilon, None, ref_adagrad, gc,\n dc, 
weight_decay=weight_decay)\n', (4490, 4575), False, 'from caffe2.python.operator_test.adagrad_test_helper import adagrad_sparse_test_helper, ref_adagrad\n'), ((5167, 5223), 'numpy.empty', 'np.empty', ([], {'shape': '((0,) + param.shape[1:])', 'dtype': 'np.float32'}), '(shape=(0,) + param.shape[1:], dtype=np.float32)\n', (5175, 5223), True, 'import numpy as np\n'), ((7518, 7574), 'numpy.empty', 'np.empty', ([], {'shape': '((0,) + param.shape[1:])', 'dtype': 'np.float32'}), '(shape=(0,) + param.shape[1:], dtype=np.float32)\n', (7526, 7574), True, 'import numpy as np\n'), ((7584, 7700), 'caffe2.python.operator_test.adagrad_test_helper.adagrad_sparse_test_helper', 'adagrad_sparse_test_helper', (['self', '[param, momentum, grad]', 'lr', 'epsilon', 'None', 'ref_adagrad', 'gc', 'dc'], {'row_wise': '(True)'}), '(self, [param, momentum, grad], lr, epsilon, None,\n ref_adagrad, gc, dc, row_wise=True)\n', (7610, 7700), False, 'from caffe2.python.operator_test.adagrad_test_helper import adagrad_sparse_test_helper, ref_adagrad\n'), ((1423, 1497), 'functools.partial', 'functools.partial', (['ref_adagrad'], {'epsilon': 'epsilon', 'weight_decay': 'weight_decay'}), '(ref_adagrad, epsilon=epsilon, weight_decay=weight_decay)\n', (1440, 1497), False, 'import functools\n'), ((475, 490), 'caffe2.python.hypothesis_test_util.tensors', 'hu.tensors', ([], {'n': '(3)'}), '(n=3)\n', (485, 490), True, 'import caffe2.python.hypothesis_test_util as hu\n'), ((504, 589), 'hypothesis.strategies.floats', 'st.floats', ([], {'min_value': '(0.01)', 'max_value': '(0.99)', 'allow_nan': '(False)', 'allow_infinity': '(False)'}), '(min_value=0.01, max_value=0.99, allow_nan=False, allow_infinity=False\n )\n', (513, 589), True, 'import hypothesis.strategies as st\n'), ((627, 712), 'hypothesis.strategies.floats', 'st.floats', ([], {'min_value': '(0.01)', 'max_value': '(0.99)', 'allow_nan': '(False)', 'allow_infinity': '(False)'}), '(min_value=0.01, max_value=0.99, allow_nan=False, allow_infinity=False\n )\n', 
(636, 712), True, 'import hypothesis.strategies as st\n'), ((755, 782), 'hypothesis.strategies.sampled_from', 'st.sampled_from', (['[0.0, 0.1]'], {}), '([0.0, 0.1])\n', (770, 782), True, 'import hypothesis.strategies as st\n'), ((2550, 2654), 'functools.partial', 'functools.partial', (['ref_adagrad'], {'epsilon': 'epsilon', 'output_effective_lr': '(True)', 'weight_decay': 'weight_decay'}), '(ref_adagrad, epsilon=epsilon, output_effective_lr=True,\n weight_decay=weight_decay)\n', (2567, 2654), False, 'import functools\n'), ((1541, 1556), 'caffe2.python.hypothesis_test_util.tensors', 'hu.tensors', ([], {'n': '(3)'}), '(n=3)\n', (1551, 1556), True, 'import caffe2.python.hypothesis_test_util as hu\n'), ((1570, 1655), 'hypothesis.strategies.floats', 'st.floats', ([], {'min_value': '(0.01)', 'max_value': '(0.99)', 'allow_nan': '(False)', 'allow_infinity': '(False)'}), '(min_value=0.01, max_value=0.99, allow_nan=False, allow_infinity=False\n )\n', (1579, 1655), True, 'import hypothesis.strategies as st\n'), ((1693, 1778), 'hypothesis.strategies.floats', 'st.floats', ([], {'min_value': '(0.01)', 'max_value': '(0.99)', 'allow_nan': '(False)', 'allow_infinity': '(False)'}), '(min_value=0.01, max_value=0.99, allow_nan=False, allow_infinity=False\n )\n', (1702, 1778), True, 'import hypothesis.strategies as st\n'), ((1821, 1848), 'hypothesis.strategies.sampled_from', 'st.sampled_from', (['[0.0, 0.1]'], {}), '([0.0, 0.1])\n', (1836, 1848), True, 'import hypothesis.strategies as st\n'), ((3687, 3775), 'functools.partial', 'functools.partial', (['ref_adagrad'], {'epsilon': 'epsilon', 'output_effective_lr_and_update': '(True)'}), '(ref_adagrad, epsilon=epsilon,\n output_effective_lr_and_update=True)\n', (3704, 3775), False, 'import functools\n'), ((2778, 2793), 'caffe2.python.hypothesis_test_util.tensors', 'hu.tensors', ([], {'n': '(3)'}), '(n=3)\n', (2788, 2793), True, 'import caffe2.python.hypothesis_test_util as hu\n'), ((2807, 2892), 'hypothesis.strategies.floats', 'st.floats', 
([], {'min_value': '(0.01)', 'max_value': '(0.99)', 'allow_nan': '(False)', 'allow_infinity': '(False)'}), '(min_value=0.01, max_value=0.99, allow_nan=False, allow_infinity=False\n )\n', (2816, 2892), True, 'import hypothesis.strategies as st\n'), ((2930, 3015), 'hypothesis.strategies.floats', 'st.floats', ([], {'min_value': '(0.01)', 'max_value': '(0.99)', 'allow_nan': '(False)', 'allow_infinity': '(False)'}), '(min_value=0.01, max_value=0.99, allow_nan=False, allow_infinity=False\n )\n', (2939, 3015), True, 'import hypothesis.strategies as st\n'), ((4042, 4057), 'caffe2.python.hypothesis_test_util.tensors', 'hu.tensors', ([], {'n': '(3)'}), '(n=3)\n', (4052, 4057), True, 'import caffe2.python.hypothesis_test_util as hu\n'), ((4071, 4156), 'hypothesis.strategies.floats', 'st.floats', ([], {'min_value': '(0.01)', 'max_value': '(0.99)', 'allow_nan': '(False)', 'allow_infinity': '(False)'}), '(min_value=0.01, max_value=0.99, allow_nan=False, allow_infinity=False\n )\n', (4080, 4156), True, 'import hypothesis.strategies as st\n'), ((4194, 4279), 'hypothesis.strategies.floats', 'st.floats', ([], {'min_value': '(0.01)', 'max_value': '(0.99)', 'allow_nan': '(False)', 'allow_infinity': '(False)'}), '(min_value=0.01, max_value=0.99, allow_nan=False, allow_infinity=False\n )\n', (4203, 4279), True, 'import hypothesis.strategies as st\n'), ((4322, 4349), 'hypothesis.strategies.sampled_from', 'st.sampled_from', (['[0.0, 0.1]'], {}), '([0.0, 0.1])\n', (4337, 4349), True, 'import hypothesis.strategies as st\n'), ((5850, 5955), 'caffe2.python.operator_test.adagrad_test_helper.adagrad_sparse_test_helper', 'adagrad_sparse_test_helper', (['self', '[param_i, momentum_i, grad]', 'lr', 'epsilon', 'None', 'ref_adagrad', 'gc', 'dc'], {}), '(self, [param_i, momentum_i, grad], lr, epsilon,\n None, ref_adagrad, gc, dc)\n', (5876, 5955), False, 'from caffe2.python.operator_test.adagrad_test_helper import adagrad_sparse_test_helper, ref_adagrad\n'), ((4732, 4747), 
'caffe2.python.hypothesis_test_util.tensors', 'hu.tensors', ([], {'n': '(2)'}), '(n=2)\n', (4742, 4747), True, 'import caffe2.python.hypothesis_test_util as hu\n'), ((4761, 4846), 'hypothesis.strategies.floats', 'st.floats', ([], {'min_value': '(0.01)', 'max_value': '(0.99)', 'allow_nan': '(False)', 'allow_infinity': '(False)'}), '(min_value=0.01, max_value=0.99, allow_nan=False, allow_infinity=False\n )\n', (4770, 4846), True, 'import hypothesis.strategies as st\n'), ((4884, 4969), 'hypothesis.strategies.floats', 'st.floats', ([], {'min_value': '(0.01)', 'max_value': '(0.99)', 'allow_nan': '(False)', 'allow_infinity': '(False)'}), '(min_value=0.01, max_value=0.99, allow_nan=False, allow_infinity=False\n )\n', (4893, 4969), True, 'import hypothesis.strategies as st\n'), ((6836, 6881), 'functools.partial', 'functools.partial', (['ref_adagrad'], {'row_wise': '(True)'}), '(ref_adagrad, row_wise=True)\n', (6853, 6881), False, 'import functools\n'), ((6330, 6345), 'caffe2.python.hypothesis_test_util.tensors', 'hu.tensors', ([], {'n': '(3)'}), '(n=3)\n', (6340, 6345), True, 'import caffe2.python.hypothesis_test_util as hu\n'), ((6359, 6388), 'hypothesis.strategies.sampled_from', 'st.sampled_from', (['[0.01, 0.99]'], {}), '([0.01, 0.99])\n', (6374, 6388), True, 'import hypothesis.strategies as st\n'), ((6407, 6436), 'hypothesis.strategies.sampled_from', 'st.sampled_from', (['[0.01, 0.99]'], {}), '([0.01, 0.99])\n', (6422, 6436), True, 'import hypothesis.strategies as st\n'), ((6460, 6487), 'hypothesis.strategies.sampled_from', 'st.sampled_from', (['[0.0, 0.1]'], {}), '([0.0, 0.1])\n', (6475, 6487), True, 'import hypothesis.strategies as st\n'), ((6515, 6539), 'hypothesis.strategies.sampled_from', 'st.sampled_from', (['[-1, 5]'], {}), '([-1, 5])\n', (6530, 6539), True, 'import hypothesis.strategies as st\n'), ((7075, 7090), 'caffe2.python.hypothesis_test_util.tensors', 'hu.tensors', ([], {'n': '(2)'}), '(n=2)\n', (7085, 7090), True, 'import 
caffe2.python.hypothesis_test_util as hu\n'), ((7104, 7189), 'hypothesis.strategies.floats', 'st.floats', ([], {'min_value': '(0.01)', 'max_value': '(0.99)', 'allow_nan': '(False)', 'allow_infinity': '(False)'}), '(min_value=0.01, max_value=0.99, allow_nan=False, allow_infinity=False\n )\n', (7113, 7189), True, 'import hypothesis.strategies as st\n'), ((7227, 7312), 'hypothesis.strategies.floats', 'st.floats', ([], {'min_value': '(0.01)', 'max_value': '(0.99)', 'allow_nan': '(False)', 'allow_infinity': '(False)'}), '(min_value=0.01, max_value=0.99, allow_nan=False, allow_infinity=False\n )\n', (7236, 7312), True, 'import hypothesis.strategies as st\n')] |
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import unittest
from decorator_helper import prog_scope
import paddle.fluid as fluid
import numpy as np
import six
class TestMathOpPatchesVarBase(unittest.TestCase):
    """Check Python operator overloads on dygraph variables against NumPy.

    Every arithmetic/comparison test evaluates the same expression twice --
    once on ``fluid`` dygraph variables and once on the backing NumPy
    arrays -- and asserts that both code paths agree.
    """

    def setUp(self):
        self.shape = [10, 10]
        self.dtype = np.float32

    # ------------------------------------------------------------------
    # helpers
    # ------------------------------------------------------------------
    def _rand_array(self):
        """Return a random operand of the configured shape and dtype."""
        return np.random.random(self.shape).astype(self.dtype)

    def _check_binary_op(self, op, a_np, b_np, exact=True):
        """Compare ``op`` applied to dygraph variables vs. raw arrays.

        Args:
            op: two-argument callable implementing the operator under test.
            a_np, b_np: NumPy operands; dygraph variables are built from them.
            exact: use bitwise ``np.array_equal`` when True, otherwise the
                tolerance-based ``np.allclose`` (for ops with float rounding).
        """
        with fluid.dygraph.guard():
            a = fluid.dygraph.to_variable(a_np)
            b = fluid.dygraph.to_variable(b_np)
            res = op(a, b)
            expect = op(a_np, b_np)
            if exact:
                self.assertTrue(np.array_equal(res.numpy(), expect))
            else:
                self.assertTrue(np.allclose(res.numpy(), expect))

    def _check_scalar_op(self, op, exact=True):
        """Like ``_check_binary_op`` but with the fixed scalar 0.1.

        ``op`` receives ``(variable_or_array, scalar)`` so the caller can
        place the scalar on either side of the operator.
        """
        a_np = self._rand_array()
        scalar = 0.1
        with fluid.dygraph.guard():
            a = fluid.dygraph.to_variable(a_np)
            res = op(a, scalar)
            expect = op(a_np, scalar)
            if exact:
                self.assertTrue(np.array_equal(res.numpy(), expect))
            else:
                self.assertTrue(np.allclose(res.numpy(), expect))

    def _check_compare_op(self, op):
        """Check an elementwise comparison against equal and unequal data."""
        a_np = np.asarray([1, 2, 3, 4, 5])
        b_np = np.asarray([1, 2, 3, 4, 5])  # identical to a_np
        c_np = np.asarray([1, 2, 2, 4, 5])  # differs at index 2
        with fluid.dygraph.guard():
            a = fluid.dygraph.to_variable(a_np)
            b = fluid.dygraph.to_variable(b_np)
            c = fluid.dygraph.to_variable(c_np)
            self.assertTrue(np.array_equal(op(a, b).numpy(), op(a_np, b_np)))
            self.assertTrue(np.array_equal(op(a, c).numpy(), op(a_np, c_np)))

    # ------------------------------------------------------------------
    # elementwise arithmetic
    # ------------------------------------------------------------------
    def test_add(self):
        self._check_binary_op(lambda x, y: x + y, self._rand_array(),
                              self._rand_array())

    def test_sub(self):
        self._check_binary_op(lambda x, y: x - y, self._rand_array(),
                              self._rand_array())

    def test_mul(self):
        self._check_binary_op(lambda x, y: x * y, self._rand_array(),
                              self._rand_array())

    def test_div(self):
        self._check_binary_op(lambda x, y: x / y, self._rand_array(),
                              self._rand_array())

    def test_add_scalar(self):
        self._check_scalar_op(lambda x, s: x + s)

    def test_add_scalar_reverse(self):
        self._check_scalar_op(lambda x, s: s + x)

    def test_sub_scalar(self):
        self._check_scalar_op(lambda x, s: x - s)

    def test_sub_scalar_reverse(self):
        self._check_scalar_op(lambda x, s: s - x)

    def test_mul_scalar(self):
        self._check_scalar_op(lambda x, s: x * s)

    # div_scalar, not equal: float rounding differs, so compare with allclose
    def test_div_scalar(self):
        self._check_scalar_op(lambda x, s: x / s, exact=False)

    # pow of float type, not equal: compare with allclose
    def test_pow(self):
        self._check_binary_op(lambda x, y: x**y, self._rand_array(),
                              self._rand_array(), exact=False)

    def test_floor_div(self):
        # Integer operands in [1, 100) so the divisor is never zero.
        self._check_binary_op(
            lambda x, y: x // y,
            np.random.randint(1, 100, size=self.shape),
            np.random.randint(1, 100, size=self.shape))

    def test_mod(self):
        # Integer operands in [1, 100) so the modulus is never zero.
        self._check_binary_op(
            lambda x, y: x % y,
            np.random.randint(1, 100, size=self.shape),
            np.random.randint(1, 100, size=self.shape))

    # ------------------------------------------------------------------
    # logical / comparison operators
    # ------------------------------------------------------------------
    def test_equal(self):
        self._check_compare_op(lambda x, y: x == y)

    def test_not_equal(self):
        self._check_compare_op(lambda x, y: x != y)

    def test_less_than(self):
        self._check_binary_op(lambda x, y: x < y, self._rand_array(),
                              self._rand_array())

    def test_less_equal(self):
        self._check_binary_op(lambda x, y: x <= y, self._rand_array(),
                              self._rand_array())

    def test_greater_than(self):
        self._check_binary_op(lambda x, y: x > y, self._rand_array(),
                              self._rand_array())

    def test_greater_equal(self):
        self._check_binary_op(lambda x, y: x >= y, self._rand_array(),
                              self._rand_array())

    def test_neg(self):
        """Unary negation matches NumPy's elementwise negation."""
        a_np = np.random.uniform(-1, 1, self.shape).astype(self.dtype)
        with fluid.dygraph.guard():
            a = fluid.dygraph.to_variable(a_np)
            res = -a
            self.assertTrue(np.array_equal(res.numpy(), -a_np))

    def test_float_int_long(self):
        """A one-element variable is convertible to Python numeric types."""
        with fluid.dygraph.guard():
            a = fluid.dygraph.to_variable(np.array([100.1]))
            self.assertTrue(float(a) == 100.1)
            self.assertTrue(int(a) == 100)
            if six.PY2:
                self.assertTrue(long(a) == 100)  # noqa: F821 -- PY2 only
            else:
                self.assertTrue(int(a) == 100)

    def test_len(self):
        """``len()`` of a variable is the size of its leading dimension."""
        a_np = np.random.uniform(-1, 1, self.shape).astype(self.dtype)
        with fluid.dygraph.guard():
            a = fluid.dygraph.to_variable(a_np)
            self.assertTrue(len(a) == 10)

    def test_index(self):
        """A one-element integer variable can act as an index/range bound."""
        with fluid.dygraph.guard():
            var1 = fluid.dygraph.to_variable(np.array([2]))
            i_tmp = 0
            for i in range(var1):
                self.assertTrue(i == i_tmp)
                i_tmp = i_tmp + 1
            list1 = [1, 2, 3, 4, 5]
            self.assertTrue(list1[var1] == 3)
            str1 = "just test"
            self.assertTrue(str1[var1] == 's')
# Allow running this test module directly as a script.
if __name__ == '__main__':
    unittest.main()
| [
"paddle.fluid.dygraph.guard",
"paddle.fluid.dygraph.to_variable",
"numpy.random.random",
"numpy.asarray",
"numpy.array",
"numpy.random.randint",
"numpy.random.uniform",
"unittest.main"
] | [((9292, 9307), 'unittest.main', 'unittest.main', ([], {}), '()\n', (9305, 9307), False, 'import unittest\n'), ((4645, 4687), 'numpy.random.randint', 'np.random.randint', (['(1)', '(100)'], {'size': 'self.shape'}), '(1, 100, size=self.shape)\n', (4662, 4687), True, 'import numpy as np\n'), ((4703, 4745), 'numpy.random.randint', 'np.random.randint', (['(1)', '(100)'], {'size': 'self.shape'}), '(1, 100, size=self.shape)\n', (4720, 4745), True, 'import numpy as np\n'), ((5014, 5056), 'numpy.random.randint', 'np.random.randint', (['(1)', '(100)'], {'size': 'self.shape'}), '(1, 100, size=self.shape)\n', (5031, 5056), True, 'import numpy as np\n'), ((5072, 5114), 'numpy.random.randint', 'np.random.randint', (['(1)', '(100)'], {'size': 'self.shape'}), '(1, 100, size=self.shape)\n', (5089, 5114), True, 'import numpy as np\n'), ((5409, 5436), 'numpy.asarray', 'np.asarray', (['[1, 2, 3, 4, 5]'], {}), '([1, 2, 3, 4, 5])\n', (5419, 5436), True, 'import numpy as np\n'), ((5452, 5479), 'numpy.asarray', 'np.asarray', (['[1, 2, 3, 4, 5]'], {}), '([1, 2, 3, 4, 5])\n', (5462, 5479), True, 'import numpy as np\n'), ((5495, 5522), 'numpy.asarray', 'np.asarray', (['[1, 2, 2, 4, 5]'], {}), '([1, 2, 2, 4, 5])\n', (5505, 5522), True, 'import numpy as np\n'), ((5949, 5976), 'numpy.asarray', 'np.asarray', (['[1, 2, 3, 4, 5]'], {}), '([1, 2, 3, 4, 5])\n', (5959, 5976), True, 'import numpy as np\n'), ((5992, 6019), 'numpy.asarray', 'np.asarray', (['[1, 2, 3, 4, 5]'], {}), '([1, 2, 3, 4, 5])\n', (6002, 6019), True, 'import numpy as np\n'), ((6035, 6062), 'numpy.asarray', 'np.asarray', (['[1, 2, 2, 4, 5]'], {}), '([1, 2, 2, 4, 5])\n', (6045, 6062), True, 'import numpy as np\n'), ((1067, 1088), 'paddle.fluid.dygraph.guard', 'fluid.dygraph.guard', ([], {}), '()\n', (1086, 1088), True, 'import paddle.fluid as fluid\n'), ((1106, 1137), 'paddle.fluid.dygraph.to_variable', 'fluid.dygraph.to_variable', (['a_np'], {}), '(a_np)\n', (1131, 1137), True, 'import paddle.fluid as fluid\n'), ((1154, 1185), 
'paddle.fluid.dygraph.to_variable', 'fluid.dygraph.to_variable', (['b_np'], {}), '(b_np)\n', (1179, 1185), True, 'import paddle.fluid as fluid\n'), ((1444, 1465), 'paddle.fluid.dygraph.guard', 'fluid.dygraph.guard', ([], {}), '()\n', (1463, 1465), True, 'import paddle.fluid as fluid\n'), ((1483, 1514), 'paddle.fluid.dygraph.to_variable', 'fluid.dygraph.to_variable', (['a_np'], {}), '(a_np)\n', (1508, 1514), True, 'import paddle.fluid as fluid\n'), ((1531, 1562), 'paddle.fluid.dygraph.to_variable', 'fluid.dygraph.to_variable', (['b_np'], {}), '(b_np)\n', (1556, 1562), True, 'import paddle.fluid as fluid\n'), ((1821, 1842), 'paddle.fluid.dygraph.guard', 'fluid.dygraph.guard', ([], {}), '()\n', (1840, 1842), True, 'import paddle.fluid as fluid\n'), ((1860, 1891), 'paddle.fluid.dygraph.to_variable', 'fluid.dygraph.to_variable', (['a_np'], {}), '(a_np)\n', (1885, 1891), True, 'import paddle.fluid as fluid\n'), ((1908, 1939), 'paddle.fluid.dygraph.to_variable', 'fluid.dygraph.to_variable', (['b_np'], {}), '(b_np)\n', (1933, 1939), True, 'import paddle.fluid as fluid\n'), ((2198, 2219), 'paddle.fluid.dygraph.guard', 'fluid.dygraph.guard', ([], {}), '()\n', (2217, 2219), True, 'import paddle.fluid as fluid\n'), ((2237, 2268), 'paddle.fluid.dygraph.to_variable', 'fluid.dygraph.to_variable', (['a_np'], {}), '(a_np)\n', (2262, 2268), True, 'import paddle.fluid as fluid\n'), ((2285, 2316), 'paddle.fluid.dygraph.to_variable', 'fluid.dygraph.to_variable', (['b_np'], {}), '(b_np)\n', (2310, 2316), True, 'import paddle.fluid as fluid\n'), ((2519, 2540), 'paddle.fluid.dygraph.guard', 'fluid.dygraph.guard', ([], {}), '()\n', (2538, 2540), True, 'import paddle.fluid as fluid\n'), ((2558, 2589), 'paddle.fluid.dygraph.to_variable', 'fluid.dygraph.to_variable', (['a_np'], {}), '(a_np)\n', (2583, 2589), True, 'import paddle.fluid as fluid\n'), ((2817, 2838), 'paddle.fluid.dygraph.guard', 'fluid.dygraph.guard', ([], {}), '()\n', (2836, 2838), True, 'import paddle.fluid as fluid\n'), 
((2856, 2887), 'paddle.fluid.dygraph.to_variable', 'fluid.dygraph.to_variable', (['a_np'], {}), '(a_np)\n', (2881, 2887), True, 'import paddle.fluid as fluid\n'), ((3107, 3128), 'paddle.fluid.dygraph.guard', 'fluid.dygraph.guard', ([], {}), '()\n', (3126, 3128), True, 'import paddle.fluid as fluid\n'), ((3146, 3177), 'paddle.fluid.dygraph.to_variable', 'fluid.dygraph.to_variable', (['a_np'], {}), '(a_np)\n', (3171, 3177), True, 'import paddle.fluid as fluid\n'), ((3405, 3426), 'paddle.fluid.dygraph.guard', 'fluid.dygraph.guard', ([], {}), '()\n', (3424, 3426), True, 'import paddle.fluid as fluid\n'), ((3444, 3475), 'paddle.fluid.dygraph.to_variable', 'fluid.dygraph.to_variable', (['a_np'], {}), '(a_np)\n', (3469, 3475), True, 'import paddle.fluid as fluid\n'), ((3695, 3716), 'paddle.fluid.dygraph.guard', 'fluid.dygraph.guard', ([], {}), '()\n', (3714, 3716), True, 'import paddle.fluid as fluid\n'), ((3734, 3765), 'paddle.fluid.dygraph.to_variable', 'fluid.dygraph.to_variable', (['a_np'], {}), '(a_np)\n', (3759, 3765), True, 'import paddle.fluid as fluid\n'), ((4013, 4034), 'paddle.fluid.dygraph.guard', 'fluid.dygraph.guard', ([], {}), '()\n', (4032, 4034), True, 'import paddle.fluid as fluid\n'), ((4052, 4083), 'paddle.fluid.dygraph.to_variable', 'fluid.dygraph.to_variable', (['a_np'], {}), '(a_np)\n', (4077, 4083), True, 'import paddle.fluid as fluid\n'), ((4391, 4412), 'paddle.fluid.dygraph.guard', 'fluid.dygraph.guard', ([], {}), '()\n', (4410, 4412), True, 'import paddle.fluid as fluid\n'), ((4430, 4461), 'paddle.fluid.dygraph.to_variable', 'fluid.dygraph.to_variable', (['a_np'], {}), '(a_np)\n', (4455, 4461), True, 'import paddle.fluid as fluid\n'), ((4478, 4509), 'paddle.fluid.dygraph.to_variable', 'fluid.dygraph.to_variable', (['b_np'], {}), '(b_np)\n', (4503, 4509), True, 'import paddle.fluid as fluid\n'), ((4759, 4780), 'paddle.fluid.dygraph.guard', 'fluid.dygraph.guard', ([], {}), '()\n', (4778, 4780), True, 'import paddle.fluid as fluid\n'), ((4798, 
4829), 'paddle.fluid.dygraph.to_variable', 'fluid.dygraph.to_variable', (['a_np'], {}), '(a_np)\n', (4823, 4829), True, 'import paddle.fluid as fluid\n'), ((4846, 4877), 'paddle.fluid.dygraph.to_variable', 'fluid.dygraph.to_variable', (['b_np'], {}), '(b_np)\n', (4871, 4877), True, 'import paddle.fluid as fluid\n'), ((5128, 5149), 'paddle.fluid.dygraph.guard', 'fluid.dygraph.guard', ([], {}), '()\n', (5147, 5149), True, 'import paddle.fluid as fluid\n'), ((5167, 5198), 'paddle.fluid.dygraph.to_variable', 'fluid.dygraph.to_variable', (['a_np'], {}), '(a_np)\n', (5192, 5198), True, 'import paddle.fluid as fluid\n'), ((5215, 5246), 'paddle.fluid.dygraph.to_variable', 'fluid.dygraph.to_variable', (['b_np'], {}), '(b_np)\n', (5240, 5246), True, 'import paddle.fluid as fluid\n'), ((5536, 5557), 'paddle.fluid.dygraph.guard', 'fluid.dygraph.guard', ([], {}), '()\n', (5555, 5557), True, 'import paddle.fluid as fluid\n'), ((5575, 5606), 'paddle.fluid.dygraph.to_variable', 'fluid.dygraph.to_variable', (['a_np'], {}), '(a_np)\n', (5600, 5606), True, 'import paddle.fluid as fluid\n'), ((5623, 5654), 'paddle.fluid.dygraph.to_variable', 'fluid.dygraph.to_variable', (['b_np'], {}), '(b_np)\n', (5648, 5654), True, 'import paddle.fluid as fluid\n'), ((5671, 5702), 'paddle.fluid.dygraph.to_variable', 'fluid.dygraph.to_variable', (['c_np'], {}), '(c_np)\n', (5696, 5702), True, 'import paddle.fluid as fluid\n'), ((6076, 6097), 'paddle.fluid.dygraph.guard', 'fluid.dygraph.guard', ([], {}), '()\n', (6095, 6097), True, 'import paddle.fluid as fluid\n'), ((6115, 6146), 'paddle.fluid.dygraph.to_variable', 'fluid.dygraph.to_variable', (['a_np'], {}), '(a_np)\n', (6140, 6146), True, 'import paddle.fluid as fluid\n'), ((6163, 6194), 'paddle.fluid.dygraph.to_variable', 'fluid.dygraph.to_variable', (['b_np'], {}), '(b_np)\n', (6188, 6194), True, 'import paddle.fluid as fluid\n'), ((6211, 6242), 'paddle.fluid.dygraph.to_variable', 'fluid.dygraph.to_variable', (['c_np'], {}), '(c_np)\n', (6236, 
6242), True, 'import paddle.fluid as fluid\n'), ((6613, 6634), 'paddle.fluid.dygraph.guard', 'fluid.dygraph.guard', ([], {}), '()\n', (6632, 6634), True, 'import paddle.fluid as fluid\n'), ((6652, 6683), 'paddle.fluid.dygraph.to_variable', 'fluid.dygraph.to_variable', (['a_np'], {}), '(a_np)\n', (6677, 6683), True, 'import paddle.fluid as fluid\n'), ((6700, 6731), 'paddle.fluid.dygraph.to_variable', 'fluid.dygraph.to_variable', (['b_np'], {}), '(b_np)\n', (6725, 6731), True, 'import paddle.fluid as fluid\n'), ((6999, 7020), 'paddle.fluid.dygraph.guard', 'fluid.dygraph.guard', ([], {}), '()\n', (7018, 7020), True, 'import paddle.fluid as fluid\n'), ((7038, 7069), 'paddle.fluid.dygraph.to_variable', 'fluid.dygraph.to_variable', (['a_np'], {}), '(a_np)\n', (7063, 7069), True, 'import paddle.fluid as fluid\n'), ((7086, 7117), 'paddle.fluid.dygraph.to_variable', 'fluid.dygraph.to_variable', (['b_np'], {}), '(b_np)\n', (7111, 7117), True, 'import paddle.fluid as fluid\n'), ((7389, 7410), 'paddle.fluid.dygraph.guard', 'fluid.dygraph.guard', ([], {}), '()\n', (7408, 7410), True, 'import paddle.fluid as fluid\n'), ((7428, 7459), 'paddle.fluid.dygraph.to_variable', 'fluid.dygraph.to_variable', (['a_np'], {}), '(a_np)\n', (7453, 7459), True, 'import paddle.fluid as fluid\n'), ((7476, 7507), 'paddle.fluid.dygraph.to_variable', 'fluid.dygraph.to_variable', (['b_np'], {}), '(b_np)\n', (7501, 7507), True, 'import paddle.fluid as fluid\n'), ((7778, 7799), 'paddle.fluid.dygraph.guard', 'fluid.dygraph.guard', ([], {}), '()\n', (7797, 7799), True, 'import paddle.fluid as fluid\n'), ((7817, 7848), 'paddle.fluid.dygraph.to_variable', 'fluid.dygraph.to_variable', (['a_np'], {}), '(a_np)\n', (7842, 7848), True, 'import paddle.fluid as fluid\n'), ((7865, 7896), 'paddle.fluid.dygraph.to_variable', 'fluid.dygraph.to_variable', (['b_np'], {}), '(b_np)\n', (7890, 7896), True, 'import paddle.fluid as fluid\n'), ((8104, 8125), 'paddle.fluid.dygraph.guard', 'fluid.dygraph.guard', ([], {}), 
'()\n', (8123, 8125), True, 'import paddle.fluid as fluid\n'), ((8143, 8174), 'paddle.fluid.dygraph.to_variable', 'fluid.dygraph.to_variable', (['a_np'], {}), '(a_np)\n', (8168, 8174), True, 'import paddle.fluid as fluid\n'), ((8309, 8330), 'paddle.fluid.dygraph.guard', 'fluid.dygraph.guard', ([], {}), '()\n', (8328, 8330), True, 'import paddle.fluid as fluid\n'), ((8729, 8750), 'paddle.fluid.dygraph.guard', 'fluid.dygraph.guard', ([], {}), '()\n', (8748, 8750), True, 'import paddle.fluid as fluid\n'), ((8768, 8799), 'paddle.fluid.dygraph.to_variable', 'fluid.dygraph.to_variable', (['a_np'], {}), '(a_np)\n', (8793, 8799), True, 'import paddle.fluid as fluid\n'), ((8882, 8903), 'paddle.fluid.dygraph.guard', 'fluid.dygraph.guard', ([], {}), '()\n', (8901, 8903), True, 'import paddle.fluid as fluid\n'), ((943, 971), 'numpy.random.random', 'np.random.random', (['self.shape'], {}), '(self.shape)\n', (959, 971), True, 'import numpy as np\n'), ((1006, 1034), 'numpy.random.random', 'np.random.random', (['self.shape'], {}), '(self.shape)\n', (1022, 1034), True, 'import numpy as np\n'), ((1320, 1348), 'numpy.random.random', 'np.random.random', (['self.shape'], {}), '(self.shape)\n', (1336, 1348), True, 'import numpy as np\n'), ((1383, 1411), 'numpy.random.random', 'np.random.random', (['self.shape'], {}), '(self.shape)\n', (1399, 1411), True, 'import numpy as np\n'), ((1697, 1725), 'numpy.random.random', 'np.random.random', (['self.shape'], {}), '(self.shape)\n', (1713, 1725), True, 'import numpy as np\n'), ((1760, 1788), 'numpy.random.random', 'np.random.random', (['self.shape'], {}), '(self.shape)\n', (1776, 1788), True, 'import numpy as np\n'), ((2074, 2102), 'numpy.random.random', 'np.random.random', (['self.shape'], {}), '(self.shape)\n', (2090, 2102), True, 'import numpy as np\n'), ((2137, 2165), 'numpy.random.random', 'np.random.random', (['self.shape'], {}), '(self.shape)\n', (2153, 2165), True, 'import numpy as np\n'), ((2458, 2486), 'numpy.random.random', 
'np.random.random', (['self.shape'], {}), '(self.shape)\n', (2474, 2486), True, 'import numpy as np\n'), ((2756, 2784), 'numpy.random.random', 'np.random.random', (['self.shape'], {}), '(self.shape)\n', (2772, 2784), True, 'import numpy as np\n'), ((3046, 3074), 'numpy.random.random', 'np.random.random', (['self.shape'], {}), '(self.shape)\n', (3062, 3074), True, 'import numpy as np\n'), ((3344, 3372), 'numpy.random.random', 'np.random.random', (['self.shape'], {}), '(self.shape)\n', (3360, 3372), True, 'import numpy as np\n'), ((3634, 3662), 'numpy.random.random', 'np.random.random', (['self.shape'], {}), '(self.shape)\n', (3650, 3662), True, 'import numpy as np\n'), ((3952, 3980), 'numpy.random.random', 'np.random.random', (['self.shape'], {}), '(self.shape)\n', (3968, 3980), True, 'import numpy as np\n'), ((4267, 4295), 'numpy.random.random', 'np.random.random', (['self.shape'], {}), '(self.shape)\n', (4283, 4295), True, 'import numpy as np\n'), ((4330, 4358), 'numpy.random.random', 'np.random.random', (['self.shape'], {}), '(self.shape)\n', (4346, 4358), True, 'import numpy as np\n'), ((6489, 6517), 'numpy.random.random', 'np.random.random', (['self.shape'], {}), '(self.shape)\n', (6505, 6517), True, 'import numpy as np\n'), ((6552, 6580), 'numpy.random.random', 'np.random.random', (['self.shape'], {}), '(self.shape)\n', (6568, 6580), True, 'import numpy as np\n'), ((6875, 6903), 'numpy.random.random', 'np.random.random', (['self.shape'], {}), '(self.shape)\n', (6891, 6903), True, 'import numpy as np\n'), ((6938, 6966), 'numpy.random.random', 'np.random.random', (['self.shape'], {}), '(self.shape)\n', (6954, 6966), True, 'import numpy as np\n'), ((7265, 7293), 'numpy.random.random', 'np.random.random', (['self.shape'], {}), '(self.shape)\n', (7281, 7293), True, 'import numpy as np\n'), ((7328, 7356), 'numpy.random.random', 'np.random.random', (['self.shape'], {}), '(self.shape)\n', (7344, 7356), True, 'import numpy as np\n'), ((7654, 7682), 
'numpy.random.random', 'np.random.random', (['self.shape'], {}), '(self.shape)\n', (7670, 7682), True, 'import numpy as np\n'), ((7717, 7745), 'numpy.random.random', 'np.random.random', (['self.shape'], {}), '(self.shape)\n', (7733, 7745), True, 'import numpy as np\n'), ((8035, 8071), 'numpy.random.uniform', 'np.random.uniform', (['(-1)', '(1)', 'self.shape'], {}), '(-1, 1, self.shape)\n', (8052, 8071), True, 'import numpy as np\n'), ((8374, 8391), 'numpy.array', 'np.array', (['[100.1]'], {}), '([100.1])\n', (8382, 8391), True, 'import numpy as np\n'), ((8660, 8696), 'numpy.random.uniform', 'np.random.uniform', (['(-1)', '(1)', 'self.shape'], {}), '(-1, 1, self.shape)\n', (8677, 8696), True, 'import numpy as np\n'), ((8950, 8963), 'numpy.array', 'np.array', (['[2]'], {}), '([2])\n', (8958, 8963), True, 'import numpy as np\n')] |
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
"""Nipype translation of ANTs' workflows."""
# general purpose
from collections import OrderedDict
from multiprocessing import cpu_count
from pkg_resources import resource_filename as pkgr_fn
from warnings import warn
# nipype
from nipype.pipeline import engine as pe
from nipype.interfaces import utility as niu
from nipype.interfaces.ants import (
AI,
Atropos,
ImageMath,
MultiplyImages,
N4BiasFieldCorrection,
ThresholdImage,
)
from ..utils.misc import get_template_specs
from ..utils.connections import pop_file as _pop
# niworkflows
from ..interfaces.fixes import (
FixHeaderRegistration as Registration,
FixHeaderApplyTransforms as ApplyTransforms,
)
from ..interfaces.nibabel import ApplyMask, RegridToZooms
from ..interfaces.header import CopyXForm
# Per-modality ATROPOS segmentation priors: each entry maps the number of
# tissue classes (``nclasses``) and the class index assigned to CSF, gray
# matter (``gm``), and white matter (``wm``) for that image contrast.
ATROPOS_MODELS = {
    "T1w": OrderedDict((("nclasses", 3), ("csf", 1), ("gm", 2), ("wm", 3))),
    "T2w": OrderedDict((("nclasses", 3), ("csf", 3), ("gm", 2), ("wm", 1))),
    "FLAIR": OrderedDict((("nclasses", 3), ("csf", 1), ("gm", 3), ("wm", 2))),
}
def init_brain_extraction_wf(
name="brain_extraction_wf",
in_template="OASIS30ANTs",
template_spec=None,
use_float=True,
normalization_quality="precise",
omp_nthreads=None,
mem_gb=3.0,
bids_suffix="T1w",
atropos_refine=True,
atropos_use_random_seed=True,
atropos_model=None,
use_laplacian=True,
bspline_fitting_distance=200,
):
"""
Build a workflow for atlas-based brain extraction on anatomical MRI data.
This is a Nipype implementation of atlas-based brain extraction inspired by
the official ANTs' ``antsBrainExtraction.sh`` workflow (only for 3D images).
The workflow follows the following structure:
1. Step 1 performs several clerical tasks (preliminary INU correction,
calculating the Laplacian of inputs, affine initialization) and the
core spatial normalization.
2. Maps the brain mask into target space using the normalization
calculated in 1.
3. Superstep 1b: binarization of the brain mask
4. Maps the WM (white matter) probability map from the template, if such prior exists.
Combines the BS (brainstem) probability map before mapping if the WM
and BS are given separately (as it is the case for ``OASIS30ANTs``.)
5. Run a second N4 INU correction round, using the prior mapped into
individual step in step 4 if available.
6. Superstep 6: apply ATROPOS on the INU-corrected result of step 5, and
massage its outputs
7. Superstep 7: use results from 4 to refine the brain mask
8. If exist, use priors from step 4, calculate the overlap of the posteriors
estimated in step 4 to select that overlapping the most with the WM+BS
prior from the template. Combine that posterior with the refined brain
mask and pass it on to the next step.
9. Apply a final N4 using the refined brain mask (or the map calculated in
step 8 if priors were found) as weights map for the algorithm.
Workflow Graph
.. workflow::
:graph2use: orig
:simple_form: yes
from niworkflows.anat.ants import init_brain_extraction_wf
wf = init_brain_extraction_wf()
Parameters
----------
in_template : str
Name of the skull-stripping template ('OASIS30ANTs', 'NKI', or
path).
The brain template from which regions will be projected
Anatomical template created using e.g. LPBA40 data set with
``buildtemplateparallel.sh`` in ANTs.
The workflow will automatically search for a brain probability
mask created using e.g. LPBA40 data set which have brain masks
defined, and warped to anatomical template and
averaged resulting in a probability image.
use_float : bool
Whether single precision should be used
normalization_quality : str
Use more precise or faster registration parameters
(default: ``precise``, other possible values: ``testing``)
omp_nthreads : int
Maximum number of threads an individual process may use
mem_gb : float
Estimated peak memory consumption of the most hungry nodes
in the workflow
bids_suffix : str
Sequence type of the first input image. For a list of acceptable values
see https://bids-specification.readthedocs.io/en/latest/\
04-modality-specific-files/01-magnetic-resonance-imaging-data.html#anatomy-imaging-data
atropos_refine : bool
Enables or disables the whole ATROPOS sub-workflow
atropos_use_random_seed : bool
Whether ATROPOS should generate a random seed based on the
system's clock
atropos_model : tuple or None
Allows to specify a particular segmentation model, overwriting
the defaults based on ``bids_suffix``
use_laplacian : bool
Enables or disables alignment of the Laplacian as an additional
criterion for image registration quality (default: True)
bspline_fitting_distance : float
The size of the b-spline mesh grid elements, in mm (default: 200)
name : str, optional
Workflow name (default: antsBrainExtraction)
Inputs
------
in_files : list
List of input anatomical images to be brain-extracted,
typically T1-weighted.
If a list of anatomical images is provided, subsequently
specified images are used during the segmentation process.
However, only the first image is used in the registration
of priors.
Our suggestion would be to specify the T1w as the first image.
in_mask : list, optional
Mask used for registration to limit the metric
computation to a specific region.
Outputs
-------
out_file : str
Skull-stripped and :abbr:`INU (intensity non-uniformity)`-corrected ``in_files``
out_mask : str
Calculated brain mask
bias_corrected : str
The ``in_files`` input images, after :abbr:`INU (intensity non-uniformity)`
correction, before skull-stripping.
bias_image : str
The :abbr:`INU (intensity non-uniformity)` field estimated for each
input in ``in_files``
out_segm : str
Output segmentation by ATROPOS
out_tpms : str
Output :abbr:`TPMs (tissue probability maps)` by ATROPOS
"""
from packaging.version import parse as parseversion, Version
from templateflow.api import get as get_template
wf = pe.Workflow(name)
template_spec = template_spec or {}
# suffix passed via spec takes precedence
template_spec["suffix"] = template_spec.get("suffix", bids_suffix)
tpl_target_path, common_spec = get_template_specs(
in_template, template_spec=template_spec
)
# Get probabilistic brain mask if available
tpl_mask_path = get_template(
in_template, label="brain", suffix="probseg", **common_spec
) or get_template(in_template, desc="brain", suffix="mask", **common_spec)
if omp_nthreads is None or omp_nthreads < 1:
omp_nthreads = cpu_count()
inputnode = pe.Node(
niu.IdentityInterface(fields=["in_files", "in_mask"]), name="inputnode"
)
# Try to find a registration mask, set if available
tpl_regmask_path = get_template(
in_template, desc="BrainCerebellumExtraction", suffix="mask", **common_spec
)
if tpl_regmask_path:
inputnode.inputs.in_mask = str(tpl_regmask_path)
outputnode = pe.Node(
niu.IdentityInterface(
fields=[
"out_file",
"out_mask",
"bias_corrected",
"bias_image",
"out_segm",
"out_tpms",
]
),
name="outputnode",
)
trunc = pe.MapNode(
ImageMath(
operation="TruncateImageIntensity", op2="0.01 0.999 256", copy_header=True
),
name="truncate_images",
iterfield=["op1"],
)
inu_n4 = pe.MapNode(
N4BiasFieldCorrection(
dimension=3,
save_bias=False,
copy_header=True,
n_iterations=[50] * 4,
convergence_threshold=1e-7,
shrink_factor=4,
bspline_fitting_distance=bspline_fitting_distance,
),
n_procs=omp_nthreads,
name="inu_n4",
iterfield=["input_image"],
)
res_tmpl = pe.Node(
RegridToZooms(in_file=tpl_target_path, zooms=(4, 4, 4), smooth=True),
name="res_tmpl",
)
res_target = pe.Node(RegridToZooms(zooms=(4, 4, 4), smooth=True), name="res_target")
lap_tmpl = pe.Node(
ImageMath(operation="Laplacian", op2="1.5 1", copy_header=True), name="lap_tmpl"
)
lap_tmpl.inputs.op1 = tpl_target_path
lap_target = pe.Node(
ImageMath(operation="Laplacian", op2="1.5 1", copy_header=True),
name="lap_target",
)
mrg_tmpl = pe.Node(niu.Merge(2), name="mrg_tmpl")
mrg_tmpl.inputs.in1 = tpl_target_path
mrg_target = pe.Node(niu.Merge(2), name="mrg_target")
# Initialize transforms with antsAI
init_aff = pe.Node(
AI(
metric=("Mattes", 32, "Regular", 0.25),
transform=("Affine", 0.1),
search_factor=(15, 0.1),
principal_axes=False,
convergence=(10, 1e-6, 10),
verbose=True,
),
name="init_aff",
n_procs=omp_nthreads,
)
# Tolerate missing ANTs at construction time
try:
init_aff.inputs.search_grid = (40, (0, 40, 40))
except ValueError:
warn(
"antsAI's option --search-grid was added in ANTS 2.3.0 "
f"({init_aff.interface.version} found.)"
)
# Set up spatial normalization
settings_file = (
"antsBrainExtraction_%s.json"
if use_laplacian
else "antsBrainExtractionNoLaplacian_%s.json"
)
norm = pe.Node(
Registration(
from_file=pkgr_fn("niworkflows.data", settings_file % normalization_quality)
),
name="norm",
n_procs=omp_nthreads,
mem_gb=mem_gb,
)
norm.inputs.float = use_float
fixed_mask_trait = "fixed_image_mask"
if norm.interface.version and parseversion(norm.interface.version) >= Version(
"2.2.0"
):
fixed_mask_trait += "s"
map_brainmask = pe.Node(
ApplyTransforms(interpolation="Gaussian"), name="map_brainmask", mem_gb=1,
)
map_brainmask.inputs.input_image = str(tpl_mask_path)
thr_brainmask = pe.Node(
ThresholdImage(
dimension=3,
th_low=0.5,
th_high=1.0,
inside_value=1,
outside_value=0,
copy_header=True,
),
name="thr_brainmask",
)
# Refine INU correction
inu_n4_final = pe.MapNode(
N4BiasFieldCorrection(
dimension=3,
save_bias=True,
copy_header=True,
n_iterations=[50] * 5,
convergence_threshold=1e-7,
shrink_factor=4,
bspline_fitting_distance=bspline_fitting_distance,
),
n_procs=omp_nthreads,
name="inu_n4_final",
iterfield=["input_image"],
)
try:
inu_n4_final.inputs.rescale_intensities = True
except ValueError:
warn(
"N4BiasFieldCorrection's --rescale-intensities option was added in ANTS 2.1.0 "
f"({inu_n4_final.interface.version} found.) Please consider upgrading.",
UserWarning,
)
# Apply mask
apply_mask = pe.MapNode(ApplyMask(), iterfield=["in_file"], name="apply_mask")
# fmt: off
wf.connect([
(inputnode, trunc, [("in_files", "op1")]),
(inputnode, inu_n4_final, [("in_files", "input_image")]),
(inputnode, init_aff, [("in_mask", "fixed_image_mask")]),
(inputnode, norm, [("in_mask", fixed_mask_trait)]),
(inputnode, map_brainmask, [(("in_files", _pop), "reference_image")]),
(trunc, inu_n4, [("output_image", "input_image")]),
(inu_n4, res_target, [(("output_image", _pop), "in_file")]),
(res_tmpl, init_aff, [("out_file", "fixed_image")]),
(res_target, init_aff, [("out_file", "moving_image")]),
(init_aff, norm, [("output_transform", "initial_moving_transform")]),
(norm, map_brainmask, [
("reverse_transforms", "transforms"),
("reverse_invert_flags", "invert_transform_flags"),
]),
(map_brainmask, thr_brainmask, [("output_image", "input_image")]),
(map_brainmask, inu_n4_final, [("output_image", "weight_image")]),
(inu_n4_final, apply_mask, [("output_image", "in_file")]),
(thr_brainmask, apply_mask, [("output_image", "in_mask")]),
(thr_brainmask, outputnode, [("output_image", "out_mask")]),
(inu_n4_final, outputnode, [("output_image", "bias_corrected"),
("bias_image", "bias_image")]),
(apply_mask, outputnode, [("out_file", "out_file")]),
])
# fmt: on
wm_tpm = (
get_template(in_template, label="WM", suffix="probseg", **common_spec) or None
)
if wm_tpm:
map_wmmask = pe.Node(
ApplyTransforms(interpolation="Gaussian"), name="map_wmmask", mem_gb=1,
)
# Add the brain stem if it is found.
bstem_tpm = (
get_template(in_template, label="BS", suffix="probseg", **common_spec)
or None
)
if bstem_tpm:
full_wm = pe.Node(niu.Function(function=_imsum), name="full_wm")
full_wm.inputs.op1 = str(wm_tpm)
full_wm.inputs.op2 = str(bstem_tpm)
# fmt: off
wf.connect([
(full_wm, map_wmmask, [("out", "input_image")])
])
# fmt: on
else:
map_wmmask.inputs.input_image = str(wm_tpm)
# fmt: off
wf.disconnect([
(map_brainmask, inu_n4_final, [("output_image", "weight_image")]),
])
wf.connect([
(inputnode, map_wmmask, [(("in_files", _pop), "reference_image")]),
(norm, map_wmmask, [
("reverse_transforms", "transforms"),
("reverse_invert_flags", "invert_transform_flags"),
]),
(map_wmmask, inu_n4_final, [("output_image", "weight_image")]),
])
# fmt: on
if use_laplacian:
lap_tmpl = pe.Node(
ImageMath(operation="Laplacian", op2="1.5 1", copy_header=True),
name="lap_tmpl",
)
lap_tmpl.inputs.op1 = tpl_target_path
lap_target = pe.Node(
ImageMath(operation="Laplacian", op2="1.5 1", copy_header=True),
name="lap_target",
)
mrg_tmpl = pe.Node(niu.Merge(2), name="mrg_tmpl")
mrg_tmpl.inputs.in1 = tpl_target_path
mrg_target = pe.Node(niu.Merge(2), name="mrg_target")
# fmt: off
wf.connect([
(inu_n4, lap_target, [(("output_image", _pop), "op1")]),
(lap_tmpl, mrg_tmpl, [("output_image", "in2")]),
(inu_n4, mrg_target, [("output_image", "in1")]),
(lap_target, mrg_target, [("output_image", "in2")]),
(mrg_tmpl, norm, [("out", "fixed_image")]),
(mrg_target, norm, [("out", "moving_image")]),
])
# fmt: on
else:
norm.inputs.fixed_image = tpl_target_path
# fmt: off
wf.connect([
(inu_n4, norm, [(("output_image", _pop), "moving_image")]),
])
# fmt: on
if atropos_refine:
atropos_model = atropos_model or list(ATROPOS_MODELS[bids_suffix].values())
atropos_wf = init_atropos_wf(
use_random_seed=atropos_use_random_seed,
omp_nthreads=omp_nthreads,
mem_gb=mem_gb,
in_segmentation_model=atropos_model,
bspline_fitting_distance=bspline_fitting_distance,
wm_prior=bool(wm_tpm),
)
# fmt: off
wf.disconnect([
(thr_brainmask, outputnode, [("output_image", "out_mask")]),
(inu_n4_final, outputnode, [("output_image", "bias_corrected"),
("bias_image", "bias_image")]),
(apply_mask, outputnode, [("out_file", "out_file")]),
])
wf.connect([
(inputnode, atropos_wf, [("in_files", "inputnode.in_files")]),
(inu_n4_final, atropos_wf, [("output_image", "inputnode.in_corrected")]),
(thr_brainmask, atropos_wf, [("output_image", "inputnode.in_mask")]),
(atropos_wf, outputnode, [
("outputnode.out_file", "out_file"),
("outputnode.bias_corrected", "bias_corrected"),
("outputnode.bias_image", "bias_image"),
("outputnode.out_mask", "out_mask"),
("outputnode.out_segm", "out_segm"),
("outputnode.out_tpms", "out_tpms"),
]),
])
# fmt: on
if wm_tpm:
# fmt: off
wf.connect([
(map_wmmask, atropos_wf, [("output_image", "inputnode.wm_prior")]),
])
# fmt: on
return wf
def init_atropos_wf(
    name="atropos_wf",
    use_random_seed=True,
    omp_nthreads=None,
    mem_gb=3.0,
    padding=10,
    in_segmentation_model=tuple(ATROPOS_MODELS["T1w"].values()),
    bspline_fitting_distance=200,
    wm_prior=False,
):
    """
    Create an ANTs' ATROPOS workflow for brain tissue segmentation.

    Re-interprets supersteps 6 and 7 of ``antsBrainExtraction.sh``,
    which refine the mask previously computed with the spatial
    normalization to the template.
    The workflow also executes steps 8 and 9 of the brain extraction
    workflow.

    Workflow Graph
        .. workflow::
            :graph2use: orig
            :simple_form: yes

            from niworkflows.anat.ants import init_atropos_wf
            wf = init_atropos_wf()

    Parameters
    ----------
    name : str, optional
        Workflow name (default: "atropos_wf").
    use_random_seed : bool
        Whether ATROPOS should generate a random seed based on the
        system's clock
    omp_nthreads : int
        Maximum number of threads an individual process may use
    mem_gb : float
        Estimated peak memory consumption of the most hungry nodes
        in the workflow
    padding : int
        Pad images with zeros before processing
    in_segmentation_model : tuple
        A k-means segmentation is run to find gray or white matter
        around the edge of the initial brain mask warped from the
        template.
        This produces a segmentation image with :math:`$K$` classes,
        ordered by mean intensity in increasing order.
        With this option, you can control :math:`$K$` and tell the script which
        classes represent CSF, gray and white matter.
        Format (K, csfLabel, gmLabel, wmLabel).
        Examples:
        ``(3,1,2,3)`` for T1 with K=3, CSF=1, GM=2, WM=3 (default),
        ``(3,3,2,1)`` for T2 with K=3, CSF=3, GM=2, WM=1,
        ``(3,1,3,2)`` for FLAIR with K=3, CSF=1 GM=3, WM=2,
        ``(4,4,2,3)`` uses K=4, CSF=4, GM=2, WM=3.
    bspline_fitting_distance : float
        The size of the b-spline mesh grid elements, in mm (default: 200)
    wm_prior : :obj:`bool`
        Whether the WM posterior obtained with ATROPOS should be regularized with a prior
        map (typically, mapped from the template). When ``wm_prior`` is ``True`` the input
        field ``wm_prior`` of the input node must be connected.

    Inputs
    ------
    in_files : list
        The original anatomical images passed in to the brain-extraction workflow.
    in_corrected : list
        :abbr:`INU (intensity non-uniformity)`-corrected files.
    in_mask : str
        Brain mask calculated previously.
    wm_prior : :obj:`str`
        Path to the WM prior probability map, aligned with the individual data.

    Outputs
    -------
    out_file : :obj:`str`
        Path of the corrected and brain-extracted result, using the ATROPOS refinement.
    bias_corrected : :obj:`str`
        Path of the corrected and result, using the ATROPOS refinement.
    bias_image : :obj:`str`
        Path of the estimated INU bias field, using the ATROPOS refinement.
    out_mask : str
        Refined brain mask
    out_segm : str
        Output segmentation
    out_tpms : str
        Output :abbr:`TPMs (tissue probability maps)`

    """
    wf = pe.Workflow(name)

    out_fields = ["bias_corrected", "bias_image", "out_mask", "out_segm", "out_tpms"]

    inputnode = pe.Node(
        niu.IdentityInterface(
            fields=["in_files", "in_corrected", "in_mask", "wm_prior"]
        ),
        name="inputnode",
    )
    outputnode = pe.Node(
        niu.IdentityInterface(fields=["out_file"] + out_fields), name="outputnode"
    )
    # Restores the header/affine of the first input onto every final output
    copy_xform = pe.Node(
        CopyXForm(fields=out_fields), name="copy_xform", run_without_submitting=True
    )

    # Morphological dilation, radius=2
    dil_brainmask = pe.Node(
        ImageMath(operation="MD", op2="2", copy_header=True), name="dil_brainmask"
    )
    # Get largest connected component
    get_brainmask = pe.Node(
        ImageMath(operation="GetLargestComponent", copy_header=True),
        name="get_brainmask",
    )

    # Run atropos (core node)
    atropos = pe.Node(
        Atropos(
            convergence_threshold=0.0,
            dimension=3,
            initialization="KMeans",
            likelihood_model="Gaussian",
            mrf_radius=[1, 1, 1],
            mrf_smoothing_factor=0.1,
            n_iterations=3,
            number_of_tissue_classes=in_segmentation_model[0],
            save_posteriors=True,
            use_random_seed=use_random_seed,
        ),
        name="01_atropos",
        n_procs=omp_nthreads,
        mem_gb=mem_gb,
    )

    # massage outputs
    pad_segm = pe.Node(
        ImageMath(operation="PadImage", op2=f"{padding}", copy_header=False),
        name="02_pad_segm",
    )
    pad_mask = pe.Node(
        ImageMath(operation="PadImage", op2=f"{padding}", copy_header=False),
        name="03_pad_mask",
    )

    # Split segmentation in binary masks
    sel_labels = pe.Node(
        niu.Function(
            function=_select_labels, output_names=["out_wm", "out_gm", "out_csf"]
        ),
        name="04_sel_labels",
    )
    # in_segmentation_model is (K, csfLabel, gmLabel, wmLabel); the reversed tail
    # yields (wm, gm, csf), matching output_names above.
    sel_labels.inputs.labels = list(reversed(in_segmentation_model[1:]))

    # Select largest components (GM, WM)
    # ImageMath ${DIMENSION} ${EXTRACTION_WM} GetLargestComponent ${EXTRACTION_WM}
    get_wm = pe.Node(ImageMath(operation="GetLargestComponent"), name="05_get_wm")
    get_gm = pe.Node(ImageMath(operation="GetLargestComponent"), name="06_get_gm")

    # Fill holes and calculate intersection
    # ImageMath ${DIMENSION} ${EXTRACTION_TMP} FillHoles ${EXTRACTION_GM} 2
    # MultiplyImages ${DIMENSION} ${EXTRACTION_GM} ${EXTRACTION_TMP} ${EXTRACTION_GM}
    fill_gm = pe.Node(ImageMath(operation="FillHoles", op2="2"), name="07_fill_gm")
    mult_gm = pe.Node(
        MultiplyImages(dimension=3, output_product_image="08_mult_gm.nii.gz"),
        name="08_mult_gm",
    )

    # MultiplyImages ${DIMENSION} ${EXTRACTION_WM} ${ATROPOS_WM_CLASS_LABEL} ${EXTRACTION_WM}
    # ImageMath ${DIMENSION} ${EXTRACTION_TMP} ME ${EXTRACTION_CSF} 10
    relabel_wm = pe.Node(
        MultiplyImages(
            dimension=3,
            second_input=in_segmentation_model[-1],
            output_product_image="09_relabel_wm.nii.gz",
        ),
        name="09_relabel_wm",
    )
    me_csf = pe.Node(ImageMath(operation="ME", op2="10"), name="10_me_csf")

    # ImageMath ${DIMENSION} ${EXTRACTION_GM} addtozero ${EXTRACTION_GM} ${EXTRACTION_TMP}
    # MultiplyImages ${DIMENSION} ${EXTRACTION_GM} ${ATROPOS_GM_CLASS_LABEL} ${EXTRACTION_GM}
    # ImageMath ${DIMENSION} ${EXTRACTION_SEGMENTATION} addtozero ${EXTRACTION_WM} ${EXTRACTION_GM}
    add_gm = pe.Node(ImageMath(operation="addtozero"), name="11_add_gm")
    relabel_gm = pe.Node(
        MultiplyImages(
            dimension=3,
            second_input=in_segmentation_model[-2],
            output_product_image="12_relabel_gm.nii.gz",
        ),
        name="12_relabel_gm",
    )
    add_gm_wm = pe.Node(ImageMath(operation="addtozero"), name="13_add_gm_wm")

    # Superstep 7
    # Split segmentation in binary masks
    sel_labels2 = pe.Node(
        niu.Function(function=_select_labels, output_names=["out_gm", "out_wm"]),
        name="14_sel_labels2",
    )
    sel_labels2.inputs.labels = in_segmentation_model[2:]

    # ImageMath ${DIMENSION} ${EXTRACTION_MASK} addtozero ${EXTRACTION_MASK} ${EXTRACTION_TMP}
    add_7 = pe.Node(ImageMath(operation="addtozero"), name="15_add_7")
    # ImageMath ${DIMENSION} ${EXTRACTION_MASK} ME ${EXTRACTION_MASK} 2
    me_7 = pe.Node(ImageMath(operation="ME", op2="2"), name="16_me_7")
    # ImageMath ${DIMENSION} ${EXTRACTION_MASK} GetLargestComponent ${EXTRACTION_MASK}
    comp_7 = pe.Node(ImageMath(operation="GetLargestComponent"), name="17_comp_7")
    # ImageMath ${DIMENSION} ${EXTRACTION_MASK} MD ${EXTRACTION_MASK} 4
    md_7 = pe.Node(ImageMath(operation="MD", op2="4"), name="18_md_7")
    # ImageMath ${DIMENSION} ${EXTRACTION_MASK} FillHoles ${EXTRACTION_MASK} 2
    fill_7 = pe.Node(ImageMath(operation="FillHoles", op2="2"), name="19_fill_7")
    # ImageMath ${DIMENSION} ${EXTRACTION_MASK} addtozero ${EXTRACTION_MASK} \
    # ${EXTRACTION_MASK_PRIOR_WARPED}
    add_7_2 = pe.Node(ImageMath(operation="addtozero"), name="20_add_7_2")
    # ImageMath ${DIMENSION} ${EXTRACTION_MASK} MD ${EXTRACTION_MASK} 5
    md_7_2 = pe.Node(ImageMath(operation="MD", op2="5"), name="21_md_7_2")
    # ImageMath ${DIMENSION} ${EXTRACTION_MASK} ME ${EXTRACTION_MASK} 5
    me_7_2 = pe.Node(ImageMath(operation="ME", op2="5"), name="22_me_7_2")

    # De-pad (undo the zero-padding applied by 02_pad_segm / 03_pad_mask)
    depad_mask = pe.Node(
        ImageMath(operation="PadImage", op2="-%d" % padding), name="23_depad_mask"
    )
    depad_segm = pe.Node(
        ImageMath(operation="PadImage", op2="-%d" % padding), name="24_depad_segm"
    )
    depad_gm = pe.Node(
        ImageMath(operation="PadImage", op2="-%d" % padding), name="25_depad_gm"
    )
    depad_wm = pe.Node(
        ImageMath(operation="PadImage", op2="-%d" % padding), name="26_depad_wm"
    )
    depad_csf = pe.Node(
        ImageMath(operation="PadImage", op2="-%d" % padding), name="27_depad_csf"
    )

    msk_conform = pe.Node(niu.Function(function=_conform_mask), name="msk_conform")
    merge_tpms = pe.Node(niu.Merge(in_segmentation_model[0]), name="merge_tpms")

    # Picks the WM posterior among ATROPOS' outputs (used to weight the final N4)
    sel_wm = pe.Node(niu.Select(), name="sel_wm", run_without_submitting=True)
    if not wm_prior:
        # Without a prior map, the WM class label directly indexes the posteriors list
        sel_wm.inputs.index = in_segmentation_model[-1] - 1

    copy_xform_wm = pe.Node(
        CopyXForm(fields=["wm_map"]), name="copy_xform_wm", run_without_submitting=True
    )

    # Refine INU correction
    inu_n4_final = pe.MapNode(
        N4BiasFieldCorrection(
            dimension=3,
            save_bias=True,
            copy_header=True,
            n_iterations=[50] * 5,
            convergence_threshold=1e-7,
            shrink_factor=4,
            bspline_fitting_distance=bspline_fitting_distance,
        ),
        n_procs=omp_nthreads,
        name="inu_n4_final",
        iterfield=["input_image"],
    )
    # Setting this trait raises ValueError on old ANTs releases that lack the flag
    try:
        inu_n4_final.inputs.rescale_intensities = True
    except ValueError:
        warn(
            "N4BiasFieldCorrection's --rescale-intensities option was added in ANTS 2.1.0 "
            f"({inu_n4_final.interface.version} found.) Please consider upgrading.",
            UserWarning,
        )

    # Apply mask
    apply_mask = pe.MapNode(ApplyMask(), iterfield=["in_file"], name="apply_mask")

    # fmt: off
    wf.connect([
        (inputnode, dil_brainmask, [("in_mask", "op1")]),
        (inputnode, copy_xform, [(("in_files", _pop), "hdr_file")]),
        (inputnode, copy_xform_wm, [(("in_files", _pop), "hdr_file")]),
        (inputnode, pad_mask, [("in_mask", "op1")]),
        (inputnode, atropos, [("in_corrected", "intensity_images")]),
        (inputnode, inu_n4_final, [("in_files", "input_image")]),
        (inputnode, msk_conform, [(("in_files", _pop), "in_reference")]),
        (dil_brainmask, get_brainmask, [("output_image", "op1")]),
        (get_brainmask, atropos, [("output_image", "mask_image")]),
        (atropos, pad_segm, [("classified_image", "op1")]),
        (pad_segm, sel_labels, [("output_image", "in_segm")]),
        (sel_labels, get_wm, [("out_wm", "op1")]),
        (sel_labels, get_gm, [("out_gm", "op1")]),
        (get_gm, fill_gm, [("output_image", "op1")]),
        (get_gm, mult_gm, [("output_image", "first_input")]),
        (fill_gm, mult_gm, [("output_image", "second_input")]),
        (get_wm, relabel_wm, [("output_image", "first_input")]),
        (sel_labels, me_csf, [("out_csf", "op1")]),
        (mult_gm, add_gm, [("output_product_image", "op1")]),
        (me_csf, add_gm, [("output_image", "op2")]),
        (add_gm, relabel_gm, [("output_image", "first_input")]),
        (relabel_wm, add_gm_wm, [("output_product_image", "op1")]),
        (relabel_gm, add_gm_wm, [("output_product_image", "op2")]),
        (add_gm_wm, sel_labels2, [("output_image", "in_segm")]),
        (sel_labels2, add_7, [("out_wm", "op1"), ("out_gm", "op2")]),
        (add_7, me_7, [("output_image", "op1")]),
        (me_7, comp_7, [("output_image", "op1")]),
        (comp_7, md_7, [("output_image", "op1")]),
        (md_7, fill_7, [("output_image", "op1")]),
        (fill_7, add_7_2, [("output_image", "op1")]),
        (pad_mask, add_7_2, [("output_image", "op2")]),
        (add_7_2, md_7_2, [("output_image", "op1")]),
        (md_7_2, me_7_2, [("output_image", "op1")]),
        (me_7_2, depad_mask, [("output_image", "op1")]),
        (add_gm_wm, depad_segm, [("output_image", "op1")]),
        (relabel_wm, depad_wm, [("output_product_image", "op1")]),
        (relabel_gm, depad_gm, [("output_product_image", "op1")]),
        (sel_labels, depad_csf, [("out_csf", "op1")]),
        (depad_csf, merge_tpms, [("output_image", "in1")]),
        (depad_gm, merge_tpms, [("output_image", "in2")]),
        (depad_wm, merge_tpms, [("output_image", "in3")]),
        (depad_mask, msk_conform, [("output_image", "in_mask")]),
        (msk_conform, copy_xform, [("out", "out_mask")]),
        (depad_segm, copy_xform, [("output_image", "out_segm")]),
        (merge_tpms, copy_xform, [("out", "out_tpms")]),
        (atropos, sel_wm, [("posteriors", "inlist")]),
        (sel_wm, copy_xform_wm, [("out", "wm_map")]),
        (copy_xform_wm, inu_n4_final, [("wm_map", "weight_image")]),
        (inu_n4_final, copy_xform, [("output_image", "bias_corrected"),
                                    ("bias_image", "bias_image")]),
        (copy_xform, apply_mask, [("bias_corrected", "in_file"),
                                  ("out_mask", "in_mask")]),
        (apply_mask, outputnode, [("out_file", "out_file")]),
        (copy_xform, outputnode, [
            ("bias_corrected", "bias_corrected"),
            ("bias_image", "bias_image"),
            ("out_mask", "out_mask"),
            ("out_segm", "out_segm"),
            ("out_tpms", "out_tpms"),
        ]),
    ])
    # fmt: on

    if wm_prior:
        from nipype.algorithms.metrics import FuzzyOverlap

        def _argmax(in_dice):
            import numpy as np

            return np.argmax(in_dice)

        # Replicate the prior map so it can be compared against every posterior
        match_wm = pe.Node(
            niu.Function(function=_matchlen),
            name="match_wm",
            run_without_submitting=True,
        )
        overlap = pe.Node(FuzzyOverlap(), name="overlap", run_without_submitting=True)

        apply_wm_prior = pe.Node(niu.Function(function=_improd), name="apply_wm_prior")

        # fmt: off
        wf.disconnect([
            (copy_xform_wm, inu_n4_final, [("wm_map", "weight_image")]),
        ])
        wf.connect([
            (inputnode, apply_wm_prior, [("in_mask", "in_mask"),
                                         ("wm_prior", "op2")]),
            (inputnode, match_wm, [("wm_prior", "value")]),
            (atropos, match_wm, [("posteriors", "reference")]),
            (atropos, overlap, [("posteriors", "in_ref")]),
            (match_wm, overlap, [("out", "in_tst")]),
            # The posterior with the best fuzzy Dice against the prior is the WM map
            (overlap, sel_wm, [(("class_fdi", _argmax), "index")]),
            (copy_xform_wm, apply_wm_prior, [("wm_map", "op1")]),
            (apply_wm_prior, inu_n4_final, [("out", "weight_image")]),
        ])
        # fmt: on
    return wf
def init_n4_only_wf(
    atropos_model=None,
    atropos_refine=True,
    atropos_use_random_seed=True,
    bids_suffix="T1w",
    mem_gb=3.0,
    name="n4_only_wf",
    omp_nthreads=None,
):
    """
    Build a workflow to sidetrack brain extraction on skull-stripped datasets.

    An alternative workflow to "init_brain_extraction_wf", for anatomical
    images which have already been brain extracted.

      1. Creates brain mask assuming all zero voxels are outside the brain
      2. Applies N4 bias field correction
      3. (Optional) apply ATROPOS and massage its outputs
      4. Use results from 3 to refine N4 bias field correction

    Workflow Graph
        .. workflow::
            :graph2use: orig
            :simple_form: yes

            from niworkflows.anat.ants import init_n4_only_wf
            wf = init_n4_only_wf()

    Parameters
    ----------
    omp_nthreads : int
        Maximum number of threads an individual process may use
    mem_gb : float
        Estimated peak memory consumption of the most hungry nodes
    bids_suffix : str
        Sequence type of the first input image. For a list of acceptable values see
        https://bids-specification.readthedocs.io/en/latest/04-modality-specific-files/01-magnetic-resonance-imaging-data.html#anatomy-imaging-data
    atropos_refine : bool
        Enables or disables the whole ATROPOS sub-workflow
    atropos_use_random_seed : bool
        Whether ATROPOS should generate a random seed based on the
        system's clock
    atropos_model : tuple or None
        Allows to specify a particular segmentation model, overwriting
        the defaults based on ``bids_suffix``
    name : str, optional
        Workflow name (default: ``'n4_only_wf'``).

    Inputs
    ------
    in_files
        List of input anatomical images to be bias corrected,
        typically T1-weighted.
        If a list of anatomical images is provided, subsequently
        specified images are used during the segmentation process.
        However, only the first image is used in the registration
        of priors.
        Our suggestion would be to specify the T1w as the first image.

    Outputs
    -------
    out_file
        :abbr:`INU (intensity non-uniformity)`-corrected ``in_files``
    out_mask
        Calculated brain mask
    bias_corrected
        Same as "out_file", provided for consistency with brain extraction
    bias_image
        The :abbr:`INU (intensity non-uniformity)` field estimated for each
        input in ``in_files``
    out_segm
        Output segmentation by ATROPOS
    out_tpms
        Output :abbr:`TPMs (tissue probability maps)` by ATROPOS

    """
    from ..interfaces.nibabel import Binarize

    wf = pe.Workflow(name)

    inputnode = pe.Node(
        niu.IdentityInterface(fields=["in_files", "in_mask"]), name="inputnode"
    )
    outputnode = pe.Node(
        niu.IdentityInterface(
            fields=[
                "out_file",
                "out_mask",
                "bias_corrected",
                "bias_image",
                "out_segm",
                "out_tpms",
            ]
        ),
        name="outputnode",
    )

    # Create brain mask (input is assumed skull-stripped, so nonzero == brain)
    thr_brainmask = pe.Node(Binarize(thresh_low=2), name="binarize")

    # INU correction
    inu_n4_final = pe.MapNode(
        N4BiasFieldCorrection(
            dimension=3,
            save_bias=True,
            copy_header=True,
            n_iterations=[50] * 5,
            convergence_threshold=1e-7,
            shrink_factor=4,
            bspline_fitting_distance=200,
        ),
        n_procs=omp_nthreads,
        name="inu_n4_final",
        iterfield=["input_image"],
    )

    # Check ANTs version (setting the trait raises ValueError on releases
    # that predate the --rescale-intensities flag)
    try:
        inu_n4_final.inputs.rescale_intensities = True
    except ValueError:
        warn(
            "N4BiasFieldCorrection's --rescale-intensities option was added in ANTS 2.1.0 "
            f"({inu_n4_final.interface.version} found.) Please consider upgrading.",
            UserWarning,
        )

    # fmt: off
    wf.connect([
        (inputnode, inu_n4_final, [("in_files", "input_image")]),
        (inputnode, thr_brainmask, [(("in_files", _pop), "in_file")]),
        (thr_brainmask, outputnode, [("out_mask", "out_mask")]),
        (inu_n4_final, outputnode, [("output_image", "out_file"),
                                    ("output_image", "bias_corrected"),
                                    ("bias_image", "bias_image")]),
    ])
    # fmt: on

    # If atropos refine, do in4 twice
    if atropos_refine:
        atropos_model = atropos_model or list(ATROPOS_MODELS[bids_suffix].values())
        atropos_wf = init_atropos_wf(
            use_random_seed=atropos_use_random_seed,
            omp_nthreads=omp_nthreads,
            mem_gb=mem_gb,
            in_segmentation_model=atropos_model,
        )
        # Re-route the INU outputs through the ATROPOS sub-workflow
        # fmt: off
        wf.disconnect([
            (inu_n4_final, outputnode, [("output_image", "out_file"),
                                        ("output_image", "bias_corrected"),
                                        ("bias_image", "bias_image")]),
        ])
        wf.connect([
            (inputnode, atropos_wf, [("in_files", "inputnode.in_files")]),
            (inu_n4_final, atropos_wf, [("output_image", "inputnode.in_corrected")]),
            (thr_brainmask, atropos_wf, [("out_mask", "inputnode.in_mask")]),
            (atropos_wf, outputnode, [
                ("outputnode.out_file", "out_file"),
                ("outputnode.bias_corrected", "bias_corrected"),
                ("outputnode.bias_image", "bias_image"),
                ("outputnode.out_segm", "out_segm"),
                ("outputnode.out_tpms", "out_tpms"),
            ]),
        ])
        # fmt: on
    return wf
def _select_labels(in_segm, labels):
    """Write one binary mask per requested label of *in_segm*; return the file paths."""
    from os import getcwd

    import numpy as np
    import nibabel as nb
    from nipype.utils.filemanip import fname_presuffix

    workdir = getcwd()
    segm_img = nb.load(in_segm)
    segm_data = np.asanyarray(segm_img.dataobj).astype("uint8")

    outputs = []
    for lbl in labels:
        # Binarize the segmentation at this label, preserving the input geometry
        mask_img = segm_img.__class__(
            np.uint8(segm_data == lbl), segm_img.affine, segm_img.header
        )
        mask_img.set_data_dtype("uint8")
        fname = fname_presuffix(in_segm, suffix="_class-%02d" % lbl, newpath=workdir)
        mask_img.to_filename(fname)
        outputs.append(fname)
    return outputs
def _conform_mask(in_mask, in_reference):
    """Ensures the mask headers make sense and match those of the T1w"""
    from pathlib import Path

    import numpy as np
    import nibabel as nb
    from nipype.utils.filemanip import fname_presuffix

    ref_img = nb.load(in_reference)
    mask_img = nb.load(in_mask)

    # Start from the mask's own header, force int16 with no scaling, then
    # copy the reference's spatial codes so both images agree in world space.
    hdr = mask_img.header.copy()
    hdr.set_data_dtype("int16")
    hdr.set_slope_inter(1, 0)

    qform, qcode = ref_img.header.get_qform(coded=True)
    if qcode is not None:
        hdr.set_qform(qform, int(qcode))

    sform, scode = ref_img.header.get_sform(coded=True)
    if scode is not None:
        hdr.set_sform(sform, int(scode))

    if "_maths" in in_mask:  # Cut the name at first _maths occurrence
        ext = "".join(Path(in_mask).suffixes)
        in_mask = Path(in_mask).name.split("_maths")[0] + ext

    out_file = fname_presuffix(in_mask, suffix="_mask", newpath=str(Path()))
    mask_img.__class__(
        np.asanyarray(mask_img.dataobj).astype("int16"), ref_img.affine, hdr
    ).to_filename(out_file)
    return out_file
def _matchlen(value, reference):
return [value] * len(reference)
def _imsum(op1, op2, out_file=None):
    """Add two images voxelwise, rescale to a maximum of 1, and save the result."""
    import nibabel as nb

    base = nb.load(op1)
    summed = base.get_fdata(dtype="float32") + nb.load(op2).get_fdata(dtype="float32")
    summed /= summed.max()

    result = nb.Nifti1Image(summed, base.affine, base.header)
    if out_file is None:
        from pathlib import Path

        # Default destination: "summap.nii.gz" in the current working directory
        out_file = str((Path() / "summap.nii.gz").absolute())
    result.to_filename(out_file)
    return out_file
def _improd(op1, op2, in_mask, out_file=None):
    """Multiply two images, clip/normalize within a mask, and save the average map."""
    import nibabel as nb

    base = nb.load(op1)
    prod = base.get_fdata(dtype="float32") * nb.load(op2).get_fdata(dtype="float32")
    brainmask = nb.load(in_mask).get_fdata() > 0

    # Zero out voxels outside the mask and any negative products, then rescale
    prod[~brainmask] = 0
    prod[prod < 0] = 0
    prod /= prod.max()
    # Blend 50/50 with the binary mask so masked voxels keep a nonzero floor
    prod = 0.5 * (prod + brainmask)

    result = nb.Nifti1Image(prod, base.affine, base.header)
    if out_file is None:
        from pathlib import Path

        out_file = str((Path() / "prodmap.nii.gz").absolute())
    result.to_filename(out_file)
    return out_file
| [
"numpy.uint8",
"nipype.algorithms.metrics.FuzzyOverlap",
"nibabel.load",
"multiprocessing.cpu_count",
"numpy.asanyarray",
"nipype.interfaces.utility.IdentityInterface",
"nipype.pipeline.engine.Workflow",
"nipype.interfaces.utility.Select",
"pathlib.Path",
"nipype.interfaces.ants.N4BiasFieldCorrect... | [((939, 1003), 'collections.OrderedDict', 'OrderedDict', (["[('nclasses', 3), ('csf', 1), ('gm', 2), ('wm', 3)]"], {}), "([('nclasses', 3), ('csf', 1), ('gm', 2), ('wm', 3)])\n", (950, 1003), False, 'from collections import OrderedDict\n'), ((1016, 1080), 'collections.OrderedDict', 'OrderedDict', (["[('nclasses', 3), ('csf', 3), ('gm', 2), ('wm', 1)]"], {}), "([('nclasses', 3), ('csf', 3), ('gm', 2), ('wm', 1)])\n", (1027, 1080), False, 'from collections import OrderedDict\n'), ((1095, 1159), 'collections.OrderedDict', 'OrderedDict', (["[('nclasses', 3), ('csf', 1), ('gm', 3), ('wm', 2)]"], {}), "([('nclasses', 3), ('csf', 1), ('gm', 3), ('wm', 2)])\n", (1106, 1159), False, 'from collections import OrderedDict\n'), ((6643, 6660), 'nipype.pipeline.engine.Workflow', 'pe.Workflow', (['name'], {}), '(name)\n', (6654, 6660), True, 'from nipype.pipeline import engine as pe\n'), ((7438, 7531), 'templateflow.api.get', 'get_template', (['in_template'], {'desc': '"""BrainCerebellumExtraction"""', 'suffix': '"""mask"""'}), "(in_template, desc='BrainCerebellumExtraction', suffix='mask',\n **common_spec)\n", (7450, 7531), True, 'from templateflow.api import get as get_template\n'), ((20699, 20716), 'nipype.pipeline.engine.Workflow', 'pe.Workflow', (['name'], {}), '(name)\n', (20710, 20716), True, 'from nipype.pipeline import engine as pe\n'), ((35494, 35511), 'nipype.pipeline.engine.Workflow', 'pe.Workflow', (['name'], {}), '(name)\n', (35505, 35511), True, 'from nipype.pipeline import engine as pe\n'), ((38730, 38738), 'os.getcwd', 'getcwd', ([], {}), '()\n', (38736, 38738), False, 'from os import getcwd\n'), ((38749, 38765), 'nibabel.load', 'nb.load', (['in_segm'], {}), '(in_segm)\n', (38756, 38765), True, 'import nibabel as nb\n'), ((39416, 39437), 'nibabel.load', 'nb.load', (['in_reference'], {}), '(in_reference)\n', (39423, 39437), True, 'import nibabel as nb\n'), ((39448, 39464), 'nibabel.load', 'nb.load', (['in_mask'], {}), 
'(in_mask)\n', (39455, 39464), True, 'import nibabel as nb\n'), ((40362, 40374), 'nibabel.load', 'nb.load', (['op1'], {}), '(op1)\n', (40369, 40374), True, 'import nibabel as nb\n'), ((40493, 40537), 'nibabel.Nifti1Image', 'nb.Nifti1Image', (['data', 'im1.affine', 'im1.header'], {}), '(data, im1.affine, im1.header)\n', (40507, 40537), True, 'import nibabel as nb\n'), ((40796, 40808), 'nibabel.load', 'nb.load', (['op1'], {}), '(op1)\n', (40803, 40808), True, 'import nibabel as nb\n'), ((41054, 41098), 'nibabel.Nifti1Image', 'nb.Nifti1Image', (['data', 'im1.affine', 'im1.header'], {}), '(data, im1.affine, im1.header)\n', (41068, 41098), True, 'import nibabel as nb\n'), ((7000, 7073), 'templateflow.api.get', 'get_template', (['in_template'], {'label': '"""brain"""', 'suffix': '"""probseg"""'}), "(in_template, label='brain', suffix='probseg', **common_spec)\n", (7012, 7073), True, 'from templateflow.api import get as get_template\n'), ((7091, 7160), 'templateflow.api.get', 'get_template', (['in_template'], {'desc': '"""brain"""', 'suffix': '"""mask"""'}), "(in_template, desc='brain', suffix='mask', **common_spec)\n", (7103, 7160), True, 'from templateflow.api import get as get_template\n'), ((7234, 7245), 'multiprocessing.cpu_count', 'cpu_count', ([], {}), '()\n', (7243, 7245), False, 'from multiprocessing import cpu_count\n'), ((7280, 7333), 'nipype.interfaces.utility.IdentityInterface', 'niu.IdentityInterface', ([], {'fields': "['in_files', 'in_mask']"}), "(fields=['in_files', 'in_mask'])\n", (7301, 7333), True, 'from nipype.interfaces import utility as niu\n'), ((7659, 7773), 'nipype.interfaces.utility.IdentityInterface', 'niu.IdentityInterface', ([], {'fields': "['out_file', 'out_mask', 'bias_corrected', 'bias_image', 'out_segm', 'out_tpms'\n ]"}), "(fields=['out_file', 'out_mask', 'bias_corrected',\n 'bias_image', 'out_segm', 'out_tpms'])\n", (7680, 7773), True, 'from nipype.interfaces import utility as niu\n'), ((7970, 8059), 'nipype.interfaces.ants.ImageMath', 
'ImageMath', ([], {'operation': '"""TruncateImageIntensity"""', 'op2': '"""0.01 0.999 256"""', 'copy_header': '(True)'}), "(operation='TruncateImageIntensity', op2='0.01 0.999 256',\n copy_header=True)\n", (7979, 8059), False, 'from nipype.interfaces.ants import AI, Atropos, ImageMath, MultiplyImages, N4BiasFieldCorrection, ThresholdImage\n'), ((8177, 8374), 'nipype.interfaces.ants.N4BiasFieldCorrection', 'N4BiasFieldCorrection', ([], {'dimension': '(3)', 'save_bias': '(False)', 'copy_header': '(True)', 'n_iterations': '([50] * 4)', 'convergence_threshold': '(1e-07)', 'shrink_factor': '(4)', 'bspline_fitting_distance': 'bspline_fitting_distance'}), '(dimension=3, save_bias=False, copy_header=True,\n n_iterations=[50] * 4, convergence_threshold=1e-07, shrink_factor=4,\n bspline_fitting_distance=bspline_fitting_distance)\n', (8198, 8374), False, 'from nipype.interfaces.ants import AI, Atropos, ImageMath, MultiplyImages, N4BiasFieldCorrection, ThresholdImage\n'), ((8812, 8875), 'nipype.interfaces.ants.ImageMath', 'ImageMath', ([], {'operation': '"""Laplacian"""', 'op2': '"""1.5 1"""', 'copy_header': '(True)'}), "(operation='Laplacian', op2='1.5 1', copy_header=True)\n", (8821, 8875), False, 'from nipype.interfaces.ants import AI, Atropos, ImageMath, MultiplyImages, N4BiasFieldCorrection, ThresholdImage\n'), ((8975, 9038), 'nipype.interfaces.ants.ImageMath', 'ImageMath', ([], {'operation': '"""Laplacian"""', 'op2': '"""1.5 1"""', 'copy_header': '(True)'}), "(operation='Laplacian', op2='1.5 1', copy_header=True)\n", (8984, 9038), False, 'from nipype.interfaces.ants import AI, Atropos, ImageMath, MultiplyImages, N4BiasFieldCorrection, ThresholdImage\n'), ((9096, 9108), 'nipype.interfaces.utility.Merge', 'niu.Merge', (['(2)'], {}), '(2)\n', (9105, 9108), True, 'from nipype.interfaces import utility as niu\n'), ((9194, 9206), 'nipype.interfaces.utility.Merge', 'niu.Merge', (['(2)'], {}), '(2)\n', (9203, 9206), True, 'from nipype.interfaces import utility as niu\n'), 
((9300, 9468), 'nipype.interfaces.ants.AI', 'AI', ([], {'metric': "('Mattes', 32, 'Regular', 0.25)", 'transform': "('Affine', 0.1)", 'search_factor': '(15, 0.1)', 'principal_axes': '(False)', 'convergence': '(10, 1e-06, 10)', 'verbose': '(True)'}), "(metric=('Mattes', 32, 'Regular', 0.25), transform=('Affine', 0.1),\n search_factor=(15, 0.1), principal_axes=False, convergence=(10, 1e-06, \n 10), verbose=True)\n", (9302, 9468), False, 'from nipype.interfaces.ants import AI, Atropos, ImageMath, MultiplyImages, N4BiasFieldCorrection, ThresholdImage\n'), ((10721, 10828), 'nipype.interfaces.ants.ThresholdImage', 'ThresholdImage', ([], {'dimension': '(3)', 'th_low': '(0.5)', 'th_high': '(1.0)', 'inside_value': '(1)', 'outside_value': '(0)', 'copy_header': '(True)'}), '(dimension=3, th_low=0.5, th_high=1.0, inside_value=1,\n outside_value=0, copy_header=True)\n', (10735, 10828), False, 'from nipype.interfaces.ants import AI, Atropos, ImageMath, MultiplyImages, N4BiasFieldCorrection, ThresholdImage\n'), ((11013, 11209), 'nipype.interfaces.ants.N4BiasFieldCorrection', 'N4BiasFieldCorrection', ([], {'dimension': '(3)', 'save_bias': '(True)', 'copy_header': '(True)', 'n_iterations': '([50] * 5)', 'convergence_threshold': '(1e-07)', 'shrink_factor': '(4)', 'bspline_fitting_distance': 'bspline_fitting_distance'}), '(dimension=3, save_bias=True, copy_header=True,\n n_iterations=[50] * 5, convergence_threshold=1e-07, shrink_factor=4,\n bspline_fitting_distance=bspline_fitting_distance)\n', (11034, 11209), False, 'from nipype.interfaces.ants import AI, Atropos, ImageMath, MultiplyImages, N4BiasFieldCorrection, ThresholdImage\n'), ((13257, 13327), 'templateflow.api.get', 'get_template', (['in_template'], {'label': '"""WM"""', 'suffix': '"""probseg"""'}), "(in_template, label='WM', suffix='probseg', **common_spec)\n", (13269, 13327), True, 'from templateflow.api import get as get_template\n'), ((20838, 20923), 'nipype.interfaces.utility.IdentityInterface', 'niu.IdentityInterface', 
([], {'fields': "['in_files', 'in_corrected', 'in_mask', 'wm_prior']"}), "(fields=['in_files', 'in_corrected', 'in_mask',\n 'wm_prior'])\n", (20859, 20923), True, 'from nipype.interfaces import utility as niu\n'), ((21009, 21064), 'nipype.interfaces.utility.IdentityInterface', 'niu.IdentityInterface', ([], {'fields': "(['out_file'] + out_fields)"}), "(fields=['out_file'] + out_fields)\n", (21030, 21064), True, 'from nipype.interfaces import utility as niu\n'), ((21285, 21337), 'nipype.interfaces.ants.ImageMath', 'ImageMath', ([], {'operation': '"""MD"""', 'op2': '"""2"""', 'copy_header': '(True)'}), "(operation='MD', op2='2', copy_header=True)\n", (21294, 21337), False, 'from nipype.interfaces.ants import AI, Atropos, ImageMath, MultiplyImages, N4BiasFieldCorrection, ThresholdImage\n'), ((21441, 21501), 'nipype.interfaces.ants.ImageMath', 'ImageMath', ([], {'operation': '"""GetLargestComponent"""', 'copy_header': '(True)'}), "(operation='GetLargestComponent', copy_header=True)\n", (21450, 21501), False, 'from nipype.interfaces.ants import AI, Atropos, ImageMath, MultiplyImages, N4BiasFieldCorrection, ThresholdImage\n'), ((21601, 21885), 'nipype.interfaces.ants.Atropos', 'Atropos', ([], {'convergence_threshold': '(0.0)', 'dimension': '(3)', 'initialization': '"""KMeans"""', 'likelihood_model': '"""Gaussian"""', 'mrf_radius': '[1, 1, 1]', 'mrf_smoothing_factor': '(0.1)', 'n_iterations': '(3)', 'number_of_tissue_classes': 'in_segmentation_model[0]', 'save_posteriors': '(True)', 'use_random_seed': 'use_random_seed'}), "(convergence_threshold=0.0, dimension=3, initialization='KMeans',\n likelihood_model='Gaussian', mrf_radius=[1, 1, 1], mrf_smoothing_factor\n =0.1, n_iterations=3, number_of_tissue_classes=in_segmentation_model[0],\n save_posteriors=True, use_random_seed=use_random_seed)\n", (21608, 21885), False, 'from nipype.interfaces.ants import AI, Atropos, ImageMath, MultiplyImages, N4BiasFieldCorrection, ThresholdImage\n'), ((22146, 22214), 
'nipype.interfaces.ants.ImageMath', 'ImageMath', ([], {'operation': '"""PadImage"""', 'op2': 'f"""{padding}"""', 'copy_header': '(False)'}), "(operation='PadImage', op2=f'{padding}', copy_header=False)\n", (22155, 22214), False, 'from nipype.interfaces.ants import AI, Atropos, ImageMath, MultiplyImages, N4BiasFieldCorrection, ThresholdImage\n'), ((22282, 22350), 'nipype.interfaces.ants.ImageMath', 'ImageMath', ([], {'operation': '"""PadImage"""', 'op2': 'f"""{padding}"""', 'copy_header': '(False)'}), "(operation='PadImage', op2=f'{padding}', copy_header=False)\n", (22291, 22350), False, 'from nipype.interfaces.ants import AI, Atropos, ImageMath, MultiplyImages, N4BiasFieldCorrection, ThresholdImage\n'), ((22462, 22549), 'nipype.interfaces.utility.Function', 'niu.Function', ([], {'function': '_select_labels', 'output_names': "['out_wm', 'out_gm', 'out_csf']"}), "(function=_select_labels, output_names=['out_wm', 'out_gm',\n 'out_csf'])\n", (22474, 22549), True, 'from nipype.interfaces import utility as niu\n'), ((22824, 22866), 'nipype.interfaces.ants.ImageMath', 'ImageMath', ([], {'operation': '"""GetLargestComponent"""'}), "(operation='GetLargestComponent')\n", (22833, 22866), False, 'from nipype.interfaces.ants import AI, Atropos, ImageMath, MultiplyImages, N4BiasFieldCorrection, ThresholdImage\n'), ((22907, 22949), 'nipype.interfaces.ants.ImageMath', 'ImageMath', ([], {'operation': '"""GetLargestComponent"""'}), "(operation='GetLargestComponent')\n", (22916, 22949), False, 'from nipype.interfaces.ants import AI, Atropos, ImageMath, MultiplyImages, N4BiasFieldCorrection, ThresholdImage\n'), ((23198, 23239), 'nipype.interfaces.ants.ImageMath', 'ImageMath', ([], {'operation': '"""FillHoles"""', 'op2': '"""2"""'}), "(operation='FillHoles', op2='2')\n", (23207, 23239), False, 'from nipype.interfaces.ants import AI, Atropos, ImageMath, MultiplyImages, N4BiasFieldCorrection, ThresholdImage\n'), ((23291, 23360), 'nipype.interfaces.ants.MultiplyImages', 'MultiplyImages', 
([], {'dimension': '(3)', 'output_product_image': '"""08_mult_gm.nii.gz"""'}), "(dimension=3, output_product_image='08_mult_gm.nii.gz')\n", (23305, 23360), False, 'from nipype.interfaces.ants import AI, Atropos, ImageMath, MultiplyImages, N4BiasFieldCorrection, ThresholdImage\n'), ((23595, 23711), 'nipype.interfaces.ants.MultiplyImages', 'MultiplyImages', ([], {'dimension': '(3)', 'second_input': 'in_segmentation_model[-1]', 'output_product_image': '"""09_relabel_wm.nii.gz"""'}), "(dimension=3, second_input=in_segmentation_model[-1],\n output_product_image='09_relabel_wm.nii.gz')\n", (23609, 23711), False, 'from nipype.interfaces.ants import AI, Atropos, ImageMath, MultiplyImages, N4BiasFieldCorrection, ThresholdImage\n'), ((23813, 23848), 'nipype.interfaces.ants.ImageMath', 'ImageMath', ([], {'operation': '"""ME"""', 'op2': '"""10"""'}), "(operation='ME', op2='10')\n", (23822, 23848), False, 'from nipype.interfaces.ants import AI, Atropos, ImageMath, MultiplyImages, N4BiasFieldCorrection, ThresholdImage\n'), ((24175, 24207), 'nipype.interfaces.ants.ImageMath', 'ImageMath', ([], {'operation': '"""addtozero"""'}), "(operation='addtozero')\n", (24184, 24207), False, 'from nipype.interfaces.ants import AI, Atropos, ImageMath, MultiplyImages, N4BiasFieldCorrection, ThresholdImage\n'), ((24261, 24377), 'nipype.interfaces.ants.MultiplyImages', 'MultiplyImages', ([], {'dimension': '(3)', 'second_input': 'in_segmentation_model[-2]', 'output_product_image': '"""12_relabel_gm.nii.gz"""'}), "(dimension=3, second_input=in_segmentation_model[-2],\n output_product_image='12_relabel_gm.nii.gz')\n", (24275, 24377), False, 'from nipype.interfaces.ants import AI, Atropos, ImageMath, MultiplyImages, N4BiasFieldCorrection, ThresholdImage\n'), ((24482, 24514), 'nipype.interfaces.ants.ImageMath', 'ImageMath', ([], {'operation': '"""addtozero"""'}), "(operation='addtozero')\n", (24491, 24514), False, 'from nipype.interfaces.ants import AI, Atropos, ImageMath, MultiplyImages, 
N4BiasFieldCorrection, ThresholdImage\n'), ((24632, 24704), 'nipype.interfaces.utility.Function', 'niu.Function', ([], {'function': '_select_labels', 'output_names': "['out_gm', 'out_wm']"}), "(function=_select_labels, output_names=['out_gm', 'out_wm'])\n", (24644, 24704), True, 'from nipype.interfaces import utility as niu\n'), ((24917, 24949), 'nipype.interfaces.ants.ImageMath', 'ImageMath', ([], {'operation': '"""addtozero"""'}), "(operation='addtozero')\n", (24926, 24949), False, 'from nipype.interfaces.ants import AI, Atropos, ImageMath, MultiplyImages, N4BiasFieldCorrection, ThresholdImage\n'), ((25059, 25093), 'nipype.interfaces.ants.ImageMath', 'ImageMath', ([], {'operation': '"""ME"""', 'op2': '"""2"""'}), "(operation='ME', op2='2')\n", (25068, 25093), False, 'from nipype.interfaces.ants import AI, Atropos, ImageMath, MultiplyImages, N4BiasFieldCorrection, ThresholdImage\n'), ((25219, 25261), 'nipype.interfaces.ants.ImageMath', 'ImageMath', ([], {'operation': '"""GetLargestComponent"""'}), "(operation='GetLargestComponent')\n", (25228, 25261), False, 'from nipype.interfaces.ants import AI, Atropos, ImageMath, MultiplyImages, N4BiasFieldCorrection, ThresholdImage\n'), ((25372, 25406), 'nipype.interfaces.ants.ImageMath', 'ImageMath', ([], {'operation': '"""MD"""', 'op2': '"""4"""'}), "(operation='MD', op2='4')\n", (25381, 25406), False, 'from nipype.interfaces.ants import AI, Atropos, ImageMath, MultiplyImages, N4BiasFieldCorrection, ThresholdImage\n'), ((25524, 25565), 'nipype.interfaces.ants.ImageMath', 'ImageMath', ([], {'operation': '"""FillHoles"""', 'op2': '"""2"""'}), "(operation='FillHoles', op2='2')\n", (25533, 25565), False, 'from nipype.interfaces.ants import AI, Atropos, ImageMath, MultiplyImages, N4BiasFieldCorrection, ThresholdImage\n'), ((25724, 25756), 'nipype.interfaces.ants.ImageMath', 'ImageMath', ([], {'operation': '"""addtozero"""'}), "(operation='addtozero')\n", (25733, 25756), False, 'from nipype.interfaces.ants import AI, Atropos, 
ImageMath, MultiplyImages, N4BiasFieldCorrection, ThresholdImage\n'), ((25870, 25904), 'nipype.interfaces.ants.ImageMath', 'ImageMath', ([], {'operation': '"""MD"""', 'op2': '"""5"""'}), "(operation='MD', op2='5')\n", (25879, 25904), False, 'from nipype.interfaces.ants import AI, Atropos, ImageMath, MultiplyImages, N4BiasFieldCorrection, ThresholdImage\n'), ((26017, 26051), 'nipype.interfaces.ants.ImageMath', 'ImageMath', ([], {'operation': '"""ME"""', 'op2': '"""5"""'}), "(operation='ME', op2='5')\n", (26026, 26051), False, 'from nipype.interfaces.ants import AI, Atropos, ImageMath, MultiplyImages, N4BiasFieldCorrection, ThresholdImage\n'), ((26119, 26171), 'nipype.interfaces.ants.ImageMath', 'ImageMath', ([], {'operation': '"""PadImage"""', 'op2': "('-%d' % padding)"}), "(operation='PadImage', op2='-%d' % padding)\n", (26128, 26171), False, 'from nipype.interfaces.ants import AI, Atropos, ImageMath, MultiplyImages, N4BiasFieldCorrection, ThresholdImage\n'), ((26234, 26286), 'nipype.interfaces.ants.ImageMath', 'ImageMath', ([], {'operation': '"""PadImage"""', 'op2': "('-%d' % padding)"}), "(operation='PadImage', op2='-%d' % padding)\n", (26243, 26286), False, 'from nipype.interfaces.ants import AI, Atropos, ImageMath, MultiplyImages, N4BiasFieldCorrection, ThresholdImage\n'), ((26347, 26399), 'nipype.interfaces.ants.ImageMath', 'ImageMath', ([], {'operation': '"""PadImage"""', 'op2': "('-%d' % padding)"}), "(operation='PadImage', op2='-%d' % padding)\n", (26356, 26399), False, 'from nipype.interfaces.ants import AI, Atropos, ImageMath, MultiplyImages, N4BiasFieldCorrection, ThresholdImage\n'), ((26458, 26510), 'nipype.interfaces.ants.ImageMath', 'ImageMath', ([], {'operation': '"""PadImage"""', 'op2': "('-%d' % padding)"}), "(operation='PadImage', op2='-%d' % padding)\n", (26467, 26510), False, 'from nipype.interfaces.ants import AI, Atropos, ImageMath, MultiplyImages, N4BiasFieldCorrection, ThresholdImage\n'), ((26570, 26622), 'nipype.interfaces.ants.ImageMath', 
'ImageMath', ([], {'operation': '"""PadImage"""', 'op2': "('-%d' % padding)"}), "(operation='PadImage', op2='-%d' % padding)\n", (26579, 26622), False, 'from nipype.interfaces.ants import AI, Atropos, ImageMath, MultiplyImages, N4BiasFieldCorrection, ThresholdImage\n'), ((26677, 26713), 'nipype.interfaces.utility.Function', 'niu.Function', ([], {'function': '_conform_mask'}), '(function=_conform_mask)\n', (26689, 26713), True, 'from nipype.interfaces import utility as niu\n'), ((26760, 26795), 'nipype.interfaces.utility.Merge', 'niu.Merge', (['in_segmentation_model[0]'], {}), '(in_segmentation_model[0])\n', (26769, 26795), True, 'from nipype.interfaces import utility as niu\n'), ((26838, 26850), 'nipype.interfaces.utility.Select', 'niu.Select', ([], {}), '()\n', (26848, 26850), True, 'from nipype.interfaces import utility as niu\n'), ((27169, 27365), 'nipype.interfaces.ants.N4BiasFieldCorrection', 'N4BiasFieldCorrection', ([], {'dimension': '(3)', 'save_bias': '(True)', 'copy_header': '(True)', 'n_iterations': '([50] * 5)', 'convergence_threshold': '(1e-07)', 'shrink_factor': '(4)', 'bspline_fitting_distance': 'bspline_fitting_distance'}), '(dimension=3, save_bias=True, copy_header=True,\n n_iterations=[50] * 5, convergence_threshold=1e-07, shrink_factor=4,\n bspline_fitting_distance=bspline_fitting_distance)\n', (27190, 27365), False, 'from nipype.interfaces.ants import AI, Atropos, ImageMath, MultiplyImages, N4BiasFieldCorrection, ThresholdImage\n'), ((35546, 35599), 'nipype.interfaces.utility.IdentityInterface', 'niu.IdentityInterface', ([], {'fields': "['in_files', 'in_mask']"}), "(fields=['in_files', 'in_mask'])\n", (35567, 35599), True, 'from nipype.interfaces import utility as niu\n'), ((35659, 35773), 'nipype.interfaces.utility.IdentityInterface', 'niu.IdentityInterface', ([], {'fields': "['out_file', 'out_mask', 'bias_corrected', 'bias_image', 'out_segm', 'out_tpms'\n ]"}), "(fields=['out_file', 'out_mask', 'bias_corrected',\n 'bias_image', 'out_segm', 
'out_tpms'])\n", (35680, 35773), True, 'from nipype.interfaces import utility as niu\n'), ((36092, 36267), 'nipype.interfaces.ants.N4BiasFieldCorrection', 'N4BiasFieldCorrection', ([], {'dimension': '(3)', 'save_bias': '(True)', 'copy_header': '(True)', 'n_iterations': '([50] * 5)', 'convergence_threshold': '(1e-07)', 'shrink_factor': '(4)', 'bspline_fitting_distance': '(200)'}), '(dimension=3, save_bias=True, copy_header=True,\n n_iterations=[50] * 5, convergence_threshold=1e-07, shrink_factor=4,\n bspline_fitting_distance=200)\n', (36113, 36267), False, 'from nipype.interfaces.ants import AI, Atropos, ImageMath, MultiplyImages, N4BiasFieldCorrection, ThresholdImage\n'), ((38995, 39062), 'nipype.utils.filemanip.fname_presuffix', 'fname_presuffix', (['in_segm'], {'suffix': "('_class-%02d' % label)", 'newpath': 'cwd'}), "(in_segm, suffix='_class-%02d' % label, newpath=cwd)\n", (39010, 39062), False, 'from nipype.utils.filemanip import fname_presuffix\n'), ((9750, 9860), 'warnings.warn', 'warn', (['f"""antsAI\'s option --search-grid was added in ANTS 2.3.0 ({init_aff.interface.version} found.)"""'], {}), '(\n f"antsAI\'s option --search-grid was added in ANTS 2.3.0 ({init_aff.interface.version} found.)"\n )\n', (9754, 9860), False, 'from warnings import warn\n'), ((10402, 10438), 'packaging.version.parse', 'parseversion', (['norm.interface.version'], {}), '(norm.interface.version)\n', (10414, 10438), True, 'from packaging.version import parse as parseversion, Version\n'), ((10442, 10458), 'packaging.version.Version', 'Version', (['"""2.2.0"""'], {}), "('2.2.0')\n", (10449, 10458), False, 'from packaging.version import parse as parseversion, Version\n'), ((11492, 11669), 'warnings.warn', 'warn', (['f"""N4BiasFieldCorrection\'s --rescale-intensities option was added in ANTS 2.1.0 ({inu_n4_final.interface.version} found.) 
Please consider upgrading."""', 'UserWarning'], {}), '(\n f"N4BiasFieldCorrection\'s --rescale-intensities option was added in ANTS 2.1.0 ({inu_n4_final.interface.version} found.) Please consider upgrading."\n , UserWarning)\n', (11496, 11669), False, 'from warnings import warn\n'), ((13561, 13631), 'templateflow.api.get', 'get_template', (['in_template'], {'label': '"""BS"""', 'suffix': '"""probseg"""'}), "(in_template, label='BS', suffix='probseg', **common_spec)\n", (13573, 13631), True, 'from templateflow.api import get as get_template\n'), ((14646, 14709), 'nipype.interfaces.ants.ImageMath', 'ImageMath', ([], {'operation': '"""Laplacian"""', 'op2': '"""1.5 1"""', 'copy_header': '(True)'}), "(operation='Laplacian', op2='1.5 1', copy_header=True)\n", (14655, 14709), False, 'from nipype.interfaces.ants import AI, Atropos, ImageMath, MultiplyImages, N4BiasFieldCorrection, ThresholdImage\n'), ((14838, 14901), 'nipype.interfaces.ants.ImageMath', 'ImageMath', ([], {'operation': '"""Laplacian"""', 'op2': '"""1.5 1"""', 'copy_header': '(True)'}), "(operation='Laplacian', op2='1.5 1', copy_header=True)\n", (14847, 14901), False, 'from nipype.interfaces.ants import AI, Atropos, ImageMath, MultiplyImages, N4BiasFieldCorrection, ThresholdImage\n'), ((14971, 14983), 'nipype.interfaces.utility.Merge', 'niu.Merge', (['(2)'], {}), '(2)\n', (14980, 14983), True, 'from nipype.interfaces import utility as niu\n'), ((15077, 15089), 'nipype.interfaces.utility.Merge', 'niu.Merge', (['(2)'], {}), '(2)\n', (15086, 15089), True, 'from nipype.interfaces import utility as niu\n'), ((27649, 27826), 'warnings.warn', 'warn', (['f"""N4BiasFieldCorrection\'s --rescale-intensities option was added in ANTS 2.1.0 ({inu_n4_final.interface.version} found.) Please consider upgrading."""', 'UserWarning'], {}), '(\n f"N4BiasFieldCorrection\'s --rescale-intensities option was added in ANTS 2.1.0 ({inu_n4_final.interface.version} found.) 
Please consider upgrading."\n , UserWarning)\n', (27653, 27826), False, 'from warnings import warn\n'), ((31661, 31679), 'numpy.argmax', 'np.argmax', (['in_dice'], {}), '(in_dice)\n', (31670, 31679), True, 'import numpy as np\n'), ((31721, 31753), 'nipype.interfaces.utility.Function', 'niu.Function', ([], {'function': '_matchlen'}), '(function=_matchlen)\n', (31733, 31753), True, 'from nipype.interfaces import utility as niu\n'), ((31861, 31875), 'nipype.algorithms.metrics.FuzzyOverlap', 'FuzzyOverlap', ([], {}), '()\n', (31873, 31875), False, 'from nipype.algorithms.metrics import FuzzyOverlap\n'), ((31956, 31986), 'nipype.interfaces.utility.Function', 'niu.Function', ([], {'function': '_improd'}), '(function=_improd)\n', (31968, 31986), True, 'from nipype.interfaces import utility as niu\n'), ((36576, 36753), 'warnings.warn', 'warn', (['f"""N4BiasFieldCorrection\'s --rescale-intensities option was added in ANTS 2.1.0 ({inu_n4_final.interface.version} found.) Please consider upgrading."""', 'UserWarning'], {}), '(\n f"N4BiasFieldCorrection\'s --rescale-intensities option was added in ANTS 2.1.0 ({inu_n4_final.interface.version} found.) 
Please consider upgrading."\n , UserWarning)\n', (36580, 36753), False, 'from warnings import warn\n'), ((38783, 38809), 'numpy.asanyarray', 'np.asanyarray', (['nii.dataobj'], {}), '(nii.dataobj)\n', (38796, 38809), True, 'import numpy as np\n'), ((38882, 38911), 'numpy.uint8', 'np.uint8', (['(label_data == label)'], {}), '(label_data == label)\n', (38890, 38911), True, 'import numpy as np\n'), ((39932, 39945), 'pathlib.Path', 'Path', (['in_mask'], {}), '(in_mask)\n', (39936, 39945), False, 'from pathlib import Path\n'), ((10133, 10199), 'pkg_resources.resource_filename', 'pkgr_fn', (['"""niworkflows.data"""', '(settings_file % normalization_quality)'], {}), "('niworkflows.data', settings_file % normalization_quality)\n", (10140, 10199), True, 'from pkg_resources import resource_filename as pkgr_fn\n'), ((13714, 13743), 'nipype.interfaces.utility.Function', 'niu.Function', ([], {'function': '_imsum'}), '(function=_imsum)\n', (13726, 13743), True, 'from nipype.interfaces import utility as niu\n'), ((39889, 39902), 'pathlib.Path', 'Path', (['in_mask'], {}), '(in_mask)\n', (39893, 39902), False, 'from pathlib import Path\n'), ((40072, 40078), 'pathlib.Path', 'Path', ([], {}), '()\n', (40076, 40078), False, 'from pathlib import Path\n'), ((40420, 40432), 'nibabel.load', 'nb.load', (['op2'], {}), '(op2)\n', (40427, 40432), True, 'import nibabel as nb\n'), ((40854, 40866), 'nibabel.load', 'nb.load', (['op2'], {}), '(op2)\n', (40861, 40866), True, 'import nibabel as nb\n'), ((40908, 40924), 'nibabel.load', 'nb.load', (['in_mask'], {}), '(in_mask)\n', (40915, 40924), True, 'import nibabel as nb\n'), ((40108, 40134), 'numpy.asanyarray', 'np.asanyarray', (['nii.dataobj'], {}), '(nii.dataobj)\n', (40121, 40134), True, 'import numpy as np\n'), ((40622, 40628), 'pathlib.Path', 'Path', ([], {}), '()\n', (40626, 40628), False, 'from pathlib import Path\n'), ((41183, 41189), 'pathlib.Path', 'Path', ([], {}), '()\n', (41187, 41189), False, 'from pathlib import Path\n')] |
# -*- coding: utf-8 -*-
# https://github.com/openai/gym/wiki/CartPole-v0
# Must read - https://www.nervanasys.com/demystifying-deep-reinforcement-learning/
import tensorflow as tf
import gym
import numpy as np
# Hyperparameters
max_episodes = 10000  # number of demo episodes to replay with the restored network
# Network class definition
class DQN:
    """Minimal single-hidden-layer Q-network (TF1 graph mode)."""

    def __init__(self, session, input_size, output_size, name="main"):
        # Record the network configuration.
        self.session = session
        self.input_size = input_size
        self.output_size = output_size
        self.net_name = name
        self.hidden_size = 20
        # Build the TF graph immediately so the variables exist before restore.
        self.build_network()

    def build_network(self):
        """Construct X -> relu(X*W + b) -> Qpred under this network's scope.

        NOTE: the variables are created without explicit names, so the
        checkpoint matches them by creation order -- preserve that order
        when editing this method.
        """
        with tf.variable_scope(self.net_name):
            self.X = tf.placeholder(shape=[None, self.input_size], dtype=tf.float32)
            # Target placeholder; unused at play time but kept for graph parity.
            self.Y = tf.placeholder(shape=[None], dtype=tf.float32)
            w_hidden = tf.Variable(tf.truncated_normal(
                shape=[self.input_size, self.hidden_size], mean=0.0, stddev=1.0))
            b_hidden = tf.Variable(tf.zeros(shape=[self.hidden_size]))
            w_out = tf.Variable(tf.truncated_normal(
                shape=[self.hidden_size, self.output_size], mean=0.0, stddev=1.0))
            b_out = tf.Variable(tf.zeros(shape=[self.output_size]))
            hidden = tf.nn.relu(tf.matmul(self.X, w_hidden) + b_hidden)
            self.Qpred = tf.matmul(hidden, w_out) + b_out

    def predict(self, state):
        """Return the Q-value row vector for a single observation."""
        batch = np.reshape(state, newshape=[1, self.input_size])
        return self.session.run(self.Qpred, feed_dict={self.X: batch})
def bot_play(DQN, env):
    """
    Render one full episode, acting greedily on the trained network's Q-values.
    """
    state = env.reset()
    reward_sum = 0
    done = False
    while not done:
        env.render()
        # Greedy action selection from the predicted Q row vector.
        action = np.argmax(DQN.predict(state))
        state, reward, done, _ = env.step(action)
        reward_sum += reward
def restoreModel(session, path='./cartpole.ckpt'):
    """Load previously trained weights from *path* into *session*."""
    saver = tf.train.Saver()
    saver.restore(sess=session, save_path=path)
    print("Model restored successfully.")
if __name__ == "__main__":
env = gym.make('CartPole-v0')
input_size = env.observation_space.shape[0] # 4
output_size = env.action_space.n # 2
# 미니배치 - 꺼내서 사용할 리플레이 갯수
BATCH_SIZE = 32
with tf.Session() as sess:
# DQN 클래스의 mainDQN 인스턴스 생성
mainDQN = DQN(sess, input_size, output_size, name='main')
restoreModel(sess, "./cartpole.ckpt")
for episode in range(max_episodes):
bot_play(mainDQN, env)
env.reset()
env.close() | [
"numpy.reshape",
"tensorflow.variable_scope",
"tensorflow.placeholder",
"tensorflow.Session",
"tensorflow.train.Saver",
"tensorflow.matmul",
"tensorflow.zeros",
"gym.make",
"tensorflow.truncated_normal"
] | [((2041, 2064), 'gym.make', 'gym.make', (['"""CartPole-v0"""'], {}), "('CartPole-v0')\n", (2049, 2064), False, 'import gym\n'), ((1392, 1440), 'numpy.reshape', 'np.reshape', (['state'], {'newshape': '[1, self.input_size]'}), '(state, newshape=[1, self.input_size])\n', (1402, 1440), True, 'import numpy as np\n'), ((2237, 2249), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (2247, 2249), True, 'import tensorflow as tf\n'), ((616, 648), 'tensorflow.variable_scope', 'tf.variable_scope', (['self.net_name'], {}), '(self.net_name)\n', (633, 648), True, 'import tensorflow as tf\n'), ((732, 795), 'tensorflow.placeholder', 'tf.placeholder', ([], {'shape': '[None, self.input_size]', 'dtype': 'tf.float32'}), '(shape=[None, self.input_size], dtype=tf.float32)\n', (746, 795), True, 'import tensorflow as tf\n'), ((817, 863), 'tensorflow.placeholder', 'tf.placeholder', ([], {'shape': '[None]', 'dtype': 'tf.float32'}), '(shape=[None], dtype=tf.float32)\n', (831, 863), True, 'import tensorflow as tf\n'), ((1905, 1921), 'tensorflow.train.Saver', 'tf.train.Saver', ([], {}), '()\n', (1919, 1921), True, 'import tensorflow as tf\n'), ((894, 982), 'tensorflow.truncated_normal', 'tf.truncated_normal', ([], {'shape': '[self.input_size, self.hidden_size]', 'mean': '(0.0)', 'stddev': '(1.0)'}), '(shape=[self.input_size, self.hidden_size], mean=0.0,\n stddev=1.0)\n', (913, 982), True, 'import tensorflow as tf\n'), ((1009, 1043), 'tensorflow.zeros', 'tf.zeros', ([], {'shape': '[self.hidden_size]'}), '(shape=[self.hidden_size])\n', (1017, 1043), True, 'import tensorflow as tf\n'), ((1074, 1163), 'tensorflow.truncated_normal', 'tf.truncated_normal', ([], {'shape': '[self.hidden_size, self.output_size]', 'mean': '(0.0)', 'stddev': '(1.0)'}), '(shape=[self.hidden_size, self.output_size], mean=0.0,\n stddev=1.0)\n', (1093, 1163), True, 'import tensorflow as tf\n'), ((1190, 1224), 'tensorflow.zeros', 'tf.zeros', ([], {'shape': '[self.output_size]'}), '(shape=[self.output_size])\n', (1198, 
1224), True, 'import tensorflow as tf\n'), ((1309, 1326), 'tensorflow.matmul', 'tf.matmul', (['L1', 'W2'], {}), '(L1, W2)\n', (1318, 1326), True, 'import tensorflow as tf\n'), ((1255, 1276), 'tensorflow.matmul', 'tf.matmul', (['self.X', 'W1'], {}), '(self.X, W1)\n', (1264, 1276), True, 'import tensorflow as tf\n')] |
#!/usr/bin/env python
import sys
sys.path.append('.')
import os
import shutil
import tools_for_tests
import numpy as np
import pytest
import eval_pp
import analysis_driver
# Directory holding the static fixture files used by these integration tests;
# resolved relative to the cwd, so run pytest from the repository root.
main_test_inputs_dir = os.path.join(os.getcwd(), 'tests', 'test_inputs_integration')
def test_eval_pp_main_no_converge():
    """
    eval_pp.main must raise NoCutoffConvergence when the gcut scan cannot
    reach the requested total-energy tolerance.

    Fix: dropped the dead ``objectives =`` assignment -- the call is expected
    to raise, so the assignment could never complete and was never read.
    """
    test_inputs_dir = os.path.join(main_test_inputs_dir, 'eval_pp_main_test')
    with pytest.raises(eval_pp.NoCutoffConvergence):
        with tools_for_tests.TemporaryDirectory():
            # set up a mock work directory:
            shutil.copy(os.path.join('..', 'calc_nflops'), os.getcwd())
            shutil.copy(os.path.join(test_inputs_dir, 'configurations.in.example'), 'configurations.in')
            shutil.copy(os.path.join(test_inputs_dir, 'allelectron_forces.dat.example'), 'allelectron_forces.dat')
            os.mkdir('workdir.example')
            os.chdir('workdir.example')
            for fname in ('argvf.template', 'crystal.template', 'PAW.Si', 'PAW.Ge'):
                shutil.copy(os.path.join(test_inputs_dir, fname), fname)
            # run eval_pp with an impossible tolerance so it cannot converge
            gcuts = [20., 30., 40.]
            energy_tol = 1.e-100
            eval_pp.main(['Si', 'Ge'], gcuts, energy_tol)
def test_eval_pp_main():
    """
    This should converge at gcut=40 and then return objectives:
        accu = 0.12408939054384546
        work = 0.009064640532217023
    The "correct" accuracy objective could depend on the socorro build, and
    the work objective may depend on e.g. parallelization, hence the loose
    absolute tolerances below.
    """
    test_inputs_dir = os.path.join(main_test_inputs_dir, 'eval_pp_main_test')
    with tools_for_tests.TemporaryDirectory():
        # assemble a mock work directory
        shutil.copy(os.path.join('..', 'calc_nflops'), os.getcwd())
        shutil.copy(os.path.join(test_inputs_dir, 'configurations.in.example'), 'configurations.in')
        shutil.copy(os.path.join(test_inputs_dir, 'allelectron_forces.dat.example'), 'allelectron_forces.dat')
        os.mkdir('workdir.example')
        os.chdir('workdir.example')
        for fname in ('argvf.template', 'crystal.template', 'PAW.Si', 'PAW.Ge'):
            shutil.copy(os.path.join(test_inputs_dir, fname), fname)
        # run eval_pp with an achievable tolerance
        gcuts = [20., 30., 40., 50.]
        energy_tol = 3.e-3
        objectives = eval_pp.main(['Si', 'Ge'], gcuts, energy_tol)
        assert np.isclose(objectives['accu'], 0.12408939054384546, rtol=0., atol=0.0002)
        assert np.isclose(objectives['work'], 0.009064640532217023, rtol=0., atol=0.000001)
def test_analysis_driver_main_Si_noconverge():
    """
    The silicon inputs here are deliberately bad so atompaw fails to
    converge; the driver must then report the penalty value 100 for both
    objectives in the 'results' file.
    """
    test_inputs_dir = os.path.join(main_test_inputs_dir, 'analysis_driver_main_Si_noconverge')
    with tools_for_tests.TemporaryDirectory():
        # assemble a mock work directory
        shutil.copy(os.path.join('..', 'calc_nflops'), os.getcwd())
        shutil.copy(os.path.join(test_inputs_dir, 'opal.in'), 'opal.in')
        shutil.copy(os.path.join(test_inputs_dir, 'configurations.in.example'), 'configurations.in')
        shutil.copy(os.path.join(test_inputs_dir, 'allelectron_forces.dat.example'), 'allelectron_forces.dat')
        os.mkdir('workdir.example')
        os.chdir('workdir.example')
        for fname in ('argvf.template', 'crystal.template'):
            shutil.copy(os.path.join(test_inputs_dir, fname), fname)
        for fname in ('Si.in.template', 'Ge.in.template', 'params'):
            shutil.copy(os.path.join(test_inputs_dir, fname), os.getcwd())
        # run analysis driver and verify the penalty objectives
        analysis_driver.main()
        with open('results') as fin:
            assert fin.readlines() == [' 1.0000000000000000E+02 accu\n', ' 1.0000000000000000E+02 work\n']
def test_analysis_driver_main_success():
    """
    End-to-end run of the analysis driver on good inputs:
    checks the exact objective values written to the `results` file.
    """
    test_inputs_dir = os.path.join(main_test_inputs_dir, 'analysis_driver_main_success')
    with tools_for_tests.TemporaryDirectory() as tmp_dir:
        # set up a mock work directory:
        shutil.copy(os.path.join('..', 'calc_nflops'), os.getcwd())
        shutil.copy(os.path.join(test_inputs_dir, 'opal.in'), 'opal.in')
        shutil.copy(os.path.join(test_inputs_dir, 'configurations.in.example'), 'configurations.in')
        shutil.copy(os.path.join(test_inputs_dir, 'allelectron_forces.dat.example'), 'allelectron_forces.dat')
        os.mkdir('workdir.example')
        os.chdir('workdir.example')
        shutil.copy(os.path.join(test_inputs_dir, 'argvf.template'), 'argvf.template')
        shutil.copy(os.path.join(test_inputs_dir, 'crystal.template'), 'crystal.template')
        shutil.copy(os.path.join(test_inputs_dir, 'Si.in.template'), os.getcwd())
        shutil.copy(os.path.join(test_inputs_dir, 'Ge.in.template'), os.getcwd())
        shutil.copy(os.path.join(test_inputs_dir, 'params'), os.getcwd())
        # run analysis driver
        analysis_driver.main()
        with open('results') as fin:
            assert fin.readlines() == ['  7.6992177462473416E-02 accu\n',
                                       '  8.7573645819723784E-03 work\n']
def test_analysis_driver_main_nogcut_converge():
    """
    returns proper obectives of 95 when no gcut convergence
    sets impossible energy tolerance in opal.in
    """
    test_inputs_dir = os.path.join(main_test_inputs_dir, 'analysis_driver_main_nogcut_converge')
    with tools_for_tests.TemporaryDirectory() as tmp_dir:
        # set up a mock work directory:
        shutil.copy(os.path.join('..', 'calc_nflops'), os.getcwd())
        shutil.copy(os.path.join(test_inputs_dir, 'opal.in'), 'opal.in')
        shutil.copy(os.path.join(test_inputs_dir, 'configurations.in.example'), 'configurations.in')
        shutil.copy(os.path.join(test_inputs_dir, 'allelectron_forces.dat.example'), 'allelectron_forces.dat')
        os.mkdir('workdir.example')
        os.chdir('workdir.example')
        shutil.copy(os.path.join(test_inputs_dir, 'argvf.template'), 'argvf.template')
        shutil.copy(os.path.join(test_inputs_dir, 'crystal.template'), 'crystal.template')
        shutil.copy(os.path.join(test_inputs_dir, 'Si.in.template'), os.getcwd())
        shutil.copy(os.path.join(test_inputs_dir, 'Ge.in.template'), os.getcwd())
        shutil.copy(os.path.join(test_inputs_dir, 'params'), os.getcwd())
        # run analysis driver and check the 95.0 sentinel objectives
        analysis_driver.main()
        with open('results') as fin:
            assert fin.readlines() == ['  9.5000000000000000E+01 accu\n',
                                       '  9.5000000000000000E+01 work\n']
| [
"numpy.isclose",
"analysis_driver.main",
"eval_pp.main",
"os.path.join",
"os.getcwd",
"os.chdir",
"tools_for_tests.TemporaryDirectory",
"pytest.raises",
"os.mkdir",
"sys.path.append"
] | [((33, 53), 'sys.path.append', 'sys.path.append', (['"""."""'], {}), "('.')\n", (48, 53), False, 'import sys\n'), ((241, 252), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (250, 252), False, 'import os\n'), ((431, 486), 'os.path.join', 'os.path.join', (['main_test_inputs_dir', '"""eval_pp_main_test"""'], {}), "(main_test_inputs_dir, 'eval_pp_main_test')\n", (443, 486), False, 'import os\n'), ((1930, 1985), 'os.path.join', 'os.path.join', (['main_test_inputs_dir', '"""eval_pp_main_test"""'], {}), "(main_test_inputs_dir, 'eval_pp_main_test')\n", (1942, 1985), False, 'import os\n'), ((3317, 3389), 'os.path.join', 'os.path.join', (['main_test_inputs_dir', '"""analysis_driver_main_Si_noconverge"""'], {}), "(main_test_inputs_dir, 'analysis_driver_main_Si_noconverge')\n", (3329, 3389), False, 'import os\n'), ((4617, 4683), 'os.path.join', 'os.path.join', (['main_test_inputs_dir', '"""analysis_driver_main_success"""'], {}), "(main_test_inputs_dir, 'analysis_driver_main_success')\n", (4629, 4683), False, 'import os\n'), ((6029, 6103), 'os.path.join', 'os.path.join', (['main_test_inputs_dir', '"""analysis_driver_main_nogcut_converge"""'], {}), "(main_test_inputs_dir, 'analysis_driver_main_nogcut_converge')\n", (6041, 6103), False, 'import os\n'), ((496, 538), 'pytest.raises', 'pytest.raises', (['eval_pp.NoCutoffConvergence'], {}), '(eval_pp.NoCutoffConvergence)\n', (509, 538), False, 'import pytest\n'), ((1995, 2031), 'tools_for_tests.TemporaryDirectory', 'tools_for_tests.TemporaryDirectory', ([], {}), '()\n', (2029, 2031), False, 'import tools_for_tests\n'), ((2372, 2399), 'os.mkdir', 'os.mkdir', (['"""workdir.example"""'], {}), "('workdir.example')\n", (2380, 2399), False, 'import os\n'), ((2408, 2435), 'os.chdir', 'os.chdir', (['"""workdir.example"""'], {}), "('workdir.example')\n", (2416, 2435), False, 'import os\n'), ((2865, 2910), 'eval_pp.main', 'eval_pp.main', (["['Si', 'Ge']", 'gcuts', 'energy_tol'], {}), "(['Si', 'Ge'], gcuts, energy_tol)\n", (2877, 2910), 
False, 'import eval_pp\n'), ((2926, 3000), 'numpy.isclose', 'np.isclose', (["objectives['accu']", '(0.12408939054384546)'], {'rtol': '(0.0)', 'atol': '(0.0002)'}), "(objectives['accu'], 0.12408939054384546, rtol=0.0, atol=0.0002)\n", (2936, 3000), True, 'import numpy as np\n'), ((3015, 3089), 'numpy.isclose', 'np.isclose', (["objectives['work']", '(0.009064640532217023)'], {'rtol': '(0.0)', 'atol': '(1e-06)'}), "(objectives['work'], 0.009064640532217023, rtol=0.0, atol=1e-06)\n", (3025, 3089), True, 'import numpy as np\n'), ((3399, 3435), 'tools_for_tests.TemporaryDirectory', 'tools_for_tests.TemporaryDirectory', ([], {}), '()\n', (3433, 3435), False, 'import tools_for_tests\n'), ((3849, 3876), 'os.mkdir', 'os.mkdir', (['"""workdir.example"""'], {}), "('workdir.example')\n", (3857, 3876), False, 'import os\n'), ((3885, 3912), 'os.chdir', 'os.chdir', (['"""workdir.example"""'], {}), "('workdir.example')\n", (3893, 3912), False, 'import os\n'), ((4368, 4390), 'analysis_driver.main', 'analysis_driver.main', ([], {}), '()\n', (4388, 4390), False, 'import analysis_driver\n'), ((4693, 4729), 'tools_for_tests.TemporaryDirectory', 'tools_for_tests.TemporaryDirectory', ([], {}), '()\n', (4727, 4729), False, 'import tools_for_tests\n'), ((5143, 5170), 'os.mkdir', 'os.mkdir', (['"""workdir.example"""'], {}), "('workdir.example')\n", (5151, 5170), False, 'import os\n'), ((5179, 5206), 'os.chdir', 'os.chdir', (['"""workdir.example"""'], {}), "('workdir.example')\n", (5187, 5206), False, 'import os\n'), ((5662, 5684), 'analysis_driver.main', 'analysis_driver.main', ([], {}), '()\n', (5682, 5684), False, 'import analysis_driver\n'), ((6113, 6149), 'tools_for_tests.TemporaryDirectory', 'tools_for_tests.TemporaryDirectory', ([], {}), '()\n', (6147, 6149), False, 'import tools_for_tests\n'), ((6563, 6590), 'os.mkdir', 'os.mkdir', (['"""workdir.example"""'], {}), "('workdir.example')\n", (6571, 6590), False, 'import os\n'), ((6599, 6626), 'os.chdir', 'os.chdir', 
(['"""workdir.example"""'], {}), "('workdir.example')\n", (6607, 6626), False, 'import os\n'), ((7082, 7104), 'analysis_driver.main', 'analysis_driver.main', ([], {}), '()\n', (7102, 7104), False, 'import analysis_driver\n'), ((553, 589), 'tools_for_tests.TemporaryDirectory', 'tools_for_tests.TemporaryDirectory', ([], {}), '()\n', (587, 589), False, 'import tools_for_tests\n'), ((950, 977), 'os.mkdir', 'os.mkdir', (['"""workdir.example"""'], {}), "('workdir.example')\n", (958, 977), False, 'import os\n'), ((990, 1017), 'os.chdir', 'os.chdir', (['"""workdir.example"""'], {}), "('workdir.example')\n", (998, 1017), False, 'import os\n'), ((1530, 1575), 'eval_pp.main', 'eval_pp.main', (["['Si', 'Ge']", 'gcuts', 'energy_tol'], {}), "(['Si', 'Ge'], gcuts, energy_tol)\n", (1542, 1575), False, 'import eval_pp\n'), ((2104, 2137), 'os.path.join', 'os.path.join', (['""".."""', '"""calc_nflops"""'], {}), "('..', 'calc_nflops')\n", (2116, 2137), False, 'import os\n'), ((2139, 2150), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (2148, 2150), False, 'import os\n'), ((2172, 2230), 'os.path.join', 'os.path.join', (['test_inputs_dir', '"""configurations.in.example"""'], {}), "(test_inputs_dir, 'configurations.in.example')\n", (2184, 2230), False, 'import os\n'), ((2273, 2336), 'os.path.join', 'os.path.join', (['test_inputs_dir', '"""allelectron_forces.dat.example"""'], {}), "(test_inputs_dir, 'allelectron_forces.dat.example')\n", (2285, 2336), False, 'import os\n'), ((2456, 2503), 'os.path.join', 'os.path.join', (['test_inputs_dir', '"""argvf.template"""'], {}), "(test_inputs_dir, 'argvf.template')\n", (2468, 2503), False, 'import os\n'), ((2543, 2592), 'os.path.join', 'os.path.join', (['test_inputs_dir', '"""crystal.template"""'], {}), "(test_inputs_dir, 'crystal.template')\n", (2555, 2592), False, 'import os\n'), ((2634, 2673), 'os.path.join', 'os.path.join', (['test_inputs_dir', '"""PAW.Si"""'], {}), "(test_inputs_dir, 'PAW.Si')\n", (2646, 2673), False, 'import os\n'), ((2705, 
2744), 'os.path.join', 'os.path.join', (['test_inputs_dir', '"""PAW.Ge"""'], {}), "(test_inputs_dir, 'PAW.Ge')\n", (2717, 2744), False, 'import os\n'), ((3508, 3541), 'os.path.join', 'os.path.join', (['""".."""', '"""calc_nflops"""'], {}), "('..', 'calc_nflops')\n", (3520, 3541), False, 'import os\n'), ((3543, 3554), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (3552, 3554), False, 'import os\n'), ((3576, 3616), 'os.path.join', 'os.path.join', (['test_inputs_dir', '"""opal.in"""'], {}), "(test_inputs_dir, 'opal.in')\n", (3588, 3616), False, 'import os\n'), ((3649, 3707), 'os.path.join', 'os.path.join', (['test_inputs_dir', '"""configurations.in.example"""'], {}), "(test_inputs_dir, 'configurations.in.example')\n", (3661, 3707), False, 'import os\n'), ((3750, 3813), 'os.path.join', 'os.path.join', (['test_inputs_dir', '"""allelectron_forces.dat.example"""'], {}), "(test_inputs_dir, 'allelectron_forces.dat.example')\n", (3762, 3813), False, 'import os\n'), ((3933, 3980), 'os.path.join', 'os.path.join', (['test_inputs_dir', '"""argvf.template"""'], {}), "(test_inputs_dir, 'argvf.template')\n", (3945, 3980), False, 'import os\n'), ((4020, 4069), 'os.path.join', 'os.path.join', (['test_inputs_dir', '"""crystal.template"""'], {}), "(test_inputs_dir, 'crystal.template')\n", (4032, 4069), False, 'import os\n'), ((4111, 4158), 'os.path.join', 'os.path.join', (['test_inputs_dir', '"""Si.in.template"""'], {}), "(test_inputs_dir, 'Si.in.template')\n", (4123, 4158), False, 'import os\n'), ((4160, 4171), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (4169, 4171), False, 'import os\n'), ((4193, 4240), 'os.path.join', 'os.path.join', (['test_inputs_dir', '"""Ge.in.template"""'], {}), "(test_inputs_dir, 'Ge.in.template')\n", (4205, 4240), False, 'import os\n'), ((4242, 4253), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (4251, 4253), False, 'import os\n'), ((4275, 4314), 'os.path.join', 'os.path.join', (['test_inputs_dir', '"""params"""'], {}), "(test_inputs_dir, 'params')\n", (4287, 
4314), False, 'import os\n'), ((4316, 4327), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (4325, 4327), False, 'import os\n'), ((4802, 4835), 'os.path.join', 'os.path.join', (['""".."""', '"""calc_nflops"""'], {}), "('..', 'calc_nflops')\n", (4814, 4835), False, 'import os\n'), ((4837, 4848), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (4846, 4848), False, 'import os\n'), ((4870, 4910), 'os.path.join', 'os.path.join', (['test_inputs_dir', '"""opal.in"""'], {}), "(test_inputs_dir, 'opal.in')\n", (4882, 4910), False, 'import os\n'), ((4943, 5001), 'os.path.join', 'os.path.join', (['test_inputs_dir', '"""configurations.in.example"""'], {}), "(test_inputs_dir, 'configurations.in.example')\n", (4955, 5001), False, 'import os\n'), ((5044, 5107), 'os.path.join', 'os.path.join', (['test_inputs_dir', '"""allelectron_forces.dat.example"""'], {}), "(test_inputs_dir, 'allelectron_forces.dat.example')\n", (5056, 5107), False, 'import os\n'), ((5227, 5274), 'os.path.join', 'os.path.join', (['test_inputs_dir', '"""argvf.template"""'], {}), "(test_inputs_dir, 'argvf.template')\n", (5239, 5274), False, 'import os\n'), ((5314, 5363), 'os.path.join', 'os.path.join', (['test_inputs_dir', '"""crystal.template"""'], {}), "(test_inputs_dir, 'crystal.template')\n", (5326, 5363), False, 'import os\n'), ((5405, 5452), 'os.path.join', 'os.path.join', (['test_inputs_dir', '"""Si.in.template"""'], {}), "(test_inputs_dir, 'Si.in.template')\n", (5417, 5452), False, 'import os\n'), ((5454, 5465), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (5463, 5465), False, 'import os\n'), ((5487, 5534), 'os.path.join', 'os.path.join', (['test_inputs_dir', '"""Ge.in.template"""'], {}), "(test_inputs_dir, 'Ge.in.template')\n", (5499, 5534), False, 'import os\n'), ((5536, 5547), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (5545, 5547), False, 'import os\n'), ((5569, 5608), 'os.path.join', 'os.path.join', (['test_inputs_dir', '"""params"""'], {}), "(test_inputs_dir, 'params')\n", (5581, 5608), False, 'import 
os\n'), ((5610, 5621), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (5619, 5621), False, 'import os\n'), ((6222, 6255), 'os.path.join', 'os.path.join', (['""".."""', '"""calc_nflops"""'], {}), "('..', 'calc_nflops')\n", (6234, 6255), False, 'import os\n'), ((6257, 6268), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (6266, 6268), False, 'import os\n'), ((6290, 6330), 'os.path.join', 'os.path.join', (['test_inputs_dir', '"""opal.in"""'], {}), "(test_inputs_dir, 'opal.in')\n", (6302, 6330), False, 'import os\n'), ((6363, 6421), 'os.path.join', 'os.path.join', (['test_inputs_dir', '"""configurations.in.example"""'], {}), "(test_inputs_dir, 'configurations.in.example')\n", (6375, 6421), False, 'import os\n'), ((6464, 6527), 'os.path.join', 'os.path.join', (['test_inputs_dir', '"""allelectron_forces.dat.example"""'], {}), "(test_inputs_dir, 'allelectron_forces.dat.example')\n", (6476, 6527), False, 'import os\n'), ((6647, 6694), 'os.path.join', 'os.path.join', (['test_inputs_dir', '"""argvf.template"""'], {}), "(test_inputs_dir, 'argvf.template')\n", (6659, 6694), False, 'import os\n'), ((6734, 6783), 'os.path.join', 'os.path.join', (['test_inputs_dir', '"""crystal.template"""'], {}), "(test_inputs_dir, 'crystal.template')\n", (6746, 6783), False, 'import os\n'), ((6825, 6872), 'os.path.join', 'os.path.join', (['test_inputs_dir', '"""Si.in.template"""'], {}), "(test_inputs_dir, 'Si.in.template')\n", (6837, 6872), False, 'import os\n'), ((6874, 6885), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (6883, 6885), False, 'import os\n'), ((6907, 6954), 'os.path.join', 'os.path.join', (['test_inputs_dir', '"""Ge.in.template"""'], {}), "(test_inputs_dir, 'Ge.in.template')\n", (6919, 6954), False, 'import os\n'), ((6956, 6967), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (6965, 6967), False, 'import os\n'), ((6989, 7028), 'os.path.join', 'os.path.join', (['test_inputs_dir', '"""params"""'], {}), "(test_inputs_dir, 'params')\n", (7001, 7028), False, 'import os\n'), ((7030, 7041), 
'os.getcwd', 'os.getcwd', ([], {}), '()\n', (7039, 7041), False, 'import os\n'), ((670, 703), 'os.path.join', 'os.path.join', (['""".."""', '"""calc_nflops"""'], {}), "('..', 'calc_nflops')\n", (682, 703), False, 'import os\n'), ((705, 716), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (714, 716), False, 'import os\n'), ((742, 800), 'os.path.join', 'os.path.join', (['test_inputs_dir', '"""configurations.in.example"""'], {}), "(test_inputs_dir, 'configurations.in.example')\n", (754, 800), False, 'import os\n'), ((847, 910), 'os.path.join', 'os.path.join', (['test_inputs_dir', '"""allelectron_forces.dat.example"""'], {}), "(test_inputs_dir, 'allelectron_forces.dat.example')\n", (859, 910), False, 'import os\n'), ((1042, 1089), 'os.path.join', 'os.path.join', (['test_inputs_dir', '"""argvf.template"""'], {}), "(test_inputs_dir, 'argvf.template')\n", (1054, 1089), False, 'import os\n'), ((1133, 1182), 'os.path.join', 'os.path.join', (['test_inputs_dir', '"""crystal.template"""'], {}), "(test_inputs_dir, 'crystal.template')\n", (1145, 1182), False, 'import os\n'), ((1228, 1267), 'os.path.join', 'os.path.join', (['test_inputs_dir', '"""PAW.Si"""'], {}), "(test_inputs_dir, 'PAW.Si')\n", (1240, 1267), False, 'import os\n'), ((1303, 1342), 'os.path.join', 'os.path.join', (['test_inputs_dir', '"""PAW.Ge"""'], {}), "(test_inputs_dir, 'PAW.Ge')\n", (1315, 1342), False, 'import os\n')] |
#!/usr/bin/python
# -*- coding: UTF-8 -*-
""" Stochastic Dynamic Programming library
Implements naive methods of Dynamic Programming (Value Iteration)
to solve *simple* Optimal Stochastic Control problems
classes : SysDescription, DPSolver
"""
from __future__ import division, print_function, unicode_literals, absolute_import
import numpy as np
import matplotlib.pyplot as plt
import inspect
import itertools
from datetime import datetime
def _zero_cost(*x):
'''zero cost function g(x), used as default terminal cost'''
return 0.
def _enforce_sig_len(fun, args, with_params, shortname=None):
''' Enforces the signature length of `fun` to match `args`
Checks that function `fun` indeed accepts len(`args`) arguments,
raises ValueError othewise.
Also `shortname` is used, if provided, in the error message to
prepend fun.__name__
'''
fun_args = inspect.getargspec(fun).args
kw_args = inspect.getargspec(fun).keywords
err_msg = ''
if shortname is not None:
err_msg += shortname
err_msg += "'{:s}' ".format(fun.__name__)
if not len(fun_args) == len(args):
# Build an error message of the kind
# "dynamics function 'dyn_sto' should accept 3 args (x1, u1, w1), not 4."
err_msg += 'should accept {:d} args ({:s}), not {:d}'.format(
len(args), ', '.join(args), len(fun_args))
raise ValueError(err_msg)
if with_params and kw_args is None:
err_msg += 'should accept extra keyword arguments'
raise ValueError(err_msg)
if not with_params and kw_args is not None:
err_msg += 'should not accept extra keyword arguments'
raise ValueError(err_msg)
else:
return True
# end _enforce_sig_len
class SysDescription(object):
    '''Description of a Dynamical System in the view of optimal (stochastic)
    control, using Dynamic Programming approach.

    Each system basically has
    * a dynamics function x_{k+1} = f_k(x_k, u_k, w_k)
    * an instant cost function g_k(x_k, u_k, w_k)

    The sum over instants of g_k is the total cost J which is to be minimized
    by choosing the control policy
    '''
    def __init__(self, dims, stationnary=True, name='', params=None):
        '''
        Parameters
        ----------
        dims : tuple
            (dim_state, dim_control, dim_perturb), or (dim_state, dim_control)
            for a deterministic system.
        stationnary : bool
            if False, the instant `k` is expected as first argument of the
            dynamics and cost functions.
        name : str
            optional system name, used in messages.
        params : dict or None
            optional extra keyword parameters forwarded to the dynamics,
            cost and control box functions.
        '''
        self.name = name
        self.stationnary = bool(stationnary)
        if params is not None:
            self.params = params
        else:
            self.params = {}

        if len(dims) == 3:
            dim_state, dim_control, dim_perturb = dims
        elif len(dims) == 2:
            dim_state, dim_control = dims
            dim_perturb = 0
        else:
            raise ValueError('dims tuple should be of len 2 or 3')

        # default variable names; overwritten when the `dyn` setter reads
        # the actual signature of the dynamics function
        self.state = ['x{:d}'.format(i+1) for i in range(dim_state)]
        self.control = ['u{:d}'.format(i+1) for i in range(dim_control)]
        self.perturb = ['w{:d}'.format(i+1) for i in range(dim_perturb)]

        # Expected signature length of dyn and cost functions:
        self._dyn_args = self.state + self.control + self.perturb
        if not self.stationnary:
            # for unstationnary systems, instant `k` must be provided as 1st argument
            self._dyn_args.insert(0, 'time_k')

        # Dynamics and Cost functions (to be set separately)
        self._dyn = None
        self._cost = None
        self._control_box = None
        self._terminal_cost = _zero_cost
        self._perturb_laws = None

    @staticmethod
    def _fun_args(fun):
        '''positional argument names of function `fun`

        Uses inspect.getfullargspec since inspect.getargspec was removed
        in Python 3.11; falls back to getargspec on Python 2.
        '''
        try:
            return inspect.getfullargspec(fun).args
        except AttributeError:  # Python 2 fallback
            return inspect.getargspec(fun).args

    @property
    def stochastic(self):
        '''is the system stochastic or deterministic ?'''
        return len(self.perturb) > 0

    @property
    def dyn(self):
        '''dynamics function x_{k+1} = f_k(x_k, u_k, w_k)'''
        return self._dyn

    @dyn.setter
    def dyn(self, dyn):
        '''sets the dynamics function'''
        # Check the signature length:
        with_params = bool(self.params)
        if _enforce_sig_len(dyn, self._dyn_args, with_params, 'dynamics function'):
            self._dyn = dyn

        # Read the variable names from the signature of `dyn`
        dyn_args = self._fun_args(dyn)
        # Rewrite the internally stored signature
        self._dyn_args = dyn_args
        # Split the signature between state, control and perturb:
        if not self.stationnary:
            # drop the first argument (the time instant)
            dyn_args = dyn_args[1:]
        self.state = dyn_args[0:len(self.state)]
        dyn_args = dyn_args[len(self.state):]  # drop state variables
        self.control = dyn_args[0:len(self.control)]
        dyn_args = dyn_args[len(self.control):]  # drop control variables
        self.perturb = dyn_args[0:len(self.perturb)]

    @property
    def control_box(self):
        '''control description function U_k(x_k), expressed as a box (Hyperrectangle)

        which means the admissible control set must be described as a
        Cartesian product of intervals U = [u1_min, u1_max] x [u2_min, u2_max] x ...
        '''
        return self._control_box

    @control_box.setter
    def control_box(self, control_box):
        '''sets the control description function'''
        # Check the signature length:
        args = list(self.state)
        if not self.stationnary:
            args.insert(0, 'time_k')
        with_params = bool(self.params)
        if _enforce_sig_len(control_box, args, with_params, 'control description function'):
            self._control_box = control_box

    @property
    def cost(self):
        '''cost function g_k(x_k, u_k, w_k)'''
        return self._cost

    @cost.setter
    def cost(self, cost):
        '''sets the cost function'''
        # Check the signature length:
        with_params = bool(self.params)
        if _enforce_sig_len(cost, self._dyn_args, with_params, 'cost function'):
            self._cost = cost

    @property
    def terminal_cost(self):
        '''terminal cost function g(x_K)'''
        return self._terminal_cost

    @terminal_cost.setter
    def terminal_cost(self, cost):
        '''sets the terminal cost function'''
        # Check the signature length (must take exactly the state variables):
        cost_args = self._fun_args(cost)
        if not len(cost_args) == len(self.state):
            raise ValueError('cost function should accept '
                             '{:d} args instead of {:d}'.format(
                             len(self.state), len(cost_args)))
        self._terminal_cost = cost

    @property
    def perturb_laws(self):
        '''distribution laws of perturbations `w_k`'''
        return self._perturb_laws

    @perturb_laws.setter
    def perturb_laws(self, laws):
        '''distribution laws of perturbations'''
        # Check the number of laws
        if not len(laws) == len(self.perturb):
            raise ValueError('{:d} perturbation laws should be provided'
                             .format(len(self.perturb)))
        self._perturb_laws = laws

        # Check the type of perturbations (continuous vs. discrete)
        # by probing for a pdf (density) or pmf (mass) method.
        self.perturb_types = []
        for l in laws:
            t = None
            try:
                l.pdf(0)  # probability *density* -> continuous
                t = 'continuous'
            except AttributeError:
                try:
                    l.pmf(0)  # probability *mass* -> discrete
                    t = 'discrete'
                except AttributeError:
                    raise ValueError('perturbation law {:s} should either have a pdf or a pmf method'.format(repr(l)))
            self.perturb_types.append(t)

    def print_summary(self):
        '''summary information about the dynamical system'''
        print('Dynamical system "{}" description'.format(self.name))

        ### 1) general properties
        station = 'stationnary' if self.stationnary else 'time dependent'
        stoch = 'stochastic' if self.stochastic else 'deterministic'
        print('* behavioral properties: {}, {}'.format(station, stoch))

        ### 2) info about functions:
        print('* functions:')
        funclist = [('dynamics', self.dyn),
                    ('cost', self.cost),
                    ('control box', self.control_box)]
        maxlen = max([len(name) for name, _ in funclist])
        for name, fun in funclist:
            if fun is not None:
                fname = '{0.__module__}.{0.__name__}'.format(fun)
            else:
                fname = 'None (to be defined)'
            print(' - {0:{width}}: {1}'.format(name, fname, width=maxlen+1))
        # end for each function

        ### 3) information about variables
        print('* variables')
        vectlist = [('state', self.state),
                    ('control', self.control)]
        if self.stochastic:
            vectlist.append(('perturbation', self.perturb))
        maxlen = max([len(name) for name, _ in vectlist])
        for name, vect in vectlist:
            print(' - {0:{width}}: {1} (dim {2:d})'.format(
                  name, ', '.join(vect), len(vect), width=maxlen+1))
        # end for each vector
    # end print_summary()

    def __repr__(self):
        return '<SysDescription "{:s}" at 0x{:x}>'.format(self.name, id(self))
    # end __repr__()
# end SysDescription class
################################################################################
# Interpolation class
# TODO : use a nicer n-dim method (like multilinear interpolation)
from scipy.interpolate import RectBivariateSpline
from stodynprog.dolointerpolation.multilinear_cython import multilinear_interpolation
class MlinInterpolator:
    '''Multilinear interpolation class

    wrapping Pablo Winant's Cython interpolation routine

    Note : API of this class is different from Pablo Winant's MultilinInterpolator
    '''
    def __init__(self, *x_grid):
        '''`x_grid`: one 1-d array of grid coordinates per dimension'''
        self.ndim = len(x_grid)
        self._xmin = np.array([x[0] for x in x_grid])
        self._xmax = np.array([x[-1] for x in x_grid])
        # np.int was removed in NumPy 1.24: use the builtin int dtype
        self._xshape = np.array([len(x) for x in x_grid], dtype=int)
        self.values = None

    def set_values(self, values):
        '''stores the array of function `values`; shape must match the grid'''
        assert values.ndim == self.ndim
        assert values.shape == tuple(self._xshape)
        # the Cython routine expects a C-contiguous (1, npoints) 2-d array
        self.values = np.ascontiguousarray(np.atleast_2d(values.ravel()))

    def __call__(self, *x_interp):
        '''evaluate the interpolated function at coordinates `x_interp`

        output shape is the shape of broadcasted coordinate inputs.
        '''
        assert len(x_interp) == self.ndim
        # Prepare the interpolated coordinates array
        x_mesh = np.broadcast_arrays(*x_interp)
        shape = x_mesh[0].shape
        # np.row_stack was removed in NumPy 2.0: vstack is equivalent here
        x_stack = np.vstack([x.astype(float).ravel() for x in x_mesh])
        a = multilinear_interpolation(self._xmin, self._xmax, self._xshape,
                                      self.values, x_stack)
        a = a.reshape(shape)
        return a
    # end __call__()
# end MlinInterpolator
class RectBivariateSplineBc(RectBivariateSpline):
'''extended RectBivariateSpline class,
where spline evaluation works uses input broadcast
and returns an output with a coherent shape.
'''
#@profile
def __call__(self, x, y):
'''extended `ev` method, which supports array broadcasting
'''
if x.shape != y.shape:
x,y = np.broadcast_arrays(x,y) # costs about 30µs/call
# flatten the inputs after saving their shape:
shape = x.shape
x = np.ravel(x)
y = np.ravel(y)
# Evaluate the spline and reconstruct the dimension:
z = self.ev(x,y)
z = z.reshape(shape)
return z
# end __call__()
# end RectBivariateSplineBc class
################################################################################
# Stochastic Dynamic Programming class
class DPSolver(object):
def __init__(self, sys):
'''Dynamic Programming solver for stochastic dynamic control of `sys`
The dynamical system `sys` should be a `SysDescription` object.
DPSolver implements Value Iteration and Policy Iteration.
For the latter, policy evaluation is done by repeated value iterations.
'''
self.sys = sys
# Initialization of discrete grids:
self.state_grid = [[0.] for s in self.sys.state]
self.perturb_grid = [[0.] for p in self.sys.perturb]
self.perturb_proba = [[1.] for p in self.sys.perturb]
# steps for control discretization
self.control_steps = (1.,)*len(self.sys.control)
# end __init__()
def discretize_perturb(self, *linspace_args):
'''create a regular discrete grid for each perturbation variable
grids are stored in `self.perturb_grid` and can also be set manually
corresponding probability weights are in `self.perturb_proba`
'''
assert len(linspace_args) == len(self.sys.perturb)*3
self.perturb_grid = []
self.perturb_proba = []
for i in range(len(self.sys.perturb)):
# discrete grid for perturbation `i`
grid_wi = np.linspace(*linspace_args[i*3:i*3+3])
if self.sys.perturb_types[i] == 'continuous':
pdf_wi = self.sys.perturb_laws[i].pdf
proba_wi = pdf_wi(grid_wi)
proba_wi /= proba_wi.sum()
else: # discrete perturbation
pmf_wi = self.sys.perturb_laws[i].pmf
proba_wi = pmf_wi(grid_wi)
assert np.allclose(proba_wi.sum(), 1.)
#proba_wi /= proba_wi.sum()
self.perturb_grid.append(grid_wi)
self.perturb_proba.append(proba_wi)
return self.perturb_grid, self.perturb_proba
# end discretize_perturb()
def discretize_state(self, *linspace_args):
'''create a regular discrete grid for each state variable
grids are stored in `self.state_grid` and can also be set manually.
'''
assert len(linspace_args) == len(self.sys.state)*3
state_grid = []
for i in range(len(self.sys.state)):
# discrete grid for state `i`
grid_xi = np.linspace(*linspace_args[i*3:i*3+3])
state_grid.append(grid_xi)
self.state_grid = state_grid
### Store some additional data about the grid
# shape of the grid:
grid_shape = tuple(len(g) for g in self.state_grid)
self._state_grid_shape = grid_shape
# Reference indices (for relative DP algorithm)
# -> take the "middle" of the grid
ref_ind = tuple(nx//2 for nx in grid_shape)
self._state_ref_ind = ref_ind
self._state_ref = tuple(g[i] for g,i in zip(state_grid, ref_ind))
return self.state_grid
# end discretize_state()
@property
def state_grid_full(self):
'''broadcasted state grid
(compared to self.state_grid which is flat)
'''
state_dim = len(self.state_grid)
state_grid = []
for i, x_grid in enumerate(self.state_grid):
shape = [1]*state_dim
shape[i] = -1
state_grid.append(x_grid.reshape(shape))
return np.broadcast_arrays(*state_grid)
def interp_on_state(self, A):
'''returns an interpolating function of matrix A, assuming that A
is expressed on the state grid `self.state_grid`
the shape of A should be (len(g) for g in self.state_grid)
'''
# Check the dimension of A:
expect_shape = self._state_grid_shape
if A.shape != expect_shape:
raise ValueError('array `A` should be of shape {:s}, not {:s}'.format(
str(expect_shape), str(A.shape)) )
if len(expect_shape) <= 5:
A_interp = MlinInterpolator(*self.state_grid)
A_interp.set_values(A)
return A_interp
# if len(expect_shape) == 2:
# x1_grid = self.state_grid[0]
# x2_grid = self.state_grid[1]
# A_interp = RectBivariateSplineBc(x1_grid, x2_grid, A, kx=1, ky=1)
# return A_interp
else:
raise NotImplementedError('interpolation for state dimension >5'
' is not implemented.')
# end interp_on_state()
def control_grids(self, state_k, t_k=None):
'''returns u1_range, u2_range which is a grid on the box
of admissible controls using self.control_steps as hints
'''
# 1) Evaluate the admissible box:
if t_k is not None:
state_k = (t_k,) + state_k
sys_params = self.sys.params
intervals = self.sys.control_box(*state_k, **sys_params)
# 2) Build the dicretization grid for each control:
control_grids = []
control_dims = []
for (u_min, u_max), step in zip(intervals, self.control_steps):
width = u_max - u_min
n_interv = width / step # gives the number of intervals (float)
if n_interv < 0.1:
# step size is much (10x) thinner than the admissible width,
# only keep one control point at the interval center :
npts = 1
u_grid = np.array([(u_min+u_max)/2])
else:
# ensure we take enough points so that the actual discretization step
# is smaller or equal than the `step` hint
npts = int(np.ceil(n_interv) + 1)
u_grid = np.linspace(u_min, u_max, npts)
control_grids.append(u_grid)
control_dims.append(npts)
# end for each control
return control_grids, tuple(control_dims)
# end control_grids()
#@profile
def value_iteration(self, J_next, rel_dp=False, report_time=True):
'''solve one DP step on the entire state space grid,
given and cost-to-go array `J_next` discretized over the state space grid.
If rel_dp is True, J_next should be a (J_next, J_ref) tuple
Returns
(J_k, pol_k)
and J_k is a tuple (J_diff, J_ref) if `rel_dp` is True
'''
t_start = datetime.now()
# Iterator over the state grid:
state_grid = itertools.product(*self.state_grid)
state_dims = tuple(len(grid) for grid in self.state_grid)
state_ind = itertools.product(*[range(d) for d in state_dims])
# Reference state for relative DP:
ref_ind = self._state_ref_ind
if rel_dp:
# Split the cost tuple:
J_next, J_ref = J_next
# Check that the cost-to-go is indeed a *differential* cost
# with a zero at the reference state
assert J_next[ref_ind] == 0.
# number of control variables
nb_control = len(self.sys.control)
# Initialize the output arrays
J_k = np.zeros(state_dims)
pol_k = np.zeros(state_dims + (nb_control,) )
# Interpolating function of the cost-to-go
J_next_interp = self.interp_on_state(J_next)
# Loop over the state grid
if report_time: print('value iteration...', end='')
# # Attempt at doing parallel processing:
# from multiprocessing import Pool
# p = Pool(3)
# args = zip(state_grid,
# itertools.repeat(J_next_interp) )
# out = p.imap(self._value_at_state_vect, args)
# out = np.fromiter(out, float)
for ind_x, x_k in zip(state_ind, state_grid):
J_xk_opt, u_xk_opt = self._value_at_state_vect(x_k, J_next_interp)
# Save the optimal value:
J_k[ind_x] = J_xk_opt
pol_k[ind_x] = u_xk_opt
# Report progress:
# print('\rstate loop {:.1%}...'.format(
# np.ravel_multi_index(ind_x, state_dims) / np.product(state_dims) ),
# end='')
# end for each state value
# Relative DP:
if rel_dp:
J_ref = J_k[ref_ind]
J_k -= J_ref
exec_time = (datetime.now() - t_start).total_seconds()
if report_time: print('\rvalue iteration run in {:.2f} s'.format(exec_time))
if rel_dp:
# pack together the differential and relative costs:
J_k = J_k, J_ref
return J_k, pol_k
# end solve_step
def bellman_recursion(self, t_fin, J_fin, t_ini=0, report_time=True):
        '''Solve the Bellman *backward recursion* of a finite-horizon problem.

        Runs the value iteration backward in time from `t_fin` (positive
        integer) down to `t_ini` (defaults to zero; the only tested value).
        Supports non-stationnary (time-dependent) problems.

        Parameters
        ----------
        t_fin : int, final time index (horizon length).
        J_fin : terminal cost array, discretized on the state grid.
        t_ini : int, initial time index (must be 0).
        report_time : bool, print the wall-clock execution time when True.

        Returns
        -------
        (J, pol) : cost-to-go and policy arrays with a leading time axis of
        length t_fin-t_ini.
        '''
        t_start = datetime.now()
        state_dims = tuple(len(grid) for grid in self.state_grid)
        # number of control variables
        nb_control = len(self.sys.control)
        stationnary = self.sys.stationnary
        print('time-dependent problem: {:s}'.format('no' if stationnary else 'yes'))
        # Initialize the output arrays
        assert t_ini == 0 # t_ini > 0 not tested
        J = np.zeros((t_fin-t_ini,) + state_dims)
        pol = np.zeros((t_fin-t_ini,) + state_dims + (nb_control,) )
        if report_time: print('bellman recursion...', end='')
        # backward time iteration:
        for t_k in range(t_ini, t_fin)[::-1]:
            # Report progress:
            print('\rtk = {:3d}...'.format(t_k), end='')
            # take the time slice (views into J/pol, filled in place below)
            k = t_k-t_ini
            J_k = J[k]
            pol_k = pol[k]
            # Interpolating function of the cost-to-go:
            # terminal cost at the last instant, else the already-computed
            # slice k+1 of the backward sweep.
            J_next_interp = self.interp_on_state(J_fin) if t_k == (t_fin-1) else \
                            self.interp_on_state(J[k+1])
            # Iterator over the state grid:
            state_grid = itertools.product(*self.state_grid)
            state_ind = itertools.product(*[range(d) for d in state_dims])
            # Loop over the state grid
            for ind_x, x_k in zip(state_ind, state_grid):
                J_xk_opt, u_xk_opt = self._value_at_state_vect(x_k, J_next_interp, t_k)
                # Save the optimal value:
                J_k[ind_x] = J_xk_opt
                pol_k[ind_x] = u_xk_opt
            # end for each state value
        exec_time = (datetime.now() - t_start).total_seconds()
        if report_time: print('\rvalue iteration run in {:.2f} s'.format(exec_time))
        return J, pol
#@profile
def _value_at_state_loop(self, x_k, J_next_interp):
        '''Optimal cost and control at state point `x_k` (iterative version).

        The admissible controls are discretized and evaluated one at a time
        in a Python loop; the best (expected cost, control) pair seen so far
        is memorized. Only a 1D perturbation is supported.

        Returns (J_xk_opt, u_xk_opt).
        '''
        # Admissible control grid at this state (state-dependent):
        u_grids, control_dims = self.control_grids(x_k)
        # 1D perturbation values and their probability weights
        # (nD perturbation: TODO, not implemented):
        w_k = self.perturb_grid[0]
        w_proba = self.perturb_proba[0]
        params = self.sys.params
        best_cost = np.inf
        best_control = None
        # Exhaustive scan of the discretized control set:
        for u_xk in itertools.product(*u_grids):
            dyn_args = x_k + u_xk + (w_k,)
            # Next-state grid over the perturbation values:
            x_next = self.sys.dyn(*dyn_args, **params)
            # Instant cost plus interpolated cost-to-go:
            cost_grid = self.sys.cost(*dyn_args, **params) + J_next_interp(*x_next)
            # Expectation (probability-weighted mean) over the perturbation:
            expected = np.inner(cost_grid, w_proba)
            if expected < best_cost:
                best_cost = expected
                best_control = u_xk
        # end for each control
        return (best_cost, best_control)
    # end _value_at_state_loop()
#@profile
def _value_at_state_vect(self, x_k, J_next_interp, t_k=None):
        '''Optimal cost and control at state point `x_k` (vectorized version).

        The set of allowed controls is discretized and their expected cost
        J(x_k, u_k) is computed *all at once* by broadcasting; the best
        control and cost are then located with `np.argmin`.

        Parameters
        ----------
        x_k : tuple of state values.
        J_next_interp : interpolating function of the cost-to-go.
        t_k : time index for non-stationnary problems (None if stationnary).

        Returns (J_xk_opt, u_xk_opt).

        Raises NotImplementedError when more than one perturbation variable
        is configured (only 0 or 1 is supported).
        '''
        # Compute the allowed control grid (depends on the state)
        u_grids, control_dims = self.control_grids(x_k, t_k)
        nb_control = len(u_grids)
        # Reshape the control grids to enable broadcasted operations:
        for i in range(nb_control):
            # tuple of ones of length (nb_control + 1) with -1 at index i
            # (the extra trailing axis is reserved for the perturbation)
            shape = (1,)*i + (-1,) + (1,)*(nb_control-i)
            # inplace reshape:
            u_grids[i].shape = shape
        nb_perturb = len(self.perturb_grid)
        if nb_perturb > 1:
            # BUGFIX: this case previously fell through the branches below
            # leaving `J` unbound (NameError); fail fast with a clear message.
            raise NotImplementedError('only 0 or 1 perturbation variable is supported')
        if nb_perturb == 1:
            # grab the 1D perturbation probability vector
            w_proba = self.perturb_proba[0]
        args = x_k + tuple(u_grids) + tuple(self.perturb_grid)
        sys_params = self.sys.params
        if t_k is not None:
            # prepend the time argument for non-stationnary problems:
            args = (t_k,) + args
        # Compute a grid of next steps:
        x_next = self.sys.dyn(*args, **sys_params)
        # Compute a grid of costs:
        g_k_grid = self.sys.cost(*args, **sys_params) # instant cost
        J_k_grid = g_k_grid + J_next_interp(*x_next) # add the cost-to-go
        # Expected (weighted mean) cost:
        if nb_perturb == 0:
            J = J_k_grid
        else: # nb_perturb == 1
            J = np.inner(J_k_grid, w_proba) # shape control_dims
        assert J.shape == control_dims
        # Find the lowest cost in array J:
        ind_opt = np.unravel_index(J.argmin(), control_dims)
        J_xk_opt = J[ind_opt]
        u_xk_opt = [u_grids[i].flatten()[ind_opt[i]] for i in range(nb_control)]
        return (J_xk_opt, u_xk_opt)
    # end _value_at_state_vect()
def eval_policy(self, pol, n_iter, rel_dp=False, J_zero=None,
                    report_time=True, J_ref_full=False):
        '''Evaluate the cost of following policy `pol` for `n_iter` steps.

        Useful as the evaluation phase of the *policy iteration* algorithm.

        Parameters
        ----------
        pol : policy array of shape state_dims + (nb_control,).
        n_iter : number of cost-propagation steps.
        rel_dp : bool, use the relative DP update (subtract the
            reference-state cost at each step) instead of the plain
            summation. False by default.
        J_zero : optional initial cost array (defaults to zeros).
        report_time : bool, print the wall-clock execution time when True.
        J_ref_full : bool, when True (and rel_dp) return the full history of
            reference costs instead of only the last one.

        Returns
        -------
        J_pol : array of shape self._state_grid_shape;
        (J_pol, J_ref) if `rel_dp` is True.
        '''
        t_start = datetime.now()
        state_dims = self._state_grid_shape
        nb_state = len(self.sys.state)
        # Initial cost to start the evaluation with:
        if J_zero is None:
            J_zero = np.zeros(state_dims)
        assert J_zero.shape == state_dims
        J_pol = J_zero
        # Reference cost :
        J_ref = np.zeros(n_iter)
        # which state to use as reference:
        ref_ind = self._state_ref_ind
        # Policy : check the shape
        nb_control = len(self.sys.control)
        assert pol.shape == state_dims + (nb_control,)
        # Perturbation:
        w_k = self.perturb_grid[0]
        w_proba = self.perturb_proba[0]
        # TODO : implement nD perturbation
        # Reshape the state grids to enable broadcasted operations:
        state_grid = [None]*nb_state
        for i in range(nb_state):
            # create a tuple of ones of length (nb_state + 1) with -1 at index i
            # (+1 used for the perturbation)
            shape = (1,)*i + (-1,) + (1,)*(nb_state-i)
            # inplace reshape:
            state_grid[i] = np.reshape(self.state_grid[i], shape)
        state_grid = tuple(state_grid)
        # Loop over instants
        sys_params = self.sys.params
        for k in range(n_iter):
            print('\rpolicy evaluation: iter. {:d}/{:d}'.format(k,n_iter), end='')
            # Interpolate the cost-to-go
            J_pol_interp = self.interp_on_state(J_pol)
            # separate the controls: one state-shaped slice per control
            # variable, with a trailing singleton axis for broadcasting
            u_k = [pol[..., i].reshape(state_dims+(1,))
                   for i in range(nb_control)]
            args = state_grid + tuple(u_k) + (w_k,)
            # Compute a grid of next steps:
            x_next = self.sys.dyn(*args, **sys_params)
            # Compute a grid of costs:
            g_k_grid = self.sys.cost(*args, **sys_params)# instant cost
            J_k_grid = g_k_grid + J_pol_interp(*x_next) # add the cost-to-go
            # Expected (weighted mean) cost:
            J_pol = np.inner(J_k_grid, w_proba)
            # end for each state
            if rel_dp:
                # re-center on the reference state to keep values bounded:
                J_ref[k] = J_pol[ref_ind]
                J_pol -= J_ref[k]
        # end for each instant
        exec_time = (datetime.now() - t_start).total_seconds()
        if report_time: print('\rpolicy evaluation run in {:.2f} s '.format(exec_time))
        if rel_dp:
            if not J_ref_full:
                # only report the last reference cost:
                J_ref = J_ref[-1]
            return J_pol, J_ref
        else:
            return J_pol
    # end eval_policy
def policy_iteration(self, pol_init, n_val, n_pol=1, rel_dp=False):
'''policy iteration algorithm
Parameters
pol_init : initial policy to evaluate
n_val : number of value iterations to evaluate the policy
n_pol : number of policy iterations (default to 1)
Returns
(J_pol, pol) arrays
and J_pol is a tuple (J_diff, J_ref) if `rel_dp` is True
'''
pol = pol_init
# First evaluation of the policy:
J_pol = self.eval_policy(pol, n_val, rel_dp)
if rel_dp:
# J_pol is a tuple J_diff, J_ref
J_diff, J_ref = J_pol
print('ref policy cost: {:g}'.format(J_ref))
# Improve the policy:
for k in range(n_pol):
print('policy iteration {:d}/{:d}'.format(k+1, n_pol))
# 1) Improve the policy
_, pol = self.value_iteration(J_pol, rel_dp=rel_dp)
# 2) Evaluate the new policy:
J_pol = self.eval_policy(pol, n_val, rel_dp)
if rel_dp:
# J_pol is a tuple J_diff, J_ref
J_ref = J_pol[1]
print('ref policy cost: {:g}'.format(J_ref))
return J_pol, pol
# end policy_iteration
def print_summary(self):
        '''Print summary information about the state of the SDP solver:
        state, perturbation and control discretizations (grid sizes and
        steps), including the state-dependent number of admissible controls.
        '''
        print('SDP solver for system "{}"'.format(self.sys.name))
        ### Print a report on Discretization:
        # a) State discretization:
        grid_size = 'x'.join([str(len(grid)) for grid in self.state_grid])
        print('* state space discretized on a {:s} points grid'.format(grid_size))
        for i, grid in enumerate(self.state_grid):
            if len(grid) > 1:
                step = grid[1] - grid[0]
                print('  - Δ{:s} = {:g}'.format(self.sys.state[i], step))
            else: # len(grid) == 1
                print('  - {:s} fixed at {:g}'.format(self.sys.state[i], grid[0]))
        # b) Perturbation discretization:
        if self.sys.stochastic:
            grid_size = 'x'.join([str(len(grid)) for grid in self.perturb_grid])
            print('* perturbation discretized on a {:s} points grid'.format(grid_size))
            for i, grid in enumerate(self.perturb_grid):
                if len(grid) > 1:
                    step = grid[1] - grid[0]
                    print('  - Δ{:s} = {:g}'.format(self.sys.perturb[i], step))
                else: # len(grid) == 1
                    print('  - {:s} fixed at {:g}'.format(self.sys.perturb[i], grid[0]))
        # c) Control discretization
        # Compute the average number of control points:
        # (the admissible control set depends on the state, so the grid
        # dimensions must be collected over the whole state grid)
        control_dims_list = []
        t_k = None if self.sys.stationnary else 0
        if self.sys.control_box is not None:
            for x_k in itertools.product(*self.state_grid):
                # Compute the control grid dimension for each state
                _, control_dims = self.control_grids(x_k, t_k)
                control_dims_list.append(control_dims)
            # Convert list to 2D array for easy stats:
            cdim = np.array(control_dims_list)
        else:
            print('Warning: sys.control_box is still to be defined!')
        print('* control discretization steps:')
        for i in range(len(self.sys.control)):
            step = self.control_steps[i]
            print('  - Δ{:s} = {:g}'.format(self.sys.control[i], step))
            if control_dims_list:
                if cdim[:,i].min() != cdim[:,i].max():
                    #the number of possible values *depends on the state*
                    print(('    yields [{:,d} to {:,d}] possible values'+\
                           ' ({:,.1f} on average)').format(
                            cdim[:,i].min(), cdim[:,i].max(), cdim[:,i].mean()) )
                else:
                    #the number of possible values is *constant*
                    print('    yields {:,d} possible values'.format(
                           cdim[0,i]) )
        # end for each control
        # Combined size of the control set (only meaningful with >= 2 vars):
        if control_dims_list and len(self.sys.control)>=2:
            cdim_tot = np.prod(cdim, axis=1)
            print('    control combinations:' +
                  ' [{:,d} to {:,d}] possible values ({:,.1f} on average)'.format(
                  cdim_tot.min(), cdim_tot.max(), cdim_tot.mean()) )
    # end print_summary()
# end DPSolver
if __name__ == '__main__':
    ### Example usage with an Energy Storage system
    import scipy.stats as stats
    ### Storage dynamics:
    # Storage rated energy and power:
    E_rated = 7.2 # [MWh]
    P_rated = 2 # [MW]
    # storage loss factor
    a = 0.05
    # Storage request AR(1) model parameters:
    P_req_scale = 1.5 # [MW]
    phi = 0.8
    # innovation scale chosen so the AR(1) has stationary std P_req_scale:
    innov_scale = P_req_scale*np.sqrt(1- phi**2)
    innov_law = stats.norm(loc=0, scale=innov_scale)
    def dyn_sto(E, P_req, P_sto, innov):
        '''state transition function `f(x_k,u_k,w_k)` of a Energy storage
        returns (E(k+1), P_req(k+1))
        '''
        # 1) Stored energy evolution (with loss factor a on charged power):
        E_next = E + P_sto - a*abs(P_sto)# + 0*innov
        # 2) Storage request AR(1) model:
        P_req_next = phi*P_req + innov
        return (E_next, P_req_next)
    def admissible_P_sto(E, P_req):
        '''returns the set of admissible control U(x_k) of an Energy storage
        Control is the stored power P_sto
        Returns the cartesian description of the admissible control space
        (u1_min, u1_max),
        '''
        # power bounds so that E stays in [0, E_rated] despite losses:
        P_neg = np.max(( -E/(1+a), -P_rated))
        P_pos = np.min(( (E_rated - E)/(1-a), P_rated))
        U1 = (P_neg, P_pos)
        return (U1, )
    ### Cost model
    c_dev = 200 # [€/MWh]
    def cost_lin(E, P_req, P_sto, innov):
        '''cost of one instant (linear penalty on the absolute deviation)'''
        P_dev = P_req - P_sto
        return c_dev * np.abs(P_dev)
    def cost_quad(E, P_req, P_sto, innov):
        '''a simple quadratic cost model
        which penalizes only the commitment deviation P_dev
        '''
        P_dev = P_req - P_sto
        return P_dev**2
    ### Create the system description:
    sys = SysDescription((2,1,1), name='NaS Storage')
    sys.dyn = dyn_sto
    sys.control_box = admissible_P_sto
    sys.cost = cost_quad
    sys.perturb_laws = [innov_law]
    sys.print_summary()
    print('')
    ### Create the DP solver:
    dpsolv = DPSolver(sys)
    # discretize the state space
    N_E = 51
    N_P_req = 41
    dpsolv.discretize_state(0, E_rated, N_E,
                            -4*P_req_scale, 4*P_req_scale, N_P_req)
    # discretize the perturbation
    N_w = 11
    dpsolv.discretize_perturb(-3*innov_scale, 3*innov_scale, N_w)
    # control discretization step:
    dpsolv.control_steps=(.1,) # maximum 41 pts when -2,2 MW are admissible
    dpsolv.print_summary()
    print('')
    print('Running 2 value iterations...')
    # start from a zero terminal cost and apply two Bellman sweeps:
    J_N = np.zeros((N_E,N_P_req))
    J, u = dpsolv.value_iteration(J_N)
    J, u = dpsolv.value_iteration(J)
    # Make a quick plot of the optimal controls
    fig = plt.figure('optimal controls', figsize=(5,4.5))
    ax1 = fig.add_subplot(111, title='Stored power $P_{sto}$',
                          xlabel=sys.state[1], ylabel=sys.state[0])
    im = ax1.imshow(u[:,:,0], interpolation='nearest')
    fig.colorbar(im)
    plt.show()
| [
"numpy.prod",
"numpy.sqrt",
"numpy.array",
"stodynprog.dolointerpolation.multilinear_cython.multilinear_interpolation",
"numpy.reshape",
"itertools.product",
"numpy.max",
"numpy.linspace",
"numpy.min",
"numpy.abs",
"numpy.ceil",
"numpy.inner",
"numpy.broadcast_arrays",
"matplotlib.pyplot.s... | [((34042, 34078), 'scipy.stats.norm', 'stats.norm', ([], {'loc': '(0)', 'scale': 'innov_scale'}), '(loc=0, scale=innov_scale)\n', (34052, 34078), True, 'import scipy.stats as stats\n'), ((36128, 36152), 'numpy.zeros', 'np.zeros', (['(N_E, N_P_req)'], {}), '((N_E, N_P_req))\n', (36136, 36152), True, 'import numpy as np\n'), ((36287, 36335), 'matplotlib.pyplot.figure', 'plt.figure', (['"""optimal controls"""'], {'figsize': '(5, 4.5)'}), "('optimal controls', figsize=(5, 4.5))\n", (36297, 36335), True, 'import matplotlib.pyplot as plt\n'), ((36546, 36556), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (36554, 36556), True, 'import matplotlib.pyplot as plt\n'), ((888, 911), 'inspect.getargspec', 'inspect.getargspec', (['fun'], {}), '(fun)\n', (906, 911), False, 'import inspect\n'), ((931, 954), 'inspect.getargspec', 'inspect.getargspec', (['fun'], {}), '(fun)\n', (949, 954), False, 'import inspect\n'), ((9747, 9779), 'numpy.array', 'np.array', (['[x[0] for x in x_grid]'], {}), '([x[0] for x in x_grid])\n', (9755, 9779), True, 'import numpy as np\n'), ((9802, 9835), 'numpy.array', 'np.array', (['[x[-1] for x in x_grid]'], {}), '([x[-1] for x in x_grid])\n', (9810, 9835), True, 'import numpy as np\n'), ((10436, 10466), 'numpy.broadcast_arrays', 'np.broadcast_arrays', (['*x_interp'], {}), '(*x_interp)\n', (10455, 10466), True, 'import numpy as np\n'), ((10595, 10684), 'stodynprog.dolointerpolation.multilinear_cython.multilinear_interpolation', 'multilinear_interpolation', (['self._xmin', 'self._xmax', 'self._xshape', 'self.values', 'x_stack'], {}), '(self._xmin, self._xmax, self._xshape, self.values,\n x_stack)\n', (10620, 10684), False, 'from stodynprog.dolointerpolation.multilinear_cython import multilinear_interpolation\n'), ((11327, 11338), 'numpy.ravel', 'np.ravel', (['x'], {}), '(x)\n', (11335, 11338), True, 'import numpy as np\n'), ((11351, 11362), 'numpy.ravel', 'np.ravel', (['y'], {}), '(y)\n', (11359, 11362), True, 'import 
numpy as np\n'), ((15000, 15032), 'numpy.broadcast_arrays', 'np.broadcast_arrays', (['*state_grid'], {}), '(*state_grid)\n', (15019, 15032), True, 'import numpy as np\n'), ((17953, 17967), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (17965, 17967), False, 'from datetime import datetime\n'), ((18029, 18064), 'itertools.product', 'itertools.product', (['*self.state_grid'], {}), '(*self.state_grid)\n', (18046, 18064), False, 'import itertools\n'), ((18671, 18691), 'numpy.zeros', 'np.zeros', (['state_dims'], {}), '(state_dims)\n', (18679, 18691), True, 'import numpy as np\n'), ((18708, 18744), 'numpy.zeros', 'np.zeros', (['(state_dims + (nb_control,))'], {}), '(state_dims + (nb_control,))\n', (18716, 18744), True, 'import numpy as np\n'), ((20526, 20540), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (20538, 20540), False, 'from datetime import datetime\n'), ((20921, 20960), 'numpy.zeros', 'np.zeros', (['((t_fin - t_ini,) + state_dims)'], {}), '((t_fin - t_ini,) + state_dims)\n', (20929, 20960), True, 'import numpy as np\n'), ((20973, 21028), 'numpy.zeros', 'np.zeros', (['((t_fin - t_ini,) + state_dims + (nb_control,))'], {}), '((t_fin - t_ini,) + state_dims + (nb_control,))\n', (20981, 21028), True, 'import numpy as np\n'), ((23202, 23229), 'itertools.product', 'itertools.product', (['*u_grids'], {}), '(*u_grids)\n', (23219, 23229), False, 'import itertools\n'), ((26685, 26699), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (26697, 26699), False, 'from datetime import datetime\n'), ((27015, 27031), 'numpy.zeros', 'np.zeros', (['n_iter'], {}), '(n_iter)\n', (27023, 27031), True, 'import numpy as np\n'), ((34007, 34028), 'numpy.sqrt', 'np.sqrt', (['(1 - phi ** 2)'], {}), '(1 - phi ** 2)\n', (34014, 34028), True, 'import numpy as np\n'), ((34738, 34770), 'numpy.max', 'np.max', (['(-E / (1 + a), -P_rated)'], {}), '((-E / (1 + a), -P_rated))\n', (34744, 34770), True, 'import numpy as np\n'), ((34784, 34826), 'numpy.min', 'np.min', 
(['((E_rated - E) / (1 - a), P_rated)'], {}), '(((E_rated - E) / (1 - a), P_rated))\n', (34790, 34826), True, 'import numpy as np\n'), ((4055, 4078), 'inspect.getargspec', 'inspect.getargspec', (['dyn'], {}), '(dyn)\n', (4073, 4078), False, 'import inspect\n'), ((6069, 6093), 'inspect.getargspec', 'inspect.getargspec', (['cost'], {}), '(cost)\n', (6087, 6093), False, 'import inspect\n'), ((11187, 11212), 'numpy.broadcast_arrays', 'np.broadcast_arrays', (['x', 'y'], {}), '(x, y)\n', (11206, 11212), True, 'import numpy as np\n'), ((12930, 12974), 'numpy.linspace', 'np.linspace', (['*linspace_args[i * 3:i * 3 + 3]'], {}), '(*linspace_args[i * 3:i * 3 + 3])\n', (12941, 12974), True, 'import numpy as np\n'), ((13982, 14026), 'numpy.linspace', 'np.linspace', (['*linspace_args[i * 3:i * 3 + 3]'], {}), '(*linspace_args[i * 3:i * 3 + 3])\n', (13993, 14026), True, 'import numpy as np\n'), ((21637, 21672), 'itertools.product', 'itertools.product', (['*self.state_grid'], {}), '(*self.state_grid)\n', (21654, 21672), False, 'import itertools\n'), ((23672, 23699), 'numpy.inner', 'np.inner', (['J_k_grid', 'w_proba'], {}), '(J_k_grid, w_proba)\n', (23680, 23699), True, 'import numpy as np\n'), ((26885, 26905), 'numpy.zeros', 'np.zeros', (['state_dims'], {}), '(state_dims)\n', (26893, 26905), True, 'import numpy as np\n'), ((27771, 27808), 'numpy.reshape', 'np.reshape', (['self.state_grid[i]', 'shape'], {}), '(self.state_grid[i], shape)\n', (27781, 27808), True, 'import numpy as np\n'), ((28671, 28698), 'numpy.inner', 'np.inner', (['J_k_grid', 'w_proba'], {}), '(J_k_grid, w_proba)\n', (28679, 28698), True, 'import numpy as np\n'), ((32055, 32090), 'itertools.product', 'itertools.product', (['*self.state_grid'], {}), '(*self.state_grid)\n', (32072, 32090), False, 'import itertools\n'), ((32352, 32379), 'numpy.array', 'np.array', (['control_dims_list'], {}), '(control_dims_list)\n', (32360, 32379), True, 'import numpy as np\n'), ((33358, 33379), 'numpy.prod', 'np.prod', (['cdim'], 
{'axis': '(1)'}), '(cdim, axis=1)\n', (33365, 33379), True, 'import numpy as np\n'), ((35094, 35107), 'numpy.abs', 'np.abs', (['P_dev'], {}), '(P_dev)\n', (35100, 35107), True, 'import numpy as np\n'), ((17039, 17070), 'numpy.array', 'np.array', (['[(u_min + u_max) / 2]'], {}), '([(u_min + u_max) / 2])\n', (17047, 17070), True, 'import numpy as np\n'), ((17305, 17336), 'numpy.linspace', 'np.linspace', (['u_min', 'u_max', 'npts'], {}), '(u_min, u_max, npts)\n', (17316, 17336), True, 'import numpy as np\n'), ((25775, 25802), 'numpy.inner', 'np.inner', (['J_k_grid', 'w_proba'], {}), '(J_k_grid, w_proba)\n', (25783, 25802), True, 'import numpy as np\n'), ((19848, 19862), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (19860, 19862), False, 'from datetime import datetime\n'), ((22115, 22129), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (22127, 22129), False, 'from datetime import datetime\n'), ((28885, 28899), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (28897, 28899), False, 'from datetime import datetime\n'), ((17257, 17274), 'numpy.ceil', 'np.ceil', (['n_interv'], {}), '(n_interv)\n', (17264, 17274), True, 'import numpy as np\n')] |
data_dir = '/mnt/lareaulab/cfbuenabadn/SingleCell/data/'
######################
# load_data_short.py #
######################
# Load the Chen et al. single-cell splicing tables and order the cells by
# pseudotime, keeping only cells present in the per-event mRNA table.
print('loading data')
import numpy as np
import pandas as pd
import os
import matplotlib.cm as cm
from matplotlib import pyplot as plt
from scipy import stats as st
import seaborn as sns
import numpy.random as r
import sys
sys.path.insert(0, '/mnt/lareaulab/cfbuenabadn/sc_binary_splicing/utils/')
import splicing_utils as spu
from splicing_utils import *
import single_cell_plots as scp
from single_cell_plots import *
import numpy as np
from sklearn.cluster import KMeans
from sklearn.cluster import AgglomerativeClustering
data_dir = '/mnt/lareaulab/cfbuenabadn/SingleCell/data/'
# load PSI tables
chen_PSI = pd.read_csv(data_dir + 'chen/processed_tables/chen.skipped_exons_psi.tab', sep='\t', index_col=0)
# SJ read tables
chen_read_counts = pd.read_csv(data_dir + 'chen/processed_tables/chen.skipped_exons_SJreads.tab', sep='\t', index_col=0)
# mRNA tables
chen_mrna_counts = pd.read_csv(data_dir + 'chen/processed_tables/chen.mrna_counts.tab', sep='\t', index_col=0)
# mRNA per event tables
mrna_per_event_chen = pd.read_csv(data_dir + 'chen/processed_tables/chen.mrna_counts_per_event.tab', sep='\t', index_col=0)
# read coverage tables
chen_coverage_tab = pd.read_csv(data_dir + 'chen/processed_tables/chen.read_coverage.tab',
                               sep='\t', index_col=0)
# PCA coordinates + pseudotime annotation per cell:
chen_pca = pd.read_csv(data_dir + 'chen/chen.pca.tab', sep='\t', index_col=0)
chen_pca = chen_pca.sort_values('pseudotime')
# NOTE(review): the two assignments below are no-ops (self-assignments),
# kept unchanged for fidelity with the original analysis script.
chen_pca.PC2 = chen_pca.PC2
chen_pca.line_2 = chen_pca.line_2
# keep cells (in pseudotime order) that have per-event mRNA estimates:
chen_index = [x for x in chen_pca.sort_values('pseudotime').index if x in mrna_per_event_chen.columns]
chen_pca = chen_pca.loc[chen_index]
chen_PSI = chen_PSI[chen_index]
mrna_per_event_chen = mrna_per_event_chen[chen_index]
chen_read_counts = chen_read_counts[chen_index]
chen_coverage_tab = chen_coverage_tab.loc[chen_index]
# cell-type subsets (differentiation stages):
chen_ES2i = chen_pca.loc[chen_pca.cell_type == 'ES2i'].index
chen_ES = chen_pca.loc[chen_pca.cell_type == 'ES'].index
chen_Epi = chen_pca.loc[chen_pca.cell_type == 'Epi'].index
chen_MN = chen_pca.loc[chen_pca.cell_type == 'Motor neuron'].index
###
def process_subpop(subpop, psi, mrna, mrna_per_event, reads, cj, psi_min = 0.2, mrna_min=10, reads_min = 0, cell_min = 0.5, nbins=11,
                  filter_cj = True):
    '''Filter the splicing tables down to one cell subpopulation and bin PSI.

    Parameters
    ----------
    subpop : index of cell ids belonging to the subpopulation.
    psi, mrna, mrna_per_event, reads : PSI / mRNA-count / per-event mRNA /
        splice-junction read tables (cells as columns).
    cj : per-cell read-coverage series, indexed by cell id.
    psi_min : intermediate-PSI threshold passed to spu.get_int_events.
    mrna_min, reads_min : per-cell expression / read-count thresholds.
    cell_min : minimum fraction of cells in which an exon must pass filters.
    nbins : number of PSI histogram bins for get_bins_table2.
    filter_cj : whether to apply the coverage (cj) filter in filter_psi.

    Returns
    -------
    (PSI_filtered, good_exons, mrna_filtered, reads_filtered,
     hist_complete, hist_complete_exp)
    '''
    # Exons with intermediate PSI in this subpopulation:
    int_genes, int_exons = spu.get_int_events(psi[subpop], mrna[subpop], psi_min)
    # keep only exons that have a per-event mRNA estimate
    int_exons = [x for x in int_exons if x in mrna_per_event.index]
    PSI_filtered, PSI_mrna_filtered, good_exons, mrna_filtered, reads_filtered = filter_psi(psi[subpop], int_exons,
                                                                          mrna_per_event[subpop], cj.loc[subpop],
                                                                          reads[subpop], mrna_min, reads_min = reads_min,
                                                                          cell_min=cell_min, filter_cj=filter_cj)
    # cells with at least one exon passing the filters
    # (a dead, unused assignment `PSI_good = PSI_filtered[good_cells]` was removed):
    good_cells = PSI_filtered.dropna(axis=1, how='all').columns
    good_subpop = [x for x in subpop if x in good_cells]
    hist_complete, hist_intermediate = scp.get_bins_table2(PSI_filtered[good_subpop], mrna_filtered[good_subpop], nbins)
    hist_complete_exp, hist_intermediate_exp = scp.get_bins_table(PSI_filtered[good_subpop], mrna_filtered[good_subpop])
    return PSI_filtered, good_exons, mrna_filtered, reads_filtered, hist_complete, hist_complete_exp
# Chen
# Cluster the cells into 5 groups in PCA space, then run the splicing
# filters per cluster (clusters visited in order of mean pseudotime;
# psi_min = 0.1, mrna_min = 10, reads_min = 0).
ac = AgglomerativeClustering(n_clusters=5)
ac_clusters = ac.fit_predict(chen_pca[['PC1', 'PC2']])
chen_pca_clust = chen_pca.copy()
chen_pca_clust['AC'] = ac_clusters
chen_clust_filter = []
for cluster in chen_pca_clust.groupby('AC')['pseudotime'].mean().sort_values().index:
    clust_subpop = chen_pca_clust.index[chen_pca_clust.AC == cluster]
    chen_filter = process_subpop(clust_subpop, chen_PSI, chen_mrna_counts, mrna_per_event_chen,
                                  chen_read_counts, chen_coverage_tab['SJ_coverage'], 0.1, 10, 0, cell_min=0.5)
    chen_clust_filter.append(chen_filter)
#####
from sklearn.decomposition import PCA
from scipy.stats import spearmanr
#import rpy2
#import rpy2.robjects.packages as rpackages
#import rpy2.robjects as robjects
#import rpy2.robjects.numpy2ri as rpyn
from statsmodels.stats.multitest import multipletests
#dt = rpy2.robjects.packages.importr('diptest')
from sklearn.cluster import KMeans
from sklearn.cluster import AgglomerativeClustering
from scipy.special import logit
from scipy.special import expit
from sklearn.metrics import adjusted_rand_score
from scipy.stats import combine_pvalues
###############
# For the sake of quantification, filter exons between 0.05 and 0.95
###################
# Chen
# Same clustering/filtering pass as above, but with the relaxed PSI
# threshold (psi_min = 0.05) used for the autocorrelation quantification.
ac = AgglomerativeClustering(n_clusters=5)
ac_clusters = ac.fit_predict(chen_pca[['PC1', 'PC2']])
# figsize(6,4)
# plt.scatter(chen_pca.PC1, chen_pca.PC2, c=ac_clusters)
# plt.show()
chen_pca_clust = chen_pca.copy()
chen_pca_clust['AC'] = ac_clusters
chen_clust_filter_05 = []
for cluster in chen_pca_clust.groupby('AC')['pseudotime'].mean().sort_values().index:
    clust_subpop = chen_pca_clust.index[chen_pca_clust.AC == cluster]
    chen_filter = process_subpop(clust_subpop, chen_PSI, chen_mrna_counts, mrna_per_event_chen,
                                  chen_read_counts, chen_coverage_tab['SJ_coverage'], 0.05, 10, 0, cell_min=0.5)
    chen_clust_filter_05.append(chen_filter)
###################
# %run -i '../../utils/load_data_short.py'
# import sys
# sys.path.insert(0, '../../utils/')
import importlib
# importlib.reload(scp)
# importlib.reload(spu)
# sns.reset_orig()
from scipy.stats import combine_pvalues
# %run -i '../../utils/Kruskal_Wallis_test_functions.py'
from tqdm import tqdm
############
from sklearn.neighbors import NearestNeighbors
from scipy.spatial.distance import pdist
from sklearn.metrics.pairwise import euclidean_distances
print('data loaded')
def get_distance_matrix(pca, k=10):
    '''Gaussian-kernel affinity matrix over the k nearest PCA neighbors.

    For each cell i, a weight w_ij = exp(-d_ij**2 / sigma_i**2) is stored for
    its k nearest neighbors in (PC1, PC2) space, with sigma_i the distance to
    the k-th neighbor; all other entries stay 0. Note the result is not
    symmetrized.
    '''
    coords = pca[['PC1', 'PC2']]
    knn = NearestNeighbors(n_neighbors=k).fit(coords)
    distances, indices = knn.kneighbors(coords)
    cells = list(pca.index)
    n_cells = len(cells)
    W = pd.DataFrame(np.zeros((n_cells, n_cells)), index=cells, columns=cells)
    for i in tqdm(range(n_cells)):
        # adaptive bandwidth: distance to the farthest of the k neighbors
        sigma = np.max(distances[i])
        for j, neighbor in enumerate(indices[i]):
            d = distances[i][j]
            W.loc[cells[i], cells[neighbor]] = np.exp(-(d**2)/(sigma**2))
    return W
def get_signature_matrix(PSI_tab):
    '''Return the PSI table z-scored per column (per cell): (x - mean) / std.'''
    col_mean = PSI_tab.mean()
    col_std = PSI_tab.std()
    return (PSI_tab - col_mean) / col_std
def make_mock_C_scores(norm_PSI, Ws, exon_list, total_cells, mock=100000):
    '''Build a null distribution of C scores by scrambling cell labels.

    For each of `mock` draws, a random exon from `exon_list` is paired with
    a random permutation of `total_cells` cell labels and its C score is
    recomputed; draws yielding a negative score are rejected and retried.

    Returns (mock_ids, null_scores).
    '''
    mock_ids = []
    null_scores = []
    for i in tqdm(range(mock)):
        while True:
            exon = r.choice(exon_list, 1)[0]
            # shuffle which cell each PSI value is attributed to:
            scrambled_cells = r.choice(norm_PSI.columns, total_cells, replace=False)
            shuffled = pd.DataFrame(norm_PSI.loc[exon, scrambled_cells]).T
            # relabel with the original cell ids so get_C aligns with Ws:
            shuffled.columns = norm_PSI.columns
            score = get_C(shuffled.loc[exon], Ws)
            if score >= 0:
                null_scores.append(score)
                mock_ids.append('mock_' + exon + '_' + str(i))
                break
    return mock_ids, null_scores
def get_C(exon_score, W):
    '''Geary's C-style autocorrelation score of `exon_score` under weights W.

    NaN entries are dropped before computing the statistic. Returns 1 - C,
    so larger values indicate stronger autocorrelation of the exon's
    signature across neighboring cells.
    '''
    observed = exon_score.dropna()
    cells = observed.index
    vals = observed.values
    # all pairwise differences, shape (n, n):
    diff = vals.reshape(-1, 1) - vals.reshape(1, -1)
    weights = W.loc[cells, cells]
    numerator = (len(cells) - 1) * ((weights * diff**2).sum().sum())
    denominator = (2 * weights.sum().sum()) * np.sum((observed - observed.mean())**2)
    geary_c = numerator / denominator
    return 1 - geary_c
##############################
# Now the real (production) version
def get_mock_dict(PSI_tab, norm_PSI, Ws, mock=200):
    '''Build null distributions of C scores stratified by exon properties.

    Exons are binned by |0.5 - mean(PSI)| (5 strata, from near-extreme to
    near-0.5 inclusion) and by the fraction of cells with an observed PSI
    (5 strata, 50%-100%). For each non-empty (psi, obs) stratum, `mock`
    scrambled-cell C scores are drawn with make_mock_C_scores.

    Returns
    -------
    mock_dict : nested dict, mock_dict[psi_key][obs_key] -> ndarray of scores
    mock_dict_df : DataFrame with one column per non-empty stratum pair
    '''
    total_cells = len(PSI_tab.columns)
    exons_05_10 = PSI_tab.index[(np.abs(0.5 - PSI_tab.mean(axis = 1))>0.4) & (np.abs(0.5 - PSI_tab.mean(axis = 1))<=0.45)]
    exons_10_20 = PSI_tab.index[(np.abs(0.5 - PSI_tab.mean(axis = 1))>0.3) & (np.abs(0.5 - PSI_tab.mean(axis = 1))<=0.40)]
    exons_20_30 = PSI_tab.index[(np.abs(0.5 - PSI_tab.mean(axis = 1))>0.2) & (np.abs(0.5 - PSI_tab.mean(axis = 1))<=0.30)]
    exons_30_40 = PSI_tab.index[(np.abs(0.5 - PSI_tab.mean(axis = 1))>0.1) & (np.abs(0.5 - PSI_tab.mean(axis = 1))<=0.20)]
    exons_40_50 = PSI_tab.index[(np.abs(0.5 - PSI_tab.mean(axis = 1))<=0.1)]
    exons_obs_50_60 = PSI_tab.index[(PSI_tab.isna().mean(axis=1) <= 0.5) & (PSI_tab.isna().mean(axis=1) > 0.4)]
    exons_obs_60_70 = PSI_tab.index[(PSI_tab.isna().mean(axis=1) <= 0.4) & (PSI_tab.isna().mean(axis=1) > 0.3)]
    exons_obs_70_80 = PSI_tab.index[(PSI_tab.isna().mean(axis=1) <= 0.3) & (PSI_tab.isna().mean(axis=1) > 0.2)]
    exons_obs_80_90 = PSI_tab.index[(PSI_tab.isna().mean(axis=1) <= 0.2) & (PSI_tab.isna().mean(axis=1) > 0.1)]
    exons_obs_90_100 = PSI_tab.index[(PSI_tab.isna().mean(axis=1) <= 0.1)]
    list1 = [exons_05_10, exons_10_20, exons_20_30, exons_30_40, exons_40_50]
    list2 = [exons_obs_50_60, exons_obs_60_70, exons_obs_70_80, exons_obs_80_90, exons_obs_90_100]
    exon_out_list = []
    C_score_list = []
    a = len(list1) * len(list2)
    b = 0
    for lista_1 in list1:
        for lista_2 in list2:
            combination = lista_1 & lista_2
            b += 1
            if len(combination) > 0:
                print(str(b) + '/' + str(a))
                exon_out, C_scores = make_mock_C_scores(norm_PSI, Ws, combination, total_cells, mock=mock)
                exon_out_list.append(exon_out)
                C_score_list.append(C_scores)
            else:
                # BUGFIX: empty strata previously appended nothing, which
                # shifted the alignment between C_score_list and the
                # 5x5 key loop below (and eventually raised IndexError).
                # Keep an (empty) placeholder for every stratum pair.
                exon_out_list.append([])
                C_score_list.append([])
    psi_key = ['psi_05_10', 'psi_10_20', 'psi_20_30', 'psi_30_40', 'psi_40_50']
    # BUGFIX: these labels were corrupted ('<KEY>' placeholders); they must
    # match the obs_* keys looked up in get_C_score_pval_gamma.
    obs_key = ['obs_50_60', 'obs_60_70', 'obs_70_80', 'obs_80_90', 'obs_90_100']
    counter = 0
    mock_dict = {}
    mock_dict_df = pd.DataFrame()
    for pk in psi_key:
        obs_dict = {}
        for ok in obs_key:
            # store as ndarray so the vectorized comparison
            # `random_data > exon_score` in get_C_score_pval_gamma works:
            random_data = np.array(C_score_list[counter])
            obs_dict.update({ok:random_data})
            combined_key = pk + '-' + ok
            if len(random_data) > 0:
                # skip empty strata: a zero-length column cannot be added
                # to a DataFrame that already holds `mock` rows
                mock_dict_df[combined_key] = list(random_data)
            counter += 1
        mock_dict.update({pk:obs_dict})
    return mock_dict, mock_dict_df
#######################
def get_C_score_pval_gamma(PSI_tab, norm_PSI, Ws, exon_list, total_cells, mock_dict):
    '''Empirical p-values for per-exon autocorrelation C scores.

    Each exon's C score is compared against the null distribution in
    `mock_dict` matching the exon's |0.5 - mean PSI| stratum and its
    fraction-of-observed-cells stratum. Exons with a negative C score
    are skipped.

    Parameters
    ----------
    PSI_tab : PSI table (exons x cells), used for the stratum statistics.
    norm_PSI : per-cell standardized PSI table (signature matrix).
    Ws : cell-cell weight (affinity) matrix.
    exon_list : exons to test.
    total_cells : unused here; kept for interface compatibility.
    mock_dict : nested dict of null C scores, mock_dict[psi_bin][obs_bin].

    Returns
    -------
    DataFrame indexed by exon, with columns 'C_score' and 'pval'.
    '''
    exon_out_list = []
    C_list = []
    p_list = []
    for exon in tqdm(exon_list):
        psi_mean = PSI_tab.loc[exon].mean()
        obs_mean = PSI_tab.loc[exon].isna().mean()
        exon_df = norm_PSI.loc[exon] # to make things faster
        exon_score = get_C(exon_df, Ws)
        if exon_score >= 0:
            C_list.append(exon_score)
            exon_out_list.append(exon)
            # pick the |0.5 - mean PSI| stratum of the null distribution:
            if (np.abs(0.5 - psi_mean) > 0.4) and (np.abs(0.5 - psi_mean) <= 0.45):
                pk = 'psi_05_10'
            elif (np.abs(0.5 - psi_mean) > 0.3) and (np.abs(0.5 - psi_mean) <= 0.4):
                pk = 'psi_10_20'
            elif (np.abs(0.5 - psi_mean) > 0.2) and (np.abs(0.5 - psi_mean) <= 0.3):
                pk = 'psi_20_30'
            elif (np.abs(0.5 - psi_mean) > 0.1) and (np.abs(0.5 - psi_mean) <= 0.2):
                pk = 'psi_30_40'
            elif (np.abs(0.5 - psi_mean) <= 0.1):
                pk = 'psi_40_50'
            # pick the fraction-of-observed-cells stratum:
            if (obs_mean <= 0.5) and (obs_mean > 0.4):
                ok = 'obs_50_60'
            elif (obs_mean <= 0.4) and (obs_mean > 0.3):
                ok = 'obs_60_70'
            elif (obs_mean <= 0.3) and (obs_mean > 0.2):
                ok = 'obs_70_80'
            elif (obs_mean <= 0.2) and (obs_mean > 0.1):
                ok = 'obs_80_90'
            elif (obs_mean <= 0.1):
                ok = 'obs_90_100'
            # NOTE(review): pk/ok stay unbound if the exon falls outside all
            # strata (e.g. NaN psi_mean or obs_mean > 0.5) — assumes
            # exon_list was pre-filtered upstream; confirm against caller.
            random_data = mock_dict[pk][ok]
            # empirical one-sided p-value with a +1 pseudocount:
            x = np.sum(random_data > exon_score)
            n = len(random_data)
            pv = (x+1)/(n+1)
            p_list.append(pv)
    pval_df = pd.DataFrame()
    pval_df['C_score'] = C_list
    pval_df['pval'] = p_list
    pval_df.index = exon_out_list
    return pval_df
######################
# Driver: build the cell-cell weight matrix for the Chen data, select
# testable exons, generate the scrambled null and save the results.
print('')
print('#######################')
print('Working on Chen')
print('Total cells: ' + str(len(chen_pca_clust.index)))
print('')
# neighborhood size: sqrt(number of cells), but at least 20
k = round(np.sqrt(len(chen_pca_clust.index)))
k = int(np.max([k, 20]))
print('Working on weight matrix, k = ' + str(k) )
W_chen = get_distance_matrix(chen_pca_clust, k=k)
chen_norm_PSI = get_signature_matrix(chen_PSI)
# exons with intermediate PSI (psi_min = 0.05):
int_genes, int_exons = spu.get_int_events(chen_PSI, chen_mrna_counts, 0.05)
# exons observed in at least 50% of the cells of each AC cluster:
observed_exons_1 = chen_PSI.index[chen_PSI[chen_pca_clust.index[chen_pca_clust.AC==0]].isna().mean(axis=1) <= (1-0.5)]
observed_exons_2 = chen_PSI.index[chen_PSI[chen_pca_clust.index[chen_pca_clust.AC==1]].isna().mean(axis=1) <= (1-0.5)]
observed_exons_3 = chen_PSI.index[chen_PSI[chen_pca_clust.index[chen_pca_clust.AC==2]].isna().mean(axis=1) <= (1-0.5)]
observed_exons_4 = chen_PSI.index[chen_PSI[chen_pca_clust.index[chen_pca_clust.AC==3]].isna().mean(axis=1) <= (1-0.5)]
observed_exons_5 = chen_PSI.index[chen_PSI[chen_pca_clust.index[chen_pca_clust.AC==4]].isna().mean(axis=1) <= (1-0.5)]
# keep exons sufficiently observed in at least 3 of the 5 clusters:
test_exons = []
for exon in int_exons:
    exon_counts = 0
    exon_counts += (exon in observed_exons_1)
    exon_counts += (exon in observed_exons_2)
    exon_counts += (exon in observed_exons_3)
    exon_counts += (exon in observed_exons_4)
    exon_counts += (exon in observed_exons_5)
    if exon_counts >= 3:
        test_exons.append(exon)
print('')
print('Shuffling exons')
chen_mock_dict, mdf = get_mock_dict(chen_PSI.loc[test_exons], chen_norm_PSI.loc[test_exons], W_chen, mock=1000000)
mdf.to_csv('autocorrelation_results/chen.mock_data.tab', sep='\t', header=True, index=True)
pgamma_chen = get_C_score_pval_gamma(chen_PSI.loc[test_exons], chen_norm_PSI.loc[test_exons],
                                      W_chen, test_exons, 488, chen_mock_dict)
pgamma_chen.to_csv('autocorrelation_results/chen.autocorrelation.tab', sep='\t', header=True, index=True)
| [
"numpy.abs",
"single_cell_plots.get_bins_table2",
"sklearn.cluster.AgglomerativeClustering",
"sys.path.insert",
"pandas.read_csv",
"numpy.random.choice",
"tqdm.tqdm",
"numpy.max",
"numpy.exp",
"numpy.sum",
"splicing_utils.get_int_events",
"sklearn.neighbors.NearestNeighbors",
"pandas.DataFra... | [((353, 427), 'sys.path.insert', 'sys.path.insert', (['(0)', '"""/mnt/lareaulab/cfbuenabadn/sc_binary_splicing/utils/"""'], {}), "(0, '/mnt/lareaulab/cfbuenabadn/sc_binary_splicing/utils/')\n", (368, 427), False, 'import sys\n'), ((745, 846), 'pandas.read_csv', 'pd.read_csv', (["(data_dir + 'chen/processed_tables/chen.skipped_exons_psi.tab')"], {'sep': '"""\t"""', 'index_col': '(0)'}), "(data_dir + 'chen/processed_tables/chen.skipped_exons_psi.tab',\n sep='\\t', index_col=0)\n", (756, 846), True, 'import pandas as pd\n'), ((880, 989), 'pandas.read_csv', 'pd.read_csv', (["(data_dir + 'chen/processed_tables/chen.skipped_exons_SJreads.tab')"], {'sep': '"""\t"""', 'index_col': '(0)'}), "(data_dir +\n 'chen/processed_tables/chen.skipped_exons_SJreads.tab', sep='\\t',\n index_col=0)\n", (891, 989), True, 'import pandas as pd\n'), ((1016, 1112), 'pandas.read_csv', 'pd.read_csv', (["(data_dir + 'chen/processed_tables/chen.mrna_counts.tab')"], {'sep': '"""\t"""', 'index_col': '(0)'}), "(data_dir + 'chen/processed_tables/chen.mrna_counts.tab', sep=\n '\\t', index_col=0)\n", (1027, 1112), True, 'import pandas as pd\n'), ((1155, 1264), 'pandas.read_csv', 'pd.read_csv', (["(data_dir + 'chen/processed_tables/chen.mrna_counts_per_event.tab')"], {'sep': '"""\t"""', 'index_col': '(0)'}), "(data_dir +\n 'chen/processed_tables/chen.mrna_counts_per_event.tab', sep='\\t',\n index_col=0)\n", (1166, 1264), True, 'import pandas as pd\n'), ((1301, 1399), 'pandas.read_csv', 'pd.read_csv', (["(data_dir + 'chen/processed_tables/chen.read_coverage.tab')"], {'sep': '"""\t"""', 'index_col': '(0)'}), "(data_dir + 'chen/processed_tables/chen.read_coverage.tab', sep=\n '\\t', index_col=0)\n", (1312, 1399), True, 'import pandas as pd\n'), ((1435, 1501), 'pandas.read_csv', 'pd.read_csv', (["(data_dir + 'chen/chen.pca.tab')"], {'sep': '"""\t"""', 'index_col': '(0)'}), "(data_dir + 'chen/chen.pca.tab', sep='\\t', index_col=0)\n", (1446, 1501), True, 'import pandas as pd\n'), 
((3542, 3579), 'sklearn.cluster.AgglomerativeClustering', 'AgglomerativeClustering', ([], {'n_clusters': '(5)'}), '(n_clusters=5)\n', (3565, 3579), False, 'from sklearn.cluster import AgglomerativeClustering\n'), ((4821, 4858), 'sklearn.cluster.AgglomerativeClustering', 'AgglomerativeClustering', ([], {'n_clusters': '(5)'}), '(n_clusters=5)\n', (4844, 4858), False, 'from sklearn.cluster import AgglomerativeClustering\n'), ((13208, 13260), 'splicing_utils.get_int_events', 'spu.get_int_events', (['chen_PSI', 'chen_mrna_counts', '(0.05)'], {}), '(chen_PSI, chen_mrna_counts, 0.05)\n', (13226, 13260), True, 'import splicing_utils as spu\n'), ((2388, 2442), 'splicing_utils.get_int_events', 'spu.get_int_events', (['psi[subpop]', 'mrna[subpop]', 'psi_min'], {}), '(psi[subpop], mrna[subpop], psi_min)\n', (2406, 2442), True, 'import splicing_utils as spu\n'), ((3216, 3301), 'single_cell_plots.get_bins_table2', 'scp.get_bins_table2', (['PSI_filtered[good_subpop]', 'mrna_filtered[good_subpop]', 'nbins'], {}), '(PSI_filtered[good_subpop], mrna_filtered[good_subpop],\n nbins)\n', (3235, 3301), True, 'import single_cell_plots as scp\n'), ((3345, 3418), 'single_cell_plots.get_bins_table', 'scp.get_bins_table', (['PSI_filtered[good_subpop]', 'mrna_filtered[good_subpop]'], {}), '(PSI_filtered[good_subpop], mrna_filtered[good_subpop])\n', (3363, 3418), True, 'import single_cell_plots as scp\n'), ((10251, 10265), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (10263, 10265), True, 'import pandas as pd\n'), ((11077, 11092), 'tqdm.tqdm', 'tqdm', (['exon_list'], {}), '(exon_list)\n', (11081, 11092), False, 'from tqdm import tqdm\n'), ((12671, 12685), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (12683, 12685), True, 'import pandas as pd\n'), ((13019, 13034), 'numpy.max', 'np.max', (['[k, 20]'], {}), '([k, 20])\n', (13025, 13034), True, 'import numpy as np\n'), ((6416, 6436), 'numpy.max', 'np.max', (['distances[i]'], {}), '(distances[i])\n', (6422, 6436), True, 'import 
numpy as np\n'), ((6075, 6106), 'sklearn.neighbors.NearestNeighbors', 'NearestNeighbors', ([], {'n_neighbors': 'k'}), '(n_neighbors=k)\n', (6091, 6106), False, 'from sklearn.neighbors import NearestNeighbors\n'), ((6570, 6598), 'numpy.exp', 'np.exp', (['(-d ** 2 / sigma ** 2)'], {}), '(-d ** 2 / sigma ** 2)\n', (6576, 6598), True, 'import numpy as np\n'), ((7048, 7102), 'numpy.random.choice', 'r.choice', (['norm_PSI.columns', 'total_cells'], {'replace': '(False)'}), '(norm_PSI.columns, total_cells, replace=False)\n', (7056, 7102), True, 'import numpy.random as r\n'), ((12506, 12538), 'numpy.sum', 'np.sum', (['(random_data > exon_score)'], {}), '(random_data > exon_score)\n', (12512, 12538), True, 'import numpy as np\n'), ((6967, 6989), 'numpy.random.choice', 'r.choice', (['exon_list', '(1)'], {}), '(exon_list, 1)\n', (6975, 6989), True, 'import numpy.random as r\n'), ((7126, 7174), 'pandas.DataFrame', 'pd.DataFrame', (['norm_PSI.loc[exon, scramble_cells]'], {}), '(norm_PSI.loc[exon, scramble_cells])\n', (7138, 7174), True, 'import pandas as pd\n'), ((11433, 11455), 'numpy.abs', 'np.abs', (['(0.5 - psi_mean)'], {}), '(0.5 - psi_mean)\n', (11439, 11455), True, 'import numpy as np\n'), ((11468, 11490), 'numpy.abs', 'np.abs', (['(0.5 - psi_mean)'], {}), '(0.5 - psi_mean)\n', (11474, 11490), True, 'import numpy as np\n'), ((11552, 11574), 'numpy.abs', 'np.abs', (['(0.5 - psi_mean)'], {}), '(0.5 - psi_mean)\n', (11558, 11574), True, 'import numpy as np\n'), ((11587, 11609), 'numpy.abs', 'np.abs', (['(0.5 - psi_mean)'], {}), '(0.5 - psi_mean)\n', (11593, 11609), True, 'import numpy as np\n'), ((11670, 11692), 'numpy.abs', 'np.abs', (['(0.5 - psi_mean)'], {}), '(0.5 - psi_mean)\n', (11676, 11692), True, 'import numpy as np\n'), ((11705, 11727), 'numpy.abs', 'np.abs', (['(0.5 - psi_mean)'], {}), '(0.5 - psi_mean)\n', (11711, 11727), True, 'import numpy as np\n'), ((11788, 11810), 'numpy.abs', 'np.abs', (['(0.5 - psi_mean)'], {}), '(0.5 - psi_mean)\n', (11794, 11810), True, 
'import numpy as np\n'), ((11823, 11845), 'numpy.abs', 'np.abs', (['(0.5 - psi_mean)'], {}), '(0.5 - psi_mean)\n', (11829, 11845), True, 'import numpy as np\n'), ((11906, 11928), 'numpy.abs', 'np.abs', (['(0.5 - psi_mean)'], {}), '(0.5 - psi_mean)\n', (11912, 11928), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue May 11 12:43:36 2021
@author: ziyi
"""
import numpy as np
import json
import networkx as nx
import math
import torch
# CLS and DTW method copy from: https://github.com/aimagelab/perceive-transform-and-act/
class CLS(object):
    """ Coverage weighted by Length Score (CLS).

    Path-similarity metric from https://arxiv.org/abs/1905.12255
    """

    def __init__(self, graph, weight='weight', threshold=3.0):
        """Initializes a CLS object.

        Args:
          graph: networkx graph for the environment.
          weight: networkx edge weight key (str).
          threshold: distance threshold $d_{th}$ (float).
        """
        self.graph = graph
        self.weight = weight
        self.threshold = threshold
        # Pre-compute shortest-path distances between every pair of nodes.
        self.distance = dict(
            nx.all_pairs_dijkstra_path_length(self.graph, weight=self.weight))

    def __call__(self, prediction, reference):
        """Computes the CLS metric.

        Args:
          prediction: list of nodes (str), path predicted by agent.
          reference: list of nodes (str), the ground truth path.

        Returns:
          the CLS between the prediction and reference path (float).
        """

        def path_length(nodes):
            # Sum edge weights along consecutive node pairs; pairs that are
            # not edges of the graph are skipped, and edges without an
            # explicit weight count as 1.0.
            weights = []
            for src, dst in zip(nodes[:-1], nodes[1:]):
                try:
                    weights.append(self.graph.edges[(src, dst)].get(self.weight, 1.0))
                except KeyError:
                    pass
            return np.sum(weights)

        # Coverage: how close, on average, the prediction comes to each
        # reference node (exponentially decayed by the distance threshold).
        per_node_scores = []
        for ref_node in reference:
            nearest = np.min([self.distance[ref_node][pred_node]
                              for pred_node in prediction])
            per_node_scores.append(np.exp(-nearest / self.threshold))
        coverage = np.mean(per_node_scores)

        # Penalize predictions whose length deviates from the expected
        # (coverage-weighted) reference length.
        expected = coverage * path_length(reference)
        score = expected / (expected + np.abs(expected - path_length(prediction)))
        return coverage * score
class DTW(object):
    """ Dynamic Time Warping (DTW) evaluation metrics. """

    def __init__(self, graph, weight='weight', threshold=3.0):
        """Initializes a DTW object.

        Args:
          graph: networkx graph for the environment.
          weight: networkx edge weight key (str).
          threshold: distance threshold $d_{th}$ (float).
        """
        self.graph = graph
        self.weight = weight
        self.threshold = threshold
        # Pre-compute all-pairs shortest-path distances once up front.
        self.distance = dict(
            nx.all_pairs_dijkstra_path_length(self.graph, weight=self.weight))

    def __call__(self, prediction, reference, metric='sdtw'):
        """Computes DTW metrics.

        Args:
          prediction: list of nodes (str), path predicted by agent.
          reference: list of nodes (str), the ground truth path.
          metric: one of ['ndtw', 'sdtw', 'dtw'].

        Returns:
          the DTW between the prediction and reference path (float).
        """
        assert metric in ['ndtw', 'sdtw', 'dtw']
        n_pred, n_ref = len(prediction), len(reference)
        # Classic O(n*m) dynamic-programming table; cell (i, j) holds the
        # minimal cumulative alignment cost of prediction[:i] vs reference[:j].
        table = np.inf * np.ones((n_pred + 1, n_ref + 1))
        table[0][0] = 0
        for i in range(1, n_pred + 1):
            for j in range(1, n_ref + 1):
                cheapest = min(table[i - 1][j],
                               table[i][j - 1],
                               table[i - 1][j - 1])
                step_cost = self.distance[prediction[i - 1]][reference[j - 1]]
                table[i][j] = step_cost + cheapest
        dtw = table[n_pred][n_ref]
        if metric == 'dtw':
            return dtw
        # Normalized DTW: decay by path length and the distance threshold.
        ndtw = np.exp(-dtw / (self.threshold * n_ref))
        if metric == 'ndtw':
            return ndtw
        # Success-weighted DTW: zero unless the agent stopped within the
        # threshold of the reference goal node.
        success = self.distance[prediction[-1]][reference[-1]] <= self.threshold
        return success * ndtw
def load_nav_graphs(scans):
    ''' Load connectivity graph for each scan.

    Reads ../../connectivity/<scan>_connectivity.json for every scan id and
    builds an undirected networkx graph: nodes are viewpoint image ids, each
    edge carries the Euclidean distance between the two viewpoints, and each
    node gets a 'position' attribute with its xyz coordinates.

    Returns a dict mapping scan id -> networkx Graph.
    '''
    def distance(pose1, pose2):
        ''' Euclidean distance between two graph poses '''
        # Entries 3, 7 and 11 are presumably the x/y/z translation components
        # of a flattened 4x4 pose matrix -- TODO confirm against the
        # connectivity file format.
        return ((pose1['pose'][3] - pose2['pose'][3])**2
                + (pose1['pose'][7] - pose2['pose'][7])**2
                + (pose1['pose'][11] - pose2['pose'][11])**2)**0.5
    graphs = {}
    for scan in scans:
        with open('../../connectivity/%s_connectivity.json' % scan) as f:
            G = nx.Graph()
            positions = {}
            data = json.load(f)
            for i, item in enumerate(data):
                if item['included']:
                    for j, conn in enumerate(item['unobstructed']):
                        if conn and data[j]['included']:
                            # NOTE(review): the position is (re)assigned once
                            # per usable neighbor -- the value is identical
                            # each time, but included nodes with no usable
                            # neighbor never get a position entry.
                            positions[item['image_id']] = np.array([item['pose'][3],
                                    item['pose'][7], item['pose'][11]])
                            assert data[j]['unobstructed'][i], 'Graph should be undirected'
                            G.add_edge(
                                item['image_id'], data[j]['image_id'], weight=distance(item, data[j]))
            nx.set_node_attributes(G, values=positions, name='position')
            graphs[scan] = G
    return graphs
def _load_nav_graphs(scans):
    ''' Load connectivity graph for each scan, useful for reasoning about shortest paths.

    Args:
        scans: iterable of scan identifiers (str).

    Returns:
        dict mapping scan id -> {source: {target: shortest-path distance}}
        computed by Dijkstra over each scan's connectivity graph.
    '''
    print('Loading navigation graphs for %d scans' % len(scans))
    graphs = load_nav_graphs(scans)
    # NOTE(review): the previous version also built the full all-pairs *paths*
    # (nx.all_pairs_dijkstra_path) into a local dict that was never used or
    # returned; that redundant O(V^2) computation has been removed.
    distances = {}
    for scan, G in graphs.items():  # compute all shortest path lengths
        distances[scan] = dict(nx.all_pairs_dijkstra_path_length(G))
    return distances
if __name__ == '__main__':
    # Load json
    # NOTE(review): only the first 500 trajectories are kept -- presumably to
    # bound the brute-force path enumeration below; confirm intended.
    with open('R2R_train_aug.json') as f:
        data = json.load(f)[:500]
# =============================================================================
#     # Load connectiviy graph
#     scans = []
#     for traj in data:
#         if traj['scan'] not in scans:
#             scans.append(traj['scan'])
# =============================================================================
    # Hard-coded to a single Matterport scan instead of collecting scans from
    # the data (see the commented-out block above).
    scans = ['5q7pvUzZiYa']
    graphs = load_nav_graphs(scans)
    # One DTW scorer per scan (pre-computes all-pairs shortest distances).
    DTWs = {}
    for scan in scans:
        graph_i = graphs[scan]
        DTWs[scan] = DTW(graph_i)
    for i in range(len(data)):
        scan = data[i]['scan']
        if scan == scans[0]:
            path_gt = data[i]['path']
            viewpoint_st = path_gt[0]
            viewpoint_end = path_gt[-1]
            graph_i = graphs[scan]
            length = len(path_gt)
            # Enumerate every simple path between the ground-truth endpoints,
            # depth-bounded by cutoff=length + 5.
            all_path = nx.all_simple_paths(graph_i, source=viewpoint_st, target=viewpoint_end,cutoff=length + 5)
            for path in all_path:
                # Score each candidate path against the ground truth (sDTW
                # by default) and print the result.
                dtw_score = DTWs[scan](path, path_gt)
                print(scan,[path_gt,path],dtw_score)
| [
"networkx.all_pairs_dijkstra_path",
"networkx.all_simple_paths",
"networkx.Graph",
"numpy.sum",
"numpy.array",
"networkx.set_node_attributes",
"numpy.min",
"json.load",
"networkx.all_pairs_dijkstra_path_length"
] | [((848, 913), 'networkx.all_pairs_dijkstra_path_length', 'nx.all_pairs_dijkstra_path_length', (['self.graph'], {'weight': 'self.weight'}), '(self.graph, weight=self.weight)\n', (881, 913), True, 'import networkx as nx\n'), ((1539, 1551), 'numpy.sum', 'np.sum', (['lens'], {}), '(lens)\n', (1545, 1551), True, 'import numpy as np\n'), ((2432, 2497), 'networkx.all_pairs_dijkstra_path_length', 'nx.all_pairs_dijkstra_path_length', (['self.graph'], {'weight': 'self.weight'}), '(self.graph, weight=self.weight)\n', (2465, 2497), True, 'import networkx as nx\n'), ((4223, 4233), 'networkx.Graph', 'nx.Graph', ([], {}), '()\n', (4231, 4233), True, 'import networkx as nx\n'), ((4280, 4292), 'json.load', 'json.load', (['f'], {}), '(f)\n', (4289, 4292), False, 'import json\n'), ((4935, 4995), 'networkx.set_node_attributes', 'nx.set_node_attributes', (['G'], {'values': 'positions', 'name': '"""position"""'}), "(G, values=positions, name='position')\n", (4957, 4995), True, 'import networkx as nx\n'), ((5375, 5404), 'networkx.all_pairs_dijkstra_path', 'nx.all_pairs_dijkstra_path', (['G'], {}), '(G)\n', (5401, 5404), True, 'import networkx as nx\n'), ((5521, 5557), 'networkx.all_pairs_dijkstra_path_length', 'nx.all_pairs_dijkstra_path_length', (['G'], {}), '(G)\n', (5554, 5557), True, 'import networkx as nx\n'), ((5683, 5695), 'json.load', 'json.load', (['f'], {}), '(f)\n', (5692, 5695), False, 'import json\n'), ((6483, 6577), 'networkx.all_simple_paths', 'nx.all_simple_paths', (['graph_i'], {'source': 'viewpoint_st', 'target': 'viewpoint_end', 'cutoff': '(length + 5)'}), '(graph_i, source=viewpoint_st, target=viewpoint_end,\n cutoff=length + 5)\n', (6502, 6577), True, 'import networkx as nx\n'), ((1602, 1651), 'numpy.min', 'np.min', (['[self.distance[u][v] for v in prediction]'], {}), '([self.distance[u][v] for v in prediction])\n', (1608, 1651), True, 'import numpy as np\n'), ((4557, 4619), 'numpy.array', 'np.array', (["[item['pose'][3], item['pose'][7], item['pose'][11]]"], 
{}), "([item['pose'][3], item['pose'][7], item['pose'][11]])\n", (4565, 4619), True, 'import numpy as np\n')] |
# anvil_mods.py
import pandas as pd
import numpy as np
import shapely
import geopandas as gpd
import quandl
from fred import Fred
# demo api key
quandl.ApiConfig.api_key = "<KEY>"
def formatIndicatorLikeQuandl(indicator, **kwargs):
    """
    Fetch a FRED series and shape it like a QUANDL frame.

    Retrieves the observations for the FRED series id given by ``indicator``,
    keeps only the date/value fields, optionally restricts the rows to
    ``kwargs['start_date']``/``kwargs['end_date']``, indexes the frame by
    date and renames the value column to 'Value' so it matches QUANDL's
    layout downstream.

    Returns the formatted pandas DataFrame, or the raised exception object
    on failure (this module's error-handling convention).
    """
    try:
        # Demo API key; response_type='df' makes FRED return a DataFrame.
        client = Fred(api_key='<KEY>',response_type='df')
        # Keep only the date and value fields of the series observations.
        frame = client.series.observations(indicator).loc[:, ('date', 'value')]
        frame.dropna(inplace=True)
        # FRED returns dates as strings; make them real timestamps.
        frame['date'] = pd.to_datetime(frame['date'])
        if kwargs:
            # Keep only observations inside [start_date, end_date].
            in_period = ((frame['date'] >= kwargs['start_date'])
                         & (frame['date'] <= kwargs['end_date']))
            frame = frame[in_period]
        # Index by date for downstream index-based processing.
        frame.set_index('date', drop=True, inplace=True)
        # Match QUANDL's column naming.
        frame.rename({'value': 'Value'}, axis=1, inplace=True)
    except Exception as e:
        return e
    return frame
def convertGeoJsonGeometry(data):
    """
    Convert a GeoJSON feature collection into a GeoDataFrame.

    Each feature's 'geometry' mapping is converted in place to a shapely
    geometry object, then the feature list is normalized into a flat
    GeoDataFrame whose 'properties.' column prefix is stripped.

    Accepts a GeoJSON-like dict with a 'features' list.
    Returns a geopandas GeoDataFrame, or the raised exception object on
    failure (this module's error-handling convention).
    """
    try:
        # convert features to shapes so it can be converted to GDF
        for d in data['features']:
            d['geometry'] = shapely.geometry.shape(d['geometry'])
        # covnvert to geopandas
        # NOTE(review): pd.io.json.json_normalize is deprecated in newer
        # pandas in favor of pd.json_normalize -- confirm the pinned pandas
        # version before switching.
        geoframe = gpd.GeoDataFrame(pd.io.json.json_normalize(data['features'])) # comes as a geojson feature collection
        # Strip the 'properties.' prefix added by json_normalize.
        # regex=False treats the pattern literally: the old regex default made
        # '.' match any character (so e.g. 'propertiesX' would also have been
        # mangled), and pandas 2.0 changed the default behavior.
        geoframe.columns = geoframe.columns.str.replace('properties.', '', regex=False)
    except Exception as e:
        return e
    return geoframe
def convertSpecGeoJsonGeometry(data, cols):
    """
    Convert selected GeoJSON feature properties into a GeoDataFrame.

    For every feature in ``data['features']`` the geometry mapping is turned
    into a shapely geometry object and only the property names listed in
    ``cols`` are copied over; any other top-level feature keys are ignored.

    Accepts a GeoJSON-like dict with a 'features' list and a list of
    property names to keep as columns.
    Returns a geopandas GeoDataFrame (one row per feature), or the raised
    exception object on failure (this module's error-handling convention).
    """
    try:
        # One record dict per feature, accumulated into a single list.
        records = []
        for feature in data['features']:
            record = {}
            for key, value in feature.items():
                if key == 'geometry':
                    # Raw GeoJSON geometry -> shapely geometry object.
                    record[key] = shapely.geometry.shape(value)
                elif key == 'properties':
                    # Copy only the requested property columns.
                    for col in cols:
                        record[col] = value[col]
                else:
                    # Any other feature key (e.g. 'type') is skipped.
                    pass
            records.append(record)
        # Convert the accumulated records to geopandas.
        geoframe = gpd.GeoDataFrame(records)
    except Exception as e:
        return e
    return geoframe
def getPeriodicIndexMovement(indicator):
    """
    Sum the within-year movement of an economic index.

    Groups the observations by calendar year (taken from the DatetimeIndex)
    and, for each year, sums the first differences of 'Value' -- i.e. the
    net movement of the index within that year.

    Accepts a pandas DataFrame with a DatetimeIndex and a 'Value' column
    (the column name QUANDL uses; a tripwire assert fires if it changes).

    Returns a pandas Series of net movements indexed by year ('Year'), or
    the raised exception object on failure (this module's convention).
    """
    try:
        # trip wire in case col values change in QUANDL
        assert 'Value' in indicator.columns, 'getIndexMovement() Value column value has changed. Edit function definition and try again..'
        # Group by the calendar year of the index WITHOUT mutating the
        # caller's DataFrame (the previous version inserted a 'Year' column
        # into the argument as a side effect).
        years = indicator.index.year.rename('Year')
        # Net movement per year: sum of consecutive differences.
        indicator_mvt = indicator.groupby(years)['Value'].apply(lambda x: x.diff().sum())
    except Exception as e:
        return e
    return indicator_mvt
def getAnnualIndexMovement(indicator):
    """
    Compute the movement of the index between consecutive observations.

    Each row holds its own value minus the next row's value, and the
    DatetimeIndex is reduced to plain calendar years so the result can be
    looked up with ``.get(year)`` downstream.

    Accepts a pandas DataFrame with a DatetimeIndex and a 'Value' column
    (the column name QUANDL uses; a tripwire assert fires if it changes).

    Returns a pandas Series of per-observation movements indexed by year,
    or the raised exception object on failure (this module's convention).
    """
    try:
        # trip wire in case col values change in QUANDL
        assert 'Value' in indicator.columns, 'getIndexMovement() Value column value has changed. Edit function definition and try again..'
        # Difference each observation against the following row.
        movement = indicator.diff(periods=-1)
        # Convert the DatetimeIndex to plain years for .get() lookups.
        movement.index = movement.index.year
    except Exception as e:
        return e
    # Collapse the single-column frame to a Series.
    return movement.squeeze()
def spatialJoinFeatures(parcels, features):
    """
    Spatially join each parcel with the feature dataset
    by intersecting based on geometry.

    Parcels is a geopandas dataframe. The columns in this
    frame should only be [['buff_dist', 'parcel']].
    Features is a geopandas dataframe. Contains only
    geometry and feature names columns.

    The join is performed in chunks of ~100 rows, each processed in
    500-row slices, to keep memory usage bounded; empty intermediate
    results are discarded immediately.

    Returns the spatial join of the two input geopandas dataframes
    (geometry, feature name, parcel name columns; the sjoin-added
    index_right column is dropped), or the raised exception object on
    failure (this module's error-handling convention).
    """
    try:
        assert isinstance(parcels, gpd.GeoDataFrame), 'spatialJoinAmmenities first argument must be a geodataframe. You passed an %r' % type(parcels)
        assert isinstance(features, gpd.GeoDataFrame), 'spatialJoinAmmenities second argument must be a geodataframe. You passed an %r' % type(features)
        # make a container
        parcels_w_features = gpd.GeoDataFrame()
        # chunk the data to make memory usage more efficient
        # NOTE(review): np.round(size/100) is 0 for fewer than ~50 rows, and
        # np.array_split raises on 0 sections (the error is then *returned*,
        # not raised) -- confirm small inputs are not expected here.
        for chunk in np.array_split(parcels, np.round(parcels.index.size/100)):
            increment = 500
            iter1 = 0
            iter2 = increment
            size = chunk.index.size
            # convert chunk back to GeoDataFrame for sjoin operation
            # (array_split returns plain DataFrames)
            chunk = gpd.GeoDataFrame(chunk)
            if 'buff_dist' in chunk.columns: # set the right geometry in case of buffer distance
                chunk = chunk.set_geometry('buff_dist')
            # iterate through each chunk in [iter1:iter2) slices
            while iter1 < size:
                # do remaining rows
                if iter2 > size:
                    temp_df = gpd.tools.sjoin(chunk.iloc[iter1:], features)
                # iterate through sequence iter1:iter2 to use memory more efficiently
                else:
                    temp_df = gpd.tools.sjoin(chunk.iloc[iter1:iter2], features)
                # save memory if empty
                if temp_df.empty:
                    del(temp_df)
                else: # combine parcels_w_features and temp_df
                    parcels_w_features = pd.concat([parcels_w_features, temp_df])
                    # free up memory
                    del(temp_df)
                # increment iterators
                iter1=iter2
                iter2+=increment
                # break loop when finished
                if iter1 > size:
                    break
    except Exception as e:
        return e
    # return the result w/o the index_right column added with concat
    return parcels_w_features.drop('index_right', axis=1)
def getCountForSpatialJoin(search_parcels, record_parcels, search_col1, search_col2):
    """
    Count how many features were joined to each parcel.

    For every unique parcel in ``search_parcels``, the matching rows of
    ``record_parcels`` are de-duplicated on ``search_col2`` (the address
    column, so one record per location) and the entries of ``search_col1``
    are tallied; a single entry may name several features separated by ';'
    and contributes one count per name.

    Args:
        search_parcels: pd.Series of parcel identifiers to count for.
        record_parcels: gpd.GeoDataFrame of spatial-join records with
            'parcel', ``search_col1`` and ``search_col2`` columns.
        search_col1: str, column holding the feature name(s) to match.
        search_col2: str, address column used to drop duplicate records.

    Returns:
        pd.Series of counts indexed by parcel, or the raised exception
        object on failure (this module's error-handling convention).
    """
    try:
        assert isinstance(search_col1, str), 'Param search_col1 should be type str. Got %r instead.' % type(search_col1)
        assert isinstance(search_col2, str), 'Param search_col2 should be type str. Got %r instead.' % type(search_col2)
        counts = {}
        for parcel in search_parcels.unique():
            # Rows for this parcel, de-duplicated by address so repeated
            # records of the same location count only once.
            matches = record_parcels[record_parcels['parcel'] == parcel]
            names = matches.loc[:, (search_col1, search_col2)].drop_duplicates(search_col2)[search_col1]
            # ';' delimits multiple feature names within one entry; each
            # name contributes one to the tally (an entry with no ';'
            # contributes exactly one).
            counts[parcel] = sum(len(name.split(';')) for name in names)
    except Exception as e:
        return e
    # apply count to features nearby col in parcels_w_hist and return series
    return pd.Series(counts)
def getDateIntvlByParcel(parcels):
    """
    Calculate the time between consecutive sales of each parcel.

    For every parcel with more than one sale, the sale dates are sorted
    newest-first and each sale is assigned the time elapsed since the
    previous (older) sale; parcels with a single sale keep NaT.

    Args:
        parcels: pd.DataFrame with 'parcel' and 'Sale Date' columns.
            NOTE(review): rows are addressed via .iloc using the frame's
            index labels, so this assumes a default RangeIndex (0..n-1)
            -- confirm against callers.

    Returns:
        pd.Series of timedeltas (NaT where no interval exists), aligned
        positionally with `parcels` and ready to be assigned to a new
        column; or the raised exception object on failure.
    """
    try:
        assert isinstance(parcels, pd.DataFrame), 'getDateIntvlByParcel argument takes a pd.DataFrame. Got %r instead' % type(parcels)
        assert all([i in parcels.columns for i in ['parcel', 'Sale Date']]), 'getDateIntvlByParcel parcels argument must contain parcel and Sale Date columns.'
        # Container of NaTs; an explicit timedelta dtype so the Timedelta
        # assignments below never trigger an incompatible-dtype upcast (the
        # previous datetime64 container relied on deprecated implicit
        # casting).
        timeintvls = pd.Series([pd.NaT for i in parcels.index], dtype='timedelta64[ns]')
        # iterate through each unique parcel number
        for parcel in parcels['parcel'].unique():
            # all sales of this parcel, newest first
            search_df = parcels[parcels['parcel'] == parcel]['Sale Date'].sort_values(ascending=False)
            # need at least two sales to have an interval
            if search_df.index.size > 1:
                # time elapsed between each sale and the next older one
                intervals = search_df - search_df.shift(-1)
                # .items() replaces Series.iteritems(), which was removed in
                # pandas 2.0
                for idx, interval in intervals.items():
                    timeintvls.iloc[idx] = interval
    except Exception as e:
        return e
    # return the time intervals as a pd.Series
    return timeintvls
def getRecentIdxMovement(product, source):
    """
    Computes the most recent year's movement in the economic index data
    provided.

    Args:
        product: str, unique ID of the data product (e.g. 'FMAC/HPI').
        source: str, one of the two main data sources, 'quandl' or 'fred'.

    Returns:
        numpy float (rounded to 3 places) of the index's change for the
        most recent year found in the last ~730 days of data, 0.0 if the
        product no longer returns data, or the raised exception object on
        failure. Note for early year months (Jan-Mar), this function does
        not yet account for early months or lack of current year data.
    """
    try:
        # make sure strings were passed; the messages must format the
        # checked arguments themselves (the old code formatted an undefined
        # name `index`, turning a failed assert into a NameError)
        assert isinstance(product, str), 'getRecentIdxMovement takes a str argument. Got type %r instead.' % type(product)
        assert isinstance(source, str), 'getRecentIdxMovement takes a str argument. Got type %r instead.' % type(source)
        # set the search date range: the last two years
        today = pd.to_datetime('today')
        past = today - pd.Timedelta('730 days')
        # check the source
        if source == 'quandl':
            # special products require more formatting
            if product == 'FMAC/FIX30YR':
                index = quandl.get(product, start_date=past, end_date=today).loc[:,'US Interest Rate']
            # special products require more formatting
            elif product == 'FMAC/HPI':
                index = quandl.get(product, start_date=past, end_date=today).loc[:,'United States seasonaly adjusted']
            # get the raw data
            else:
                index = quandl.get(product, start_date=past, end_date=today)
        elif source == 'fred':
            index = formatIndicatorLikeQuandl(product, **{'start_date':past, 'end_date':today})
        else:
            # if incorrect source, send a msg back to caller
            raise ValueError('Got an unrecognized source argument: %r. Should be either quandl or fred.' % source)
        # check for empty frame...index no longer tracked or no longer available
        if index.empty:
            latest = 0.0
        # format the raw data, sort descending, and select the first element
        else:
            # special products require more formatting
            if product == 'FMAC/FIX30YR':
                index = pd.DataFrame(index).rename({'US Interest Rate':'Value'}, axis=1)
                latest = getPeriodicIndexMovement(index).sort_index(ascending=False).iloc[0]
            elif product == 'FMAC/HPI':
                index = pd.DataFrame(index).rename({'United States seasonaly adjusted':'Value'}, axis=1)
                latest = getPeriodicIndexMovement(index).sort_index(ascending=False).iloc[0]
            else:
                latest = getPeriodicIndexMovement(index).sort_index(ascending=False).iloc[0]
    except Exception as e:
        print('An error occurred. Did you pass the correct product and source info?')
        return e
    # return a rounded figure
    return np.round(latest, 3)
| [
"pandas.Series",
"pandas.DataFrame",
"numpy.round",
"pandas.Timedelta",
"pandas.io.json.json_normalize",
"fred.Fred",
"pandas.concat",
"quandl.get",
"shapely.geometry.shape",
"geopandas.GeoDataFrame",
"geopandas.tools.sjoin",
"pandas.to_datetime"
] | [((10649, 10666), 'pandas.Series', 'pd.Series', (['counts'], {}), '(counts)\n', (10658, 10666), True, 'import pandas as pd\n'), ((15218, 15237), 'numpy.round', 'np.round', (['latest', '(3)'], {}), '(latest, 3)\n', (15226, 15237), True, 'import numpy as np\n'), ((680, 721), 'fred.Fred', 'Fred', ([], {'api_key': '"""<KEY>"""', 'response_type': '"""df"""'}), "(api_key='<KEY>', response_type='df')\n", (684, 721), False, 'from fred import Fred\n'), ((1006, 1039), 'pandas.to_datetime', 'pd.to_datetime', (["indicator['date']"], {}), "(indicator['date'])\n", (1020, 1039), True, 'import pandas as pd\n'), ((3946, 3975), 'geopandas.GeoDataFrame', 'gpd.GeoDataFrame', (['all_parcels'], {}), '(all_parcels)\n', (3962, 3975), True, 'import geopandas as gpd\n'), ((7031, 7049), 'geopandas.GeoDataFrame', 'gpd.GeoDataFrame', ([], {}), '()\n', (7047, 7049), True, 'import geopandas as gpd\n'), ((11357, 11399), 'pandas.Series', 'pd.Series', (['[pd.NaT for i in parcels.index]'], {}), '([pd.NaT for i in parcels.index])\n', (11366, 11399), True, 'import pandas as pd\n'), ((13179, 13202), 'pandas.to_datetime', 'pd.to_datetime', (['"""today"""'], {}), "('today')\n", (13193, 13202), True, 'import pandas as pd\n'), ((2106, 2143), 'shapely.geometry.shape', 'shapely.geometry.shape', (["d['geometry']"], {}), "(d['geometry'])\n", (2128, 2143), False, 'import shapely\n'), ((2216, 2259), 'pandas.io.json.json_normalize', 'pd.io.json.json_normalize', (["data['features']"], {}), "(data['features'])\n", (2241, 2259), True, 'import pandas as pd\n'), ((7156, 7190), 'numpy.round', 'np.round', (['(parcels.index.size / 100)'], {}), '(parcels.index.size / 100)\n', (7164, 7190), True, 'import numpy as np\n'), ((7422, 7445), 'geopandas.GeoDataFrame', 'gpd.GeoDataFrame', (['chunk'], {}), '(chunk)\n', (7438, 7445), True, 'import geopandas as gpd\n'), ((13226, 13250), 'pandas.Timedelta', 'pd.Timedelta', (['"""730 days"""'], {}), "('730 days')\n", (13238, 13250), True, 'import pandas as pd\n'), ((3420, 3456), 
'shapely.geometry.shape', 'shapely.geometry.shape', (['feature[key]'], {}), '(feature[key])\n', (3442, 3456), False, 'import shapely\n'), ((7785, 7830), 'geopandas.tools.sjoin', 'gpd.tools.sjoin', (['chunk.iloc[iter1:]', 'features'], {}), '(chunk.iloc[iter1:], features)\n', (7800, 7830), True, 'import geopandas as gpd\n'), ((7969, 8019), 'geopandas.tools.sjoin', 'gpd.tools.sjoin', (['chunk.iloc[iter1:iter2]', 'features'], {}), '(chunk.iloc[iter1:iter2], features)\n', (7984, 8019), True, 'import geopandas as gpd\n'), ((8230, 8270), 'pandas.concat', 'pd.concat', (['[parcels_w_features, temp_df]'], {}), '([parcels_w_features, temp_df])\n', (8239, 8270), True, 'import pandas as pd\n'), ((13803, 13855), 'quandl.get', 'quandl.get', (['product'], {'start_date': 'past', 'end_date': 'today'}), '(product, start_date=past, end_date=today)\n', (13813, 13855), False, 'import quandl\n'), ((13435, 13487), 'quandl.get', 'quandl.get', (['product'], {'start_date': 'past', 'end_date': 'today'}), '(product, start_date=past, end_date=today)\n', (13445, 13487), False, 'import quandl\n'), ((14524, 14543), 'pandas.DataFrame', 'pd.DataFrame', (['index'], {}), '(index)\n', (14536, 14543), True, 'import pandas as pd\n'), ((13634, 13686), 'quandl.get', 'quandl.get', (['product'], {'start_date': 'past', 'end_date': 'today'}), '(product, start_date=past, end_date=today)\n', (13644, 13686), False, 'import quandl\n'), ((14747, 14766), 'pandas.DataFrame', 'pd.DataFrame', (['index'], {}), '(index)\n', (14759, 14766), True, 'import pandas as pd\n')] |
# coding: utf-8
import argparse
import os
import pickle
import numpy as np
from supervised_model_common import *
def pickle_load(file):
    """Deserialize and return the object pickled in *file*.

    :param file: path to a binary pickle file
    :return: the unpickled Python object
    """
    handle = open(file, 'rb')
    try:
        return pickle.load(handle)
    finally:
        handle.close()
if __name__ == '__main__':
    # ---- command-line arguments -------------------------------------------
    parser = argparse.ArgumentParser()
    parser.add_argument('--data_prefix', type=str)
    parser.add_argument('--batchsize', '-b', type=int, default=100,
                        help='Number of examples in each mini-batch')
    parser.add_argument('--lr', '-l', type=float, default=0.001)
    parser.add_argument('--gpu', '-g', type=int, default=0,
                        help='GPU ID (negative value indicates CPU)')
    parser.add_argument('--out', '-o', default='result',
                        help='Directory to output the result')
    parser.add_argument('--use_cudnn', '-c', type=int, default=0,
                        help='Use CuDNN if the value is 1')
    args = parser.parse_args()
    if not os.path.exists(args.out):
        os.mkdir(args.out)
    if args.use_cudnn == 0:
        chainer.global_config.use_cudnn = 'never'
    # `xp` is the array backend: cupy on GPU, numpy on CPU
    if args.gpu >= 0:
        chainer.cuda.get_device_from_id(args.gpu).use()
        xp = cuda.cupy
    else:
        xp = np
    # ---- load pre-dumped train/test/val splits ----------------------------
    # each dump contains (word1 ids, word2 ids, dependency paths, labels)
    print('Data reading...')
    with open(args.data_prefix + '/train_data.dump', 'rb') as f:
        train_data = pickle.load(f)
    train_w1s, train_w2s, train_paths, train_labels = train_data
    train_paths = np.array(train_paths)
    with open(args.data_prefix + '/test_data.dump', 'rb') as f:
        test_data = pickle.load(f)
    test_w1s, test_w2s, test_paths, test_labels = test_data
    with open(args.data_prefix + '/val_data.dump', 'rb') as f:
        val_data = pickle.load(f)
    val_w1s, val_w2s, val_paths, val_labels = val_data
    # relations.txt maps relation name -> integer class id (tab separated)
    with open(args.data_prefix + '/relations.txt', 'r') as f:
        lines = f.read().strip().split('\n')
    classes = {line.split('\t')[0]: int(line.split('\t')[1]) for line in lines}
    n_classes = len(classes)
    train_labels = np.array([classes[i] for i in train_labels])
    test_labels = np.array([classes[i] for i in test_labels])
    val_labels = np.array([classes[i] for i in val_labels])
    val_w1s = xp.array(val_w1s, dtype=xp.int32)
    val_w2s = xp.array(val_w2s, dtype=xp.int32)
    val_paths = list(val_paths)
    test_w1s = xp.array(test_w1s, dtype=xp.int32)
    test_w2s = xp.array(test_w2s, dtype=xp.int32)
    test_paths = list(test_paths)
    print('Data are read!')
    # ---- vocabulary indices and pretrained GloVe embeddings ---------------
    print('Model building...')
    lemma_index = pickle_load('work/glove_index.dump')
    pos_index = pickle_load('work/pos_index.dump')
    dep_index = pickle_load('work/dep_index.dump')
    dir_index = pickle_load('work/dir_index.dump')
    lemma_embed = np.load('work/glove50.npy')
    n_lemma = len(lemma_index)
    n_pos = len(pos_index)
    n_dep = len(dep_index)
    n_dir = len(dir_index)
    # hyper-parameter grid searched below
    max_val_score = 0
    dropout_rate = [0.0, 0.2, 0.4]
    n_layers = [2]
    # truncate/create the three log files before the run
    f = open(args.out + '/log.txt', 'w')
    f.close()
    val_f = open(args.out + '/val_log.txt', 'w')
    val_f.close()
    test_f = open(args.out + '/test_score.txt', 'w')
    test_f.close()
    test_score = 0
    # ---- grid search over (n_layers, dropout) with early stopping --------
    for layer_num in n_layers:
        for d_r in dropout_rate:
            lstm = BaseLSTM(n_layers=layer_num, emb_size=60, n_units=60, dropout=0,
                            n_lemma_vocab=n_lemma, lemma_emb_size=50, lemma_embed_initial=lemma_embed,
                            n_pos_vocab=n_pos, pos_emb_size=4,
                            n_dep_vocab=n_dep, dep_emb_size=5,
                            n_dir_vocab=n_dir, dir_emb_size=1
                            )
            path_encoder = Path_Encoder(lstm)
            path_based = Path_Based(path_encoder, class_n=n_classes, dropout=d_r)
            model = Classifier_Path_Based(path_based)
            if args.gpu >= 0:
                chainer.cuda.get_device_from_id(args.gpu).use()
                model.to_gpu()
            optimizer = optimizers.Adam(args.lr)
            optimizer.setup(model)
            n_train = len(train_w1s)
            test_score = 0
            # c_val counts epochs since the last validation improvement;
            # training stops after 7 epochs without improvement
            c_val = 0
            c_max_val_score = 0
            e = 0
            while c_val <= 7:
                # shuffle training examples each epoch
                perm = np.random.permutation(n_train)
                for i in range(0, n_train, args.batchsize):
                    c_w1s = xp.array(train_w1s[perm[i:i + args.batchsize]], dtype=xp.int32)
                    c_w2s = xp.array(train_w2s[perm[i:i + args.batchsize]], dtype=xp.int32)
                    c_paths = train_paths[perm[i:i + args.batchsize]]
                    c_labels = xp.array(train_labels[perm[i:i + args.batchsize]], dtype=xp.int32)
                    loss = model(c_paths, c_labels)
                    optimizer.target.cleargrads()
                    loss.backward()
                    optimizer.update()
                    cur_result = '# epoch = {}, minibatch = {}/{}, loss = {}'.format(e + 1,
                                                                                     int(i / args.batchsize) + 1,
                                                                                     int(n_train / args.batchsize) + 1,
                                                                                     loss.data
                                                                                     )
                    with open(args.out + '/log.txt', 'a') as f:
                        f.write('dropout: {} n_layer: {},'.format(str(d_r), str(layer_num)) + cur_result + '\n')
                # evaluate on the validation split once per epoch
                current_val_score = path_based.evaluate(val_paths, val_labels)
                if current_val_score > c_max_val_score:
                    c_val = 0
                    c_max_val_score = current_val_score
                c_val += 1
                e += 1
                with open(args.out + '/val_log.txt', 'a') as f:
                    f.write('{}\t{}'.format(str(d_r), str(layer_num)) + '\t' + str(current_val_score) + '\n')
                # keep the best model across the whole grid search and
                # record its test-set score
                if current_val_score > max_val_score:
                    max_val_score = current_val_score
                    serializers.save_npz(args.out + '/best.model', path_based)
                    test_score = path_based.evaluate(test_paths, test_labels)
            with open(args.out + '/test_score.txt', 'a') as f:
                f.write('dropout: {}, n_layers: {}\ttest_score: {}\n'.format(str(d_r), str(layer_num),
                                                                             str(test_score)))
| [
"os.path.exists",
"argparse.ArgumentParser",
"pickle.load",
"numpy.array",
"os.mkdir",
"numpy.load",
"numpy.random.permutation"
] | [((246, 271), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (269, 271), False, 'import argparse\n'), ((1421, 1442), 'numpy.array', 'np.array', (['train_paths'], {}), '(train_paths)\n', (1429, 1442), True, 'import numpy as np\n'), ((1997, 2041), 'numpy.array', 'np.array', (['[classes[i] for i in train_labels]'], {}), '([classes[i] for i in train_labels])\n', (2005, 2041), True, 'import numpy as np\n'), ((2060, 2103), 'numpy.array', 'np.array', (['[classes[i] for i in test_labels]'], {}), '([classes[i] for i in test_labels])\n', (2068, 2103), True, 'import numpy as np\n'), ((2121, 2163), 'numpy.array', 'np.array', (['[classes[i] for i in val_labels]'], {}), '([classes[i] for i in val_labels])\n', (2129, 2163), True, 'import numpy as np\n'), ((2716, 2743), 'numpy.load', 'np.load', (['"""work/glove50.npy"""'], {}), "('work/glove50.npy')\n", (2723, 2743), True, 'import numpy as np\n'), ((188, 202), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (199, 202), False, 'import pickle\n'), ((945, 969), 'os.path.exists', 'os.path.exists', (['args.out'], {}), '(args.out)\n', (959, 969), False, 'import os\n'), ((979, 997), 'os.mkdir', 'os.mkdir', (['args.out'], {}), '(args.out)\n', (987, 997), False, 'import os\n'), ((1323, 1337), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (1334, 1337), False, 'import pickle\n'), ((1528, 1542), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (1539, 1542), False, 'import pickle\n'), ((1686, 1700), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (1697, 1700), False, 'import pickle\n'), ((4203, 4233), 'numpy.random.permutation', 'np.random.permutation', (['n_train'], {}), '(n_train)\n', (4224, 4233), True, 'import numpy as np\n')] |
__copyright__ = "Copyright (c) 2020 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
import base64
import os
import struct
import urllib.parse
import urllib.request
import zlib
import numpy as np
from . import BaseDriver
from .helper import guess_mime, array2pb, pb2array
class BaseConvertDriver(BaseDriver):
    """Base class for drivers that fill one document attribute from another.

    Subclasses implement :meth:`convert` to compute the value of the
    ``target`` attribute for a single document.
    """

    def __init__(self, target: str, override: bool = False, *args, **kwargs):
        """
        :param target: name of the document attribute to set
        :param override: when True, overwrite the target even if already set
        :param args:
        :param kwargs:
        """
        super().__init__(*args, **kwargs)
        self.target = target
        self.override = override

    def __call__(self, *args, **kwargs):
        for doc in self.docs:
            # skip documents whose target is already populated,
            # unless overriding was requested
            already_set = bool(getattr(doc, self.target))
            if already_set and not self.override:
                continue
            self.convert(doc)

    def convert(self, d):
        """Fill ``self.target`` on document *d*; subclasses must implement."""
        raise NotImplementedError
class MIMEDriver(BaseConvertDriver):
    """Guessing the MIME type based on the doc content

    Can be used before/after :class:`DocCraftDriver` to fill MIME type
    """

    def __init__(self, target='mime', default_mime: str = 'application/octet-stream', *args, **kwargs):
        """
        :param default_mime: for text documents without a specific subtype, text/plain should be used.
            Similarly, for binary documents without a specific or known subtype, application/octet-stream should be used.
        """
        super().__init__(target, *args, **kwargs)
        self.default_mime = default_mime
        # buffer sniffing is only possible when the optional `python-magic`
        # package (libmagic binding) is importable
        self.buffer_sniff = False
        try:
            import magic
            self.buffer_sniff = True
        except (ImportError, ModuleNotFoundError):
            self.logger.warning(f'can not sniff the MIME type '
                                f'MIME sniffing requires pip install "jina[http]" '
                                f'and brew install libmagic (Mac)/ apt-get install libmagic1 (Linux)')

    def convert(self, d):
        import mimetypes
        m_type = d.mime_type
        # normalise a bare subtype (e.g. "png") into a full MIME type
        if m_type and (m_type not in mimetypes.types_map.values()):
            m_type = mimetypes.guess_type(f'*.{m_type}')[0]
        if not m_type:  # for ClientInputType=PROTO, d_type could be empty
            d_type = d.WhichOneof('content')
            if d_type == 'buffer':
                d_content = getattr(d, d_type)
                if self.buffer_sniff:
                    # best-effort sniff of the raw bytes via libmagic
                    try:
                        import magic
                        m_type = magic.from_buffer(d_content, mime=True)
                    except Exception as ex:
                        self.logger.warning(f'can not sniff the MIME type due to the exception {ex}')
            # NOTE(review): a URI-derived type takes precedence over a
            # successful buffer sniff here — looks intentional, confirm
            if d.uri:
                m_type = guess_mime(d.uri)
        if m_type:
            d.mime_type = m_type
        else:
            # nothing could be determined; fall back to the configured default
            d.mime_type = self.default_mime
            self.logger.warning(f'can not determine the MIME type, set to default {self.default_mime}')
class Buffer2NdArray(BaseConvertDriver):
    """Fill a document's ``blob`` field from its raw ``buffer`` bytes."""

    def __init__(self, target='blob', *args, **kwargs):
        super().__init__(target, *args, **kwargs)

    def convert(self, d):
        # interpret the raw bytes as a flat numpy array, then store it as
        # a protobuf ndarray message
        as_array = np.frombuffer(d.buffer)
        d.blob.CopyFrom(array2pb(as_array))
class Blob2PngURI(BaseConvertDriver):
    """Simple DocCrafter used in :command:`jina hello-world`:
    renders a document's ``blob`` (a flat grayscale array) as a base64
    PNG data URI stored in ``uri``."""

    def __init__(self, target='uri', width: int = 28, height: int = 28, *args, **kwargs):
        super().__init__(target, *args, **kwargs)
        self.width = width
        self.height = height

    @staticmethod
    def _chunk(tag, payload):
        """Assemble one PNG chunk: 4-byte length, tag+payload, CRC32."""
        body = tag + payload
        return (struct.pack('!I', len(payload)) +
                body +
                struct.pack('!I', 0xFFFFFFFF & zlib.crc32(body)))

    def convert(self, d):
        values = pb2array(d.blob)
        # invert intensities and expand each grayscale value into an
        # opaque RGBA pixel; the array is traversed back-to-front
        rgba = bytearray()
        for v in values[::-1]:
            inv = 255 - int(v)
            rgba.extend((inv, inv, inv, 255))
        row_bytes = self.width * 4
        # walk the rows bottom-up, prefixing each scanline with PNG
        # filter byte 0 (no filtering)
        scanlines = b''.join(
            b'\x00' + rgba[start:start + row_bytes]
            for start in range((self.height - 1) * row_bytes, -1, -row_bytes))
        png_bytes = b''.join([
            b'\x89PNG\r\n\x1a\n',
            self._chunk(b'IHDR', struct.pack('!2I5B', self.width, self.height, 8, 6, 0, 0, 0)),
            self._chunk(b'IDAT', zlib.compress(scanlines, 9)),
            self._chunk(b'IEND', b'')])
        d.uri = 'data:image/png;base64,' + base64.b64encode(png_bytes).decode()
class URI2Buffer(BaseConvertDriver):
    """Fill ``buffer`` by fetching the document's URI: a remote URL,
    a data URI, or a local file path."""

    def __init__(self, target='buffer', *args, **kwargs):
        super().__init__(target, *args, **kwargs)

    def convert(self, d):
        scheme = urllib.parse.urlparse(d.uri).scheme
        if scheme in ('http', 'https', 'data'):
            # spoof a browser user agent; some servers reject urllib's default
            req = urllib.request.Request(d.uri, headers={'User-Agent': 'Mozilla/5.0'})
            resp = urllib.request.urlopen(req)
            d.buffer = resp.read()
        elif os.path.exists(d.uri):
            with open(d.uri, 'rb') as fp:
                d.buffer = fp.read()
        else:
            raise FileNotFoundError(f'{d.uri} is not a URL or a valid local path')
class URI2DataURI(URI2Buffer):
    def __init__(self, target='uri', charset: str = 'utf-8', base64: bool = False, *args, **kwargs):
        """ Convert file path doc to data uri doc. Internally it first reads into buffer and then converts it to data URI.

        :param charset: charset may be any character set registered with IANA
        :param base64: used to encode arbitrary octet sequences into a form that satisfies the rules of 7bit. Designed to be efficient for non-text 8 bit and binary data. Sometimes used for text data that frequently uses non-US-ASCII characters.
        :param args:
        :param kwargs:
        """
        super().__init__(target, *args, **kwargs)
        self.charset = charset
        self.base64 = base64

    def __call__(self, *args, **kwargs):
        # first fill the buffer from the URI, then rewrite the URI itself
        super().__call__()
        for d in self.docs:
            if d.uri and not self.override:
                continue
            if d.uri and urllib.parse.urlparse(d.uri).scheme == 'data':
                # already a data URI; leave untouched
                pass
            else:
                d.uri = self.make_datauri(d.mime_type, d.buffer)

    def make_datauri(self, mimetype, data, binary=True):
        """Build an RFC 2397 ``data:`` URI string from *data*.

        :param mimetype: MIME type for the URI header
        :param data: payload; ``bytes`` when *binary* is True, ``str`` otherwise
        :param binary: whether *data* is raw bytes (True) or text (False)
        :return: the assembled data URI as ``str``
        """
        parts = ['data:', mimetype]
        if self.charset is not None:
            parts.extend([';charset=', self.charset])
        if self.base64:
            parts.append(';base64')
            from base64 import encodebytes as encode64
            if not binary:
                # BUGFIX: base64.encodebytes requires bytes and returns bytes.
                # Previously str input raised TypeError, and the un-decoded
                # bytes result would have broken the final ''.join(parts).
                data = data.encode(self.charset)
            encoded_data = encode64(data).decode(self.charset).replace('\n', '').strip()
        else:
            from urllib.parse import quote_from_bytes, quote
            if binary:
                encoded_data = quote_from_bytes(data)
            else:
                encoded_data = quote(data)
        parts.extend([',', encoded_data])
        return ''.join(parts)
class Buffer2URI(URI2DataURI):
    """Fill ``uri`` with a data URI built from the document's raw buffer."""

    def convert(self, d):
        # documents already carrying a data URI are left untouched
        if urllib.parse.urlparse(d.uri).scheme != 'data':
            d.uri = self.make_datauri(d.mime_type, d.buffer)
class Text2URI(URI2DataURI):
    """Fill ``uri`` with a data URI built from the document's text payload."""

    def convert(self, d):
        datauri = self.make_datauri(d.mime_type, d.text, binary=False)
        d.uri = datauri
class All2URI(Text2URI, Buffer2URI):
    """Dispatch to the text or buffer converter, depending on which
    payload the document actually carries."""

    def convert(self, d):
        if d.text:
            return Text2URI.convert(self, d)
        if d.buffer:
            return Buffer2URI.convert(self, d)
        # neither payload present: nothing to convert
        raise NotImplementedError
| [
"os.path.exists",
"mimetypes.types_map.values",
"base64.encodebytes",
"magic.from_buffer",
"base64.b64encode",
"urllib.parse.quote",
"struct.pack",
"zlib.compress",
"urllib.parse.quote_from_bytes",
"zlib.crc32",
"mimetypes.guess_type",
"numpy.frombuffer"
] | [((5254, 5275), 'os.path.exists', 'os.path.exists', (['d.uri'], {}), '(d.uri)\n', (5268, 5275), False, 'import os\n'), ((2145, 2173), 'mimetypes.types_map.values', 'mimetypes.types_map.values', ([], {}), '()\n', (2171, 2173), False, 'import mimetypes\n'), ((2197, 2232), 'mimetypes.guess_type', 'mimetypes.guess_type', (['f"""*.{m_type}"""'], {}), "(f'*.{m_type}')\n", (2217, 2232), False, 'import mimetypes\n'), ((3288, 3311), 'numpy.frombuffer', 'np.frombuffer', (['d.buffer'], {}), '(d.buffer)\n', (3301, 3311), True, 'import numpy as np\n'), ((7161, 7183), 'urllib.parse.quote_from_bytes', 'quote_from_bytes', (['data'], {}), '(data)\n', (7177, 7183), False, 'from urllib.parse import quote_from_bytes, quote\n'), ((7233, 7244), 'urllib.parse.quote', 'quote', (['data'], {}), '(data)\n', (7238, 7244), False, 'from urllib.parse import quote_from_bytes, quote\n'), ((4508, 4568), 'struct.pack', 'struct.pack', (['"""!2I5B"""', 'self.width', 'self.height', '(8)', '(6)', '(0)', '(0)', '(0)'], {}), "('!2I5B', self.width, self.height, 8, 6, 0, 0, 0)\n", (4519, 4568), False, 'import struct\n'), ((4601, 4627), 'zlib.compress', 'zlib.compress', (['raw_data', '(9)'], {}), '(raw_data, 9)\n', (4614, 4627), False, 'import zlib\n'), ((4710, 4737), 'base64.b64encode', 'base64.b64encode', (['png_bytes'], {}), '(png_bytes)\n', (4726, 4737), False, 'import base64\n'), ((2572, 2611), 'magic.from_buffer', 'magic.from_buffer', (['d_content'], {'mime': '(True)'}), '(d_content, mime=True)\n', (2589, 2611), False, 'import magic\n'), ((4387, 4409), 'zlib.crc32', 'zlib.crc32', (['chunk_head'], {}), '(chunk_head)\n', (4397, 4409), False, 'import zlib\n'), ((7009, 7023), 'base64.encodebytes', 'encode64', (['data'], {}), '(data)\n', (7017, 7023), True, 'from base64 import encodebytes as encode64\n'), ((6898, 6912), 'base64.encodebytes', 'encode64', (['data'], {}), '(data)\n', (6906, 6912), True, 'from base64 import encodebytes as encode64\n')] |
# Copyright (c) 2017 Sony Corporation. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import nnabla as nn
from nnabla.ext_utils import get_extension_context
from cifar10_data import data_iterator_cifar10
from micro_CNN import CNN_run, get_data_stats, show_arch
from args import get_micro_args
def main():
    """Load a recommended micro-CNN architecture and retrain it on CIFAR-10.

    Reads the architecture from the file given by ``--recommended-arch``,
    sets up the NNabla execution context, builds CIFAR-10 train/valid
    iterators together with their normalization statistics, then runs
    training via :func:`CNN_run`.

    :raises ValueError: if no recommended architecture file was supplied.
    """
    args = get_micro_args()
    # the input/output nodes are fixed and not part of the searched cells
    args.num_nodes = args.num_nodes - 2

    # BUGFIX: `filename` was previously only bound inside `if
    # args.recommended_arch:`, so a missing option crashed much later with
    # a confusing NameError at np.load(). Fail fast with a clear message.
    if not args.recommended_arch:
        raise ValueError(
            '--recommended-arch must point to a saved architecture (.npy) file')
    filename = args.recommended_arch

    ctx = get_extension_context(
        args.context, device_id=args.device_id, type_config=args.type_config)
    nn.set_default_context(ctx)
    # imported for its side effect of registering the extension backend
    nn.ext_utils.import_extension_module(args.context)

    data_iterator = data_iterator_cifar10
    tdata = data_iterator(args.batch_size, True)
    vdata = data_iterator(args.batch_size, False)
    mean_val_train, std_val_train, channel, img_height, img_width, num_class = get_data_stats(
        tdata)
    mean_val_valid, std_val_valid, _, _, _, _ = get_data_stats(vdata)
    data_dict = {"train_data": (tdata, mean_val_train, std_val_train),
                 "valid_data": (vdata, mean_val_valid, std_val_valid),
                 "basic_info": (channel, img_height, img_width, num_class)}

    check_arch = np.load(filename)
    print("Train the model whose architecture is:")
    show_arch(check_arch)
    CNN_run(args, check_arch.tolist(), data_dict,
            with_train=True, after_search=True)
# Run training only when executed directly (not on import).
if __name__ == '__main__':
    main()
| [
"nnabla.ext_utils.import_extension_module",
"nnabla.set_default_context",
"args.get_micro_args",
"micro_CNN.get_data_stats",
"nnabla.ext_utils.get_extension_context",
"micro_CNN.show_arch",
"numpy.load"
] | [((859, 875), 'args.get_micro_args', 'get_micro_args', ([], {}), '()\n', (873, 875), False, 'from args import get_micro_args\n'), ((1000, 1096), 'nnabla.ext_utils.get_extension_context', 'get_extension_context', (['args.context'], {'device_id': 'args.device_id', 'type_config': 'args.type_config'}), '(args.context, device_id=args.device_id, type_config=\n args.type_config)\n', (1021, 1096), False, 'from nnabla.ext_utils import get_extension_context\n'), ((1105, 1132), 'nnabla.set_default_context', 'nn.set_default_context', (['ctx'], {}), '(ctx)\n', (1127, 1132), True, 'import nnabla as nn\n'), ((1143, 1193), 'nnabla.ext_utils.import_extension_module', 'nn.ext_utils.import_extension_module', (['args.context'], {}), '(args.context)\n', (1179, 1193), True, 'import nnabla as nn\n'), ((1416, 1437), 'micro_CNN.get_data_stats', 'get_data_stats', (['tdata'], {}), '(tdata)\n', (1430, 1437), False, 'from micro_CNN import CNN_run, get_data_stats, show_arch\n'), ((1495, 1516), 'micro_CNN.get_data_stats', 'get_data_stats', (['vdata'], {}), '(vdata)\n', (1509, 1516), False, 'from micro_CNN import CNN_run, get_data_stats, show_arch\n'), ((1754, 1771), 'numpy.load', 'np.load', (['filename'], {}), '(filename)\n', (1761, 1771), True, 'import numpy as np\n'), ((1828, 1849), 'micro_CNN.show_arch', 'show_arch', (['check_arch'], {}), '(check_arch)\n', (1837, 1849), False, 'from micro_CNN import CNN_run, get_data_stats, show_arch\n')] |
# Author: <NAME>
# License: Apache-2.0
import numpy as np
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.compose import ColumnTransformer
from sklearn.impute import SimpleImputer
from sklearn.preprocessing import OneHotEncoder
from sklearn.utils.validation import as_float_array
from xgboost import XGBClassifier, XGBRegressor
from .utils import encode_categories, get_inferred_categories_index, replace_categorical_values_back
__all__ = ['XGBImputer']
class XGBImputer(BaseEstimator, TransformerMixin):
    '''
    XGBImputer is an effort to implement the concepts of the MissForest
    algorithm proposed by <NAME> and <NAME> in 2012,
    but leveraging the robustness and predictive power of the XGBoost
    algorithm released in 2014. It also aims to simplify the process of
    imputing categorical values in a scikit-learn compatible way.

    Parameters
    ----------
    categorical_features_index : List[int], np.array[int], optional (default = [])
        List or array of integers representing the index of categorical
        features of the array being imputed.If no index of categorical
        feature is informed, the algorithm will treat all features as numerical.

    replace_categorical_values_back : bool, optional (default = False)
        If set to True, the values of the imputed X will be replaced with
        the initial categories back again. Otherwise, the categorical features
        will be OrdinalEncoded and the imputed X will be a numpy array
        containing only floats.

    **kwargs:
        Any keyword argument provided will be treated as XGBoost parameters
        to be set.

    Attributes
    ----------
    encoded_categories : dict
        Dictionary containing all column indexes of categorical features
        and the respective OrdinalEncoded values used by the algorithm.

    casted_as_string_categories : dict
        Dictionary containing all column indexes of categorical featues
        that needed to be casted as string to be processed.
        By default, the sklearn's OrdinalEncoder cannot operate on arrays that
        contains both floats and strings. Verifying this condition, XGBImputer
        will treat all values of the specific feature as strings to be OrdinalEncoded.
        If the parameter 'replace_categorical_values_back' is set to True, the feature's
        dtype will be casted again to 'object' and the numeric like values will be casted
        as floats when possible.
    '''
    def __init__(
        self,
        categorical_features_index=[],
        replace_categorical_values_back=False,
        **kwargs
    ):
        # NOTE(review): the mutable default `[]` is harmless here because it
        # is only read (converted to np.array), never mutated
        self.categorical_features_index = np.array(categorical_features_index)
        self.replace_categorical_values_back = replace_categorical_values_back
        # any extra keyword arguments are forwarded verbatim to XGBoost
        if kwargs:
            self.kwargs = kwargs
        else:
            self.kwargs = {}

    def fit(self, X, y=None):
        """No-op; all work happens in :meth:`transform`. Returns self."""
        return self

    def transform(self, X):
        """Iteratively impute all missing values of X, MissForest-style,
        using one XGBoost model per column with missing values."""
        if type(X) != np.ndarray:
            X = np.array(X)
        self.columns_index = np.arange(X.shape[1])
        # ordinal-encode categorical columns so XGBoost can consume them
        X, self.encoded_categories, self.casted_as_string_categories = encode_categories(X, self.categorical_features_index)
        X = as_float_array(X)
        # remember where the missing values were before the initial fill
        self.isnan_array = np.isnan(X)
        self.inferred_features_index = get_inferred_categories_index(self)
        self.inferred_categorical_features_index = np.intersect1d(self.inferred_features_index, self.categorical_features_index)
        self.numerical_features_index = np.setdiff1d(self.columns_index, self.categorical_features_index)
        self.inferred_numerical_features_index = np.intersect1d(self.inferred_features_index, self.numerical_features_index)
        # initial guess: mean for numerical, mode for categorical columns
        mean_simple_imputer = SimpleImputer(strategy='mean')
        X[:,self.numerical_features_index] = mean_simple_imputer.fit_transform(X[:,self.numerical_features_index])
        mode_simple_imputer = SimpleImputer(strategy='most_frequent')
        X[:,self.categorical_features_index] = mode_simple_imputer.fit_transform(X[:,self.categorical_features_index])
        Ximp = X.copy()
        iterations_counter = 1
        # gamma values track the change between successive imputations;
        # iteration stops once neither gamma decreases any further
        gamma_inferred_categorical_features_old = np.inf
        gamma_inferred_categorical_features_new = 0
        gamma_inferred_numerical_features_old = np.inf
        gamma_inferred_numerical_features_new = 0
        while (gamma_inferred_categorical_features_new < gamma_inferred_categorical_features_old or gamma_inferred_numerical_features_new < gamma_inferred_numerical_features_old):
            Ximp_old = Ximp.copy()
            if iterations_counter > 1:
                gamma_inferred_categorical_features_old = gamma_inferred_categorical_features_new
                gamma_inferred_numerical_features_old = gamma_inferred_numerical_features_new
            for column_index in self.inferred_features_index:
                # classifier for categorical targets, regressor otherwise
                if column_index in self.categorical_features_index:
                    xgb = XGBClassifier(subsample=0.7, use_label_encoder=False, verbosity=0)
                else:
                    xgb = XGBRegressor(subsample=0.7, verbosity=0)
                if self.kwargs:
                    xgb.set_params(**self.kwargs)
                # observed rows (target not originally missing) train the
                # model; the originally-missing rows are predicted
                X_obs = np.delete(Ximp, column_index, axis=1)[np.invert(self.isnan_array[:,column_index]),:]
                y_obs = Ximp[np.invert(self.isnan_array[:,column_index]),column_index]
                X_mis = np.delete(Ximp, column_index, axis=1)[self.isnan_array[:,column_index],:]
                # categorical indexes shift left by one past the dropped column
                one_hot_encoded_features_index = np.hstack([self.categorical_features_index[self.categorical_features_index < column_index],self.categorical_features_index[self.categorical_features_index > column_index]-1])
                ct = ColumnTransformer(transformers=[('one_hot_encoder', OneHotEncoder(handle_unknown='ignore'), one_hot_encoded_features_index)])
                X_obs = ct.fit_transform(X_obs)
                X_mis = ct.transform(X_mis)
                xgb.fit(X_obs, y_obs)
                y_mis = xgb.predict(X_mis)
                Ximp[self.isnan_array[:,column_index],column_index] = y_mis
            # NOTE(review): both gamma divisors can be zero when no
            # categorical (resp. numerical) column has missing values —
            # numpy then yields nan/inf rather than raising; confirm intended
            gamma_inferred_categorical_features_new = np.sum(Ximp[:,self.inferred_categorical_features_index] != Ximp_old[:,self.inferred_categorical_features_index])/self.inferred_categorical_features_index.size
            gamma_inferred_numerical_features_new = np.sum((Ximp[:,self.inferred_numerical_features_index] - Ximp_old[:,self.inferred_numerical_features_index])**2)/np.sum((Ximp[:, self.inferred_numerical_features_index]) ** 2)
            print(f'XGBImputer - Epoch: {iterations_counter} | Categorical gamma: {np.format_float_positional(gamma_inferred_categorical_features_old, precision=4)}/{np.format_float_positional(gamma_inferred_categorical_features_new, precision=4)} | Numerical gamma: {np.format_float_positional(gamma_inferred_numerical_features_old, precision=10)}/{np.format_float_positional(gamma_inferred_numerical_features_new, precision=10)}')
            iterations_counter += 1
        if self.replace_categorical_values_back:
            # map ordinal codes back to the original category labels
            Ximp = replace_categorical_values_back(Ximp, self.encoded_categories, self.casted_as_string_categories)
        return Ximp
def fit_transform(self, X, y=None):
return self.fit(X).transform(X) | [
"numpy.intersect1d",
"sklearn.utils.validation.as_float_array",
"numpy.hstack",
"numpy.format_float_positional",
"numpy.delete",
"sklearn.preprocessing.OneHotEncoder",
"numpy.invert",
"numpy.array",
"numpy.sum",
"xgboost.XGBRegressor",
"numpy.isnan",
"numpy.setdiff1d",
"sklearn.impute.Simple... | [((2717, 2753), 'numpy.array', 'np.array', (['categorical_features_index'], {}), '(categorical_features_index)\n', (2725, 2753), True, 'import numpy as np\n'), ((3118, 3139), 'numpy.arange', 'np.arange', (['X.shape[1]'], {}), '(X.shape[1])\n', (3127, 3139), True, 'import numpy as np\n'), ((3278, 3295), 'sklearn.utils.validation.as_float_array', 'as_float_array', (['X'], {}), '(X)\n', (3292, 3295), False, 'from sklearn.utils.validation import as_float_array\n'), ((3323, 3334), 'numpy.isnan', 'np.isnan', (['X'], {}), '(X)\n', (3331, 3334), True, 'import numpy as np\n'), ((3470, 3547), 'numpy.intersect1d', 'np.intersect1d', (['self.inferred_features_index', 'self.categorical_features_index'], {}), '(self.inferred_features_index, self.categorical_features_index)\n', (3484, 3547), True, 'import numpy as np\n'), ((3588, 3653), 'numpy.setdiff1d', 'np.setdiff1d', (['self.columns_index', 'self.categorical_features_index'], {}), '(self.columns_index, self.categorical_features_index)\n', (3600, 3653), True, 'import numpy as np\n'), ((3703, 3778), 'numpy.intersect1d', 'np.intersect1d', (['self.inferred_features_index', 'self.numerical_features_index'], {}), '(self.inferred_features_index, self.numerical_features_index)\n', (3717, 3778), True, 'import numpy as np\n'), ((3818, 3848), 'sklearn.impute.SimpleImputer', 'SimpleImputer', ([], {'strategy': '"""mean"""'}), "(strategy='mean')\n", (3831, 3848), False, 'from sklearn.impute import SimpleImputer\n'), ((4003, 4042), 'sklearn.impute.SimpleImputer', 'SimpleImputer', ([], {'strategy': '"""most_frequent"""'}), "(strategy='most_frequent')\n", (4016, 4042), False, 'from sklearn.impute import SimpleImputer\n'), ((3076, 3087), 'numpy.array', 'np.array', (['X'], {}), '(X)\n', (3084, 3087), True, 'import numpy as np\n'), ((5672, 5858), 'numpy.hstack', 'np.hstack', (['[self.categorical_features_index[self.categorical_features_index <\n column_index], self.categorical_features_index[self.\n 
categorical_features_index > column_index] - 1]'], {}), '([self.categorical_features_index[self.categorical_features_index <\n column_index], self.categorical_features_index[self.\n categorical_features_index > column_index] - 1])\n', (5681, 5858), True, 'import numpy as np\n'), ((6302, 6420), 'numpy.sum', 'np.sum', (['(Ximp[:, self.inferred_categorical_features_index] != Ximp_old[:, self.\n inferred_categorical_features_index])'], {}), '(Ximp[:, self.inferred_categorical_features_index] != Ximp_old[:,\n self.inferred_categorical_features_index])\n', (6308, 6420), True, 'import numpy as np\n'), ((6513, 6634), 'numpy.sum', 'np.sum', (['((Ximp[:, self.inferred_numerical_features_index] - Ximp_old[:, self.\n inferred_numerical_features_index]) ** 2)'], {}), '((Ximp[:, self.inferred_numerical_features_index] - Ximp_old[:, self.\n inferred_numerical_features_index]) ** 2)\n', (6519, 6634), True, 'import numpy as np\n'), ((6626, 6686), 'numpy.sum', 'np.sum', (['(Ximp[:, self.inferred_numerical_features_index] ** 2)'], {}), '(Ximp[:, self.inferred_numerical_features_index] ** 2)\n', (6632, 6686), True, 'import numpy as np\n'), ((5056, 5122), 'xgboost.XGBClassifier', 'XGBClassifier', ([], {'subsample': '(0.7)', 'use_label_encoder': '(False)', 'verbosity': '(0)'}), '(subsample=0.7, use_label_encoder=False, verbosity=0)\n', (5069, 5122), False, 'from xgboost import XGBClassifier, XGBRegressor\n'), ((5171, 5211), 'xgboost.XGBRegressor', 'XGBRegressor', ([], {'subsample': '(0.7)', 'verbosity': '(0)'}), '(subsample=0.7, verbosity=0)\n', (5183, 5211), False, 'from xgboost import XGBClassifier, XGBRegressor\n'), ((5336, 5373), 'numpy.delete', 'np.delete', (['Ximp', 'column_index'], {'axis': '(1)'}), '(Ximp, column_index, axis=1)\n', (5345, 5373), True, 'import numpy as np\n'), ((5532, 5569), 'numpy.delete', 'np.delete', (['Ximp', 'column_index'], {'axis': '(1)'}), '(Ximp, column_index, axis=1)\n', (5541, 5569), True, 'import numpy as np\n'), ((5374, 5418), 'numpy.invert', 
'np.invert', (['self.isnan_array[:, column_index]'], {}), '(self.isnan_array[:, column_index])\n', (5383, 5418), True, 'import numpy as np\n'), ((5450, 5494), 'numpy.invert', 'np.invert', (['self.isnan_array[:, column_index]'], {}), '(self.isnan_array[:, column_index])\n', (5459, 5494), True, 'import numpy as np\n'), ((6785, 6870), 'numpy.format_float_positional', 'np.format_float_positional', (['gamma_inferred_categorical_features_old'], {'precision': '(4)'}), '(gamma_inferred_categorical_features_old, precision=4\n )\n', (6811, 6870), True, 'import numpy as np\n'), ((6868, 6953), 'numpy.format_float_positional', 'np.format_float_positional', (['gamma_inferred_categorical_features_new'], {'precision': '(4)'}), '(gamma_inferred_categorical_features_new, precision=4\n )\n', (6894, 6953), True, 'import numpy as np\n'), ((6970, 7049), 'numpy.format_float_positional', 'np.format_float_positional', (['gamma_inferred_numerical_features_old'], {'precision': '(10)'}), '(gamma_inferred_numerical_features_old, precision=10)\n', (6996, 7049), True, 'import numpy as np\n'), ((7052, 7131), 'numpy.format_float_positional', 'np.format_float_positional', (['gamma_inferred_numerical_features_new'], {'precision': '(10)'}), '(gamma_inferred_numerical_features_new, precision=10)\n', (7078, 7131), True, 'import numpy as np\n'), ((5920, 5958), 'sklearn.preprocessing.OneHotEncoder', 'OneHotEncoder', ([], {'handle_unknown': '"""ignore"""'}), "(handle_unknown='ignore')\n", (5933, 5958), False, 'from sklearn.preprocessing import OneHotEncoder\n')] |
"""skeleton image used by SKNW and then by AmiGraph
This class had a lot of mess and has been refactored"""
import logging
from pathlib import Path
import numpy as np
import networkx as nx
import sknw # must pip install sknw
import os
import matplotlib.pyplot as plt
from skimage import io
# local
from ..pyimage.ami_image import AmiImage
from ..pyimage.ami_util import AmiUtil
from ..pyimage.bbox import BBox
from ..pyimage.flood_fill import FloodFill
from ..pyimage.ami_graph_all import AmiGraph
class AmiSkeleton:
    """Manages the workflow from image file to plot.

    Creates, in order:
    * binary image
    * skeleton
    * sknw nodes and edges
    * networkx graph (often called nx_graph)
    * plots

    NOTE(review): may need rationalization with AmiGraph.
    """
    # NODE_PTS = "pts"
    # CENTROID = "o"

    # module-level logger shared by all instances
    logger = logging.getLogger("ami_skeleton")
    def __init__(self, plot_plot=False, title=None):
        """Create an empty skeleton workflow holder.

        :param plot_plot: if True, plots are shown interactively
        :param title: title used for plots and output file names
        """
        # intermediate images produced along the workflow
        self.skeleton_image = None
        self.binary = None
        # graph structures derived from the skeleton
        self.nx_graph = None
        self.edge_xy_list = []
        self.node_xy = []
        self.nodes = []
        # source image and its path
        self.image = None
        self.path = None
        self.new_binary = None
        self.interactive = False
        self.title = title
        self.plot_plot = plot_plot
        # segmentation results
        self.islands = None
        self.bboxes = None
        self.thresh = None
        #
        self.ami_graph = None
        self.node_dict = {}
        self.edge_dict = {}
@classmethod
def binarize_skeletonize_sknw_nx_graph_plot_TEST(cls, path, plot_plot=True):
"""
Creates skeleton and nx_graph and plots it
:param path:
:param plot_plot:
:return: AmiSkeleton
"""
assert path is not None
path = Path(path)
skeleton_image = AmiImage.create_white_skeleton_from_file(path)
# build graph from skeleton
nx_graph = AmiGraph.create_nx_graph_from_skeleton(skeleton_image)
if plot_plot:
self.plot_nx_graph_NX(nx_graph)
return skeleton_image
def create_nx_graph_via_skeleton_sknw_NX_GRAPH(self, path):
"""
Creates a nx_graph
does it need a path?
:param path:
:return: AmiSkeleton
"""
assert path is not None
path = Path(path)
self.skeleton_image = AmiImage.create_white_skeleton_from_file(path)
io.imshow(self.skeleton_image)
io.show()
assert self.skeleton_image is not None
# build graph from skeleton
nx_graph = AmiGraph.create_nx_graph_from_skeleton(self.skeleton_image)
return nx_graph
def plot_nx_graph_NX(self, nx_graph, title="skeleton"):
"""
:param nx_graph:
:param title:
:return: None
"""
"""
requires that nx_graph has been created
graph.node[id]['pts'] : Numpy(x, n), coordinates of nodes points
graph.node[id]['o']: Numpy(n), centried of the node
graph.edge(id1, id2)['pts']: Numpy(x, n), sequence of the edge point
graph.edge(id1, id2)['weight']: float, length of this edge """
assert nx_graph is not None
AmiSkeleton.get_coords_for_nodes_and_edges_from_nx_graph_GRAPH(nx_graph)
self.plot_edges_nodes_and_title_GRAPH(title)
return None
def plot_edges_nodes_and_title_GRAPH(self, title, plot_plot=True):
"""
Requires nodes and edges to have been created
:param title:
:param plot_plot:
:return:
"""
for edge_xy in self.edge_xy_list:
plt.plot(edge_xy[:, 1], np.negative(edge_xy[:, 0]), 'green')
# draw node by small circle (".")
plt.plot(self.node_xy[:, 1], np.negative(self.node_xy[:, 0]), 'r.')
# title and show
plt.title(title)
if plot_plot:
plt.show()
path = Path(Path(__file__).parent.parent, "temp/figs")
if not path.exists():
path.mkdir()
fig = Path(path, f"{title}.png")
if fig.exists():
os.remove(fig)
plt.savefig(fig, format="png")
if self.interactive:
plt.show()
@classmethod
def get_coords_for_nodes_and_edges_from_nx_graph_GRAPH(cls, nx_graph):
"""
creates nodes and edges from networkx graph
:return: Node
"""
assert nx_graph is not None
nodes = nx_graph.nodes()
node_xy = np.array([nodes[i][AmiSkeleton.CENTROID] for i in nodes])
# edges by pts (s(tart),e(nd)) appear to be the nodes on each edge
edge_xy_list = []
for (s, e) in nx_graph.edges():
edge_xy = nx_graph[s][e][AmiSkeleton.NODE_PTS]
edge_xy_list.append(edge_xy)
return node_xy, edge_xy_list
def extract_bbox_for_nodes_ISLAND(self, ami_island):
from pyimage import AmiIsland
"""
gets bounding box for a list of nodes in
requires nodes to have been created
:param ami_island:
:return: bounding box ((xmin, xmax), (ymin, ymax))
"""
assert ami_island is not None
assert type(ami_island) is AmiIsland, f"expected {AmiIsland} found {type(ami_island)}"
node_xy = self.extract_coords_for_nodes_ISLAND(ami_island)
# print ("node_xy...", node_xy)
xx = node_xy[:, 0]
yy = node_xy[:, 1]
xmin = int(np.min(xx))
xmax = int(np.max(xx))
ymin = int(np.min(yy))
ymax = int(np.max(yy))
bbox = BBox(((xmin, xmax), (ymin, ymax)))
return bbox
def extract_coords_for_nodes_ISLAND(self, ami_island):
"""
gets coordinates for a set of nx_graph nodes
*** NOTE it seems the sknw output has y,x rather than x,y ***
:param ami_island: normally ints but I suppose could be other
:return: node_xy as [npoints, 2] ndarray
"""
assert ami_island is not None
assert type(ami_island) is AmiIsland, f"expected {AmiIsland} found {type(ami_island)}"
npoints = len(ami_island)
node_xy = np.empty([0, 2], dtype=float)
for isd in ami_island:
centroid = self.extract_coords_for_node_NX_GRAPH_CLS(isd)
node_xy = np.append(node_xy, centroid)
node_xy = np.reshape(node_xy, (npoints, 2))
return node_xy
def create_bbox_for_island_ISLAND(self, island):
bbox0 = self.extract_bbox_for_nodes_ISLAND(island)
bbox = BBox(bbox0)
return bbox
def read_image_plot_component(self, component_index, image):
"""
Convenience method to read imag, get components and plot given one
:param component_index:
:param image:
:return:
"""
nx_graph = self.create_nx_graph_via_skeleton_sknw_NX_GRAPH(image)
# self.get_coords_for_nodes_and_edges_from_nx_graph_GRAPH(nx_graph)
# ami_graph = AmiGraph.
# TODO needs AmiGraph adding
islands = self.get_ami_islands_from_nx_graph_GRAPH()
island = islands[component_index]
self.plot_island_ISLAND(island)
def plot_island_ISLAND(self, component):
"""
Plots a given component
:param component:
:return:
"""
start_node_index = list(component)[0] # take first node
start_node = self.nodes[start_node_index]
start_pixel = start_node[self.NODE_PTS][0] # may be a list of xy for a complex node always pick first
flooder = FloodFill()
pixels = flooder.flood_fill(self.binary, start_pixel)
if self.interactive:
flooder.plot_used_pixels()
def create_and_plot_all_components_TEST(self, path, min_size=None):
"""
:param path:
:param min_size:
:return:
"""
if min_size is None:
min_size = [30, 30]
self.create_nx_graph_via_skeleton_sknw_NX_GRAPH(path)
nodes_xy, edges_xy = self.get_coords_for_nodes_and_edges_from_nx_graph_GRAPH(self.nx_graph)
components = self.get_ami_islands_from_nx_graph_GRAPH()
assert self.nx_graph is not None
self.islands = self.get_ami_islands_from_nx_graph_GRAPH()
bboxes = self.islands
for component, bbox in zip(components, bboxes):
w, h = AmiSkeleton.get_width_height_BBOX(bbox)
if min_size[0] < w or min_size[1] < h:
self.plot_island_ISLAND(component)
def get_ami_islands_from_image_OBSOLETE(self, image):
"""
read image, calculate islands
:param image:
:return: list of islands in arbitrary order
"""
nx_graph = self.create_nx_graph_via_skeleton_sknw_NX_GRAPH(image)
self.get_coords_for_nodes_and_edges_from_nx_graph_GRAPH(nx_graph)
return self.get_ami_islands_from_nx_graph_GRAPH()
| [
"logging.getLogger",
"matplotlib.pyplot.savefig",
"numpy.reshape",
"pathlib.Path",
"skimage.io.show",
"numpy.min",
"numpy.max",
"os.remove",
"numpy.array",
"numpy.negative",
"numpy.append",
"numpy.empty",
"skimage.io.imshow",
"matplotlib.pyplot.title",
"matplotlib.pyplot.show"
] | [((803, 836), 'logging.getLogger', 'logging.getLogger', (['"""ami_skeleton"""'], {}), "('ami_skeleton')\n", (820, 836), False, 'import logging\n'), ((1716, 1726), 'pathlib.Path', 'Path', (['path'], {}), '(path)\n', (1720, 1726), False, 'from pathlib import Path\n'), ((2247, 2257), 'pathlib.Path', 'Path', (['path'], {}), '(path)\n', (2251, 2257), False, 'from pathlib import Path\n'), ((2343, 2373), 'skimage.io.imshow', 'io.imshow', (['self.skeleton_image'], {}), '(self.skeleton_image)\n', (2352, 2373), False, 'from skimage import io\n'), ((2382, 2391), 'skimage.io.show', 'io.show', ([], {}), '()\n', (2389, 2391), False, 'from skimage import io\n'), ((3720, 3736), 'matplotlib.pyplot.title', 'plt.title', (['title'], {}), '(title)\n', (3729, 3736), True, 'import matplotlib.pyplot as plt\n'), ((3915, 3941), 'pathlib.Path', 'Path', (['path', 'f"""{title}.png"""'], {}), "(path, f'{title}.png')\n", (3919, 3941), False, 'from pathlib import Path\n'), ((4002, 4032), 'matplotlib.pyplot.savefig', 'plt.savefig', (['fig'], {'format': '"""png"""'}), "(fig, format='png')\n", (4013, 4032), True, 'import matplotlib.pyplot as plt\n'), ((4363, 4420), 'numpy.array', 'np.array', (['[nodes[i][AmiSkeleton.CENTROID] for i in nodes]'], {}), '([nodes[i][AmiSkeleton.CENTROID] for i in nodes])\n', (4371, 4420), True, 'import numpy as np\n'), ((5999, 6028), 'numpy.empty', 'np.empty', (['[0, 2]'], {'dtype': 'float'}), '([0, 2], dtype=float)\n', (6007, 6028), True, 'import numpy as np\n'), ((6199, 6232), 'numpy.reshape', 'np.reshape', (['node_xy', '(npoints, 2)'], {}), '(node_xy, (npoints, 2))\n', (6209, 6232), True, 'import numpy as np\n'), ((3648, 3679), 'numpy.negative', 'np.negative', (['self.node_xy[:, 0]'], {}), '(self.node_xy[:, 0])\n', (3659, 3679), True, 'import numpy as np\n'), ((3771, 3781), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3779, 3781), True, 'import matplotlib.pyplot as plt\n'), ((3979, 3993), 'os.remove', 'os.remove', (['fig'], {}), '(fig)\n', (3988, 
3993), False, 'import os\n'), ((4074, 4084), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4082, 4084), True, 'import matplotlib.pyplot as plt\n'), ((5312, 5322), 'numpy.min', 'np.min', (['xx'], {}), '(xx)\n', (5318, 5322), True, 'import numpy as np\n'), ((5343, 5353), 'numpy.max', 'np.max', (['xx'], {}), '(xx)\n', (5349, 5353), True, 'import numpy as np\n'), ((5374, 5384), 'numpy.min', 'np.min', (['yy'], {}), '(yy)\n', (5380, 5384), True, 'import numpy as np\n'), ((5405, 5415), 'numpy.max', 'np.max', (['yy'], {}), '(yy)\n', (5411, 5415), True, 'import numpy as np\n'), ((6152, 6180), 'numpy.append', 'np.append', (['node_xy', 'centroid'], {}), '(node_xy, centroid)\n', (6161, 6180), True, 'import numpy as np\n'), ((3532, 3558), 'numpy.negative', 'np.negative', (['edge_xy[:, 0]'], {}), '(edge_xy[:, 0])\n', (3543, 3558), True, 'import numpy as np\n'), ((3803, 3817), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (3807, 3817), False, 'from pathlib import Path\n')] |
import pytest
import numpy as np
from encomp.units import Q
from encomp.fluids import Fluid, HumidAir, Water
from encomp.utypes import Density
def test_Fluid():
    """Exercise Fluid/Water/HumidAir construction, scalar/array inputs and error cases."""
    fld = Fluid('R123', P=Q(2, 'bar'), T=Q(25, '°C'))

    assert fld.get('S') == Q(1087.7758824621442, 'J/(K kg)')
    assert fld.D == fld.get('D')

    water = Fluid('water', P=Q(2, 'bar'), T=Q(25, '°C'))

    assert water.T.u == Q.get_unit('degC')
    assert water.T.m == 25

    HumidAir(T=Q(25, 'degC'), P=Q(125, 'kPa'), R=Q(0.2, 'dimensionless'))

    Water(P=Q(1, 'bar'), Q=Q(0.9, ''))
    Water(P=Q(1, 'bar'), T=Q(0.9, 'degC'))
    Water(T=Q(1, 'bar'), Q=Q(0.9, ''))

    # each invalid construction must be checked in its own pytest.raises block:
    # the original grouped several statements under a single `with`, so only
    # the first statement that raised was ever verified and the rest never ran
    with pytest.raises(Exception):
        # cannot fix all of P, T, Q
        # (was `Q=(0.4, '')`, a bare tuple typo -- the intended error is the
        # over-specification, so pass a proper quantity)
        Water(P=Q(1, 'bar'), T=Q(150, 'degC'), Q=Q(0.4, ''))

    with pytest.raises(Exception):
        # incorrect argument name
        Water(T=Q(1, 'bar'), P=Q(9, 'degC'))

    Fluid('water', T=Q([25, 95], 'C'), P=Q([1, 2], 'bar')).H

    Fluid('water', T=Q([25, np.nan], 'C'), P=Q([1, 2], 'bar')).H
    Fluid('water', T=Q([np.nan, np.nan], 'C'), P=Q([1, 2], 'bar')).H
    Fluid('water', T=Q([np.nan, np.nan], 'C'), P=Q([np.nan, np.nan], 'bar')).H

    Fluid('water', T=Q(23, 'C'), P=Q([1, 2], 'bar')).H
    Fluid('water', T=Q(23, 'C'), P=Q([1], 'bar')).H

    Fluid('water', T=Q([23, 25], 'C'), P=Q([1], 'bar')).H
    Fluid('water', T=Q([23, 25], 'C'), P=Q(np.nan, 'bar')).H
    Fluid('water', T=Q([23, 25], 'C'), P=Q([1, np.nan], 'bar')).H

    Water(T=Q([25, 25, 63], 'C'), Q=Q([np.nan, np.nan, 0.4], '')).H
    Water(T=Q([25, np.nan, 63], 'C'), Q=Q([np.nan, 0.2, 0.5], '')).H
    Water(T=Q([25, np.nan, np.nan], 'C'), Q=Q([np.nan, 0.2, np.nan], '')).H

    # returns empty array (not nan)
    ret = Fluid('water', T=Q([], 'C'), P=Q([], 'bar')).H.m
    assert isinstance(ret, np.ndarray) and ret.size == 0

    ret = Fluid('water', T=Q([], 'C'), P=Q((), 'bar')).H.m
    assert isinstance(ret, np.ndarray) and ret.size == 0

    ret = Fluid('water', T=Q([], 'C'), P=Q(np.array([]), 'bar')).H.m
    assert isinstance(ret, np.ndarray) and ret.size == 0

    # returns single float (not 1-element list)
    assert isinstance(Fluid('water', T=Q([23], 'C'), P=Q([1], 'bar')).H.m,
                      float)

    assert isinstance(Fluid('water', T=Q(23, 'C'), P=Q([1], 'bar')).H.m,
                      float)

    assert isinstance(Fluid('water', T=Q([23], 'C'), P=Q(1, 'bar')).H.m,
                      float)

    # mismatched array lengths must raise, independently for each case
    with pytest.raises(ValueError):
        Fluid('water', T=Q([np.nan, np.nan], 'C'),
              P=Q([np.nan, np.nan, np.nan], 'bar')).H

    with pytest.raises(ValueError):
        Fluid('water', T=Q([np.nan, np.nan], 'C'), P=Q([], 'bar')).H
def test_Water():
    """Construct Water at scalar, vector and mixed-phase conditions; repr must not fail."""
    single = Water(T=Q(25, '°C'), P=Q(5, 'bar'))
    repr(single)

    multi = Water(T=Q(np.linspace(25, 50), '°C'), P=Q(5, 'bar'))
    repr(multi)

    mixed_phase = Water(
        T=Q(np.linspace(25, 500, 10), '°C'),
        P=Q(np.linspace(0.5, 10, 10), 'bar')
    )
    repr(mixed_phase)

    # mismatching input sizes must raise; evaluation is lazy, so an
    # attribute has to be accessed before anything is actually computed
    with pytest.raises(Exception):
        Water(
            T=Q(np.linspace(25, 500, 10), '°C'),
            P=Q(np.linspace(0.5, 10, 50), 'bar')
        ).P
def test_HumidAir():
    """HumidAir accepts scalar and array inputs; NaN inputs propagate to NaN outputs."""
    # each (T, P, R) combination must construct and evaluate V without error
    combinations = [
        (Q(20, 'C'), Q(20, 'bar'), Q(20, '%')),
        (Q([25, 34], 'C'), Q(20, 'bar'), Q(20, '%')),
        (Q([25, 34], 'C'), Q([20, 30], 'bar'), Q([20, 40], '%')),
        (Q([25, 34], 'C'), Q([20, 30], 'bar'), Q([20, np.nan], '%')),
        (Q([np.nan, 34], 'C'), Q([np.nan, 30], 'bar'), Q([20, np.nan], '%')),
    ]

    for temperature, pressure, humidity in combinations:
        HumidAir(T=temperature, P=pressure, R=humidity).V

    # second element has R = 101 %, an invalid input: its output becomes
    # NaN while the first (valid) element stays finite
    ha = HumidAir(
        T=Q([20, 40], 'C'),
        P=Q([20, 1], 'bar'),
        R=Q([20, 101], '%')
    )
    val = ha.V.m

    assert not np.isnan(val[0])
    assert np.isnan(val[1])
def test_shapes():
    """Fluid properties must preserve the shape of multi-dimensional inputs."""
    for shape in ((4, 4), (3, 3, 3)):
        count = int(np.prod(shape))
        temperature = Q(np.linspace(50, 60, count).reshape(shape), 'C')
        pressure = Q(np.linspace(2, 4, count).reshape(shape), 'bar')

        water = Fluid('water', T=temperature, P=pressure)

        # output density keeps the input array shape
        assert water.D.m.shape == pressure.m.shape
        assert water.D.m.shape == temperature.m.shape
def test_invalid_areas():
    """Out-of-range conditions must yield NaN densities rather than raising."""
    n = 10

    # entirely invalid region: density is still a Density-dimensioned ndarray
    fluid = Fluid('water',
                  T=Q(np.linspace(-100, -50, n), 'K'),
                  P=Q(np.linspace(-1, -2, n), 'bar'))
    assert fluid.D.check(Density)
    assert isinstance(fluid.D.m, np.ndarray)

    # partially valid region: NaN where invalid, finite where valid
    fluid = Fluid('water',
                  T=Q(np.linspace(-100, 300, n), 'K'),
                  P=Q(np.linspace(-1, 2, n), 'bar'))
    assert fluid.D.check(Density)
    assert isinstance(fluid.D.m, np.ndarray)
    assert np.isnan(fluid.D.m[0])
    assert not np.isnan(fluid.D.m[-1])

    # NaN values placed directly in the inputs must not shrink the output
    temperatures = np.linspace(-100, 400, n)
    pressures = np.linspace(-1, 2, n)
    temperatures[-2] = np.nan
    pressures[-1] = np.nan
    pressures[-3] = np.nan

    fluid = Fluid('water', T=Q(temperatures, 'K'), P=Q(pressures, 'bar'))
    assert fluid.D.m.size == n
def test_properties_Fluid():
    """Every Fluid property must be retrievable for a spread of valid and invalid inputs."""
    temperatures = [
        25, 0, -1, -100, np.nan,
        [25, 30], [np.nan, 25], [np.nan, np.nan], [np.inf, np.nan],
        np.linspace(0, 10, 10), np.linspace(-10, 10, 10)
    ]
    pressures = [
        1, 0, -1, -100, np.nan,
        [3, 4], [np.nan, 3], [np.nan, np.nan], [np.inf, np.nan],
        np.linspace(0, 10, 10), np.linspace(-10, 10, 10)
    ]

    for name in ('water', 'methane', 'R134a'):
        for temperature, pressure in zip(temperatures, pressures):
            fluid = Fluid(name, T=Q(temperature, 'C'), P=Q(pressure, 'bar'))
            repr(fluid)

            # accessing each property must never raise, whatever the inputs
            for prop in Fluid.ALL_PROPERTIES:
                getattr(fluid, prop)
def test_properties_HumidAir():
    """Every HumidAir property must be retrievable for a spread of valid and invalid inputs."""
    temperatures = [
        25, 0, -1, -100, np.nan,
        [25, 30], [np.nan, 25], [np.nan, np.nan], [np.inf, np.nan],
        np.linspace(0, 10, 10), np.linspace(-10, 10, 10)
    ]
    pressures = [
        1, 0, -1, -100, np.nan,
        [3, 4], [np.nan, 3], [np.nan, np.nan], [np.inf, np.nan],
        np.linspace(0, 10, 10), np.linspace(-10, 10, 10)
    ]
    humidities = [
        0.5, 0.1, -1, -100, np.nan, -0.5, 0.00001, -0.0001, 0.99999, 1, 1.00001,
        [0.3, 0.4], [np.nan, 0.3], [np.nan, np.nan], [np.inf, np.nan],
        np.linspace(0, 1, 10), np.linspace(-0.5, 0.5, 10)
    ]

    for temperature, pressure, humidity in zip(temperatures, pressures, humidities):
        ha = HumidAir(T=Q(temperature, 'C'), P=Q(pressure, 'bar'), R=Q(humidity))
        repr(ha)

        # accessing each property must never raise, whatever the inputs
        for prop in HumidAir.ALL_PROPERTIES:
            getattr(ha, prop)
| [
"encomp.units.Q.get_unit",
"encomp.fluids.Fluid",
"encomp.fluids.HumidAir",
"encomp.units.Q",
"numpy.array",
"numpy.linspace",
"numpy.isnan",
"pytest.raises"
] | [((3281, 3291), 'encomp.units.Q', 'Q', (['(20)', '"""C"""'], {}), "(20, 'C')\n", (3282, 3291), False, 'from encomp.units import Q\n'), ((3300, 3312), 'encomp.units.Q', 'Q', (['(20)', '"""bar"""'], {}), "(20, 'bar')\n", (3301, 3312), False, 'from encomp.units import Q\n'), ((3321, 3331), 'encomp.units.Q', 'Q', (['(20)', '"""%"""'], {}), "(20, '%')\n", (3322, 3331), False, 'from encomp.units import Q\n'), ((3342, 3365), 'encomp.fluids.HumidAir', 'HumidAir', ([], {'T': 'T', 'P': 'P', 'R': 'R'}), '(T=T, P=P, R=R)\n', (3350, 3365), False, 'from encomp.fluids import Fluid, HumidAir, Water\n'), ((3384, 3400), 'encomp.units.Q', 'Q', (['[25, 34]', '"""C"""'], {}), "([25, 34], 'C')\n", (3385, 3400), False, 'from encomp.units import Q\n'), ((3409, 3421), 'encomp.units.Q', 'Q', (['(20)', '"""bar"""'], {}), "(20, 'bar')\n", (3410, 3421), False, 'from encomp.units import Q\n'), ((3430, 3440), 'encomp.units.Q', 'Q', (['(20)', '"""%"""'], {}), "(20, '%')\n", (3431, 3440), False, 'from encomp.units import Q\n'), ((3451, 3474), 'encomp.fluids.HumidAir', 'HumidAir', ([], {'T': 'T', 'P': 'P', 'R': 'R'}), '(T=T, P=P, R=R)\n', (3459, 3474), False, 'from encomp.fluids import Fluid, HumidAir, Water\n'), ((3493, 3509), 'encomp.units.Q', 'Q', (['[25, 34]', '"""C"""'], {}), "([25, 34], 'C')\n", (3494, 3509), False, 'from encomp.units import Q\n'), ((3518, 3536), 'encomp.units.Q', 'Q', (['[20, 30]', '"""bar"""'], {}), "([20, 30], 'bar')\n", (3519, 3536), False, 'from encomp.units import Q\n'), ((3545, 3561), 'encomp.units.Q', 'Q', (['[20, 40]', '"""%"""'], {}), "([20, 40], '%')\n", (3546, 3561), False, 'from encomp.units import Q\n'), ((3572, 3595), 'encomp.fluids.HumidAir', 'HumidAir', ([], {'T': 'T', 'P': 'P', 'R': 'R'}), '(T=T, P=P, R=R)\n', (3580, 3595), False, 'from encomp.fluids import Fluid, HumidAir, Water\n'), ((3614, 3630), 'encomp.units.Q', 'Q', (['[25, 34]', '"""C"""'], {}), "([25, 34], 'C')\n", (3615, 3630), False, 'from encomp.units import Q\n'), ((3639, 3657), 
'encomp.units.Q', 'Q', (['[20, 30]', '"""bar"""'], {}), "([20, 30], 'bar')\n", (3640, 3657), False, 'from encomp.units import Q\n'), ((3666, 3686), 'encomp.units.Q', 'Q', (['[20, np.nan]', '"""%"""'], {}), "([20, np.nan], '%')\n", (3667, 3686), False, 'from encomp.units import Q\n'), ((3697, 3720), 'encomp.fluids.HumidAir', 'HumidAir', ([], {'T': 'T', 'P': 'P', 'R': 'R'}), '(T=T, P=P, R=R)\n', (3705, 3720), False, 'from encomp.fluids import Fluid, HumidAir, Water\n'), ((3739, 3759), 'encomp.units.Q', 'Q', (['[np.nan, 34]', '"""C"""'], {}), "([np.nan, 34], 'C')\n", (3740, 3759), False, 'from encomp.units import Q\n'), ((3768, 3790), 'encomp.units.Q', 'Q', (['[np.nan, 30]', '"""bar"""'], {}), "([np.nan, 30], 'bar')\n", (3769, 3790), False, 'from encomp.units import Q\n'), ((3799, 3819), 'encomp.units.Q', 'Q', (['[20, np.nan]', '"""%"""'], {}), "([20, np.nan], '%')\n", (3800, 3819), False, 'from encomp.units import Q\n'), ((3830, 3853), 'encomp.fluids.HumidAir', 'HumidAir', ([], {'T': 'T', 'P': 'P', 'R': 'R'}), '(T=T, P=P, R=R)\n', (3838, 3853), False, 'from encomp.fluids import Fluid, HumidAir, Water\n'), ((3872, 3888), 'encomp.units.Q', 'Q', (['[20, 40]', '"""C"""'], {}), "([20, 40], 'C')\n", (3873, 3888), False, 'from encomp.units import Q\n'), ((3897, 3914), 'encomp.units.Q', 'Q', (['[20, 1]', '"""bar"""'], {}), "([20, 1], 'bar')\n", (3898, 3914), False, 'from encomp.units import Q\n'), ((3923, 3940), 'encomp.units.Q', 'Q', (['[20, 101]', '"""%"""'], {}), "([20, 101], '%')\n", (3924, 3940), False, 'from encomp.units import Q\n'), ((3951, 3974), 'encomp.fluids.HumidAir', 'HumidAir', ([], {'T': 'T', 'P': 'P', 'R': 'R'}), '(T=T, P=P, R=R)\n', (3959, 3974), False, 'from encomp.fluids import Fluid, HumidAir, Water\n'), ((4035, 4051), 'numpy.isnan', 'np.isnan', (['val[1]'], {}), '(val[1])\n', (4043, 4051), True, 'import numpy as np\n'), ((4205, 4229), 'encomp.fluids.Fluid', 'Fluid', (['"""water"""'], {'T': 'T', 'P': 'P'}), "('water', T=T, P=P)\n", (4210, 4229), False, 
'from encomp.fluids import Fluid, HumidAir, Water\n'), ((4449, 4473), 'encomp.fluids.Fluid', 'Fluid', (['"""water"""'], {'T': 'T', 'P': 'P'}), "('water', T=T, P=P)\n", (4454, 4473), False, 'from encomp.fluids import Fluid, HumidAir, Water\n'), ((4691, 4715), 'encomp.fluids.Fluid', 'Fluid', (['"""water"""'], {'T': 'T', 'P': 'P'}), "('water', T=T, P=P)\n", (4696, 4715), False, 'from encomp.fluids import Fluid, HumidAir, Water\n'), ((4892, 4916), 'encomp.fluids.Fluid', 'Fluid', (['"""water"""'], {'T': 'T', 'P': 'P'}), "('water', T=T, P=P)\n", (4897, 4916), False, 'from encomp.fluids import Fluid, HumidAir, Water\n'), ((5008, 5030), 'numpy.isnan', 'np.isnan', (['water.D.m[0]'], {}), '(water.D.m[0])\n', (5016, 5030), True, 'import numpy as np\n'), ((5082, 5107), 'numpy.linspace', 'np.linspace', (['(-100)', '(400)', 'N'], {}), '(-100, 400, N)\n', (5093, 5107), True, 'import numpy as np\n'), ((5119, 5140), 'numpy.linspace', 'np.linspace', (['(-1)', '(2)', 'N'], {}), '(-1, 2, N)\n', (5130, 5140), True, 'import numpy as np\n'), ((5217, 5229), 'encomp.units.Q', 'Q', (['arr1', '"""K"""'], {}), "(arr1, 'K')\n", (5218, 5229), False, 'from encomp.units import Q\n'), ((5238, 5252), 'encomp.units.Q', 'Q', (['arr2', '"""bar"""'], {}), "(arr2, 'bar')\n", (5239, 5252), False, 'from encomp.units import Q\n'), ((5266, 5290), 'encomp.fluids.Fluid', 'Fluid', (['"""water"""'], {'T': 'T', 'P': 'P'}), "('water', T=T, P=P)\n", (5271, 5290), False, 'from encomp.fluids import Fluid, HumidAir, Water\n'), ((247, 280), 'encomp.units.Q', 'Q', (['(1087.7758824621442)', '"""J/(K kg)"""'], {}), "(1087.7758824621442, 'J/(K kg)')\n", (248, 280), False, 'from encomp.units import Q\n'), ((396, 414), 'encomp.units.Q.get_unit', 'Q.get_unit', (['"""degC"""'], {}), "('degC')\n", (406, 414), False, 'from encomp.units import Q\n'), ((649, 673), 'pytest.raises', 'pytest.raises', (['Exception'], {}), '(Exception)\n', (662, 673), False, 'import pytest\n'), ((2397, 2422), 'pytest.raises', 'pytest.raises', 
(['ValueError'], {}), '(ValueError)\n', (2410, 2422), False, 'import pytest\n'), ((3004, 3028), 'pytest.raises', 'pytest.raises', (['Exception'], {}), '(Exception)\n', (3017, 3028), False, 'import pytest\n'), ((4007, 4023), 'numpy.isnan', 'np.isnan', (['val[0]'], {}), '(val[0])\n', (4015, 4023), True, 'import numpy as np\n'), ((4605, 4630), 'numpy.linspace', 'np.linspace', (['(-100)', '(-50)', 'N'], {}), '(-100, -50, N)\n', (4616, 4630), True, 'import numpy as np\n'), ((4647, 4669), 'numpy.linspace', 'np.linspace', (['(-1)', '(-2)', 'N'], {}), '(-1, -2, N)\n', (4658, 4669), True, 'import numpy as np\n'), ((4807, 4832), 'numpy.linspace', 'np.linspace', (['(-100)', '(300)', 'N'], {}), '(-100, 300, N)\n', (4818, 4832), True, 'import numpy as np\n'), ((4849, 4870), 'numpy.linspace', 'np.linspace', (['(-1)', '(2)', 'N'], {}), '(-1, 2, N)\n', (4860, 4870), True, 'import numpy as np\n'), ((5046, 5069), 'numpy.isnan', 'np.isnan', (['water.D.m[-1]'], {}), '(water.D.m[-1])\n', (5054, 5069), True, 'import numpy as np\n'), ((5558, 5580), 'numpy.linspace', 'np.linspace', (['(0)', '(10)', '(10)'], {}), '(0, 10, 10)\n', (5569, 5580), True, 'import numpy as np\n'), ((5582, 5606), 'numpy.linspace', 'np.linspace', (['(-10)', '(10)', '(10)'], {}), '(-10, 10, 10)\n', (5593, 5606), True, 'import numpy as np\n'), ((5730, 5752), 'numpy.linspace', 'np.linspace', (['(0)', '(10)', '(10)'], {}), '(0, 10, 10)\n', (5741, 5752), True, 'import numpy as np\n'), ((5754, 5778), 'numpy.linspace', 'np.linspace', (['(-10)', '(10)', '(10)'], {}), '(-10, 10, 10)\n', (5765, 5778), True, 'import numpy as np\n'), ((6200, 6222), 'numpy.linspace', 'np.linspace', (['(0)', '(10)', '(10)'], {}), '(0, 10, 10)\n', (6211, 6222), True, 'import numpy as np\n'), ((6224, 6248), 'numpy.linspace', 'np.linspace', (['(-10)', '(10)', '(10)'], {}), '(-10, 10, 10)\n', (6235, 6248), True, 'import numpy as np\n'), ((6372, 6394), 'numpy.linspace', 'np.linspace', (['(0)', '(10)', '(10)'], {}), '(0, 10, 10)\n', (6383, 6394), 
True, 'import numpy as np\n'), ((6396, 6420), 'numpy.linspace', 'np.linspace', (['(-10)', '(10)', '(10)'], {}), '(-10, 10, 10)\n', (6407, 6420), True, 'import numpy as np\n'), ((6599, 6620), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', '(10)'], {}), '(0, 1, 10)\n', (6610, 6620), True, 'import numpy as np\n'), ((6622, 6648), 'numpy.linspace', 'np.linspace', (['(-0.5)', '(0.5)', '(10)'], {}), '(-0.5, 0.5, 10)\n', (6633, 6648), True, 'import numpy as np\n'), ((191, 202), 'encomp.units.Q', 'Q', (['(2)', '"""bar"""'], {}), "(2, 'bar')\n", (192, 202), False, 'from encomp.units import Q\n'), ((206, 217), 'encomp.units.Q', 'Q', (['(25)', '"""°C"""'], {}), "(25, '°C')\n", (207, 217), False, 'from encomp.units import Q\n'), ((344, 355), 'encomp.units.Q', 'Q', (['(2)', '"""bar"""'], {}), "(2, 'bar')\n", (345, 355), False, 'from encomp.units import Q\n'), ((359, 370), 'encomp.units.Q', 'Q', (['(25)', '"""°C"""'], {}), "(25, '°C')\n", (360, 370), False, 'from encomp.units import Q\n'), ((458, 471), 'encomp.units.Q', 'Q', (['(25)', '"""degC"""'], {}), "(25, 'degC')\n", (459, 471), False, 'from encomp.units import Q\n'), ((475, 488), 'encomp.units.Q', 'Q', (['(125)', '"""kPa"""'], {}), "(125, 'kPa')\n", (476, 488), False, 'from encomp.units import Q\n'), ((492, 515), 'encomp.units.Q', 'Q', (['(0.2)', '"""dimensionless"""'], {}), "(0.2, 'dimensionless')\n", (493, 515), False, 'from encomp.units import Q\n'), ((530, 541), 'encomp.units.Q', 'Q', (['(1)', '"""bar"""'], {}), "(1, 'bar')\n", (531, 541), False, 'from encomp.units import Q\n'), ((545, 555), 'encomp.units.Q', 'Q', (['(0.9)', '""""""'], {}), "(0.9, '')\n", (546, 555), False, 'from encomp.units import Q\n'), ((569, 580), 'encomp.units.Q', 'Q', (['(1)', '"""bar"""'], {}), "(1, 'bar')\n", (570, 580), False, 'from encomp.units import Q\n'), ((584, 598), 'encomp.units.Q', 'Q', (['(0.9)', '"""degC"""'], {}), "(0.9, 'degC')\n", (585, 598), False, 'from encomp.units import Q\n'), ((612, 623), 'encomp.units.Q', 'Q', (['(1)', 
'"""bar"""'], {}), "(1, 'bar')\n", (613, 623), False, 'from encomp.units import Q\n'), ((627, 637), 'encomp.units.Q', 'Q', (['(0.9)', '""""""'], {}), "(0.9, '')\n", (628, 637), False, 'from encomp.units import Q\n'), ((2655, 2666), 'encomp.units.Q', 'Q', (['(25)', '"""°C"""'], {}), "(25, '°C')\n", (2656, 2666), False, 'from encomp.units import Q\n'), ((2678, 2689), 'encomp.units.Q', 'Q', (['(5)', '"""bar"""'], {}), "(5, 'bar')\n", (2679, 2689), False, 'from encomp.units import Q\n'), ((2796, 2807), 'encomp.units.Q', 'Q', (['(5)', '"""bar"""'], {}), "(5, 'bar')\n", (2797, 2807), False, 'from encomp.units import Q\n'), ((728, 739), 'encomp.units.Q', 'Q', (['(1)', '"""bar"""'], {}), "(1, 'bar')\n", (729, 739), False, 'from encomp.units import Q\n'), ((743, 757), 'encomp.units.Q', 'Q', (['(150)', '"""degC"""'], {}), "(150, 'degC')\n", (744, 757), False, 'from encomp.units import Q\n'), ((823, 834), 'encomp.units.Q', 'Q', (['(1)', '"""bar"""'], {}), "(1, 'bar')\n", (824, 834), False, 'from encomp.units import Q\n'), ((838, 850), 'encomp.units.Q', 'Q', (['(9)', '"""degC"""'], {}), "(9, 'degC')\n", (839, 850), False, 'from encomp.units import Q\n'), ((874, 890), 'encomp.units.Q', 'Q', (['[25, 95]', '"""C"""'], {}), "([25, 95], 'C')\n", (875, 890), False, 'from encomp.units import Q\n'), ((894, 910), 'encomp.units.Q', 'Q', (['[1, 2]', '"""bar"""'], {}), "([1, 2], 'bar')\n", (895, 910), False, 'from encomp.units import Q\n'), ((935, 955), 'encomp.units.Q', 'Q', (['[25, np.nan]', '"""C"""'], {}), "([25, np.nan], 'C')\n", (936, 955), False, 'from encomp.units import Q\n'), ((959, 975), 'encomp.units.Q', 'Q', (['[1, 2]', '"""bar"""'], {}), "([1, 2], 'bar')\n", (960, 975), False, 'from encomp.units import Q\n'), ((1000, 1024), 'encomp.units.Q', 'Q', (['[np.nan, np.nan]', '"""C"""'], {}), "([np.nan, np.nan], 'C')\n", (1001, 1024), False, 'from encomp.units import Q\n'), ((1028, 1044), 'encomp.units.Q', 'Q', (['[1, 2]', '"""bar"""'], {}), "([1, 2], 'bar')\n", (1029, 1044), False, 
'from encomp.units import Q\n'), ((1069, 1093), 'encomp.units.Q', 'Q', (['[np.nan, np.nan]', '"""C"""'], {}), "([np.nan, np.nan], 'C')\n", (1070, 1093), False, 'from encomp.units import Q\n'), ((1097, 1123), 'encomp.units.Q', 'Q', (['[np.nan, np.nan]', '"""bar"""'], {}), "([np.nan, np.nan], 'bar')\n", (1098, 1123), False, 'from encomp.units import Q\n'), ((1148, 1158), 'encomp.units.Q', 'Q', (['(23)', '"""C"""'], {}), "(23, 'C')\n", (1149, 1158), False, 'from encomp.units import Q\n'), ((1162, 1178), 'encomp.units.Q', 'Q', (['[1, 2]', '"""bar"""'], {}), "([1, 2], 'bar')\n", (1163, 1178), False, 'from encomp.units import Q\n'), ((1203, 1213), 'encomp.units.Q', 'Q', (['(23)', '"""C"""'], {}), "(23, 'C')\n", (1204, 1213), False, 'from encomp.units import Q\n'), ((1217, 1230), 'encomp.units.Q', 'Q', (['[1]', '"""bar"""'], {}), "([1], 'bar')\n", (1218, 1230), False, 'from encomp.units import Q\n'), ((1255, 1271), 'encomp.units.Q', 'Q', (['[23, 25]', '"""C"""'], {}), "([23, 25], 'C')\n", (1256, 1271), False, 'from encomp.units import Q\n'), ((1275, 1288), 'encomp.units.Q', 'Q', (['[1]', '"""bar"""'], {}), "([1], 'bar')\n", (1276, 1288), False, 'from encomp.units import Q\n'), ((1313, 1329), 'encomp.units.Q', 'Q', (['[23, 25]', '"""C"""'], {}), "([23, 25], 'C')\n", (1314, 1329), False, 'from encomp.units import Q\n'), ((1333, 1349), 'encomp.units.Q', 'Q', (['np.nan', '"""bar"""'], {}), "(np.nan, 'bar')\n", (1334, 1349), False, 'from encomp.units import Q\n'), ((1374, 1390), 'encomp.units.Q', 'Q', (['[23, 25]', '"""C"""'], {}), "([23, 25], 'C')\n", (1375, 1390), False, 'from encomp.units import Q\n'), ((1394, 1415), 'encomp.units.Q', 'Q', (['[1, np.nan]', '"""bar"""'], {}), "([1, np.nan], 'bar')\n", (1395, 1415), False, 'from encomp.units import Q\n'), ((1432, 1452), 'encomp.units.Q', 'Q', (['[25, 25, 63]', '"""C"""'], {}), "([25, 25, 63], 'C')\n", (1433, 1452), False, 'from encomp.units import Q\n'), ((1456, 1484), 'encomp.units.Q', 'Q', (['[np.nan, np.nan, 0.4]', 
'""""""'], {}), "([np.nan, np.nan, 0.4], '')\n", (1457, 1484), False, 'from encomp.units import Q\n'), ((1500, 1524), 'encomp.units.Q', 'Q', (['[25, np.nan, 63]', '"""C"""'], {}), "([25, np.nan, 63], 'C')\n", (1501, 1524), False, 'from encomp.units import Q\n'), ((1528, 1553), 'encomp.units.Q', 'Q', (['[np.nan, 0.2, 0.5]', '""""""'], {}), "([np.nan, 0.2, 0.5], '')\n", (1529, 1553), False, 'from encomp.units import Q\n'), ((1569, 1597), 'encomp.units.Q', 'Q', (['[25, np.nan, np.nan]', '"""C"""'], {}), "([25, np.nan, np.nan], 'C')\n", (1570, 1597), False, 'from encomp.units import Q\n'), ((1601, 1629), 'encomp.units.Q', 'Q', (['[np.nan, 0.2, np.nan]', '""""""'], {}), "([np.nan, 0.2, np.nan], '')\n", (1602, 1629), False, 'from encomp.units import Q\n'), ((2758, 2777), 'numpy.linspace', 'np.linspace', (['(25)', '(50)'], {}), '(25, 50)\n', (2769, 2777), True, 'import numpy as np\n'), ((2881, 2905), 'numpy.linspace', 'np.linspace', (['(25)', '(500)', '(10)'], {}), '(25, 500, 10)\n', (2892, 2905), True, 'import numpy as np\n'), ((2926, 2950), 'numpy.linspace', 'np.linspace', (['(0.5)', '(10)', '(10)'], {}), '(0.5, 10, 10)\n', (2937, 2950), True, 'import numpy as np\n'), ((4096, 4118), 'numpy.linspace', 'np.linspace', (['(50)', '(60)', 'N'], {}), '(50, 60, N)\n', (4107, 4118), True, 'import numpy as np\n'), ((4149, 4169), 'numpy.linspace', 'np.linspace', (['(2)', '(4)', 'N'], {}), '(2, 4, N)\n', (4160, 4169), True, 'import numpy as np\n'), ((4334, 4356), 'numpy.linspace', 'np.linspace', (['(50)', '(60)', 'N'], {}), '(50, 60, N)\n', (4345, 4356), True, 'import numpy as np\n'), ((4390, 4410), 'numpy.linspace', 'np.linspace', (['(2)', '(4)', 'N'], {}), '(2, 4, N)\n', (4401, 4410), True, 'import numpy as np\n'), ((6717, 6726), 'encomp.units.Q', 'Q', (['T', '"""C"""'], {}), "(T, 'C')\n", (6718, 6726), False, 'from encomp.units import Q\n'), ((6730, 6741), 'encomp.units.Q', 'Q', (['P', '"""bar"""'], {}), "(P, 'bar')\n", (6731, 6741), False, 'from encomp.units import Q\n'), 
((6745, 6749), 'encomp.units.Q', 'Q', (['R'], {}), '(R)\n', (6746, 6749), False, 'from encomp.units import Q\n'), ((1697, 1707), 'encomp.units.Q', 'Q', (['[]', '"""C"""'], {}), "([], 'C')\n", (1698, 1707), False, 'from encomp.units import Q\n'), ((1711, 1723), 'encomp.units.Q', 'Q', (['[]', '"""bar"""'], {}), "([], 'bar')\n", (1712, 1723), False, 'from encomp.units import Q\n'), ((1813, 1823), 'encomp.units.Q', 'Q', (['[]', '"""C"""'], {}), "([], 'C')\n", (1814, 1823), False, 'from encomp.units import Q\n'), ((1827, 1839), 'encomp.units.Q', 'Q', (['()', '"""bar"""'], {}), "((), 'bar')\n", (1828, 1839), False, 'from encomp.units import Q\n'), ((1929, 1939), 'encomp.units.Q', 'Q', (['[]', '"""C"""'], {}), "([], 'C')\n", (1930, 1939), False, 'from encomp.units import Q\n'), ((2449, 2473), 'encomp.units.Q', 'Q', (['[np.nan, np.nan]', '"""C"""'], {}), "([np.nan, np.nan], 'C')\n", (2450, 2473), False, 'from encomp.units import Q\n'), ((2491, 2525), 'encomp.units.Q', 'Q', (['[np.nan, np.nan, np.nan]', '"""bar"""'], {}), "([np.nan, np.nan, np.nan], 'bar')\n", (2492, 2525), False, 'from encomp.units import Q\n'), ((2554, 2578), 'encomp.units.Q', 'Q', (['[np.nan, np.nan]', '"""C"""'], {}), "([np.nan, np.nan], 'C')\n", (2555, 2578), False, 'from encomp.units import Q\n'), ((2582, 2594), 'encomp.units.Q', 'Q', (['[]', '"""bar"""'], {}), "([], 'bar')\n", (2583, 2594), False, 'from encomp.units import Q\n'), ((5895, 5904), 'encomp.units.Q', 'Q', (['T', '"""C"""'], {}), "(T, 'C')\n", (5896, 5904), False, 'from encomp.units import Q\n'), ((5908, 5919), 'encomp.units.Q', 'Q', (['P', '"""bar"""'], {}), "(P, 'bar')\n", (5909, 5919), False, 'from encomp.units import Q\n'), ((1945, 1957), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (1953, 1957), True, 'import numpy as np\n'), ((2116, 2128), 'encomp.units.Q', 'Q', (['[23]', '"""C"""'], {}), "([23], 'C')\n", (2117, 2128), False, 'from encomp.units import Q\n'), ((2132, 2145), 'encomp.units.Q', 'Q', (['[1]', '"""bar"""'], {}), 
"([1], 'bar')\n", (2133, 2145), False, 'from encomp.units import Q\n'), ((2221, 2231), 'encomp.units.Q', 'Q', (['(23)', '"""C"""'], {}), "(23, 'C')\n", (2222, 2231), False, 'from encomp.units import Q\n'), ((2235, 2248), 'encomp.units.Q', 'Q', (['[1]', '"""bar"""'], {}), "([1], 'bar')\n", (2236, 2248), False, 'from encomp.units import Q\n'), ((2324, 2336), 'encomp.units.Q', 'Q', (['[23]', '"""C"""'], {}), "([23], 'C')\n", (2325, 2336), False, 'from encomp.units import Q\n'), ((2340, 2351), 'encomp.units.Q', 'Q', (['(1)', '"""bar"""'], {}), "(1, 'bar')\n", (2341, 2351), False, 'from encomp.units import Q\n'), ((3156, 3180), 'numpy.linspace', 'np.linspace', (['(25)', '(500)', '(10)'], {}), '(25, 500, 10)\n', (3167, 3180), True, 'import numpy as np\n'), ((3205, 3229), 'numpy.linspace', 'np.linspace', (['(0.5)', '(10)', '(50)'], {}), '(0.5, 10, 50)\n', (3216, 3229), True, 'import numpy as np\n')] |
#! /usr/bin/env python
from __future__ import division
from builtins import range
from LLC_Membranes.llclib import file_rw, transform, topology
import mdtraj as md
import numpy as np
import matplotlib.path as mplPath
import mdtraj as md
from random import randint
import tqdm
class region:
    """
    Define a region as an extrusion of a polygon in the z direction
    """

    def __init__(self, sides):
        """
        :param sides: number of sides making up the region in the xy plane
        :return: region
        """
        self.sides = sides

    def xyregion(self, corners):
        """
        :param corners: points defining the corners of the polygon making up the xy region
        :return: a matplotlib Path tracing the polygon (also stored as self.path)
        """
        # BUG FIX: the Path was constructed and immediately discarded (dead store);
        # store it on the instance and return it so callers can actually use the region
        self.path = mplPath.Path(corners)
        return self.path
def thickness(filename, ref_atoms, grid, *traj, **kwargs):
    """ Measure membrane thickness based on the z-extent of reference atoms.

    :param filename: name of .gro file to read
    :param ref_atoms: names of atoms which thickness will be based on
    :param grid: if True, split the xy plane into a grid of regions and bootstrap
        region-wise max/min z values (requires kwargs['grid_res'])
    :param traj: (optional) trajectory of positions; if given, per-frame thickness
        trajectories are returned instead of single values
    :param kwargs: 'grid_res' (int, required when grid=True) and 'exclude'
        (residue name whose atoms are ignored, optional)
    :return: (thickness, z_max, z_min, thickness std) -- arrays if traj was passed,
        scalars otherwise
    """
    if traj:
        traj = np.asarray(traj)[0]  # *traj packs the positional argument into a tuple
        nT = traj.shape[0]  # number of trajectory points
        thick = np.zeros([nT])
        z_max = np.zeros([nT])
        z_min = np.zeros([nT])
        thick_std = np.zeros([nT])
        for t in range(nT):
            z_max_t = max(traj[t, :, 2])
            z_min_t = min(traj[t, :, 2])
            thick[t] = z_max_t - z_min_t
            z_max[t] = z_max_t
            z_min[t] = z_min_t
    else:
        # BUG FIX: use a context manager so the file handle is closed (it leaked before)
        with open(filename, "r") as f:
            a = f.readlines()  # all lines of the .gro file
        line = 0
        while a[line].count('HII') == 0:  # skip header lines (title + atom count)
            line += 1
        if grid:
            t = md.load(filename)
            pos = t.xyz[0, :, :]  # positions of all atoms
            # BUG FIX: kwargs['exclude'] raised KeyError when 'exclude' was not passed
            if kwargs.get('exclude'):
                keep = [a.index for a in t.topology.atoms if a.residue.name != kwargs['exclude']]
                pos = t.atom_slice(keep).xyz[0, :, :]
            # define boundaries of each grid area
            grid_res = kwargs['grid_res']
            nregions = (grid_res - 1) ** 2
            g = np.zeros([2, grid_res, grid_res])
            dims = a[-1].split()  # box vectors live on the last line of a .gro file
            xbox = np.linalg.norm([dims[0], dims[3], dims[4]])
            ybox = np.linalg.norm([dims[1], dims[5], dims[6]])
            yangle = np.arctan(float(dims[1]) / abs(float(dims[5])))  # monoclinic tilt angle
            for i in range(grid_res):
                g[0, i, :] = np.linspace(0, xbox, grid_res) + (float(i) / grid_res) * float(dims[5])
                g[1, :, i] = np.linspace(0, ybox, grid_res) * np.sin(yangle)
            corners = np.zeros([nregions, 4, 2])
            zmaxes = np.zeros([nregions])
            zmins = np.zeros([nregions])
            thicks = np.zeros([nregions])
            for i in range(grid_res - 1):
                for j in range(grid_res - 1):
                    # define corners of grid region (traced in order so the path closes)
                    r = i * (grid_res - 1) + j
                    corners[r, 0, :] = g[:, i, j]
                    corners[r, 1, :] = g[:, i + 1, j]
                    corners[r, 2, :] = g[:, i + 1, j + 1]
                    corners[r, 3, :] = g[:, i, j + 1]
                    # create a region using the corners (corners need to be traced in order)
                    path = mplPath.Path(corners[r, :, :])
                    contained = path.contains_points(pos[:, :2])  # which atoms lie in this region
                    z = pos[np.where(contained), 2]  # z positions of atoms contained in the region
                    zmaxes[r] = np.max(z)
                    zmins[r] = np.min(z)
                    thicks[r] = zmaxes[r] - zmins[r]
            # bootstrap the region-wise extremes to get statistics
            nboot = 2000
            vmax = np.zeros([nboot])
            vmin = np.zeros([nboot])
            for i in range(nboot):
                imax = randint(0, nregions - 1)
                imin = randint(0, nregions - 1)
                vmax[i] = zmaxes[imax]
                vmin[i] = zmins[imin]
            z_max = np.mean(vmax)
            z_min = np.mean(vmin)
            thick = np.mean(vmax - vmin)
            thick_std = np.std(vmax - vmin)
        else:
            z = []  # z positions of all reference atoms
            while a[line].count('HII') != 0:
                if str.strip(a[line][11:15]) in ref_atoms:  # fixed-column .gro atom-name field
                    z.append(float(a[line][36:44]))  # fixed-column z coordinate
                line += 1
            z_max = max(z)
            z_min = min(z)
            thick = z_max - z_min
            thick_std = 0
    return thick, z_max, z_min, thick_std
def conc(t, comp, b):
    """
    Calculate the average concentration of a named component in the membrane region.

    :param t: mdtraj trajectory object for the system being studied
    :param comp: atom name of the component whose concentration is wanted
    :param b: buffer -- fraction of the membrane thickness to exclude from the top
        and bottom before counting
    :return: (average concentration, its std, average xy cross-section, membrane
        thickness, upper z bound, lower z bound)
    """
    from pymbar import timeseries

    box = t.unitcell_vectors

    # average the z box dimension over the equilibrated part of the trajectory
    equil = timeseries.detectEquilibration(box[:, 2, 2])[0]
    thick = np.mean(box[equil:, 2, 2])

    # trim a buffer region off the top and bottom of the membrane
    buffer = thick * b
    z_max = thick - buffer
    z_min = buffer
    thick = z_max - z_min

    # slice out the atoms of interest
    keep = [a.index for a in t.topology.atoms if a.name == comp]
    pos = t.atom_slice(keep).xyz
    ncomp = pos.shape[1]  # number of components whose concentration is wanted
    nT = pos.shape[0]

    box_vol = np.zeros([nT])
    cross = np.zeros([nT])
    if b > 0:
        count = np.zeros([nT])  # count only atoms inside the buffered slab
    else:
        count = ncomp * np.ones([nT])  # no buffer: every atom counts

    for frame in range(nT):
        x_dim = np.linalg.norm(box[frame, 0, :])
        y_dim = np.linalg.norm(box[frame, 1, :])
        cross[frame] = x_dim * y_dim
        box_vol[frame] = x_dim * y_dim * thick
        if b > 0:
            for c_idx in range(ncomp):
                if z_max >= pos[frame, c_idx, 2] >= z_min:
                    count[frame] += 1

    factor = 1 / (1 * 10 ** -27)  # convert from ions/nm^3 to ions/m^3
    conc_per_frame = (count / box_vol) * factor

    avg_conc = np.mean(conc_per_frame)
    std = np.std(conc_per_frame)
    avg_cross = np.mean(cross)

    return avg_conc, std, avg_cross, thick, z_max, z_min
def avg_pore_loc(npores, pos, box, buffer=0, spline=False, npts=20, progress=False, bins=False, spline_name='spline.pl'):
    """ Calculate average pore location for each pore at each frame

    :param npores: the number of pores in the unit cell
    :param pos: the coordinates of the component(s) which you are using to locate the pore centers
    :param box: box vectors (t.unitcell_vectors when trajectory is loaded with mdtraj)
    :param buffer: z-distance from top and bottom of membrane to exclude from calculations
        (NOTE(review): docstring previously said "fraction of membrane thickness", but the code
        compares it directly against z coordinates -- confirm intended units)
    :param spline: trace pore centers with a spline
    :param npts: number of points making up the spline in each pore
    :param progress: show progress bar while constructing splines
    :param bins: return the bin centers of each spline for plotting purposes
    :param spline_name: name of spline. Include absolute path if not in same directory where script was run

    :type npores: int
    :type pos: numpy.ndarray, shape(ncomponents, 3) or numpy.ndarray, shape(nframes, ncomponents, 3)
    :type buffer: float
    :type spline: bool
    :type box: numpy.ndarray, shape(nframes, 3, 3)
    :type npts: int
    :type progress: bool
    :type bins: bool
    :type spline_name: str

    :return: numpy array containing the x, y coordinates of the center of each pore at each frame
    """
    if spline:
        if box is None:
            print('You must supply box vectors if you are to trace the pores with a spline')
            exit()
        else:
            print('Calculating pore spline...')
            centers, bin_centers = trace_pores(pos, box, npts, npores=4, progress=progress, savename=spline_name)
            if bins:
                return centers, bin_centers
            else:
                return centers
    else:
        if len(pos.shape) == 3:  # multiple frames
            nT = np.shape(pos)[0]
            comp_ppore = np.shape(pos)[1] // npores  # atoms per pore (assumed grouped sequentially)
            p_center = np.zeros([nT, npores, 2])
            for i in range(nT):
                positions = wrap_box(pos[i, ...], box[i, ...])
                if buffer > 0:
                    include = np.full(pos.shape[1], True)
                    # BUG FIX: the top cutoff was 'box[i, 2, 2] + buffer', which can never
                    # exclude atoms near the top of the box; subtract the buffer instead
                    include[np.where(pos[i, :, 2] > box[i, 2, 2] - buffer)] = False
                    include[np.where(pos[i, :, 2] < buffer)] = False
                    for j in range(npores):
                        # BUG FIX: the accumulator was pre-seeded with the unbuffered mean and
                        # the included positions were added on top of it; start from zero
                        total = np.zeros(2)
                        count = 0
                        for k in range(comp_ppore * j, comp_ppore * (j + 1)):
                            if include[k]:
                                total += positions[k, :2]
                                count += 1
                        if count > 0:
                            p_center[i, j, :] = total / count  # average of included atoms
                        else:
                            # no atoms survived the buffer: fall back to the unbuffered mean
                            # instead of dividing by zero
                            p_center[i, j, :] = positions[comp_ppore * j:comp_ppore * (j + 1), :2].mean(axis=0)
                else:
                    for j in range(npores):
                        p_center[i, j, :] = positions[comp_ppore * j:comp_ppore * (j + 1), :2].mean(axis=0)
        elif len(pos.shape) == 2:  # single frame
            comp_ppore = pos.shape[0] // npores
            p_center = np.zeros([npores, 2])
            for j in range(npores):
                for k in range(comp_ppore * j, comp_ppore * (j + 1)):
                    p_center[j, :] += pos[k, :2]
                p_center[j, :] /= comp_ppore
        else:
            return 'Please use a position array with valid dimensions'

        return p_center
def p2p(p_centers, distances):
    """
    Compute the pairwise pore-to-pore distances for the four pores at every frame.

    :param p_centers: x, y locations of the pore centers, shape (2, 4, nframes),
        in the format returned by avg_pore_loc()
    :param distances: number of distinct pore-to-pore distances (rows of output)
    :return: pore-to-pore distances, shape (distances, nframes), in the order
        1-2, 1-3, 1-4, 2-3, 2-4, 3-4
    """
    nT = np.shape(p_centers)[2]
    p2ps = np.zeros([distances, nT])
    # enumerate the six unordered pore pairs instead of writing each norm out by hand
    pairs = [(0, 1), (0, 2), (0, 3), (1, 2), (1, 3), (2, 3)]
    for frame in range(nT):
        for row, (first, second) in enumerate(pairs):
            p2ps[row, frame] = np.linalg.norm(p_centers[:, first, frame] - p_centers[:, second, frame])
    return p2ps
def limits(pos, pcenters):
    """
    Estimate the pore 'radius' based on the position of some component and its mean
    deviation from the pore center.

    :param pos: positions of all atoms included in making the estimate [nT, natoms, 3]
    :param pcenters: x, y positions of the pore centers for each frame [nT, npores, 2]
    :return: approximate radius of each pore at each frame [nT, npores]. Beyond this
        radius we have entered the alkane (tail) region.
    """
    nT = pcenters.shape[0]
    npores = pcenters.shape[1]
    natoms = pos.shape[1]
    atom_ppore = natoms // npores  # atoms per pore (assumed grouped sequentially)

    # PERFORMANCE FIX: the original computed every norm in a Python triple loop (with
    # a progress bar). Atom k of pore j is pos[:, j * atom_ppore + k], so reshaping
    # groups the xy coordinates by pore exactly as the original indexing did, and a
    # single broadcasted norm produces the same deviations at C speed.
    xy = pos[:, :npores * atom_ppore, :2].reshape(nT, npores, atom_ppore, 2)
    deviation = np.linalg.norm(xy - pcenters[:, :, np.newaxis, :], axis=3)

    # radius estimate: mean radial deviation of each pore's atoms from its center
    return deviation.mean(axis=2)
def put_in_box(pt, x_box, y_box, m, angle):
    """
    Shift a point back inside a monoclinic unit cell (modifies pt in place).

    :param pt: the point to place back in the box
    :param x_box: length of box in x dimension
    :param y_box: length of box in y dimension
    :param m: slope of the tilted box vector
    :param angle: angle between the x axis and the y box vector
    :return: the coordinate shifted into the box
    """
    # y-intercept of the box edge that does not pass through the origin (right side)
    intercept = -m * x_box
    # displacement along the tilted y box vector, decomposed into x and y parts
    dx = np.cos(angle) * x_box
    dy = np.sin(angle) * x_box

    if pt[1] < 0:  # point is below the box
        pt[0] += dx
        pt[1] += dy
    if pt[1] > y_box:  # point is above the box
        pt[0] -= dx
        pt[1] -= dy
    if pt[1] > m * pt[0]:  # point is left of the box
        pt[0] += x_box
    if pt[1] < (m * pt[0] + intercept):  # point is right of the box
        pt[0] -= x_box

    return pt
def trace_pores(pos, box, npoints, npores=4, progress=True, save=True, savename='spline.pl'):
    """
    Find the line which traces through the center of the pores
    :param pos: positions of atoms used to define pore location (args.ref) [natoms, 3]
    :param box: xy box vectors, [2, 2], mdtraj format (t.unitcell_vectors)
    :param npoints: number of points for spline in each pore
    :param npores: number of pores in unit cell (assumed that atoms are number sequentially by pore. i.e. pore 1 atom
    numbers all precede those in pore 2)
    :param progress: set to True if you want a progress bar to be shown
    :param save: save spline as pickled object
    :param savename: path to spline. If absolute path is not provided, will look in current directory

    :type pos: np.ndarray
    :type box: np.ndarray
    :type npoints: int
    :type npores: int
    :type progress: bool
    :type save: bool
    :type savename: str

    :return: points which trace the pore center
    """
    # Reuse a previously computed spline if one was pickled under savename
    try:
        print('Attempting to load spline ... ', end='', flush=True)
        spline = file_rw.load_object(savename)
        print('Success!')
        return spline[0], spline[1]

    except FileNotFoundError:
        print('%s not found ... Calculating spline' % savename)

    single_frame = False
    # NOTE(review): np.shape(pos.shape)[0] is the length of the shape tuple, i.e. pos.ndim
    if np.shape(pos.shape)[0] == 2:
        pos = pos[np.newaxis, ...]  # add a new axis if we are looking at a single frame
        box = box[np.newaxis, ...]
        single_frame = True

    nframes = pos.shape[0]
    atoms_p_pore = int(pos.shape[1] / npores)  # atoms in each pore

    # Build the four xy vertices of the (monoclinic) unit cell for every frame
    v = np.zeros([nframes, 4, 2])  # vertices of unitcell box
    bounds = []

    v[:, 0, :] = [0, 0]
    v[:, 1, 0] = box[:, 0, 0]
    v[:, 3, :] = np.vstack((box[:, 1, 0], box[:, 1, 1])).T
    v[:, 2, :] = v[:, 3, :] + np.vstack((box[:, 0, 0], np.zeros([nframes]))).T
    center = np.vstack((np.mean(v[..., 0], axis=1), np.mean(v[..., 1], axis=1), np.zeros(nframes))).T

    for t in range(nframes):
        bounds.append(mplPath.Path(v[t, ...]))  # create a path tracing the vertices, v

    angle = np.arcsin(box[:, 1, 1]/box[:, 0, 0])  # specific to case where magnitude of x and y box lengths are equal
    angle = np.where(box[:, 1, 0] < 0, angle + np.pi / 2, angle)  # haven't tested this well yet

    m = (v[:, 3, 1] - v[:, 0, 1]) / (v[:, 3, 0] - v[:, 0, 0])  # slope from points connecting first and third vertices

    centers = np.zeros([nframes, npores, npoints, 3])
    bin_centers = np.zeros([nframes, npores, npoints])

    for t in tqdm.tqdm(range(nframes), disable=(not progress)):
        for p in range(npores):

            pore = pos[t, p*atoms_p_pore:(p+1)*atoms_p_pore, :]  # coordinates for atoms belonging to a single pore

            # Wrap z-coordinates back into the box before binning
            while np.min(pore[:, 2]) < 0 or np.max(pore[:, 2]) > box[t, 2, 2]:  # because cross-linked configurations can extend very far up and down

                pore[:, 2] = np.where(pore[:, 2] < 0, pore[:, 2] + box[t, 2, 2], pore[:, 2])
                pore[:, 2] = np.where(pore[:, 2] > box[t, 2, 2], pore[:, 2] - box[t, 2, 2], pore[:, 2])

            _, bins = np.histogram(pore[:, 2], bins=npoints)  # bin z-positions

            section_indices = np.digitize(pore[:, 2], bins)  # list that tells which bin each atom belongs to
            bin_centers[t, p, :] = [(bins[i] + bins[i + 1])/2 for i in range(npoints)]

            for l in range(1, npoints + 1):

                atom_indices = np.where(section_indices == l)[0]

                before = pore[atom_indices[0], :]  # choose the first atom as a reference

                # Shift the section toward the box center so it is not split across
                # periodic boundaries, average, then shift the average back
                shift = transform.translate(pore[atom_indices, :], before, center[t, :])  # shift everything to towards the center

                for i in range(shift.shape[0]):  # check if the points are within the bounds of the unitcell
                    while not bounds[t].contains_point(shift[i, :2]):
                        shift[i, :] = put_in_box(shift[i, :], box[t, 0, 0], box[t, 1, 1], m[t], angle[t])  # if its not in the unitcell, shift it so it is

                c = [np.mean(shift, axis=0)]

                centers[t, p, l - 1, :] = transform.translate(c, center[t, :], before)  # move everything back to where it was

                # NOTE(review): contains_point is given a 3-component point here; the 2D
                # path presumably ignores the z component -- confirm with matplotlib docs
                while not bounds[t].contains_point(centers[t, p, l - 1, :]):  # make sure everything is in the box again
                    centers[t, p, l - 1, :] = put_in_box(centers[t, p, l - 1, :], box[t, 0, 0], box[t, 1, 1], m[t], angle[t])

    if single_frame:
        return centers[0, ...]  # doesn't return bin center yet

    else:

        if save:
            file_rw.save_object((centers, bin_centers), savename)

        return centers, bin_centers
def center_of_mass(pos, mass_atoms):
    """ Calculate the center-of-mass trajectory of each residue.

    :param pos: xyz coordinates of atoms, ordered so that each residue's atoms are
        contiguous and appear in the same order as mass_atoms
    :param mass_atoms: mass of each atom in one residue

    :type pos: np.array (nframes, natoms, 3)
    :type mass_atoms: list

    :return: center of mass of each residue at each frame, shape (nframes, nresidues, 3)
    """
    n_frames = pos.shape[0]
    atoms_per_res = len(mass_atoms)
    n_res = pos.shape[1] // atoms_per_res

    masses = np.asarray(mass_atoms)
    total_mass = sum(mass_atoms)

    com = np.zeros([n_frames, n_res, 3])
    for f in range(n_frames):
        for res in range(n_res):
            # mass-weight each atom's coordinates, then normalize by the residue mass
            segment = pos[f, res * atoms_per_res:(res + 1) * atoms_per_res, :]
            com[f, res, :] = (segment * masses[:, np.newaxis]).sum(axis=0) / total_mass

    return com
def residue_center_of_mass(t, res):
    """ Calculate the center of mass versus time of a residue in an MD trajectory.

    :param t: mdtraj trajectory object
    :param res: name of residue to track

    :type t: object
    :type res: str

    :return: center of mass of residue versus time
    """
    res_info = topology.Residue(res)  # residue attributes (natoms, per-atom masses, ...)

    # collect indices and names of every atom belonging to the residue of interest
    atom_indices = []
    atom_names = []
    for a in t.topology.atoms:
        if a.residue.name == res:
            atom_indices.append(a.index)
            atom_names.append(a.name)

    # one residue's worth of names, in file order, mapped to their masses
    names = atom_names[:res_info.natoms]
    mass = [res_info.mass[x] for x in names]

    print('Calculating center of mass trajectories of residue %s' % res_info.name)
    return center_of_mass(t.xyz[:, atom_indices, :], mass)
def compdensity(coord, pore_centers, box, cut=1.5, nbins=50, spline=False):
    """ Measure the density of a component as a function of the distance from the pore centers.

    :param coord: the coordinates of the component(s) which you want a radial distribution of at each frame
    :param pore_centers: a numpy array of the locations of each pore center at each trajectory frame
    :param box: box vectors at each frame (t.unitcell_vectors); only the z dimension is used here
    :param cut: cutoff distance for distance calculations. Will not count anything further than cut from the pore center
    :param nbins: number of bins in r direction
    :param spline: calculate RDF with respect to spline

    :type coord: numpy.ndarray
    :type pore_centers: numpy.ndarray
    :type cut: float
    :type nbins: int
    :type spline: bool

    :return: Radial distance from pore center r, and the density of a species, whose positions are defined by
    `coordinates`, as a function the distance from the pore center.
    """
    nT = coord.shape[0]
    pores = pore_centers.shape[1]
    density = np.zeros([nT, nbins])  # number / nm^3

    for t in tqdm.tqdm(range(nT), unit=' Frames'):
        for p in range(pores):

            if spline:
                # distance from the nearest point of the pore-tracing spline
                distances = radial_distance_spline(pore_centers[t, p, ...], coord[t, ...], box[t, ...])
            else:
                # straight xy distance from the (flat) pore center
                distances = np.linalg.norm(coord[t, :, :2] - pore_centers[t, p, :], axis=1)

            hist, bin_edges = np.histogram(distances, bins=nbins, range=(0, cut))
            density[t, :] += hist

        density[t, :] /= (pores * box[t, 2, 2])  # normalize by z-dimension

    # normalize based on volume of anulus where bin is located (just need to divide by area since height done above)
    # bin_edges is identical on every histogram call because range=(0, cut) is fixed,
    # so it is safe to reuse the last loop iteration's edges here
    r = np.zeros([nbins])
    for i in range(nbins):
        density[:, i] /= (np.pi * (bin_edges[i + 1] ** 2 - bin_edges[i] ** 2))
        r[i] = (bin_edges[i + 1] + bin_edges[i]) / 2  # center of bins

    return r, density
def distance_from_pore_center(coord, pore_centers, box, spline=False):
    """ Measure each solute's distance from its nearest pore center over time.

    :param coord: the coordinates of the component(s) whose radial distance is wanted at each frame
    :param pore_centers: a numpy array of the locations of each pore center at each trajectory frame
    :param box: box vectors at each frame (only used when spline=True)
    :param spline: calculate distances with respect to the pore-tracing spline

    :type coord: numpy.ndarray
    :type pore_centers: numpy.ndarray
    :type spline: bool

    :return: radial distance of each individual solute/component from its nearest pore
        center, as a function of time, shape (nT, nsolute)
    """
    nT = coord.shape[0]
    pores = pore_centers.shape[1]
    nsolute = coord.shape[1]
    r_distances = np.zeros([nT, nsolute])

    for t in tqdm.tqdm(range(nT), unit=' Frames'):
        rd = np.zeros([nsolute, pores])
        for p in range(pores):
            if spline:
                rd[:, p] = radial_distance_spline(pore_centers[t, p, ...], coord[t, ...], box[t, ...])
            else:
                rd[:, p] = np.linalg.norm(coord[t, :, :2] - pore_centers[t, p, :], axis=1)

        # Each solute is assigned its nearest pore center -- no assumption that a solute
        # stays in one pore or that pores hold equal numbers of solutes.
        # PERFORMANCE FIX: the original argsorted every row just to read element 0;
        # a row-wise minimum gives the identical result in O(pores) instead of O(pores log pores)
        r_distances[t, :] = rd.min(axis=1)

    return r_distances
def radial_distance_spline(spline, com, box):
    """ Distance from the pore center, measured against the closest-in-z spline point.

    Each center of mass is assigned to the spline point whose z-slab contains it
    (slab edges are the z-midpoints between consecutive spline points), then the xy
    distance to that spline point is returned.

    :param spline: coordinates of spline for a single pore and frame
    :param com: atomic center of mass coordinates
    :param box: unit cell vectors, mdtraj format (3, 3)

    :type spline: np.ndarray [npts_spline, 3]
    :type com: np.ndarray [n_com, 3]
    :type box: np.ndarray

    :return: array of distances from the pore center
    """
    # slab edges: 0, midpoints between consecutive spline z's, then the top of the box
    edges = np.zeros([spline.shape[0] + 1])
    edges[1:-1] = spline[:-1, 2] + ((spline[1:, 2] - spline[:-1, 2]) / 2)
    edges[-1] = box[2, 2]

    wrapped = wrap_box(com, box)  # make sure every center of mass lies inside the box

    bin_idx = np.digitize(wrapped[:, 2], edges)
    # clamp the niche case where a coordinate lies exactly on the lowest or highest edge
    bin_idx = np.clip(bin_idx, 1, edges.size - 1)

    return np.linalg.norm(wrapped[:, :2] - spline[bin_idx - 1, :2], axis=1)
def minimum_image_distance(dist, box):
    """ Apply the minimum image convention to a vector of distances.

    Assumes a monoclinic unit cell where the x box vector lies along the x-axis, the
    z box vector is perpendicular to the xy plane, and the y box vector makes an
    angle theta with the x-axis.

    :param dist: a vector of distances (n, 3) where n is the number of points
    :param box: box vectors meant to enclose dist, mdtraj format: (3, 3)
    :return: minimum image distances (a new array; the input is not modified)
    """
    lx = box[0, 0]  # length of x box vector
    ly = box[1, 1]  # perpendicular height of the box in y
    lz = box[2, 2]  # length of z box vector

    out = np.copy(dist)
    tilt = np.arcsin(ly / lx)  # angle between the y box vector and the x-axis
    xshift = lx * np.cos(tilt)  # x displacement that accompanies a y box translation

    # wrap x components
    while np.max(np.abs(out[:, 0])) > 0.5 * lx:  # repeat in case one box length is not enough
        out[:, 0] = np.where(out[:, 0] > 0.5 * lx, out[:, 0] - lx, out[:, 0])
        out[:, 0] = np.where(out[:, 0] < -0.5 * lx, out[:, 0] + lx, out[:, 0])

    # wrap y components; shifting y also moves x because the y box vector is tilted
    while np.amax(np.abs(out[:, 1])) > 0.5 * ly:
        out[np.where(out[:, 1] > 0.5 * ly)[0], :2] -= [xshift, ly]
        out[np.where(out[:, 1] < -0.5 * ly)[0], :2] += [xshift, ly]

    # wrap z components
    while np.max(np.abs(out[:, 2])) > 0.5 * lz:
        out[:, 2] = np.where(out[:, 2] > 0.5 * lz, out[:, 2] - lz, out[:, 2])
        out[:, 2] = np.where(out[:, 2] < -0.5 * lz, out[:, 2] + lz, out[:, 2])

    return out
def partition(com, pore_centers, r, buffer=0, unitcell=None, npores=4, spline=False, spline_range=None):
    """ Partition residue center of masses into tail and pore region

    :param com: positions of centers of mass of particle whose partition we are calculating
    :param pore_centers: positions of pore centers
    :param r: pore radius, outside of which atoms will be considered in the tail region
    :param buffer: z distance (nm) to cut out from top and bottom of membrane (in cases where there is a water gap)
    :param unitcell: unitcell vectors in mdtraj format (t.unitcell_vectors). Only needed if buffer and/or spline is used
    :param npores: number of pores
    :param spline: calculate partition with respect to pore spline
    :param spline_range: range of frames to use in spline. Provide a tuple with the first and last (non-inclusive)
    frame that should be included.

    :type com: numpy.ndarray (nT, ncom, 3)
    :type pore_centers: numpy.ndarray (nT, npores, 2) or (nT, npores, 3) or (nT, npores, npts, 3) if spline=True where
    npts=number of points in spline
    :type r: float
    :type buffer: float
    :type unitcell: numpy.ndarray (nT, 3, 3)
    :type npores: int
    :type spline: bool
    :type spline_path: str
    :type spline_range: NoneType or tuple

    :return part: boolean numpy array with shape (nT, com.shape[1]) where True indicates a center of mass that is
    inside the inner region (i.e. < r)
    """
    nT = com.shape[0]
    if spline:
        npts = pore_centers.shape[2]  # number of points in each spline
        # reconcile spline length with trajectory length when they disagree
        if nT < pore_centers.shape[0]:
            if spline_range is not None:
                start, end = spline_range
                pore_centers = pore_centers[start:end]
            else:
                print('The number of frames in the trajectory is less than the number frames in the spline. I will assume '
                      'that the difference has been chopped off from the front of the full trajectory and truncate the '
                      'spline accordingly. If this seems wrong, check out partition() in LLC_Membranes.llclib.physical')
                diff = pore_centers.shape[0] - nT
                pore_centers = pore_centers[diff:, ...]  # assumes trajectory h

    part = np.zeros([nT, com.shape[1]], dtype=bool)  # Will be changed to True if solute in pores

    print('Calculating solute partition...')
    for i in tqdm.tqdm(range(nT)):

        if buffer > 0:
            # NOTE(review): when buffer > 0 this filters the solutes, so the indices in
            # `pore` below refer to the filtered subset, yet they are used to index the
            # full `part[i, :]` row -- and in the spline branch the unfiltered z array
            # is broadcast against the filtered xy_positions. Confirm buffer > 0 is
            # ever combined with these paths; it looks like a latent bug.
            xy_positions = com[i, (com[i, :, 2] > buffer) & (com[i, :, 2] < unitcell[i, 2, 2] - buffer), :2]
        else:
            xy_positions = com[i, :, :2]

        if spline:
            z = com[i, :, 2]  # extract z-coordinates for this frame
            zbox = unitcell[i, 2, 2]  # z-box vector for this frame

            # make sure z-component of every particle in the box
            while np.max(z) > zbox or np.min(z) < 0:  # might need to do this multiple times
                z = np.where(z > zbox, z - zbox, z)
                z = np.where(z < 0, z + zbox, z)

            # assign each solute to the z-slab of the nearest spline point
            zbins = np.digitize(z, np.linspace(0, zbox, npts + 1))

            # handle niche case where coordinate lies exactly on the upper or lower bound
            zbins = np.where(zbins == 0, zbins + 1, zbins)
            zbins = np.where(zbins == npts + 1, zbins - 1, zbins)
            zbins -= 1  # digitize numbers bins starting at 1 (0 is below the bottom bin)

            pore = []
            for p in range(npores):
                # xy distance to the spline point in this solute's z-slab
                d = np.linalg.norm(xy_positions - pore_centers[i, p, zbins, :2], axis=1)
                pore += np.where(d <= r)[0].tolist()
        else:
            pore = []
            for p in range(npores):
                # flat xy distance to the pore center
                d = np.linalg.norm(xy_positions - pore_centers[i, p, :], axis=1)
                pore += np.where(d <= r)[0].tolist()

        part[i, pore] = True

    return part
def wrap_box(positions, box, tol=1e-6):
    """ Put all atoms inside a monoclinic box.

    Note: the xy columns of `positions` are modified in place (the function operates
    on a view); the z column is wrapped on a copy.

    :param positions: xyz atomic positions [n_atoms, 3]
    :param box: box vectors [3, 3] (as obtained from mdtraj t.unitcell_vectors)

    :type positions: np.ndarray
    :type box: np.ndarray
    :param tol: tolerance used when testing the tilted x boundaries
    :return: positions moved into the box
    """
    xy = positions[:, :2]  # view: xy wrapping below mutates the caller's array
    z = positions[:, 2]

    xbox, ybox, zbox = box[0, 0], box[1, 1], box[2, 2]

    theta = np.arcsin(ybox / xbox)  # angle between the y box vector and the x-axis
    slope = np.tan(theta)
    intercept = -slope * xbox  # y-intercept of the right-hand box edge
    xstep = xbox * np.cos(theta)  # x displacement accompanying a y box translation

    # wrap y (and the coupled x) coordinates
    while max(xy[:, 1]) > ybox or min(xy[:, 1]) < 0:
        xy[np.where(xy[:, 1] > ybox)[0], :2] -= [xstep, ybox]
        xy[np.where(xy[:, 1] < 0)[0], :2] += [xstep, ybox]

    # wrap x coordinates between the two tilted box edges (tolerance for corner case)
    while len(np.where(xy[:, 0] - (xy[:, 1] / slope) < -tol)[0]) > 0 or \
            len(np.where(xy[:, 0] - ((xy[:, 1] - intercept) / slope) > 0)[0]) > 0:
        xy[np.where(xy[:, 0] < (xy[:, 1] / slope))[0], 0] += xbox
        xy[np.where(xy[:, 0] > ((xy[:, 1] - intercept) / slope))[0], 0] -= xbox

    # wrap z coordinates
    while np.max(z) > zbox or np.min(z) < 0:  # might need to do this multiple times
        z = np.where(z > zbox, z - zbox, z)
        z = np.where(z < 0, z + zbox, z)

    return np.concatenate((xy, z[:, np.newaxis]), axis=1)
def fft_3D_monoclinic(xyz, box_vectors, bins):
    """ Calculate 3D discrete fourier transform for each frame of a trajectory of coordinates in monoclinic unit cell

    :param xyz: (nframes, natoms, 3) coordinate array of atoms whose 3D DFT we want to calculate
    :param box_vectors: matrix of box vectors of shape (nT, 3, 3)
    :param bins: number of bins in each dimension

    :type xyz: np.ndarray
    :type box_vectors: np.ndarray
    :type bins: list of ints

    :return: 3D structure factor
    :type: np.ndarray
    """

    nT = xyz.shape[0]  # number of frames

    L = np.linalg.norm(box_vectors, axis=2)

    locations, L = rescale(xyz, L)  # make unit cell constant size

    # define histogram bin edges
    x = np.linspace(0, L[0], int(bins[0]))
    y = np.linspace(0, L[1], int(bins[1]))
    z = np.linspace(0, L[2], int(bins[2]))

    zv = [0.0, 0.0, 0.0]  # zero vector

    # put all atoms inside box - works for single frame and multiframe
    for it in range(locations.shape[0]):  # looped to save memory
        locations[it, ...] = np.where(locations[it, ...] < L, locations[it, ...], locations[it, ...] - L)  # get positions in periodic cell
        locations[it, ...] = np.where(locations[it, ...] > zv, locations[it, ...], locations[it, ...] + L)

    # fourier transform loop
    sf = np.zeros([x.size - 1, y.size - 1, z.size - 1])
    for frame in tqdm.tqdm(range(nT), unit=' Frames'):
        H, edges = np.histogramdd(locations[frame, ...], bins=(x, y, z))
        # BUG FIX: the original used `if sf:`, which raises
        # "ValueError: The truth value of an array with more than one element
        # is ambiguous" for any multi-bin histogram, so the loop crashed on
        # the first frame. `sf.any()` performs the emptiness test explicitly.
        # NOTE(review): with this literal reading, the first frame is
        # accumulated without mean subtraction and subsequent frames with it;
        # the author may have intended a boolean flag here -- confirm intent.
        if sf.any():
            fft = np.fft.fftn(H - H.mean())
            sf += (fft * fft.conjugate()).real
        else:
            sf += np.abs(np.fft.fftn(H)) ** 2

    sf /= nT  # average of all frames

    return sf
"numpy.argsort",
"builtins.range",
"numpy.linalg.norm",
"numpy.sin",
"numpy.mean",
"matplotlib.path.Path",
"numpy.histogram",
"numpy.histogramdd",
"LLC_Membranes.llclib.transform.translate",
"numpy.where",
"numpy.asarray",
"numpy.fft.fftn",
"numpy.max",
"numpy.linspace",
"numpy.vstack",
... | [((5320, 5346), 'numpy.mean', 'np.mean', (['box[equil:, 2, 2]'], {}), '(box[equil:, 2, 2])\n', (5327, 5346), True, 'import numpy as np\n'), ((6596, 6610), 'numpy.zeros', 'np.zeros', (['[nT]'], {}), '([nT])\n', (6604, 6610), True, 'import numpy as np\n'), ((6624, 6633), 'builtins.range', 'range', (['nT'], {}), '(nT)\n', (6629, 6633), False, 'from builtins import range\n'), ((6702, 6715), 'numpy.mean', 'np.mean', (['conc'], {}), '(conc)\n', (6709, 6715), True, 'import numpy as np\n'), ((6726, 6738), 'numpy.std', 'np.std', (['conc'], {}), '(conc)\n', (6732, 6738), True, 'import numpy as np\n'), ((6755, 6769), 'numpy.mean', 'np.mean', (['cross'], {}), '(cross)\n', (6762, 6769), True, 'import numpy as np\n'), ((10666, 10691), 'numpy.zeros', 'np.zeros', (['[distances, nT]'], {}), '([distances, nT])\n', (10674, 10691), True, 'import numpy as np\n'), ((10760, 10769), 'builtins.range', 'range', (['nT'], {}), '(nT)\n', (10765, 10769), False, 'from builtins import range\n'), ((11823, 11857), 'numpy.zeros', 'np.zeros', (['[nT, npores, atom_ppore]'], {}), '([nT, npores, atom_ppore])\n', (11831, 11857), True, 'import numpy as np\n'), ((12131, 12145), 'numpy.zeros', 'np.zeros', (['[nT]'], {}), '([nT])\n', (12139, 12145), True, 'import numpy as np\n'), ((12158, 12172), 'numpy.zeros', 'np.zeros', (['[nT]'], {}), '([nT])\n', (12166, 12172), True, 'import numpy as np\n'), ((12344, 12366), 'numpy.zeros', 'np.zeros', (['[nT, npores]'], {}), '([nT, npores])\n', (12352, 12366), True, 'import numpy as np\n'), ((12381, 12390), 'builtins.range', 'range', (['nT'], {}), '(nT)\n', (12386, 12390), False, 'from builtins import range\n'), ((18627, 18673), 'numpy.zeros', 'np.zeros', (['[nframes, pos.shape[1] // natoms, 3]'], {}), '([nframes, pos.shape[1] // natoms, 3])\n', (18635, 18673), True, 'import numpy as np\n'), ((18732, 18746), 'builtins.range', 'range', (['nframes'], {}), '(nframes)\n', (18737, 18746), False, 'from builtins import range\n'), ((19363, 19384), 
'LLC_Membranes.llclib.topology.Residue', 'topology.Residue', (['res'], {}), '(res)\n', (19379, 19384), False, 'from LLC_Membranes.llclib import file_rw, transform, topology\n'), ((20909, 20930), 'numpy.zeros', 'np.zeros', (['[nT, nbins]'], {}), '([nT, nbins])\n', (20917, 20930), True, 'import numpy as np\n'), ((21590, 21607), 'numpy.zeros', 'np.zeros', (['[nbins]'], {}), '([nbins])\n', (21598, 21607), True, 'import numpy as np\n'), ((21621, 21633), 'builtins.range', 'range', (['nbins'], {}), '(nbins)\n', (21626, 21633), False, 'from builtins import range\n'), ((22633, 22656), 'numpy.zeros', 'np.zeros', (['[nT, nsolute]'], {}), '([nT, nsolute])\n', (22641, 22656), True, 'import numpy as np\n'), ((23960, 23991), 'numpy.zeros', 'np.zeros', (['[spline.shape[0] + 1]'], {}), '([spline.shape[0] + 1])\n', (23968, 23991), True, 'import numpy as np\n'), ((24422, 24451), 'numpy.digitize', 'np.digitize', (['com[:, 2]', 'edges'], {}), '(com[:, 2], edges)\n', (24433, 24451), True, 'import numpy as np\n'), ((24547, 24585), 'numpy.where', 'np.where', (['(zbins == 0)', '(zbins + 1)', 'zbins'], {}), '(zbins == 0, zbins + 1, zbins)\n', (24555, 24585), True, 'import numpy as np\n'), ((24598, 24645), 'numpy.where', 'np.where', (['(zbins == edges.size)', '(zbins - 1)', 'zbins'], {}), '(zbins == edges.size, zbins - 1, zbins)\n', (24606, 24645), True, 'import numpy as np\n'), ((24658, 24716), 'numpy.linalg.norm', 'np.linalg.norm', (['(com[:, :2] - spline[zbins - 1, :2])'], {'axis': '(1)'}), '(com[:, :2] - spline[zbins - 1, :2], axis=1)\n', (24672, 24716), True, 'import numpy as np\n'), ((25392, 25405), 'numpy.copy', 'np.copy', (['dist'], {}), '(dist)\n', (25399, 25405), True, 'import numpy as np\n'), ((25418, 25442), 'numpy.arcsin', 'np.arcsin', (['(y_box / x_box)'], {}), '(y_box / x_box)\n', (25427, 25442), True, 'import numpy as np\n'), ((28585, 28625), 'numpy.zeros', 'np.zeros', (['[nT, com.shape[1]]'], {'dtype': 'bool'}), '([nT, com.shape[1]], dtype=bool)\n', (28593, 28625), True, 
'import numpy as np\n'), ((30699, 30721), 'numpy.arcsin', 'np.arcsin', (['(ybox / xbox)'], {}), '(ybox / xbox)\n', (30708, 30721), True, 'import numpy as np\n'), ((30788, 30801), 'numpy.tan', 'np.tan', (['angle'], {}), '(angle)\n', (30794, 30801), True, 'import numpy as np\n'), ((31626, 31672), 'numpy.concatenate', 'np.concatenate', (['(xy, z[:, np.newaxis])'], {'axis': '(1)'}), '((xy, z[:, np.newaxis]), axis=1)\n', (31640, 31672), True, 'import numpy as np\n'), ((32259, 32294), 'numpy.linalg.norm', 'np.linalg.norm', (['box_vectors'], {'axis': '(2)'}), '(box_vectors, axis=2)\n', (32273, 32294), True, 'import numpy as np\n'), ((32654, 32679), 'builtins.range', 'range', (['locations.shape[0]'], {}), '(locations.shape[0])\n', (32659, 32679), False, 'from builtins import range\n'), ((32992, 33038), 'numpy.zeros', 'np.zeros', (['[x.size - 1, y.size - 1, z.size - 1]'], {}), '([x.size - 1, y.size - 1, z.size - 1])\n', (33000, 33038), True, 'import numpy as np\n'), ((768, 789), 'matplotlib.path.Path', 'mplPath.Path', (['corners'], {}), '(corners)\n', (780, 789), True, 'import matplotlib.path as mplPath\n'), ((1321, 1335), 'numpy.zeros', 'np.zeros', (['[nT]'], {}), '([nT])\n', (1329, 1335), True, 'import numpy as np\n'), ((1352, 1366), 'numpy.zeros', 'np.zeros', (['[nT]'], {}), '([nT])\n', (1360, 1366), True, 'import numpy as np\n'), ((1383, 1397), 'numpy.zeros', 'np.zeros', (['[nT]'], {}), '([nT])\n', (1391, 1397), True, 'import numpy as np\n'), ((1418, 1432), 'numpy.zeros', 'np.zeros', (['[nT]'], {}), '([nT])\n', (1426, 1432), True, 'import numpy as np\n'), ((1450, 1459), 'builtins.range', 'range', (['nT'], {}), '(nT)\n', (1455, 1459), False, 'from builtins import range\n'), ((5260, 5304), 'pymbar.timeseries.detectEquilibration', 'timeseries.detectEquilibration', (['box[:, 2, 2]'], {}), '(box[:, 2, 2])\n', (5290, 5304), False, 'from pymbar import timeseries\n'), ((5800, 5814), 'numpy.zeros', 'np.zeros', (['[nT]'], {}), '([nT])\n', (5808, 5814), True, 'import numpy as 
np\n'), ((5833, 5847), 'numpy.zeros', 'np.zeros', (['[nT]'], {}), '([nT])\n', (5841, 5847), True, 'import numpy as np\n'), ((5864, 5878), 'numpy.zeros', 'np.zeros', (['[nT]'], {}), '([nT])\n', (5872, 5878), True, 'import numpy as np\n'), ((5896, 5905), 'builtins.range', 'range', (['nT'], {}), '(nT)\n', (5901, 5905), False, 'from builtins import range\n'), ((6267, 6281), 'numpy.zeros', 'np.zeros', (['[nT]'], {}), '([nT])\n', (6275, 6281), True, 'import numpy as np\n'), ((6298, 6312), 'numpy.zeros', 'np.zeros', (['[nT]'], {}), '([nT])\n', (6306, 6312), True, 'import numpy as np\n'), ((6330, 6339), 'builtins.range', 'range', (['nT'], {}), '(nT)\n', (6335, 6339), False, 'from builtins import range\n'), ((10632, 10651), 'numpy.shape', 'np.shape', (['p_centers'], {}), '(p_centers)\n', (10640, 10651), True, 'import numpy as np\n'), ((10825, 10880), 'numpy.linalg.norm', 'np.linalg.norm', (['(p_centers[:, 0, i] - p_centers[:, 1, i])'], {}), '(p_centers[:, 0, i] - p_centers[:, 1, i])\n', (10839, 10880), True, 'import numpy as np\n'), ((10902, 10957), 'numpy.linalg.norm', 'np.linalg.norm', (['(p_centers[:, 0, i] - p_centers[:, 2, i])'], {}), '(p_centers[:, 0, i] - p_centers[:, 2, i])\n', (10916, 10957), True, 'import numpy as np\n'), ((10979, 11034), 'numpy.linalg.norm', 'np.linalg.norm', (['(p_centers[:, 0, i] - p_centers[:, 3, i])'], {}), '(p_centers[:, 0, i] - p_centers[:, 3, i])\n', (10993, 11034), True, 'import numpy as np\n'), ((11056, 11111), 'numpy.linalg.norm', 'np.linalg.norm', (['(p_centers[:, 1, i] - p_centers[:, 2, i])'], {}), '(p_centers[:, 1, i] - p_centers[:, 2, i])\n', (11070, 11111), True, 'import numpy as np\n'), ((11133, 11188), 'numpy.linalg.norm', 'np.linalg.norm', (['(p_centers[:, 1, i] - p_centers[:, 3, i])'], {}), '(p_centers[:, 1, i] - p_centers[:, 3, i])\n', (11147, 11188), True, 'import numpy as np\n'), ((11210, 11265), 'numpy.linalg.norm', 'np.linalg.norm', (['(p_centers[:, 2, i] - p_centers[:, 3, i])'], {}), '(p_centers[:, 2, i] - p_centers[:, 3, 
i])\n', (11224, 11265), True, 'import numpy as np\n'), ((11881, 11890), 'builtins.range', 'range', (['nT'], {}), '(nT)\n', (11886, 11890), False, 'from builtins import range\n'), ((11910, 11927), 'builtins.range', 'range', (['atom_ppore'], {}), '(atom_ppore)\n', (11915, 11927), False, 'from builtins import range\n'), ((12409, 12422), 'builtins.range', 'range', (['npores'], {}), '(npores)\n', (12414, 12422), False, 'from builtins import range\n'), ((14433, 14462), 'LLC_Membranes.llclib.file_rw.load_object', 'file_rw.load_object', (['savename'], {}), '(savename)\n', (14452, 14462), False, 'from LLC_Membranes.llclib import file_rw, transform, topology\n'), ((18765, 18784), 'builtins.range', 'range', (['com.shape[1]'], {}), '(com.shape[1])\n', (18770, 18784), False, 'from builtins import range\n'), ((20972, 20981), 'builtins.range', 'range', (['nT'], {}), '(nT)\n', (20977, 20981), False, 'from builtins import range\n'), ((21017, 21029), 'builtins.range', 'range', (['pores'], {}), '(pores)\n', (21022, 21029), False, 'from builtins import range\n'), ((22681, 22690), 'builtins.range', 'range', (['nT'], {}), '(nT)\n', (22686, 22690), False, 'from builtins import range\n'), ((22722, 22748), 'numpy.zeros', 'np.zeros', (['[nsolute, pores]'], {}), '([nsolute, pores])\n', (22730, 22748), True, 'import numpy as np\n'), ((22766, 22778), 'builtins.range', 'range', (['pores'], {}), '(pores)\n', (22771, 22778), False, 'from builtins import range\n'), ((25667, 25724), 'numpy.where', 'np.where', (['(d[:, 0] > 0.5 * x_box)', '(d[:, 0] - x_box)', 'd[:, 0]'], {}), '(d[:, 0] > 0.5 * x_box, d[:, 0] - x_box, d[:, 0])\n', (25675, 25724), True, 'import numpy as np\n'), ((25741, 25799), 'numpy.where', 'np.where', (['(d[:, 0] < -0.5 * x_box)', '(d[:, 0] + x_box)', 'd[:, 0]'], {}), '(d[:, 0] < -0.5 * x_box, d[:, 0] + x_box, d[:, 0])\n', (25749, 25799), True, 'import numpy as np\n'), ((26199, 26256), 'numpy.where', 'np.where', (['(d[:, 2] > 0.5 * z_box)', '(d[:, 2] - z_box)', 'd[:, 2]'], {}), 
'(d[:, 2] > 0.5 * z_box, d[:, 2] - z_box, d[:, 2])\n', (26207, 26256), True, 'import numpy as np\n'), ((26273, 26331), 'numpy.where', 'np.where', (['(d[:, 2] < -0.5 * z_box)', '(d[:, 2] + z_box)', 'd[:, 2]'], {}), '(d[:, 2] < -0.5 * z_box, d[:, 2] + z_box, d[:, 2])\n', (26281, 26331), True, 'import numpy as np\n'), ((28741, 28750), 'builtins.range', 'range', (['nT'], {}), '(nT)\n', (28746, 28750), False, 'from builtins import range\n'), ((31541, 31572), 'numpy.where', 'np.where', (['(z > zbox)', '(z - zbox)', 'z'], {}), '(z > zbox, z - zbox, z)\n', (31549, 31572), True, 'import numpy as np\n'), ((31585, 31613), 'numpy.where', 'np.where', (['(z < 0)', '(z + zbox)', 'z'], {}), '(z < 0, z + zbox, z)\n', (31593, 31613), True, 'import numpy as np\n'), ((32735, 32811), 'numpy.where', 'np.where', (['(locations[it, ...] < L)', 'locations[it, ...]', '(locations[it, ...] - L)'], {}), '(locations[it, ...] < L, locations[it, ...], locations[it, ...] - L)\n', (32743, 32811), True, 'import numpy as np\n'), ((32875, 32952), 'numpy.where', 'np.where', (['(locations[it, ...] > zv)', 'locations[it, ...]', '(locations[it, ...] + L)'], {}), '(locations[it, ...] > zv, locations[it, ...], locations[it, ...] 
+ L)\n', (32883, 32952), True, 'import numpy as np\n'), ((33066, 33075), 'builtins.range', 'range', (['nT'], {}), '(nT)\n', (33071, 33075), False, 'from builtins import range\n'), ((33113, 33166), 'numpy.histogramdd', 'np.histogramdd', (['locations[frame, ...]'], {'bins': '(x, y, z)'}), '(locations[frame, ...], bins=(x, y, z))\n', (33127, 33166), True, 'import numpy as np\n'), ((1146, 1162), 'numpy.asarray', 'np.asarray', (['traj'], {}), '(traj)\n', (1156, 1162), True, 'import numpy as np\n'), ((1954, 1971), 'mdtraj.load', 'md.load', (['filename'], {}), '(filename)\n', (1961, 1971), True, 'import mdtraj as md\n'), ((2371, 2404), 'numpy.zeros', 'np.zeros', (['[2, grid_res, grid_res]'], {}), '([2, grid_res, grid_res])\n', (2379, 2404), True, 'import numpy as np\n'), ((2457, 2500), 'numpy.linalg.norm', 'np.linalg.norm', (['[dims[0], dims[3], dims[4]]'], {}), '([dims[0], dims[3], dims[4]])\n', (2471, 2500), True, 'import numpy as np\n'), ((2520, 2563), 'numpy.linalg.norm', 'np.linalg.norm', (['[dims[1], dims[5], dims[6]]'], {}), '([dims[1], dims[5], dims[6]])\n', (2534, 2563), True, 'import numpy as np\n'), ((2653, 2668), 'builtins.range', 'range', (['grid_res'], {}), '(grid_res)\n', (2658, 2668), False, 'from builtins import range\n'), ((2867, 2893), 'numpy.zeros', 'np.zeros', (['[nregions, 4, 2]'], {}), '([nregions, 4, 2])\n', (2875, 2893), True, 'import numpy as np\n'), ((2915, 2935), 'numpy.zeros', 'np.zeros', (['[nregions]'], {}), '([nregions])\n', (2923, 2935), True, 'import numpy as np\n'), ((2956, 2976), 'numpy.zeros', 'np.zeros', (['[nregions]'], {}), '([nregions])\n', (2964, 2976), True, 'import numpy as np\n'), ((2998, 3018), 'numpy.zeros', 'np.zeros', (['[nregions]'], {}), '([nregions])\n', (3006, 3018), True, 'import numpy as np\n'), ((3041, 3060), 'builtins.range', 'range', (['(grid_res - 1)'], {}), '(grid_res - 1)\n', (3046, 3060), False, 'from builtins import range\n'), ((4017, 4034), 'numpy.zeros', 'np.zeros', (['[nboot]'], {}), '([nboot])\n', (4025, 
4034), True, 'import numpy as np\n'), ((4054, 4071), 'numpy.zeros', 'np.zeros', (['[nboot]'], {}), '([nboot])\n', (4062, 4071), True, 'import numpy as np\n'), ((4094, 4106), 'builtins.range', 'range', (['nboot'], {}), '(nboot)\n', (4099, 4106), False, 'from builtins import range\n'), ((4302, 4315), 'numpy.mean', 'np.mean', (['vmax'], {}), '(vmax)\n', (4309, 4315), True, 'import numpy as np\n'), ((4336, 4349), 'numpy.mean', 'np.mean', (['vmin'], {}), '(vmin)\n', (4343, 4349), True, 'import numpy as np\n'), ((4370, 4390), 'numpy.mean', 'np.mean', (['(vmax - vmin)'], {}), '(vmax - vmin)\n', (4377, 4390), True, 'import numpy as np\n'), ((4415, 4434), 'numpy.std', 'np.std', (['(vmax - vmin)'], {}), '(vmax - vmin)\n', (4421, 4434), True, 'import numpy as np\n'), ((5927, 5955), 'numpy.linalg.norm', 'np.linalg.norm', (['box[t, 0, :]'], {}), '(box[t, 0, :])\n', (5941, 5955), True, 'import numpy as np\n'), ((5976, 6004), 'numpy.linalg.norm', 'np.linalg.norm', (['box[t, 1, :]'], {}), '(box[t, 1, :])\n', (5990, 6004), True, 'import numpy as np\n'), ((6104, 6116), 'builtins.range', 'range', (['ncomp'], {}), '(ncomp)\n', (6109, 6116), False, 'from builtins import range\n'), ((6235, 6248), 'numpy.ones', 'np.ones', (['[nT]'], {}), '([nT])\n', (6242, 6248), True, 'import numpy as np\n'), ((6361, 6389), 'numpy.linalg.norm', 'np.linalg.norm', (['box[t, 0, :]'], {}), '(box[t, 0, :])\n', (6375, 6389), True, 'import numpy as np\n'), ((6410, 6438), 'numpy.linalg.norm', 'np.linalg.norm', (['box[t, 1, :]'], {}), '(box[t, 1, :])\n', (6424, 6438), True, 'import numpy as np\n'), ((8851, 8876), 'numpy.zeros', 'np.zeros', (['[nT, npores, 2]'], {}), '([nT, npores, 2])\n', (8859, 8876), True, 'import numpy as np\n'), ((8899, 8908), 'builtins.range', 'range', (['nT'], {}), '(nT)\n', (8904, 8908), False, 'from builtins import range\n'), ((11950, 11963), 'builtins.range', 'range', (['npores'], {}), '(npores)\n', (11955, 11963), False, 'from builtins import range\n'), ((12450, 12477), 'numpy.mean', 
'np.mean', (['deviation[t, p, :]'], {}), '(deviation[t, p, :])\n', (12457, 12477), True, 'import numpy as np\n'), ((14973, 14998), 'numpy.zeros', 'np.zeros', (['[nframes, 4, 2]'], {}), '([nframes, 4, 2])\n', (14981, 14998), True, 'import numpy as np\n'), ((15380, 15394), 'builtins.range', 'range', (['nframes'], {}), '(nframes)\n', (15385, 15394), False, 'from builtins import range\n'), ((15505, 15543), 'numpy.arcsin', 'np.arcsin', (['(box[:, 1, 1] / box[:, 0, 0])'], {}), '(box[:, 1, 1] / box[:, 0, 0])\n', (15514, 15543), True, 'import numpy as np\n'), ((15627, 15679), 'numpy.where', 'np.where', (['(box[:, 1, 0] < 0)', '(angle + np.pi / 2)', 'angle'], {}), '(box[:, 1, 0] < 0, angle + np.pi / 2, angle)\n', (15635, 15679), True, 'import numpy as np\n'), ((15855, 15894), 'numpy.zeros', 'np.zeros', (['[nframes, npores, npoints, 3]'], {}), '([nframes, npores, npoints, 3])\n', (15863, 15894), True, 'import numpy as np\n'), ((15917, 15953), 'numpy.zeros', 'np.zeros', (['[nframes, npores, npoints]'], {}), '([nframes, npores, npoints])\n', (15925, 15953), True, 'import numpy as np\n'), ((21300, 21351), 'numpy.histogram', 'np.histogram', (['distances'], {'bins': 'nbins', 'range': '(0, cut)'}), '(distances, bins=nbins, range=(0, cut))\n', (21312, 21351), True, 'import numpy as np\n'), ((25545, 25560), 'numpy.abs', 'np.abs', (['d[:, 0]'], {}), '(d[:, 0])\n', (25551, 25560), True, 'import numpy as np\n'), ((25843, 25858), 'numpy.abs', 'np.abs', (['d[:, 1]'], {}), '(d[:, 1])\n', (25849, 25858), True, 'import numpy as np\n'), ((26151, 26166), 'numpy.abs', 'np.abs', (['d[:, 2]'], {}), '(d[:, 2])\n', (26157, 26166), True, 'import numpy as np\n'), ((29538, 29576), 'numpy.where', 'np.where', (['(zbins == 0)', '(zbins + 1)', 'zbins'], {}), '(zbins == 0, zbins + 1, zbins)\n', (29546, 29576), True, 'import numpy as np\n'), ((29597, 29642), 'numpy.where', 'np.where', (['(zbins == npts + 1)', '(zbins - 1)', 'zbins'], {}), '(zbins == npts + 1, zbins - 1, zbins)\n', (29605, 29642), True, 
'import numpy as np\n'), ((29778, 29791), 'builtins.range', 'range', (['npores'], {}), '(npores)\n', (29783, 29791), False, 'from builtins import range\n'), ((29995, 30008), 'builtins.range', 'range', (['npores'], {}), '(npores)\n', (30000, 30008), False, 'from builtins import range\n'), ((31454, 31463), 'numpy.max', 'np.max', (['z'], {}), '(z)\n', (31460, 31463), True, 'import numpy as np\n'), ((31474, 31483), 'numpy.min', 'np.min', (['z'], {}), '(z)\n', (31480, 31483), True, 'import numpy as np\n'), ((3087, 3106), 'builtins.range', 'range', (['(grid_res - 1)'], {}), '(grid_res - 1)\n', (3092, 3106), False, 'from builtins import range\n'), ((4131, 4155), 'random.randint', 'randint', (['(0)', '(nregions - 1)'], {}), '(0, nregions - 1)\n', (4138, 4155), False, 'from random import randint\n'), ((4179, 4203), 'random.randint', 'randint', (['(0)', '(nregions - 1)'], {}), '(0, nregions - 1)\n', (4186, 4203), False, 'from random import randint\n'), ((8758, 8771), 'numpy.shape', 'np.shape', (['pos'], {}), '(pos)\n', (8766, 8771), True, 'import numpy as np\n'), ((10007, 10028), 'numpy.zeros', 'np.zeros', (['[npores, 2]'], {}), '([npores, 2])\n', (10015, 10028), True, 'import numpy as np\n'), ((10051, 10064), 'builtins.range', 'range', (['npores'], {}), '(npores)\n', (10056, 10064), False, 'from builtins import range\n'), ((12002, 12068), 'numpy.linalg.norm', 'np.linalg.norm', (['(pos[f, j * atom_ppore + i, :2] - pcenters[f, j, :])'], {}), '(pos[f, j * atom_ppore + i, :2] - pcenters[f, j, :])\n', (12016, 12068), True, 'import numpy as np\n'), ((12975, 12988), 'numpy.cos', 'np.cos', (['angle'], {}), '(angle)\n', (12981, 12988), True, 'import numpy as np\n'), ((12996, 13009), 'numpy.sin', 'np.sin', (['angle'], {}), '(angle)\n', (13002, 13009), True, 'import numpy as np\n'), ((13091, 13104), 'numpy.cos', 'np.cos', (['angle'], {}), '(angle)\n', (13097, 13104), True, 'import numpy as np\n'), ((13112, 13125), 'numpy.sin', 'np.sin', (['angle'], {}), '(angle)\n', (13118, 13125), 
True, 'import numpy as np\n'), ((15131, 15170), 'numpy.vstack', 'np.vstack', (['(box[:, 1, 0], box[:, 1, 1])'], {}), '((box[:, 1, 0], box[:, 1, 1]))\n', (15140, 15170), True, 'import numpy as np\n'), ((15982, 15996), 'builtins.range', 'range', (['nframes'], {}), '(nframes)\n', (15987, 15996), False, 'from builtins import range\n'), ((16044, 16057), 'builtins.range', 'range', (['npores'], {}), '(npores)\n', (16049, 16057), False, 'from builtins import range\n'), ((18934, 18951), 'numpy.sum', 'np.sum', (['w'], {'axis': '(0)'}), '(w, axis=0)\n', (18940, 18951), True, 'import numpy as np\n'), ((21205, 21268), 'numpy.linalg.norm', 'np.linalg.norm', (['(coord[t, :, :2] - pore_centers[t, p, :])'], {'axis': '(1)'}), '(coord[t, :, :2] - pore_centers[t, p, :], axis=1)\n', (21219, 21268), True, 'import numpy as np\n'), ((22952, 23015), 'numpy.linalg.norm', 'np.linalg.norm', (['(coord[t, :, :2] - pore_centers[t, p, :])'], {'axis': '(1)'}), '(coord[t, :, :2] - pore_centers[t, p, :], axis=1)\n', (22966, 23015), True, 'import numpy as np\n'), ((23380, 23393), 'numpy.argsort', 'np.argsort', (['r'], {}), '(r)\n', (23390, 23393), True, 'import numpy as np\n'), ((26004, 26017), 'numpy.cos', 'np.cos', (['angle'], {}), '(angle)\n', (26010, 26017), True, 'import numpy as np\n'), ((26085, 26098), 'numpy.cos', 'np.cos', (['angle'], {}), '(angle)\n', (26091, 26098), True, 'import numpy as np\n'), ((29278, 29309), 'numpy.where', 'np.where', (['(z > zbox)', '(z - zbox)', 'z'], {}), '(z > zbox, z - zbox, z)\n', (29286, 29309), True, 'import numpy as np\n'), ((29330, 29358), 'numpy.where', 'np.where', (['(z < 0)', '(z + zbox)', 'z'], {}), '(z < 0, z + zbox, z)\n', (29338, 29358), True, 'import numpy as np\n'), ((29395, 29425), 'numpy.linspace', 'np.linspace', (['(0)', 'zbox', '(npts + 1)'], {}), '(0, zbox, npts + 1)\n', (29406, 29425), True, 'import numpy as np\n'), ((29813, 29881), 'numpy.linalg.norm', 'np.linalg.norm', (['(xy_positions - pore_centers[i, p, zbins, :2])'], {'axis': '(1)'}), 
'(xy_positions - pore_centers[i, p, zbins, :2], axis=1)\n', (29827, 29881), True, 'import numpy as np\n'), ((30030, 30090), 'numpy.linalg.norm', 'np.linalg.norm', (['(xy_positions - pore_centers[i, p, :])'], {'axis': '(1)'}), '(xy_positions - pore_centers[i, p, :], axis=1)\n', (30044, 30090), True, 'import numpy as np\n'), ((31012, 31025), 'numpy.cos', 'np.cos', (['angle'], {}), '(angle)\n', (31018, 31025), True, 'import numpy as np\n'), ((31086, 31099), 'numpy.cos', 'np.cos', (['angle'], {}), '(angle)\n', (31092, 31099), True, 'import numpy as np\n'), ((2699, 2729), 'numpy.linspace', 'np.linspace', (['(0)', 'xbox', 'grid_res'], {}), '(0, xbox, grid_res)\n', (2710, 2729), True, 'import numpy as np\n'), ((2798, 2828), 'numpy.linspace', 'np.linspace', (['(0)', 'ybox', 'grid_res'], {}), '(0, ybox, grid_res)\n', (2809, 2828), True, 'import numpy as np\n'), ((2829, 2843), 'numpy.sin', 'np.sin', (['yangle'], {}), '(yangle)\n', (2835, 2843), True, 'import numpy as np\n'), ((3542, 3572), 'matplotlib.path.Path', 'mplPath.Path', (['corners[r, :, :]'], {}), '(corners[r, :, :])\n', (3554, 3572), True, 'import matplotlib.path as mplPath\n'), ((3826, 3835), 'numpy.max', 'np.max', (['z'], {}), '(z)\n', (3832, 3835), True, 'import numpy as np\n'), ((3867, 3876), 'numpy.min', 'np.min', (['z'], {}), '(z)\n', (3873, 3876), True, 'import numpy as np\n'), ((8800, 8813), 'numpy.shape', 'np.shape', (['pos'], {}), '(pos)\n', (8808, 8813), True, 'import numpy as np\n'), ((9037, 9064), 'numpy.full', 'np.full', (['pos.shape[1]', '(True)'], {}), '(pos.shape[1], True)\n', (9044, 9064), True, 'import numpy as np\n'), ((9249, 9262), 'builtins.range', 'range', (['npores'], {}), '(npores)\n', (9254, 9262), False, 'from builtins import range\n'), ((9764, 9777), 'builtins.range', 'range', (['npores'], {}), '(npores)\n', (9769, 9777), False, 'from builtins import range\n'), ((10091, 10134), 'builtins.range', 'range', (['(comp_ppore * j)', '(comp_ppore * (j + 1))'], {}), '(comp_ppore * j, comp_ppore * 
(j + 1))\n', (10096, 10134), False, 'from builtins import range\n'), ((14663, 14682), 'numpy.shape', 'np.shape', (['pos.shape'], {}), '(pos.shape)\n', (14671, 14682), True, 'import numpy as np\n'), ((15422, 15445), 'matplotlib.path.Path', 'mplPath.Path', (['v[t, ...]'], {}), '(v[t, ...])\n', (15434, 15445), True, 'import matplotlib.path as mplPath\n'), ((16568, 16606), 'numpy.histogram', 'np.histogram', (['pore[:, 2]'], {'bins': 'npoints'}), '(pore[:, 2], bins=npoints)\n', (16580, 16606), True, 'import numpy as np\n'), ((16661, 16690), 'numpy.digitize', 'np.digitize', (['pore[:, 2]', 'bins'], {}), '(pore[:, 2], bins)\n', (16672, 16690), True, 'import numpy as np\n'), ((16858, 16879), 'builtins.range', 'range', (['(1)', '(npoints + 1)'], {}), '(1, npoints + 1)\n', (16863, 16879), False, 'from builtins import range\n'), ((18114, 18167), 'LLC_Membranes.llclib.file_rw.save_object', 'file_rw.save_object', (['(centers, bin_centers)', 'savename'], {}), '((centers, bin_centers), savename)\n', (18133, 18167), False, 'from LLC_Membranes.llclib import file_rw, transform, topology\n'), ((25956, 25987), 'numpy.where', 'np.where', (['(d[:, 1] > 0.5 * y_box)'], {}), '(d[:, 1] > 0.5 * y_box)\n', (25964, 25987), True, 'import numpy as np\n'), ((26036, 26068), 'numpy.where', 'np.where', (['(d[:, 1] < -0.5 * y_box)'], {}), '(d[:, 1] < -0.5 * y_box)\n', (26044, 26068), True, 'import numpy as np\n'), ((29183, 29192), 'numpy.max', 'np.max', (['z'], {}), '(z)\n', (29189, 29192), True, 'import numpy as np\n'), ((29203, 29212), 'numpy.min', 'np.min', (['z'], {}), '(z)\n', (29209, 29212), True, 'import numpy as np\n'), ((30969, 30994), 'numpy.where', 'np.where', (['(xy[:, 1] > ybox)'], {}), '(xy[:, 1] > ybox)\n', (30977, 30994), True, 'import numpy as np\n'), ((31044, 31066), 'numpy.where', 'np.where', (['(xy[:, 1] < 0)'], {}), '(xy[:, 1] < 0)\n', (31052, 31066), True, 'import numpy as np\n'), ((31160, 31200), 'numpy.where', 'np.where', (['(xy[:, 0] - xy[:, 1] / m < -tol)'], {}), '(xy[:, 0] 
- xy[:, 1] / m < -tol)\n', (31168, 31200), True, 'import numpy as np\n'), ((31232, 31275), 'numpy.where', 'np.where', (['(xy[:, 0] - (xy[:, 1] - b) / m > 0)'], {}), '(xy[:, 0] - (xy[:, 1] - b) / m > 0)\n', (31240, 31275), True, 'import numpy as np\n'), ((31298, 31331), 'numpy.where', 'np.where', (['(xy[:, 0] < xy[:, 1] / m)'], {}), '(xy[:, 0] < xy[:, 1] / m)\n', (31306, 31331), True, 'import numpy as np\n'), ((31360, 31399), 'numpy.where', 'np.where', (['(xy[:, 0] > (xy[:, 1] - b) / m)'], {}), '(xy[:, 0] > (xy[:, 1] - b) / m)\n', (31368, 31399), True, 'import numpy as np\n'), ((33312, 33326), 'numpy.fft.fftn', 'np.fft.fftn', (['H'], {}), '(H)\n', (33323, 33326), True, 'import numpy as np\n'), ((9094, 9140), 'numpy.where', 'np.where', (['(pos[i, :, 2] > box[i, 2, 2] + buffer)'], {}), '(pos[i, :, 2] > box[i, 2, 2] + buffer)\n', (9102, 9140), True, 'import numpy as np\n'), ((9178, 9209), 'numpy.where', 'np.where', (['(pos[i, :, 2] < buffer)'], {}), '(pos[i, :, 2] < buffer)\n', (9186, 9209), True, 'import numpy as np\n'), ((9439, 9482), 'builtins.range', 'range', (['(comp_ppore * j)', '(comp_ppore * (j + 1))'], {}), '(comp_ppore * j, comp_ppore * (j + 1))\n', (9444, 9482), False, 'from builtins import range\n'), ((15284, 15310), 'numpy.mean', 'np.mean', (['v[..., 0]'], {'axis': '(1)'}), '(v[..., 0], axis=1)\n', (15291, 15310), True, 'import numpy as np\n'), ((15312, 15338), 'numpy.mean', 'np.mean', (['v[..., 1]'], {'axis': '(1)'}), '(v[..., 1], axis=1)\n', (15319, 15338), True, 'import numpy as np\n'), ((15340, 15357), 'numpy.zeros', 'np.zeros', (['nframes'], {}), '(nframes)\n', (15348, 15357), True, 'import numpy as np\n'), ((16369, 16432), 'numpy.where', 'np.where', (['(pore[:, 2] < 0)', '(pore[:, 2] + box[t, 2, 2])', 'pore[:, 2]'], {}), '(pore[:, 2] < 0, pore[:, 2] + box[t, 2, 2], pore[:, 2])\n', (16377, 16432), True, 'import numpy as np\n'), ((16466, 16540), 'numpy.where', 'np.where', (['(pore[:, 2] > box[t, 2, 2])', '(pore[:, 2] - box[t, 2, 2])', 'pore[:, 2]'], 
{}), '(pore[:, 2] > box[t, 2, 2], pore[:, 2] - box[t, 2, 2], pore[:, 2])\n', (16474, 16540), True, 'import numpy as np\n'), ((17075, 17139), 'LLC_Membranes.llclib.transform.translate', 'transform.translate', (['pore[atom_indices, :]', 'before', 'center[t, :]'], {}), '(pore[atom_indices, :], before, center[t, :])\n', (17094, 17139), False, 'from LLC_Membranes.llclib import file_rw, transform, topology\n'), ((17212, 17233), 'builtins.range', 'range', (['shift.shape[0]'], {}), '(shift.shape[0])\n', (17217, 17233), False, 'from builtins import range\n'), ((17626, 17670), 'LLC_Membranes.llclib.transform.translate', 'transform.translate', (['c', 'center[t, :]', 'before'], {}), '(c, center[t, :], before)\n', (17645, 17670), False, 'from LLC_Membranes.llclib import file_rw, transform, topology\n'), ((3711, 3730), 'numpy.where', 'np.where', (['contained'], {}), '(contained)\n', (3719, 3730), True, 'import numpy as np\n'), ((15232, 15251), 'numpy.zeros', 'np.zeros', (['[nframes]'], {}), '([nframes])\n', (15240, 15251), True, 'import numpy as np\n'), ((16203, 16221), 'numpy.min', 'np.min', (['pore[:, 2]'], {}), '(pore[:, 2])\n', (16209, 16221), True, 'import numpy as np\n'), ((16229, 16247), 'numpy.max', 'np.max', (['pore[:, 2]'], {}), '(pore[:, 2])\n', (16235, 16247), True, 'import numpy as np\n'), ((16816, 16830), 'builtins.range', 'range', (['npoints'], {}), '(npoints)\n', (16821, 16830), False, 'from builtins import range\n'), ((16917, 16947), 'numpy.where', 'np.where', (['(section_indices == l)'], {}), '(section_indices == l)\n', (16925, 16947), True, 'import numpy as np\n'), ((17555, 17577), 'numpy.mean', 'np.mean', (['shift'], {'axis': '(0)'}), '(shift, axis=0)\n', (17562, 17577), True, 'import numpy as np\n'), ((29906, 29922), 'numpy.where', 'np.where', (['(d <= r)'], {}), '(d <= r)\n', (29914, 29922), True, 'import numpy as np\n'), ((30115, 30131), 'numpy.where', 'np.where', (['(d <= r)'], {}), '(d <= r)\n', (30123, 30131), True, 'import numpy as np\n')] |
# Copyright (c) 2011-2020, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory
# Written by <NAME>, <NAME>, and <NAME>
# e-mail: <EMAIL>
# LLNL-CODE-507071
# All rights reserved.
# This file is part of PDV. For details, see <URL describing code and
# how to download source>. Please also read "Additional BSD Notice".
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# Redistributions of source code must retain the above copyright
# notice, this list of conditions and the disclaimer below.
# Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the disclaimer (as noted below)
# in the documentation and/or other materials provided with the
# distribution. Neither the name of the LLNS/LLNL nor the names of
# its contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL LAWRENCE
# LIVERMORE NATIONAL SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
# USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
# OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGE.
# Additional BSD Notice
# 1. This notice is required to be provided under our contract with
# the U.S. Department of Energy (DOE). This work was produced at
# Lawrence Livermore National Laboratory under Contract
# No. DE-AC52-07NA27344 with the DOE.
# 2. Neither the United States Government nor Lawrence Livermore
# National Security, LLC nor any of their employees, makes any
# warranty, express or implied, or assumes any liability or
# responsibility for the accuracy, completeness, or usefulness of any
# information, apparatus, product, or process disclosed, or represents
# that its use would not infringe privately-owned rights.
# 3. Also, reference herein to any specific commercial products,
# process, or services by trade name, trademark, manufacturer or
# otherwise does not necessarily constitute or imply its endorsement,
# recommendation, or favoring by the United States Government or
# Lawrence Livermore National Security, LLC. The views and opinions
# of authors expressed herein do not necessarily state or reflect
# those of the United States Government or Lawrence Livermore National
# Security, LLC, and shall not be used for advertising or product
# endorsement purposes.
import sys
import numpy as np
from scipy import interpolate
class Curve(object):
    """A named 1-D data curve (paired x/y arrays) plus plotting metadata.

    Binary arithmetic (+, -, *, /) first interpolates both operands onto a
    common x grid via ``getinterp`` and then combines the y values pointwise.
    ``**`` and unary ``-`` operate on a single curve.  All operators return a
    brand-new Curve; the operands are not modified (except that ``/`` patches
    zeros in its *interpolated copy* of the denominator).
    """
    # --- identification ---
    name = ''
    filename = ''
    plotname = ''
    # --- plot styling (matplotlib-oriented) ---
    color = ''
    edited = False
    scatter = False
    linespoints = False
    linewidth = None
    linestyle = '-'
    drawstyle = 'default'
    dashes = None
    hidden = False
    # --- data arrays ---
    x = np.empty(0)
    y = np.empty(0)
    ebar = None  # error bar values
    erange = None  # error range values
    marker = '.'  # Use matplotlib markers when setting directly
    markerstyle = None
    markersize = 3
    markerfacecolor = None
    markeredgecolor = None
    plotprecedence = 0
    # --- axis labelling ---
    xlabel = ''
    ylabel = ''
    title = ''
    legend_show = True
    def __init__(self, filename='', name=''):
        """Create a curve tagged with a source filename and a display name."""
        self.filename = filename
        self.name = name
    def __add__(a, b):
        """Return a new curve that is the pointwise sum of a and b."""
        c = Curve('', '')
        c.drawstyle = a.drawstyle
        # Build a combined plot name; strip() handles empty operand names.
        c.plotname = str(a.plotname + ' + ' + b.plotname + ' ').strip(' ')
        ia, ib = getinterp(a, b)
        if ia.x is not None and ib.x is not None:
            c.x = ia.x
            c.y = ia.y + ib.y
        return c
    def __sub__(a, b):
        """Return a new curve that is the pointwise difference a - b."""
        c = Curve('', '')
        c.drawstyle = a.drawstyle
        c.plotname = str(a.plotname + ' - ' + b.plotname + ' ').strip(' ')
        ia, ib = getinterp(a, b)
        if ia.x is not None and ib.x is not None:
            c.x = ia.x
            c.y = ia.y - ib.y
        return c
    def __mul__(a, b):
        """Return a new curve that is the pointwise product a * b."""
        c = Curve('', '')
        c.drawstyle = a.drawstyle
        c.plotname = str(a.plotname + ' * ' + b.plotname + ' ').strip(' ')
        ia, ib = getinterp(a, b)
        if ia.x is not None and ib.x is not None:
            c.x = ia.x
            c.y = ia.y * ib.y
        return c
    def __div__(a, b):
        """Return a / b pointwise (Python 2 division protocol).

        Zeros in the interpolated denominator are temporarily replaced with
        a tiny value so the division succeeds, and the corresponding result
        points are then set to sys.maxsize to flag the singularity.
        """
        c = Curve('', '')
        c.drawstyle = a.drawstyle
        c.plotname = str(a.plotname + ' / ' + b.plotname + ' ').strip(' ')
        ia, ib = getinterp(a, b)
        if ia.x is not None and ib.x is not None:
            c.x = ia.x
            # np.where returns a 1-tuple of index arrays; iterating it yields
            # the index array itself, so the fancy-indexed assignment below
            # patches every zero position in one pass.
            zero_indices = np.where(ib.y == 0)
            for idx in zero_indices:
                ib.y[idx] = 0.000000001
            c.y = ia.y / ib.y
            for idx in zero_indices:
                c.y[idx] = float(sys.maxsize)
        return c
    def __truediv__(a,b):
        """Return a / b pointwise (Python 3 division protocol).

        Same zero-denominator handling as __div__: zeros become a tiny value
        before dividing, and those result points are set to sys.maxsize.
        """
        c = Curve('', '')
        c.drawstyle = a.drawstyle
        c.plotname = str(a.plotname + ' / ' + b.plotname + ' ').strip(' ')
        ia, ib = getinterp(a, b)
        if ia.x is not None and ib.x is not None:
            c.x = ia.x
            zero_indices = np.where(ib.y == 0)
            for idx in zero_indices:
                ib.y[idx] = 0.000000001
            c.y = ia.y / ib.y
            for idx in zero_indices:
                c.y[idx] = float(sys.maxsize)
        return c
    def __pow__(a, b):
        """Return a new curve with each y raised to the power b.

        Points whose result is NaN (e.g. negative base with fractional
        exponent) are dropped from both x and y.
        """
        c = Curve('', '')
        c.drawstyle = a.drawstyle
        c.plotname = str(a.plotname + '^' + str(b)).strip(' ')
        c.x = np.array(a.x)
        c.y = np.power(a.y, b)
        nans = np.isnan(c.y)  # remove NaNs
        c.x = c.x[~nans]
        c.y = c.y[~nans]
        return c
    def __neg__(a):
        """Return a new curve with the y values negated."""
        c = Curve('', '')
        c.drawstyle = a.drawstyle
        c.plotname = str('-' + a.plotname)
        c.x = np.array(a.x)
        c.y = np.array(-a.y)
        return c
    ## Return a deep-ish copy: data arrays are duplicated, styling is shared.
    def copy(self):
        """Return a new Curve with copied data arrays and styling attributes."""
        c = Curve(self.filename, self.name)
        c.plotname = self.plotname
        c.x = np.array(self.x)
        c.y = np.array(self.y)
        c.color = self.color
        c.edited = self.edited
        c.scatter = self.scatter
        c.linespoints = self.linespoints
        c.linewidth = self.linewidth
        c.linestyle = self.linestyle
        c.drawstyle = self.drawstyle
        c.dashes = self.dashes
        c.hidden = self.hidden
        c.marker = self.marker
        c.markerstyle = self.markerstyle
        c.markersize = self.markersize
        c.markeredgecolor = self.markeredgecolor
        c.markerfacecolor = self.markerfacecolor
        c.ebar = self.ebar
        c.erange = self.erange
        c.plotprecedence = self.plotprecedence
        c.xlabel = self.xlabel
        c.ylabel = self.ylabel
        c.title = self.title
        return c
    ## Return a new copy of the curve scaled to unit Euclidean norm.
    def normalize(self):
        """Return a copy whose y has unit Euclidean norm.

        An all-zero curve is returned unchanged to avoid division by zero.
        """
        c = self.copy()
        norm = np.linalg.norm(c.y)
        if norm == 0:
            return c
        c.y /= float(norm)
        c.name = "Normalized %s" % self.plotname
        return c
def getinterp(a, b, left=None, right=None, samples=100, match='domain'):
    """
    Interpolate two curves onto comparable grids.

    :param a: Curve A
    :type a: curve
    :param b: Curve B
    :type b: curve
    :param left: Value used for points left of the data domain (default: edge y).
    :type left: float, optional
    :param right: Value used for points right of the data domain (default: edge y).
    :type right: float, optional
    :param samples: Number of samples for curve A in 'step' mode.
    :type samples: int, optional
    :param match: 'domain' puts both curves on the union of their x values;
        'step' samples each curve on its own domain with a common step size.
    :type match: str
    :returns: curve pair -- interpolated versions of a and b
    :raises ValueError: if *match* is not 'domain' or 'step'
    """
    if match == 'domain':
        # Sorted union of both x domains.
        merged = sorted(set(a.x) | set(b.x))
        ia = a.copy()
        ia.x = np.array(merged)
        ia.y = np.interp(merged, a.x, a.y, left, right)
        ib = Curve('', '')
        ib.x = np.array(merged)
        ib.y = np.interp(merged, b.x, b.y, left, right)
        return ia, ib
    if match == 'step':
        # Sample A uniformly, then sample B with (roughly) the same spacing.
        ax, step = np.linspace(min(a.x), max(a.x), num=samples, retstep=True)
        nb_samples = int((max(b.x) - min(b.x)) / step)
        nb_samples = max(nb_samples, 1)
        bx = np.linspace(min(b.x), max(b.x), nb_samples)
        ia = a.copy()
        ia.x = ax
        ia.y = np.interp(ax, a.x, a.y, left, right)
        ib = Curve('', '')
        ib.x = bx
        ib.y = np.interp(bx, b.x, b.y, left, right)
        return ia, ib
    raise ValueError("{} is not a supported option for match".format(match))
def interp1d(a, num=100, retstep=False):
    """
    Resample a curve onto a uniform grid with *num* points.

    :param a: Curve A
    :type a: curve
    :param num: Number of samples to generate (non-negative). Default 100.
    :type num: int, optional
    :param retstep: If True, also return the spacing between samples.
    :type retstep: bool, optional
    :returns: the resampled curve, plus the sample spacing when retstep is True
    """
    num = int(num)
    # Linear interpolant of the original data; out-of-range points map to 0.
    interpolant = interpolate.interp1d(
        a.x, a.y, kind='linear', bounds_error=False, fill_value=0)
    resampled = a.copy()
    if retstep:
        resampled.x, step = np.linspace(
            min(a.x), max(a.x), num=num, retstep=True)
        resampled.y = interpolant(resampled.x)
        return resampled, step
    resampled.x = np.linspace(min(a.x), max(a.x), num=num, retstep=False)
    resampled.y = interpolant(resampled.x)
    return resampled
def append(a, b):
    """
    Merge curve a and curve b over the union of their domains.  Where the
    domains overlap (or a curve holds duplicate x values), the merged y value
    is the average of every y sample present at that x.

    :param a: Curve A
    :type a: curve
    :param b: Curve B
    :type b: curve
    :return: a new curve resulting from the merging of curve a and curve b
    """
    ux = sorted(set(a.x).union(set(b.x)))  # union of x values
    aub = Curve('', '')
    aub.x = np.array(ux)
    aub.y = np.zeros(len(aub.x))
    for i, xval in enumerate(aub.x):
        # Collect every y sample (from either curve) located at this x.
        # Renamed accumulator: the original shadowed the builtin `sum`.
        samples = [float(a.y[idx]) for idx in np.where(a.x == xval)[0]]
        samples += [float(b.y[idx]) for idx in np.where(b.x == xval)[0]]
        # xval came from the union of the domains, so samples is never empty.
        aub.y[i] = sum(samples) / float(len(samples))
    return aub
| [
"numpy.power",
"numpy.where",
"scipy.interpolate.interp1d",
"numpy.array",
"numpy.isnan",
"numpy.empty",
"numpy.interp",
"numpy.linalg.norm"
] | [((3437, 3448), 'numpy.empty', 'np.empty', (['(0)'], {}), '(0)\n', (3445, 3448), True, 'import numpy as np\n'), ((3457, 3468), 'numpy.empty', 'np.empty', (['(0)'], {}), '(0)\n', (3465, 3468), True, 'import numpy as np\n'), ((9942, 10021), 'scipy.interpolate.interp1d', 'interpolate.interp1d', (['a.x', 'a.y'], {'kind': '"""linear"""', 'bounds_error': '(False)', 'fill_value': '(0)'}), "(a.x, a.y, kind='linear', bounds_error=False, fill_value=0)\n", (9962, 10021), False, 'from scipy import interpolate\n'), ((10740, 10752), 'numpy.array', 'np.array', (['ux'], {}), '(ux)\n', (10748, 10752), True, 'import numpy as np\n'), ((6073, 6086), 'numpy.array', 'np.array', (['a.x'], {}), '(a.x)\n', (6081, 6086), True, 'import numpy as np\n'), ((6101, 6117), 'numpy.power', 'np.power', (['a.y', 'b'], {}), '(a.y, b)\n', (6109, 6117), True, 'import numpy as np\n'), ((6133, 6146), 'numpy.isnan', 'np.isnan', (['c.y'], {}), '(c.y)\n', (6141, 6146), True, 'import numpy as np\n'), ((6376, 6389), 'numpy.array', 'np.array', (['a.x'], {}), '(a.x)\n', (6384, 6389), True, 'import numpy as np\n'), ((6404, 6418), 'numpy.array', 'np.array', (['(-a.y)'], {}), '(-a.y)\n', (6412, 6418), True, 'import numpy as np\n'), ((6604, 6620), 'numpy.array', 'np.array', (['self.x'], {}), '(self.x)\n', (6612, 6620), True, 'import numpy as np\n'), ((6635, 6651), 'numpy.array', 'np.array', (['self.y'], {}), '(self.y)\n', (6643, 6651), True, 'import numpy as np\n'), ((7513, 7532), 'numpy.linalg.norm', 'np.linalg.norm', (['c.y'], {}), '(c.y)\n', (7527, 7532), True, 'import numpy as np\n'), ((8481, 8493), 'numpy.array', 'np.array', (['ux'], {}), '(ux)\n', (8489, 8493), True, 'import numpy as np\n'), ((8509, 8545), 'numpy.interp', 'np.interp', (['ux', 'a.x', 'a.y', 'left', 'right'], {}), '(ux, a.x, a.y, left, right)\n', (8518, 8545), True, 'import numpy as np\n'), ((8610, 8622), 'numpy.array', 'np.array', (['ux'], {}), '(ux)\n', (8618, 8622), True, 'import numpy as np\n'), ((8638, 8674), 'numpy.interp', 'np.interp', 
(['ux', 'b.x', 'b.y', 'left', 'right'], {}), '(ux, b.x, b.y, left, right)\n', (8647, 8674), True, 'import numpy as np\n'), ((5152, 5171), 'numpy.where', 'np.where', (['(ib.y == 0)'], {}), '(ib.y == 0)\n', (5160, 5171), True, 'import numpy as np\n'), ((5678, 5697), 'numpy.where', 'np.where', (['(ib.y == 0)'], {}), '(ib.y == 0)\n', (5686, 5697), True, 'import numpy as np\n'), ((9043, 9079), 'numpy.interp', 'np.interp', (['ax', 'a.x', 'a.y', 'left', 'right'], {}), '(ax, a.x, a.y, left, right)\n', (9052, 9079), True, 'import numpy as np\n'), ((9163, 9199), 'numpy.interp', 'np.interp', (['bx', 'b.x', 'b.y', 'left', 'right'], {}), '(bx, b.x, b.y, left, right)\n', (9172, 9199), True, 'import numpy as np\n'), ((10859, 10880), 'numpy.where', 'np.where', (['(a.x == xval)'], {}), '(a.x == xval)\n', (10867, 10880), True, 'import numpy as np\n'), ((10899, 10920), 'numpy.where', 'np.where', (['(b.x == xval)'], {}), '(b.x == xval)\n', (10907, 10920), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
"""
Created on Wed Apr 15 15:26:29 2015
@author: J-R
"""
import numpy as np
import astropy.io.fits as pf
from configobj import ConfigObj
import ipdb
import matplotlib.pyplot as plt
import medis.speckle_nulling.sn_hardware as hardware
import medis.speckle_nulling.dm_functions as DM
import flatmapfunctions as fmap
import detect_speckles
from validate import Validator
import medis.speckle_nulling.sn_preprocessing as pre
from shift import shift
from Estimation_1ref import Estimation_1ref
from SaveFits import SaveFits
if __name__ == "__main__":
    # --- Correction-loop parameters ---------------------------------------
    ampl = 1.     # amplitude
    nb_im = 1.    # images per iteration (if > 1 a median of the cube should be taken)
    ray = 22      # in pixels
    Alpha = 5.    # in degrees
    Xi = 66.      # in pixels
    Alpha = Alpha / 180. * np.pi  # convert to radians
    nb_ite = 5    # number of correction iterations
    exptime = 4   # exposure time
    dirwr = '/data1/home/aousr/Desktop/speckle_nulling/SCC/'

    # Read the cube of DM frequency maps.
    #hdulist = pf.open(dirwr + 'Freq_DM_map_cube.fits')
    hdulist = pf.open(dirwr + 'Freq_start.fits')
    Cube_freq = hdulist[0].data
    hdulist.close()

    # Read the command matrix.
    hdulist = pf.open(dirwr + 'Mat_com.fits')
    Mat_com = hdulist[0].data
    hdulist.close()

    # Read the image-plane filter and recenter/crop it.
    hdulist = pf.open(dirwr + 'Filtre_Image.fits')
    Filtre_Image = hdulist[0].data
    hdulist.close()
    Filtre_Image = shift(Filtre_Image, -128, -128)
    Filtre_Image = Filtre_Image[0:256, 0:256]

    # Read the Fourier-plane filter.
    hdulist = pf.open(dirwr + 'Filtre_Fourier.fits')
    Filtre_Fourier = hdulist[0].data
    hdulist.close()

    # Parameter file for P3K and PHARO.
    hardwareconfigfile = 'speckle_instruments_scc.ini'
    # Initialize PHARO (real hardware).
    pharo = hardware.PHARO_COM('PHARO', configfile=hardwareconfigfile)
    # Initialize P3K.
    p3k = hardware.P3K_COM('P3K_COM', configfile=hardwareconfigfile)

    # Load and apply a flatmap to the DM.
    initial_flatmap = np.zeros((66, 66))
    initial_flatmap = p3k.grab_current_flatmap()
    status = p3k.load_new_flatmap(fmap.convert_hodm_telem(initial_flatmap))

    # Reference image before any correction.
    im_flat = np.zeros((1024, 1024))
    im_flat = pharo.take_src_return_imagedata(exptime)  # works, tested

    # DM shape that the correction loop updates in place.
    phdm = initial_flatmap

    # Correction loop.
    ite = 0
    while ite < nb_ite:
        # Apply phdm to the DM and take nb_im image(s).
        status = p3k.load_new_flatmap(fmap.convert_hodm_telem(phdm))
        w = 0
        while w < nb_im:
            im = pharo.take_src_return_imagedata(exptime)
            im = im[:, :, np.newaxis]
            if w == 0:
                cube_im = im
            else:
                cube_im = np.concatenate((cube_im, im), axis=2)
            w += 1
        # BUG FIX: the original referenced an undefined name `Cube`
        # (NameError); use the image cube acquired above.
        # NOTE(review): if nb_im > 1 a median over axis 2 should probably be
        # taken before recentering -- TODO confirm with the instrument team.
        SCC_Image = shift(cube_im, -128, -128)
        #SaveFits(dirwr + "Mat_int.fits", Mat_int)
        #pf.writeto("test"".fits", SCC_Image)
        # From the image we obtain an estimate of the electric field.
        Estimateur = Estimation_1ref(SCC_Image, Filtre_Image, Filtre_Fourier, ray, Alpha, Xi)
        # Multiply the estimate by the command matrix to obtain a DM command.
        Vec_com = np.dot(Mat_com, Estimateur)
        # Build the DM shape to apply.
        i = 0
        while i < len(Cube_freq):
            phdm += Cube_freq[i, :, :] * Vec_com[i]
            i += 1
        # BUG FIX: the iteration counter was never incremented, so the
        # correction loop ran forever.
        ite += 1

    # Restore the initial flatmap on the DM.
    status = p3k.load_new_flatmap(fmap.convert_hodm_telem(initial_flatmap))
| [
"shift.shift",
"Estimation_1ref.Estimation_1ref",
"medis.speckle_nulling.sn_hardware.PHARO_COM",
"numpy.zeros",
"numpy.dot",
"flatmapfunctions.convert_hodm_telem",
"numpy.concatenate",
"astropy.io.fits.open",
"medis.speckle_nulling.sn_hardware.P3K_COM"
] | [((1017, 1051), 'astropy.io.fits.open', 'pf.open', (["(dirwr + 'Freq_start.fits')"], {}), "(dirwr + 'Freq_start.fits')\n", (1024, 1051), True, 'import astropy.io.fits as pf\n'), ((1150, 1181), 'astropy.io.fits.open', 'pf.open', (["(dirwr + 'Mat_com.fits')"], {}), "(dirwr + 'Mat_com.fits')\n", (1157, 1181), True, 'import astropy.io.fits as pf\n'), ((1272, 1308), 'astropy.io.fits.open', 'pf.open', (["(dirwr + 'Filtre_Image.fits')"], {}), "(dirwr + 'Filtre_Image.fits')\n", (1279, 1308), True, 'import astropy.io.fits as pf\n'), ((1388, 1419), 'shift.shift', 'shift', (['Filtre_Image', '(-128)', '(-128)'], {}), '(Filtre_Image, -128, -128)\n', (1393, 1419), False, 'from shift import shift\n'), ((1508, 1546), 'astropy.io.fits.open', 'pf.open', (["(dirwr + 'Filtre_Fourier.fits')"], {}), "(dirwr + 'Filtre_Fourier.fits')\n", (1515, 1546), True, 'import astropy.io.fits as pf\n'), ((1767, 1825), 'medis.speckle_nulling.sn_hardware.PHARO_COM', 'hardware.PHARO_COM', (['"""PHARO"""'], {'configfile': 'hardwareconfigfile'}), "('PHARO', configfile=hardwareconfigfile)\n", (1785, 1825), True, 'import medis.speckle_nulling.sn_hardware as hardware\n'), ((1889, 1947), 'medis.speckle_nulling.sn_hardware.P3K_COM', 'hardware.P3K_COM', (['"""P3K_COM"""'], {'configfile': 'hardwareconfigfile'}), "('P3K_COM', configfile=hardwareconfigfile)\n", (1905, 1947), True, 'import medis.speckle_nulling.sn_hardware as hardware\n'), ((2004, 2022), 'numpy.zeros', 'np.zeros', (['(66, 66)'], {}), '((66, 66))\n', (2012, 2022), True, 'import numpy as np\n'), ((2263, 2285), 'numpy.zeros', 'np.zeros', (['(1024, 1024)'], {}), '((1024, 1024))\n', (2271, 2285), True, 'import numpy as np\n'), ((2160, 2200), 'flatmapfunctions.convert_hodm_telem', 'fmap.convert_hodm_telem', (['initial_flatmap'], {}), '(initial_flatmap)\n', (2183, 2200), True, 'import flatmapfunctions as fmap\n'), ((2986, 3009), 'shift.shift', 'shift', (['Cube', '(-128)', '(-128)'], {}), '(Cube, -128, -128)\n', (2991, 3009), False, 'from shift import 
shift\n'), ((3184, 3256), 'Estimation_1ref.Estimation_1ref', 'Estimation_1ref', (['SCC_Image', 'Filtre_Image', 'Filtre_Fourier', 'ray', 'Alpha', 'Xi'], {}), '(SCC_Image, Filtre_Image, Filtre_Fourier, ray, Alpha, Xi)\n', (3199, 3256), False, 'from Estimation_1ref import Estimation_1ref\n'), ((3396, 3423), 'numpy.dot', 'np.dot', (['Mat_com', 'Estimateur'], {}), '(Mat_com, Estimateur)\n', (3402, 3423), True, 'import numpy as np\n'), ((3656, 3696), 'flatmapfunctions.convert_hodm_telem', 'fmap.convert_hodm_telem', (['initial_flatmap'], {}), '(initial_flatmap)\n', (3679, 3696), True, 'import flatmapfunctions as fmap\n'), ((2593, 2622), 'flatmapfunctions.convert_hodm_telem', 'fmap.convert_hodm_telem', (['phdm'], {}), '(phdm)\n', (2616, 2622), True, 'import flatmapfunctions as fmap\n'), ((2883, 2920), 'numpy.concatenate', 'np.concatenate', (['(cube_im, im)'], {'axis': '(2)'}), '((cube_im, im), axis=2)\n', (2897, 2920), True, 'import numpy as np\n')] |
from __future__ import print_function
import numpy as np
import powderday.config as cfg
import yt
from yt.frontends.sph.data_structures import ParticleDataset
from yt.geometry.selection_routines import AlwaysSelector
import pdb
# Globally configure yt's SPH ParticleDataset before any loads below:
# honor bounding boxes when filtering particles and skip the dataset cache
# (presumably so repeated loads with different bboxes are not served stale
# cached data -- NOTE(review): confirm against the yt version in use).
ParticleDataset.filter_bbox = True
ParticleDataset._skip_cache = True
def octree_zoom_bbox_filter(fname,ds,bbox0,field_add):
    """Cut a particle dataset down to a zoom box and attach octree data.

    Reports the gas center of mass (informational only), then builds a
    bounding box of half-width ``cfg.par.zoom_box_len`` (kpc) around the
    configured center ``cfg.model.{x,y,z}_cent``.  On yt 4 the octree and
    its parameters are attached to ``reg.parameters``; on yt 3 the dataset
    is reloaded with the bbox and the octree handler data is extracted.

    :param fname: path to the snapshot file (reloaded for field mapping)
    :param ds: the already-loaded yt dataset
    :param bbox0: original bounding box (unused here; kept for the caller)
    :param field_add: project callback that re-registers powderday fields
    :returns: a yt region covering the zoom box, with ``reg.parameters``
        holding the octree information
    """
    ds.index
    ad = ds.all_data()
    print ('\n\n')
    print ('----------------------------')
    print ("[octree zoom_bbox_filter:] Calculating Center of Mass")
    # Density-weighted center of mass of the gas (printed for the log only;
    # the actual zoom center comes from the config below).
    gas_com_x = np.sum(ad["gasdensity"] * ad["gascoordinates"][:,0])/np.sum(ad["gasdensity"])
    gas_com_y = np.sum(ad["gasdensity"] * ad["gascoordinates"][:,1])/np.sum(ad["gasdensity"])
    gas_com_z = np.sum(ad["gasdensity"] * ad["gascoordinates"][:,2])/np.sum(ad["gasdensity"])
    com = [gas_com_x,gas_com_y,gas_com_z]
    print ("[octree zoom_bbox_filter:] Center of Mass is at coordinates (kpc): ",com)
    center = [cfg.model.x_cent,cfg.model.y_cent,cfg.model.z_cent]
    print ('[octree zoom_bbox_filter:] using center: ',center)
    box_len = cfg.par.zoom_box_len
    #now begin the process of converting box_len to physical units in
    #case we're in a cosmological simulation. We'll first give it
    #units of proper kpc, then convert to code length (which for
    #gadget is kpcm/h) for the bbox calculation (dropping the units of
    #course). then when we re-convert to proper units, the box_len as
    #input in parameters_master will be in proper units. if a
    #simulation isn't cosmological, then the only difference here will
    #be a 1/h
    #yt 3.x
    box_len = ds.quan(box_len,'kpc')
    #yt 4.x
    if yt.__version__ == '4.0.dev0':
        box_len = float(box_len.to('code_length').value)
        bbox_lim = box_len
    else:
        box_len = box_len.convert_to_units('code_length').value
        bbox_lim = box_len
    bbox1 = [[center[0]-bbox_lim,center[0]+bbox_lim],
             [center[1]-bbox_lim,center[1]+bbox_lim],
             [center[2]-bbox_lim,center[2]+bbox_lim]]
    print ('[octree zoom] new zoomed bbox (comoving/h) in code units= ',bbox1)
    #yt 3.x
    #ds1 = yt.load(fname,bounding_box=bbox1,n_ref = cfg.par.n_ref,over_refine_factor=cfg.par.oref)
    #What follows is tricky.  Broadly, the plan is to create a yt
    #region to cut out the dataset to our desired box size.  In yt4.x,
    #we will then pass around reg (which represents the cutout version
    #of the ds), as well as ds (which is the original ds).  the
    #original ds will contain all the original parameters of the
    #dataset.  We pass around the octree itself in a newly created
    #dictionary called reg.parameters
    if yt.__version__ == '4.0.dev0':
        #re load the field names, but now with the bounding box
        #set. this will allow us to map the field names to those
        #generated in the octree.  this represents a massive
        #inefficiency as we have to load the entire dataset a *second*
        #time.
        ds = field_add(fname,bounding_box = bbox1,ds=ds,add_smoothed_quantities=True)
        ds.periodicity = (False,False,False)
        reg = ds.region(center=center,left_edge = np.asarray(center)-bbox_lim,right_edge = np.asarray(center)+bbox_lim)
        #ds1 = reg.ds
        left = np.array([pos[0] for pos in bbox1])
        right = np.array([pos[1] for pos in bbox1])
        octree = ds.octree(left, right, n_ref=cfg.par.n_ref)#, force_build=True)
        reg.parameters={}
        reg.parameters['octree'] = octree
    else:
        #load up a cutout ds1 with a bounding_box so we can generate the octree on this dataset
        ds1 = yt.load(fname,bounding_box = bbox1,n_ref=cfg.par.n_ref,over_refine_factor=cfg.par.oref)
        ds1.periodicity = (False,False,False)
        #now update the field names
        ds1 = field_add(None,bounding_box = bbox1,ds=ds1,add_smoothed_quantities=True)
        #now create the region so that we have the smoothed properties downstream correct
        reg = ds1.region(center=center,left_edge = np.asarray(center)-bbox_lim,right_edge = np.asarray(center)+bbox_lim)
        reg.parameters={}
        saved = ds1.index.oct_handler.save_octree()
        always = AlwaysSelector(None)
        #ir1 = ds.index.oct_handler.ires(always)  # refinement levels
        reg.parameters["fc1"] = ds1.index.oct_handler.fcoords(always)  # coordinates in code_length
        reg.parameters["fw1"] = ds1.index.oct_handler.fwidth(always)  # width of cell in code_length
        reg.parameters["refined"] = saved['octree'].astype('bool')
        reg.parameters["n_ref"] = ds1.index.oct_handler.n_ref
        reg.parameters["max_level"] = ds1.index.oct_handler.max_level
        reg.parameters["nocts"] = ds1.index.oct_handler.nocts
    #re-add the new powderday convention fields; this time we need to
    #make sure to do the ages calculation since it hasn't been done
    #before.
    #ds1 = field_add(None,bounding_box = bbox1,ds=ds1,starages=True)
    return reg
def arepo_zoom(fname, ds, bbox0, field_add):
    """Cut an arepo dataset down to the configured zoom region.

    Builds a bounding box of half-width ``cfg.par.zoom_box_len`` (proper kpc,
    converted to code length) around ``cfg.model.{x,y,z}_cent``, re-runs the
    project ``field_add`` callback with that box, and returns the resulting
    yt region.  Only the yt 4 path exists here: an arepo snapshot cannot be
    loaded as an arepo model under yt 3 (it would go through sph_tributary).

    :param fname: path to the snapshot file (reloaded for field mapping)
    :param ds: the already-loaded yt dataset
    :param bbox0: original bounding box (unused; kept for caller symmetry)
    :param field_add: project callback that re-registers powderday fields
    :returns: a yt region covering the zoom box
    """
    # Force the index to build and touch all_data(), mirroring the original
    # load sequence.
    ds.index
    ad = ds.all_data()

    center = [cfg.model.x_cent, cfg.model.y_cent, cfg.model.z_cent]
    print('[zoom/arepo_zoom:] using center: ', center)

    # Half-width of the zoom box: proper kpc -> code length (handles the
    # comoving/h factors automatically for cosmological runs).
    half_width = ds.quan(cfg.par.zoom_box_len, 'kpc')
    half_width = float(half_width.to('code_length').value)

    bbox1 = [[c - half_width, c + half_width] for c in center]
    print('[zoom/arepo_zoom] new zoomed bbox (comoving/h) in code units= ', bbox1)

    # Reload the field names with the bounding box set so they map onto the
    # cutout.  This is expensive (a second full load) but required.
    ds = field_add(fname, bounding_box=bbox1, ds=ds)
    ds.periodicity = (False, False, False)

    edges = np.asarray(center)
    reg = ds.region(center=center,
                    left_edge=edges - half_width,
                    right_edge=edges + half_width)
    return reg
def enzo_zoom(fname, ds, field_add):
    """Cut out a zoom region from an enzo dataset and reload it as its own ds.

    A yt region around ``cfg.model.{x,y,z}_cent`` with half-width
    ``cfg.par.zoom_box_len`` (kpc) is saved to ``temp_enzo.h5`` and reloaded,
    because hyperion's ``AMRGrid.from_yt`` requires a dataset, not a region.
    The region's grid-construction attributes are then copied onto the new
    dataset so downstream code sees a consistent index.

    :param fname: path to the snapshot (unused here; kept for caller symmetry)
    :param ds: the already-loaded enzo dataset
    :param field_add: project field-registration callback (unused here)
    :returns: tuple ``(reg, ds1)`` -- the cutout region and the reloaded dataset
    """
    # Set up the cutout region from the main dataset.
    center = ds.arr([cfg.model.x_cent, cfg.model.y_cent, cfg.model.z_cent], 'code_length')
    box_len = ds.quan(cfg.par.zoom_box_len, 'kpc').to('code_length')
    reg = ds.region(center, center - box_len, center + box_len)

    # Save the region as a standalone dataset and reload it: hyperion's
    # AMRGrid.from_yt convenience function needs a dataset.
    reg.save_as_dataset('temp_enzo.h5',fields=[('all','creation_time'),('gas','metal_density'),('gas','density'),('newstars','metallicity_fraction'),('newstars','particle_mass'),('all', 'particle_index'),('index', 'grid_level'),('gas','dust_density')])
    ds1 = yt.load('temp_enzo.h5')
    ad1 = ds1.all_data()
    print("[zoom/enzo_zoom]: temporarily saving temp_enzo.h5")

    # Copy the region's grid-construction attributes onto the new dataset.
    ds1.domain_width = reg.right_edge - reg.left_edge
    ds1.domain_left_edge = reg.left_edge
    ds1.domain_right_edge = reg.right_edge
    ds1.domain_center = reg.center
    ds1.index.get_levels = reg.index.get_levels
    # BUG FIX: the target attribute was misspelled 'get_smallest_ds';
    # every other line mirrors the source attribute name.
    ds1.index.get_smallest_dx = reg.index.get_smallest_dx
    ds1.index.grid = reg.index.grid
    ds1.index.grid_corners = reg.index.grid_corners
    ds1.index.grid_dimensions = reg.index.grid_dimensions
    ds1.index.grid_levels = reg.index.grid_levels
    ds1.index.grid_left_edge = reg.index.grid_left_edge
    ds1.index.grid_right_edge = reg.index.grid_right_edge
    ds1.index.grid_particle_count = reg.index.grid_particle_count
    ds1.index.grids = reg.index.grids
    ds1.index.index_filename = reg.index.index_filename
    ds1.index.max_level = reg.index.max_level
    # BUG FIX: 'ds1.index_num_grids' was missing the '.index' attribute
    # access, so the index's grid count was never set.
    ds1.index.num_grids = reg.index.num_grids
    ds1.index.num_stars = reg.index.num_stars
    ds1.index.parameters = reg.index.parameters
    return reg, ds1
| [
"numpy.asarray",
"numpy.sum",
"numpy.array",
"yt.load",
"yt.geometry.selection_routines.AlwaysSelector"
] | [((7825, 7848), 'yt.load', 'yt.load', (['"""temp_enzo.h5"""'], {}), "('temp_enzo.h5')\n", (7832, 7848), False, 'import yt\n'), ((548, 601), 'numpy.sum', 'np.sum', (["(ad['gasdensity'] * ad['gascoordinates'][:, 0])"], {}), "(ad['gasdensity'] * ad['gascoordinates'][:, 0])\n", (554, 601), True, 'import numpy as np\n'), ((601, 625), 'numpy.sum', 'np.sum', (["ad['gasdensity']"], {}), "(ad['gasdensity'])\n", (607, 625), True, 'import numpy as np\n'), ((642, 695), 'numpy.sum', 'np.sum', (["(ad['gasdensity'] * ad['gascoordinates'][:, 1])"], {}), "(ad['gasdensity'] * ad['gascoordinates'][:, 1])\n", (648, 695), True, 'import numpy as np\n'), ((695, 719), 'numpy.sum', 'np.sum', (["ad['gasdensity']"], {}), "(ad['gasdensity'])\n", (701, 719), True, 'import numpy as np\n'), ((736, 789), 'numpy.sum', 'np.sum', (["(ad['gasdensity'] * ad['gascoordinates'][:, 2])"], {}), "(ad['gasdensity'] * ad['gascoordinates'][:, 2])\n", (742, 789), True, 'import numpy as np\n'), ((789, 813), 'numpy.sum', 'np.sum', (["ad['gasdensity']"], {}), "(ad['gasdensity'])\n", (795, 813), True, 'import numpy as np\n'), ((3310, 3345), 'numpy.array', 'np.array', (['[pos[0] for pos in bbox1]'], {}), '([pos[0] for pos in bbox1])\n', (3318, 3345), True, 'import numpy as np\n'), ((3362, 3397), 'numpy.array', 'np.array', (['[pos[1] for pos in bbox1]'], {}), '([pos[1] for pos in bbox1])\n', (3370, 3397), True, 'import numpy as np\n'), ((3672, 3765), 'yt.load', 'yt.load', (['fname'], {'bounding_box': 'bbox1', 'n_ref': 'cfg.par.n_ref', 'over_refine_factor': 'cfg.par.oref'}), '(fname, bounding_box=bbox1, n_ref=cfg.par.n_ref, over_refine_factor=\n cfg.par.oref)\n', (3679, 3765), False, 'import yt\n'), ((4238, 4258), 'yt.geometry.selection_routines.AlwaysSelector', 'AlwaysSelector', (['None'], {}), '(None)\n', (4252, 4258), False, 'from yt.geometry.selection_routines import AlwaysSelector\n'), ((6762, 6780), 'numpy.asarray', 'np.asarray', (['center'], {}), '(center)\n', (6772, 6780), True, 'import numpy as np\n'), 
((6803, 6821), 'numpy.asarray', 'np.asarray', (['center'], {}), '(center)\n', (6813, 6821), True, 'import numpy as np\n'), ((3202, 3220), 'numpy.asarray', 'np.asarray', (['center'], {}), '(center)\n', (3212, 3220), True, 'import numpy as np\n'), ((3243, 3261), 'numpy.asarray', 'np.asarray', (['center'], {}), '(center)\n', (3253, 3261), True, 'import numpy as np\n'), ((4072, 4090), 'numpy.asarray', 'np.asarray', (['center'], {}), '(center)\n', (4082, 4090), True, 'import numpy as np\n'), ((4113, 4131), 'numpy.asarray', 'np.asarray', (['center'], {}), '(center)\n', (4123, 4131), True, 'import numpy as np\n')] |
# coding: utf8
# !/usr/env/python
# This file has tests for the old style output writers to ensure backwards
# compatibility. All of the existing tests for output writers are kept as is.
# There are a few new ones too.
import glob
import os
import numpy as np
from terrainbento import Basic, NotCoreNodeBaselevelHandler
from terrainbento.utilities import filecmp
# Directory where the output writers below write their files during a test run.
_TEST_OUTPUT_DIR = os.path.join(os.curdir, "output")
# Directory holding the stored "truth" files that writer output is compared to.
_TEST_DATA_DIR = os.path.join(os.path.dirname(__file__), "data")
def get_output_filepath(filename):
    """Return *filename* joined onto the test output directory."""
    output_path = os.path.join(_TEST_OUTPUT_DIR, filename)
    return output_path
def cleanup_files(searchpath):
    """Delete every file matching the glob pattern *searchpath*."""
    for matched_path in glob.glob(searchpath):
        os.remove(matched_path)
# Some output writers
def output_writer_function_a(model):
    """Write the mean core-node elevation to ``ow_func_a.<time>.txt``."""
    mean_elevation = np.mean(model.z[model.grid.core_nodes])
    outpath = get_output_filepath(f"ow_func_a.{str(model.model_time)}.txt")
    with open(outpath, "w") as fp:
        fp.write(str(mean_elevation))
def output_writer_function_b(model):
    """Write the minimum core-node elevation to ``ow_func_b.<time>.txt``."""
    min_elevation = np.min(model.z[model.grid.core_nodes])
    outpath = get_output_filepath(f"ow_func_b.{str(model.model_time)}.txt")
    with open(outpath, "w") as fp:
        fp.write(str(min_elevation))
class output_writer_class_a(object):
    """Old-style class writer: records the mean cumulative elevation change."""

    def __init__(self, model):
        self.model = model
        self.change = model.grid.at_node["cumulative_elevation_change"]

    def run_one_step(self):
        """Write the mean core-node change to ``ow_class_a.<time>.txt``."""
        core_nodes = self.model.grid.core_nodes
        mean_change = np.mean(self.change[core_nodes])
        model_time_str = str(self.model.model_time)
        outpath = get_output_filepath(f"ow_class_a.{model_time_str}.txt")
        with open(outpath, "w") as fp:
            fp.write(str(mean_change))
class output_writer_class_b(object):
    """Old-style class writer: records the minimum cumulative elevation change."""

    def __init__(self, model):
        self.model = model
        self.change = model.grid.at_node["cumulative_elevation_change"]

    def run_one_step(self):
        """Write the minimum core-node change to ``ow_class_b.<time>.txt``."""
        core_nodes = self.model.grid.core_nodes
        min_change = np.min(self.change[core_nodes])
        model_time_str = str(self.model.model_time)
        outpath = get_output_filepath(f"ow_class_b.{model_time_str}.txt")
        with open(outpath, "w") as fp:
            fp.write(str(min_change))
# Unchanged tests
# These tests should have minimal changes to ensure backwards compatibility
# I only changed where output files are saved (because failed tests don't clean
# up so they fill my test directory with junk files)
def test_one_function_writer(clock_08, almost_default_grid):
    """A single function-style writer produces output matching the truth file."""
    baselevel = NotCoreNodeBaselevelHandler(
        almost_default_grid, modify_core_nodes=True, lowering_rate=-1
    )
    # Build and run a model that only runs the one function writer.
    model = Basic(
        clock_08,
        almost_default_grid,
        save_first_timestep=False,
        water_erodibility=0.0,
        regolith_transport_parameter=0.0,
        boundary_handlers={"NotCoreNodeBaselevelHandler": baselevel},
        output_writers={"function": [output_writer_function_a]},
    )
    model.run()

    # The writer's final output must match the stored truth file exactly.
    truth_file = os.path.join(_TEST_DATA_DIR, "truth_ow_func_a.20.0.txt")
    test_file = get_output_filepath("ow_func_a.20.0.txt")
    assert filecmp(test_file, truth_file) is True

    model.remove_output_netcdfs()
    cleanup_files("ow_func_a.*.txt")
def test_one_class_writer(clock_08, almost_default_grid):
    """A single class-style writer produces output matching the truth file."""
    baselevel = NotCoreNodeBaselevelHandler(
        almost_default_grid, modify_core_nodes=True, lowering_rate=-1
    )
    # Build and run a model that only runs the one class writer.
    model = Basic(
        clock_08,
        almost_default_grid,
        save_first_timestep=False,
        water_erodibility=0.0,
        regolith_transport_parameter=0.0,
        boundary_handlers={"NotCoreNodeBaselevelHandler": baselevel},
        output_writers={"class": [output_writer_class_a]},
    )
    model.run()

    # The writer's final output must match the stored truth file exactly.
    truth_file = os.path.join(_TEST_DATA_DIR, "truth_ow_class_a.20.0.txt")
    test_file = get_output_filepath("ow_class_a.20.0.txt")
    assert filecmp(test_file, truth_file) is True

    model.remove_output_netcdfs()
    cleanup_files("ow_class_a.*.txt")
def test_two_function_writers(clock_08, almost_default_grid):
    """Two function-style writers both produce output matching their truth files."""
    baselevel = NotCoreNodeBaselevelHandler(
        almost_default_grid, modify_core_nodes=True, lowering_rate=-1
    )
    model = Basic(
        clock_08,
        almost_default_grid,
        save_first_timestep=False,
        water_erodibility=0.0,
        regolith_transport_parameter=0.0,
        boundary_handlers={"NotCoreNodeBaselevelHandler": baselevel},
        output_writers={
            "function": [output_writer_function_a, output_writer_function_b]
        },
    )
    model.run()

    # Each writer's output must match its stored truth file.
    for tag in ("a", "b"):
        truth_file = os.path.join(_TEST_DATA_DIR, f"truth_ow_func_{tag}.20.0.txt")
        test_file = get_output_filepath(f"ow_func_{tag}.20.0.txt")
        assert filecmp(test_file, truth_file) is True

    model.remove_output_netcdfs()
    cleanup_files("ow_func_*.txt")
def test_two_class_writers(clock_08, almost_default_grid):
    """Two class-style writers both produce output matching their truth files."""
    baselevel = NotCoreNodeBaselevelHandler(
        almost_default_grid, modify_core_nodes=True, lowering_rate=-1
    )
    model = Basic(
        clock_08,
        almost_default_grid,
        save_first_timestep=False,
        water_erodibility=0.0,
        regolith_transport_parameter=0.0,
        boundary_handlers={"NotCoreNodeBaselevelHandler": baselevel},
        output_writers={
            "class": [output_writer_class_a, output_writer_class_b]
        },
    )
    model.run()

    # Each writer's output must match its stored truth file.
    for tag in ("a", "b"):
        truth_file = os.path.join(_TEST_DATA_DIR, f"truth_ow_class_{tag}.20.0.txt")
        test_file = get_output_filepath(f"ow_class_{tag}.20.0.txt")
        assert filecmp(test_file, truth_file) is True

    model.remove_output_netcdfs()
    cleanup_files("ow_class_*.txt")
def test_all_four_writers(clock_08, almost_default_grid):
    """Function and class output writers can be mixed on one model; all four
    produce output files matching their truth files."""
    baselevel_handler = NotCoreNodeBaselevelHandler(
        almost_default_grid, modify_core_nodes=True, lowering_rate=-1
    )
    # Register two function writers and two class writers simultaneously.
    model = Basic(
        clock_08,
        almost_default_grid,
        save_first_timestep=False,
        water_erodibility=0.0,
        regolith_transport_parameter=0.0,
        boundary_handlers={"NotCoreNodeBaselevelHandler": baselevel_handler},
        output_writers={
            "function": [output_writer_function_a, output_writer_function_b],
            "class": [output_writer_class_a, output_writer_class_b],
        },
    )
    model.run()
    # Check every writer's final-timestep output against its truth file.
    for base in (
        "ow_func_a.20.0.txt",
        "ow_func_b.20.0.txt",
        "ow_class_a.20.0.txt",
        "ow_class_b.20.0.txt",
    ):
        expected = os.path.join(_TEST_DATA_DIR, "truth_" + base)
        produced = get_output_filepath(base)
        assert filecmp(produced, expected) is True
    model.remove_output_netcdfs()
    cleanup_files("ow_func_*.txt")
    cleanup_files("ow_class_*.txt")
# New tests for old style output writers
def test_save_first_last_and_multiple_times(clock_08, almost_default_grid):
    """Test save_first_timestep, save_last_timestep, and saving at multiple
    timesteps."""
    baselevel_handler = NotCoreNodeBaselevelHandler(
        almost_default_grid, modify_core_nodes=True, lowering_rate=-1
    )
    # Output is requested at t=0 (first), every 6.0 time units, and at
    # t=20.0 (last), for all four writers at once.
    model = Basic(
        clock_08,
        almost_default_grid,
        water_erodibility=0.0,
        regolith_transport_parameter=0.0,
        boundary_handlers={"NotCoreNodeBaselevelHandler": baselevel_handler},
        output_writers={
            "function": [output_writer_function_a, output_writer_function_b],
            "class": [output_writer_class_a, output_writer_class_b],
        },
        output_interval=6.0,
        output_dir=_TEST_OUTPUT_DIR,
        save_first_timestep=True,
        save_last_timestep=True,
    )
    model.run()
    stems = ("ow_func_a", "ow_func_b", "ow_class_a", "ow_class_b")
    # Every writer must have produced a truth-matching file at every
    # expected output time.
    for stamp in ("0.0", "6.0", "12.0", "18.0", "20.0"):
        for stem in stems:
            base = f"{stem}.{stamp}.txt"
            expected = os.path.join(_TEST_DATA_DIR, f"truth_{base}")
            produced = os.path.join(os.curdir, "output", base)
            assert filecmp(produced, expected) is True
    model.remove_output_netcdfs()
    cleanup_files(get_output_filepath("ow_func_*.txt"))
    cleanup_files(get_output_filepath("ow_class_*.txt"))
| [
"numpy.mean",
"os.path.join",
"os.path.dirname",
"terrainbento.NotCoreNodeBaselevelHandler",
"numpy.min",
"terrainbento.Basic",
"terrainbento.utilities.filecmp",
"glob.glob",
"os.remove"
] | [((387, 420), 'os.path.join', 'os.path.join', (['os.curdir', '"""output"""'], {}), "(os.curdir, 'output')\n", (399, 420), False, 'import os\n'), ((451, 476), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (466, 476), False, 'import os\n'), ((534, 574), 'os.path.join', 'os.path.join', (['_TEST_OUTPUT_DIR', 'filename'], {}), '(_TEST_OUTPUT_DIR, filename)\n', (546, 574), False, 'import os\n'), ((620, 641), 'glob.glob', 'glob.glob', (['searchpath'], {}), '(searchpath)\n', (629, 641), False, 'import glob\n'), ((768, 807), 'numpy.mean', 'np.mean', (['model.z[model.grid.core_nodes]'], {}), '(model.z[model.grid.core_nodes])\n', (775, 807), True, 'import numpy as np\n'), ((1024, 1062), 'numpy.min', 'np.min', (['model.z[model.grid.core_nodes]'], {}), '(model.z[model.grid.core_nodes])\n', (1030, 1062), True, 'import numpy as np\n'), ((2469, 2563), 'terrainbento.NotCoreNodeBaselevelHandler', 'NotCoreNodeBaselevelHandler', (['almost_default_grid'], {'modify_core_nodes': '(True)', 'lowering_rate': '(-1)'}), '(almost_default_grid, modify_core_nodes=True,\n lowering_rate=-1)\n', (2496, 2563), False, 'from terrainbento import Basic, NotCoreNodeBaselevelHandler\n'), ((2616, 2864), 'terrainbento.Basic', 'Basic', (['clock_08', 'almost_default_grid'], {'save_first_timestep': '(False)', 'water_erodibility': '(0.0)', 'regolith_transport_parameter': '(0.0)', 'boundary_handlers': "{'NotCoreNodeBaselevelHandler': ncnblh}", 'output_writers': "{'function': [output_writer_function_a]}"}), "(clock_08, almost_default_grid, save_first_timestep=False,\n water_erodibility=0.0, regolith_transport_parameter=0.0,\n boundary_handlers={'NotCoreNodeBaselevelHandler': ncnblh},\n output_writers={'function': [output_writer_function_a]})\n", (2621, 2864), False, 'from terrainbento import Basic, NotCoreNodeBaselevelHandler\n'), ((2990, 3046), 'os.path.join', 'os.path.join', (['_TEST_DATA_DIR', '"""truth_ow_func_a.20.0.txt"""'], {}), "(_TEST_DATA_DIR, 
'truth_ow_func_a.20.0.txt')\n", (3002, 3046), False, 'import os\n'), ((3300, 3394), 'terrainbento.NotCoreNodeBaselevelHandler', 'NotCoreNodeBaselevelHandler', (['almost_default_grid'], {'modify_core_nodes': '(True)', 'lowering_rate': '(-1)'}), '(almost_default_grid, modify_core_nodes=True,\n lowering_rate=-1)\n', (3327, 3394), False, 'from terrainbento import Basic, NotCoreNodeBaselevelHandler\n'), ((3447, 3689), 'terrainbento.Basic', 'Basic', (['clock_08', 'almost_default_grid'], {'save_first_timestep': '(False)', 'water_erodibility': '(0.0)', 'regolith_transport_parameter': '(0.0)', 'boundary_handlers': "{'NotCoreNodeBaselevelHandler': ncnblh}", 'output_writers': "{'class': [output_writer_class_a]}"}), "(clock_08, almost_default_grid, save_first_timestep=False,\n water_erodibility=0.0, regolith_transport_parameter=0.0,\n boundary_handlers={'NotCoreNodeBaselevelHandler': ncnblh},\n output_writers={'class': [output_writer_class_a]})\n", (3452, 3689), False, 'from terrainbento import Basic, NotCoreNodeBaselevelHandler\n'), ((3815, 3872), 'os.path.join', 'os.path.join', (['_TEST_DATA_DIR', '"""truth_ow_class_a.20.0.txt"""'], {}), "(_TEST_DATA_DIR, 'truth_ow_class_a.20.0.txt')\n", (3827, 3872), False, 'import os\n'), ((4132, 4226), 'terrainbento.NotCoreNodeBaselevelHandler', 'NotCoreNodeBaselevelHandler', (['almost_default_grid'], {'modify_core_nodes': '(True)', 'lowering_rate': '(-1)'}), '(almost_default_grid, modify_core_nodes=True,\n lowering_rate=-1)\n', (4159, 4226), False, 'from terrainbento import Basic, NotCoreNodeBaselevelHandler\n'), ((4279, 4557), 'terrainbento.Basic', 'Basic', (['clock_08', 'almost_default_grid'], {'save_first_timestep': '(False)', 'water_erodibility': '(0.0)', 'regolith_transport_parameter': '(0.0)', 'boundary_handlers': "{'NotCoreNodeBaselevelHandler': ncnblh}", 'output_writers': "{'function': [output_writer_function_a, output_writer_function_b]}"}), "(clock_08, almost_default_grid, save_first_timestep=False,\n water_erodibility=0.0, 
regolith_transport_parameter=0.0,\n boundary_handlers={'NotCoreNodeBaselevelHandler': ncnblh},\n output_writers={'function': [output_writer_function_a,\n output_writer_function_b]})\n", (4284, 4557), False, 'from terrainbento import Basic, NotCoreNodeBaselevelHandler\n'), ((4701, 4757), 'os.path.join', 'os.path.join', (['_TEST_DATA_DIR', '"""truth_ow_func_a.20.0.txt"""'], {}), "(_TEST_DATA_DIR, 'truth_ow_func_a.20.0.txt')\n", (4713, 4757), False, 'import os\n'), ((4884, 4940), 'os.path.join', 'os.path.join', (['_TEST_DATA_DIR', '"""truth_ow_func_b.20.0.txt"""'], {}), "(_TEST_DATA_DIR, 'truth_ow_func_b.20.0.txt')\n", (4896, 4940), False, 'import os\n'), ((5193, 5287), 'terrainbento.NotCoreNodeBaselevelHandler', 'NotCoreNodeBaselevelHandler', (['almost_default_grid'], {'modify_core_nodes': '(True)', 'lowering_rate': '(-1)'}), '(almost_default_grid, modify_core_nodes=True,\n lowering_rate=-1)\n', (5220, 5287), False, 'from terrainbento import Basic, NotCoreNodeBaselevelHandler\n'), ((5340, 5605), 'terrainbento.Basic', 'Basic', (['clock_08', 'almost_default_grid'], {'save_first_timestep': '(False)', 'water_erodibility': '(0.0)', 'regolith_transport_parameter': '(0.0)', 'boundary_handlers': "{'NotCoreNodeBaselevelHandler': ncnblh}", 'output_writers': "{'class': [output_writer_class_a, output_writer_class_b]}"}), "(clock_08, almost_default_grid, save_first_timestep=False,\n water_erodibility=0.0, regolith_transport_parameter=0.0,\n boundary_handlers={'NotCoreNodeBaselevelHandler': ncnblh},\n output_writers={'class': [output_writer_class_a, output_writer_class_b]})\n", (5345, 5605), False, 'from terrainbento import Basic, NotCoreNodeBaselevelHandler\n'), ((5753, 5810), 'os.path.join', 'os.path.join', (['_TEST_DATA_DIR', '"""truth_ow_class_a.20.0.txt"""'], {}), "(_TEST_DATA_DIR, 'truth_ow_class_a.20.0.txt')\n", (5765, 5810), False, 'import os\n'), ((5938, 5995), 'os.path.join', 'os.path.join', (['_TEST_DATA_DIR', '"""truth_ow_class_b.20.0.txt"""'], {}), "(_TEST_DATA_DIR, 
'truth_ow_class_b.20.0.txt')\n", (5950, 5995), False, 'import os\n'), ((6249, 6343), 'terrainbento.NotCoreNodeBaselevelHandler', 'NotCoreNodeBaselevelHandler', (['almost_default_grid'], {'modify_core_nodes': '(True)', 'lowering_rate': '(-1)'}), '(almost_default_grid, modify_core_nodes=True,\n lowering_rate=-1)\n', (6276, 6343), False, 'from terrainbento import Basic, NotCoreNodeBaselevelHandler\n'), ((6397, 6736), 'terrainbento.Basic', 'Basic', (['clock_08', 'almost_default_grid'], {'save_first_timestep': '(False)', 'water_erodibility': '(0.0)', 'regolith_transport_parameter': '(0.0)', 'boundary_handlers': "{'NotCoreNodeBaselevelHandler': ncnblh}", 'output_writers': "{'function': [output_writer_function_a, output_writer_function_b], 'class':\n [output_writer_class_a, output_writer_class_b]}"}), "(clock_08, almost_default_grid, save_first_timestep=False,\n water_erodibility=0.0, regolith_transport_parameter=0.0,\n boundary_handlers={'NotCoreNodeBaselevelHandler': ncnblh},\n output_writers={'function': [output_writer_function_a,\n output_writer_function_b], 'class': [output_writer_class_a,\n output_writer_class_b]})\n", (6402, 6736), False, 'from terrainbento import Basic, NotCoreNodeBaselevelHandler\n'), ((6889, 6945), 'os.path.join', 'os.path.join', (['_TEST_DATA_DIR', '"""truth_ow_func_a.20.0.txt"""'], {}), "(_TEST_DATA_DIR, 'truth_ow_func_a.20.0.txt')\n", (6901, 6945), False, 'import os\n'), ((7072, 7128), 'os.path.join', 'os.path.join', (['_TEST_DATA_DIR', '"""truth_ow_func_b.20.0.txt"""'], {}), "(_TEST_DATA_DIR, 'truth_ow_func_b.20.0.txt')\n", (7084, 7128), False, 'import os\n'), ((7255, 7312), 'os.path.join', 'os.path.join', (['_TEST_DATA_DIR', '"""truth_ow_class_a.20.0.txt"""'], {}), "(_TEST_DATA_DIR, 'truth_ow_class_a.20.0.txt')\n", (7267, 7312), False, 'import os\n'), ((7440, 7497), 'os.path.join', 'os.path.join', (['_TEST_DATA_DIR', '"""truth_ow_class_b.20.0.txt"""'], {}), "(_TEST_DATA_DIR, 'truth_ow_class_b.20.0.txt')\n", (7452, 7497), False, 'import 
os\n'), ((7941, 8035), 'terrainbento.NotCoreNodeBaselevelHandler', 'NotCoreNodeBaselevelHandler', (['almost_default_grid'], {'modify_core_nodes': '(True)', 'lowering_rate': '(-1)'}), '(almost_default_grid, modify_core_nodes=True,\n lowering_rate=-1)\n', (7968, 8035), False, 'from terrainbento import Basic, NotCoreNodeBaselevelHandler\n'), ((8089, 8509), 'terrainbento.Basic', 'Basic', (['clock_08', 'almost_default_grid'], {'water_erodibility': '(0.0)', 'regolith_transport_parameter': '(0.0)', 'boundary_handlers': "{'NotCoreNodeBaselevelHandler': ncnblh}", 'output_writers': "{'function': [output_writer_function_a, output_writer_function_b], 'class':\n [output_writer_class_a, output_writer_class_b]}", 'output_interval': '(6.0)', 'output_dir': '_TEST_OUTPUT_DIR', 'save_first_timestep': '(True)', 'save_last_timestep': '(True)'}), "(clock_08, almost_default_grid, water_erodibility=0.0,\n regolith_transport_parameter=0.0, boundary_handlers={\n 'NotCoreNodeBaselevelHandler': ncnblh}, output_writers={'function': [\n output_writer_function_a, output_writer_function_b], 'class': [\n output_writer_class_a, output_writer_class_b]}, output_interval=6.0,\n output_dir=_TEST_OUTPUT_DIR, save_first_timestep=True,\n save_last_timestep=True)\n", (8094, 8509), False, 'from terrainbento import Basic, NotCoreNodeBaselevelHandler\n'), ((670, 682), 'os.remove', 'os.remove', (['f'], {}), '(f)\n', (679, 682), False, 'import os\n'), ((1438, 1486), 'numpy.mean', 'np.mean', (['self.change[self.model.grid.core_nodes]'], {}), '(self.change[self.model.grid.core_nodes])\n', (1445, 1486), True, 'import numpy as np\n'), ((1913, 1960), 'numpy.min', 'np.min', (['self.change[self.model.grid.core_nodes]'], {}), '(self.change[self.model.grid.core_nodes])\n', (1919, 1960), True, 'import numpy as np\n'), ((3116, 3146), 'terrainbento.utilities.filecmp', 'filecmp', (['test_file', 'truth_file'], {}), '(test_file, truth_file)\n', (3123, 3146), False, 'from terrainbento.utilities import filecmp\n'), ((3943, 
3973), 'terrainbento.utilities.filecmp', 'filecmp', (['test_file', 'truth_file'], {}), '(test_file, truth_file)\n', (3950, 3973), False, 'from terrainbento.utilities import filecmp\n'), ((4827, 4857), 'terrainbento.utilities.filecmp', 'filecmp', (['test_file', 'truth_file'], {}), '(test_file, truth_file)\n', (4834, 4857), False, 'from terrainbento.utilities import filecmp\n'), ((5010, 5040), 'terrainbento.utilities.filecmp', 'filecmp', (['test_file', 'truth_file'], {}), '(test_file, truth_file)\n', (5017, 5040), False, 'from terrainbento.utilities import filecmp\n'), ((5881, 5911), 'terrainbento.utilities.filecmp', 'filecmp', (['test_file', 'truth_file'], {}), '(test_file, truth_file)\n', (5888, 5911), False, 'from terrainbento.utilities import filecmp\n'), ((6066, 6096), 'terrainbento.utilities.filecmp', 'filecmp', (['test_file', 'truth_file'], {}), '(test_file, truth_file)\n', (6073, 6096), False, 'from terrainbento.utilities import filecmp\n'), ((7015, 7045), 'terrainbento.utilities.filecmp', 'filecmp', (['test_file', 'truth_file'], {}), '(test_file, truth_file)\n', (7022, 7045), False, 'from terrainbento.utilities import filecmp\n'), ((7198, 7228), 'terrainbento.utilities.filecmp', 'filecmp', (['test_file', 'truth_file'], {}), '(test_file, truth_file)\n', (7205, 7228), False, 'from terrainbento.utilities import filecmp\n'), ((7383, 7413), 'terrainbento.utilities.filecmp', 'filecmp', (['test_file', 'truth_file'], {}), '(test_file, truth_file)\n', (7390, 7413), False, 'from terrainbento.utilities import filecmp\n'), ((7568, 7598), 'terrainbento.utilities.filecmp', 'filecmp', (['test_file', 'truth_file'], {}), '(test_file, truth_file)\n', (7575, 7598), False, 'from terrainbento.utilities import filecmp\n'), ((8964, 9018), 'os.path.join', 'os.path.join', (['_TEST_DATA_DIR', 'f"""truth_{filename_base}"""'], {}), "(_TEST_DATA_DIR, f'truth_{filename_base}')\n", (8976, 9018), False, 'import os\n'), ((9043, 9091), 'os.path.join', 'os.path.join', (['os.curdir', 
'"""output"""', 'filename_base'], {}), "(os.curdir, 'output', filename_base)\n", (9055, 9091), False, 'import os\n'), ((9111, 9141), 'terrainbento.utilities.filecmp', 'filecmp', (['test_file', 'truth_file'], {}), '(test_file, truth_file)\n', (9118, 9141), False, 'from terrainbento.utilities import filecmp\n')] |
"""Auto pipeline for object detection task"""
# pylint: disable=bad-whitespace,missing-class-docstring,bare-except
import time
import os
import math
import copy
import logging
import pprint
import json
import pickle
from typing import Union, Tuple
import uuid
import shutil
import numpy as np
import pandas as pd
from autocfg import dataclass
import autogluon.core as ag
from autogluon.core.scheduler.reporter import FakeReporter
from autogluon.core.utils import get_cpu_count, get_gpu_count_all
from autogluon.core.task.base import BaseTask
from autogluon.core.searcher import LocalRandomSearcher
from gluoncv.auto.estimators.base_estimator import BaseEstimator
from gluoncv.auto.estimators import SSDEstimator, FasterRCNNEstimator, YOLOv3Estimator, CenterNetEstimator
from .utils import auto_suggest, config_to_nested
from gluoncv.auto.data.dataset import ObjectDetectionDataset
from gluoncv.auto.estimators.conf import _BEST_CHECKPOINT_FILE
__all__ = ['ObjectDetection']
@dataclass
class LiteConfig:
    # Conservative default search space used when no GPU is detected/allowed:
    # lightweight mobilenet-backbone models, a single trial, few epochs.
    transfer : Union[type(None), str, ag.Space] = ag.Categorical('ssd_512_mobilenet1.0_coco', 'yolo3_mobilenet1.0_coco')
    lr : Union[ag.Space, float] = 1e-3  # learning rate (fixed, not searched)
    num_trials : int = 1  # number of HPO trials
    epochs : Union[ag.Space, int] = 5  # training epochs per trial
    nthreads_per_trial : int = 32  # CPU workers per trial
    ngpus_per_trial : int = 0  # CPU-only configuration
    time_limits : int = 7 * 24 * 60 * 60  # 7 days
    search_strategy : str = 'random'  # scheduler searcher name
    dist_ip_addrs : Union[type(None), list, Tuple] = None  # distributed workers; None = local
@dataclass
class DefaultConfig:
    # Default GPU search space: heavier backbones across four detector
    # families, searched over two learning rates and three trials.
    transfer : Union[type(None), str, ag.Space] = ag.Categorical('ssd_512_resnet50_v1_coco',
                                                                 'yolo3_darknet53_coco',
                                                                 'faster_rcnn_resnet50_v1b_coco',
                                                                 'center_net_resnet50_v1b_coco')
    lr : Union[ag.Space, float] = ag.Categorical(1e-3, 5e-3)  # searched learning rate
    num_trials : int = 3  # number of HPO trials
    epochs : Union[ag.Space, int] = 10  # training epochs per trial
    nthreads_per_trial : int = 128  # CPU workers per trial
    ngpus_per_trial : int = 8  # GPUs per trial (clamped later to what is available)
    time_limits : int = 7 * 24 * 60 * 60  # 7 days
    search_strategy : str = 'random'  # scheduler searcher name
    dist_ip_addrs : Union[type(None), list, Tuple] = None  # distributed workers; None = local
def _train_object_detection(args, reporter):
    """Run a single object-detection training trial for the HPO scheduler.

    Parameters
    ----------
    args: <class 'autogluon.utils.edict.EasyDict'>
        Sampled hyperparameters plus control keys (``train_data``,
        ``val_data``, ``wall_clock_tick``, ``final_fit``, ``log_dir``, ...)
        which are popped off before the remainder is converted into the
        estimator's nested config via ``config_to_nested``.
    reporter : callable
        Scheduler reporter collecting intermediate metrics.

    Returns
    -------
    dict
        Trial summary containing ``train_map``/``valid_map``/``time``; on
        success also ``model_checkpoint`` (pickled estimator), on failure a
        ``traceback`` entry instead.
    """
    tic = time.time()
    args = args.copy()
    try:
        task_id = int(args['task_id'])
    except Exception:  # was bare `except:` — don't swallow KeyboardInterrupt
        task_id = 0
    final_fit = args.pop('final_fit', False)
    # train, val data
    train_data = args.pop('train_data')
    val_data = args.pop('val_data')
    # wall clock tick limit
    wall_clock_tick = args.pop('wall_clock_tick')
    log_dir = args.pop('log_dir', os.getcwd())
    # exponential batch size for Int() space batch sizes
    exp_batch_size = args.pop('exp_batch_size', False)
    if exp_batch_size and 'batch_size' in args:
        args['batch_size'] = 2 ** args['batch_size']
    try:
        task = args.pop('task')
        dataset = args.pop('dataset')
        num_trials = args.pop('num_trials')
    except KeyError:
        task = None
    # convert user defined config to nested form
    args = config_to_nested(args)
    # out of wall-clock budget before this trial even starts: bail out early
    if wall_clock_tick < tic and not final_fit:
        return {'traceback': 'timeout', 'args': str(args),
                'time': 0, 'train_map': -1, 'valid_map': -1}
    try:
        valid_summary_file = 'fit_summary_obj_det.ag'
        estimator_cls = args.pop('estimator', None)
        if estimator_cls == FasterRCNNEstimator:
            # safe guard if too many GT in dataset
            train_dataset = train_data.to_mxnet()
            max_gt_count = max([y[1].shape[0] for y in train_dataset]) + 20
            args['faster_rcnn']['max_num_gt'] = max_gt_count
        if final_fit:
            # final fit: reload the best checkpoint from earlier trial dumps
            estimator = None
            # BUGFIX: initialize before the isdir check so the
            # `result.update(...)` below cannot raise NameError when
            # log_dir does not exist.
            result = {}
            if os.path.isdir(log_dir):
                is_valid_dir_fn = lambda d : d.startswith('.trial_') and os.path.isdir(os.path.join(log_dir, d))
                trial_dirs = [d for d in os.listdir(log_dir) if is_valid_dir_fn(d)]
                best_checkpoint = ''
                best_acc = -1
                for dd in trial_dirs:
                    try:
                        with open(os.path.join(log_dir, dd, valid_summary_file), 'r') as f:
                            result = json.load(f)
                            acc = result.get('valid_map', -1)
                            if acc > best_acc and os.path.isfile(os.path.join(log_dir, dd, _BEST_CHECKPOINT_FILE)):
                                best_checkpoint = os.path.join(log_dir, dd, _BEST_CHECKPOINT_FILE)
                                best_acc = acc
                    except Exception:
                        # best-effort scan: skip unreadable/corrupt trial dirs
                        pass
                if best_checkpoint:
                    estimator = estimator_cls.load(best_checkpoint)
            if estimator is None:
                if wall_clock_tick < tic:
                    result.update({'traceback': 'timeout'})
                else:
                    # unknown error: fall through and train from scratch
                    final_fit = False
        if not final_fit:
            # create independent log_dir for each trial
            trial_log_dir = os.path.join(log_dir, '.trial_{}'.format(task_id))
            args['log_dir'] = trial_log_dir
            estimator = estimator_cls(args, reporter=reporter)
            # training
            result = estimator.fit(train_data=train_data, val_data=val_data, time_limit=wall_clock_tick-tic)
            with open(os.path.join(trial_log_dir, valid_summary_file), 'w') as f:
                json.dump(result, f)
        # save config and result
        if task is not None:
            trial_log = {}
            trial_log.update(args)
            trial_log.update(result)
            json_str = json.dumps(trial_log)
            time_str = time.strftime("%Y-%m-%d-%H-%M-%S", time.localtime())
            json_file_name = task + '_dataset-' + dataset + '_trials-' + str(num_trials) + '_' + time_str + '.json'
            with open(json_file_name, 'w') as json_file:
                json_file.write(json_str)
            logging.info('Config and result in this trial have been saved to %s.', json_file_name)
    except Exception:
        # Never let one trial crash the scheduler: report the traceback as the
        # trial result instead.  (Was a bare `except:` which also intercepted
        # KeyboardInterrupt/SystemExit.)
        import traceback
        return {'traceback': traceback.format_exc(), 'args': str(args),
                'time': time.time() - tic, 'train_map': -1, 'valid_map': -1}
    if estimator:
        result.update({'model_checkpoint': pickle.dumps(estimator)})
    return result
class ObjectDetection(BaseTask):
    """Object Detection general task.
    Parameters
    ----------
    config : dict
        The configurations, can be nested dict.
    logger : logging.Logger
        The desired logger object, use `None` for module specific logger with default setting.
    """
    # Dataset type exposed to users for loading/constructing training data.
    Dataset = ObjectDetectionDataset
    def __init__(self, config=None, logger=None):
        super(ObjectDetection, self).__init__()
        self._fit_summary = {}
        self._logger = logger if logger is not None else logging.getLogger(__name__)
        self._logger.setLevel(logging.INFO)
        self._fit_summary = {}
        self._results = {}
        # cpu and gpu setting
        cpu_count = get_cpu_count()
        gpu_count = get_gpu_count_all()
        # default settings
        if not config:
            # No user config: pick a conservative or default search space
            # based on GPU availability.
            if gpu_count < 1:
                self._logger.info('No GPU detected/allowed, using most conservative search space.')
                config = LiteConfig()
            else:
                config = DefaultConfig()
            config = config.asdict()
        else:
            # Merge user config on top of the matching default config.
            if not config.get('dist_ip_addrs', None):
                ngpus_per_trial = config.get('ngpus_per_trial', gpu_count)
                ngpus_per_trial = min(ngpus_per_trial, gpu_count)
                if ngpus_per_trial < 1:
                    self._logger.info('No GPU detected/allowed, using most conservative search space.')
                    default_config = LiteConfig()
                else:
                    default_config = DefaultConfig()
                config = default_config.merge(config, allow_new_key=True).asdict()
        # adjust cpu/gpu resources
        if not config.get('dist_ip_addrs', None):
            # Clamp requested resources to what this machine actually has.
            nthreads_per_trial = config.get('nthreads_per_trial', cpu_count)
            nthreads_per_trial = min(nthreads_per_trial, cpu_count)
            ngpus_per_trial = config.get('ngpus_per_trial', gpu_count)
            if ngpus_per_trial > gpu_count:
                ngpus_per_trial = gpu_count
                self._logger.warning(
                    "The number of requested GPUs is greater than the number of available GPUs."
                    "Reduce the number to %d", ngpus_per_trial)
        else:
            # Distributed mode requires the user to be explicit about resources.
            raise ValueError('Please specify `nthreads_per_trial` and `ngpus_per_trial` '
                             'given that dist workers are available')
        # fix estimator-transfer relationship
        # Keep only `transfer` model names compatible with the requested
        # estimator type(s) (matched by substring containment).
        estimator = config.get('estimator', None)
        transfer = config.get('transfer', None)
        if estimator is not None and transfer is not None:
            if isinstance(estimator, ag.Space):
                estimator = estimator.data
            elif isinstance(estimator, str):
                estimator = [estimator]
            if isinstance(transfer, ag.Space):
                transfer = transfer.data
            elif isinstance(transfer, str):
                transfer = [transfer]
            valid_transfer = []
            for e in estimator:
                for t in transfer:
                    if e in t:
                        valid_transfer.append(t)
            if not valid_transfer:
                raise ValueError(f'No matching `transfer` model for {estimator}')
            if len(valid_transfer) == 1:
                config['transfer'] = valid_transfer[0]
            else:
                config['transfer'] = ag.Categorical(*valid_transfer)
        # additional configs
        config['num_workers'] = nthreads_per_trial
        config['gpus'] = [int(i) for i in range(ngpus_per_trial)]
        # NOTE(review): randint(32,767) draws from [32, 767); possibly
        # intended as randint(32767) — confirm before changing.
        config['seed'] = config.get('seed', np.random.randint(32,767))
        config['final_fit'] = False
        self._cleanup_disk = config.get('cleanup_disk', True)
        self._config = config
        # scheduler options
        self.search_strategy = config.get('search_strategy', 'random')
        self.search_options = config.get('search_options', {})
        self.scheduler_options = {
            'resource': {'num_cpus': nthreads_per_trial, 'num_gpus': ngpus_per_trial},
            'checkpoint': config.get('checkpoint', 'checkpoint/exp1.ag'),
            'num_trials': config.get('num_trials', 2),
            'time_out': config.get('time_limits', 60 * 60),
            'resume': (len(config.get('resume', '')) > 0),
            'visualizer': config.get('visualizer', 'none'),
            'time_attr': 'epoch',
            'reward_attr': 'map_reward',
            'dist_ip_addrs': config.get('dist_ip_addrs', None),
            'searcher': self.search_strategy,
            'search_options': self.search_options,
            'max_reward': config.get('max_reward', 0.9)}
    def fit(self, train_data, val_data=None, train_size=0.9, random_state=None, time_limit=None):
        """Fit auto estimator given the input data.
        Parameters
        ----------
        train_data : pd.DataFrame or iterator
            Training data.
        val_data : pd.DataFrame or iterator, optional
            Validation data, optional. If `train_data` is DataFrame, `val_data` will be split from
            `train_data` given `train_size`.
        train_size : float
            The portion of train data split from original `train_data` if `val_data` is not provided.
        random_state : int
            Random state for splitting, for `np.random.seed`.
        time_limit : int, default is None
            The wall clock time limit(second) for fit process, if `None`, time limit is not enforced.
            If `fit` takes longer than `time_limit`, the process will terminate early and return the
            model prematurally.
            Due to callbacks and additional validation functions, the `time_limit` may not be very precise
            (few minutes allowance), but you can use it to safe-guard a very long training session.
            If `time_limits` key set in __init__ with config, the `time_limit` value will overwrite configuration
            if not `None`.
        Returns
        -------
        Estimator
            The estimator obtained by training on the specified dataset.
        """
        config = self._config.copy()
        # Resolve the effective time limit: explicit arg > config > unlimited.
        if time_limit is None:
            if config.get('time_limits', None):
                time_limit = config['time_limits']
            else:
                time_limit = math.inf
        elif not isinstance(time_limit, int):
            raise TypeError(f'Invalid type `time_limit={time_limit}`, int or None expected')
        self.scheduler_options['time_out'] = time_limit
        wall_clock_tick = time.time() + time_limit
        # split train/val before HPO to make fair comparisons
        if not isinstance(train_data, pd.DataFrame):
            assert val_data is not None, \
                "Please provide `val_data` as we do not know how to split `train_data` of type: \
                {}".format(type(train_data))
        if val_data is None:
            # Random row-mask split of the DataFrame into train/validation.
            assert 0 <= train_size <= 1.0
            if random_state:
                np.random.seed(random_state)
            split_mask = np.random.rand(len(train_data)) < train_size
            train = train_data[split_mask]
            val = train_data[~split_mask]
            self._logger.info('Randomly split train_data into train[%d]/validation[%d] splits.',
                              len(train), len(val))
            train_data, val_data = train, val
        # automatically suggest some hyperparameters based on the dataset statistics(experimental)
        estimator = config.get('estimator', None)
        transfer = config.get('transfer', None)
        if not transfer:
            config['train_dataset'] = train_data
            auto_suggest(config, estimator, self._logger)
            config.pop('train_dataset')
        # register args
        config['train_data'] = train_data
        config['val_data'] = val_data
        config['wall_clock_tick'] = wall_clock_tick
        # Unique per-fit log dir (8-char uuid suffix) under the configured root.
        config['log_dir'] = os.path.join(config.get('log_dir', os.getcwd()), str(uuid.uuid4())[:8])
        start_time = time.time()
        self._fit_summary = {}
        self._results = {}
        if config.get('num_trials', 1) < 2:
            # Single trial: skip the HPO scheduler and train directly with one
            # randomly sampled configuration.
            reporter = FakeReporter()
            rand_config = LocalRandomSearcher(search_space=config).get_config()
            self._logger.info("Starting fit without HPO")
            cur_config = {**config}
            cur_config.update(rand_config)
            results = _train_object_detection({**cur_config}, reporter)
            best_config = cur_config
            best_config.pop('train_data', None)
            best_config.pop('val_data', None)
            self._fit_summary.update({'train_map': results.get('train_map', -1),
                                      'valid_map': results.get('valid_map', -1),
                                      'total_time': results.get('time', time.time() - start_time),
                                      'best_config': best_config})
            self._results = self._fit_summary
        else:
            # Multiple trials: run the configured HPO scheduler.
            self._logger.info("Starting HPO experiments")
            results = self.run_fit(_train_object_detection, config, self.search_strategy,
                                   self.scheduler_options)
            if isinstance(results, dict):
                ks = ('best_reward', 'best_config', 'total_time', 'config_history', 'reward_attr')
                self._results.update({k: v for k, v in results.items() if k in ks})
        end_time = time.time()
        self._logger.info("Finished, total runtime is %.2f s", end_time - start_time)
        if config.get('num_trials', 1) > 1:
            best_config = {**config}
            best_config.update(results['best_config'])
            # convert best config to nested form
            best_config = config_to_nested(best_config)
            best_config.pop('train_data', None)
            best_config.pop('val_data', None)
            self._fit_summary.update({'train_map': results.get('train_map', -1),
                                      'valid_map': results.get('valid_map', results.get('best_reward', -1)),
                                      'total_time': results.get('total_time', time.time() - start_time),
                                      'best_config': best_config})
        self._logger.info(pprint.pformat(self._fit_summary, indent=2))
        if self._cleanup_disk:
            shutil.rmtree(config['log_dir'], ignore_errors=True)
        # No checkpoint means every trial failed: surface the reason.
        model_checkpoint = results.get('model_checkpoint', None)
        if model_checkpoint is None:
            if results.get('traceback', '') == 'timeout':
                raise TimeoutError(f'Unable to fit a usable model given `time_limit={time_limit}`')
            raise RuntimeError(f'Unexpected error happened during fit: {pprint.pformat(results, indent=2)}')
        # Rehydrate the best trained estimator from its pickled bytes.
        estimator = pickle.loads(results['model_checkpoint'])
        return estimator
    def fit_summary(self):
        # Shallow copy so callers cannot mutate the internal summary.
        return copy.copy(self._fit_summary)
    def fit_history(self):
        # Shallow copy of the per-trial HPO history/results.
        return copy.copy(self._results)
    @classmethod
    def load(cls, filename):
        obj = BaseEstimator.load(filename)
        # make sure not accidentally loading e.g. classification model
        # pylint: disable=unidiomatic-typecheck
        assert type(obj) in (SSDEstimator, FasterRCNNEstimator, YOLOv3Estimator, CenterNetEstimator)
        return obj
| [
"logging.getLogger",
"autogluon.core.utils.get_cpu_count",
"pickle.dumps",
"gluoncv.auto.estimators.base_estimator.BaseEstimator.load",
"autogluon.core.Categorical",
"pickle.loads",
"copy.copy",
"logging.info",
"autogluon.core.utils.get_gpu_count_all",
"os.listdir",
"json.dumps",
"os.path.isdi... | [((1057, 1127), 'autogluon.core.Categorical', 'ag.Categorical', (['"""ssd_512_mobilenet1.0_coco"""', '"""yolo3_mobilenet1.0_coco"""'], {}), "('ssd_512_mobilenet1.0_coco', 'yolo3_mobilenet1.0_coco')\n", (1071, 1127), True, 'import autogluon.core as ag\n'), ((1523, 1658), 'autogluon.core.Categorical', 'ag.Categorical', (['"""ssd_512_resnet50_v1_coco"""', '"""yolo3_darknet53_coco"""', '"""faster_rcnn_resnet50_v1b_coco"""', '"""center_net_resnet50_v1b_coco"""'], {}), "('ssd_512_resnet50_v1_coco', 'yolo3_darknet53_coco',\n 'faster_rcnn_resnet50_v1b_coco', 'center_net_resnet50_v1b_coco')\n", (1537, 1658), True, 'import autogluon.core as ag\n'), ((1884, 1912), 'autogluon.core.Categorical', 'ag.Categorical', (['(0.001)', '(0.005)'], {}), '(0.001, 0.005)\n', (1898, 1912), True, 'import autogluon.core as ag\n'), ((2340, 2351), 'time.time', 'time.time', ([], {}), '()\n', (2349, 2351), False, 'import time\n'), ((2710, 2721), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (2719, 2721), False, 'import os\n'), ((7254, 7269), 'autogluon.core.utils.get_cpu_count', 'get_cpu_count', ([], {}), '()\n', (7267, 7269), False, 'from autogluon.core.utils import get_cpu_count, get_gpu_count_all\n'), ((7290, 7309), 'autogluon.core.utils.get_gpu_count_all', 'get_gpu_count_all', ([], {}), '()\n', (7307, 7309), False, 'from autogluon.core.utils import get_cpu_count, get_gpu_count_all\n'), ((14565, 14576), 'time.time', 'time.time', ([], {}), '()\n', (14574, 14576), False, 'import time\n'), ((15976, 15987), 'time.time', 'time.time', ([], {}), '()\n', (15985, 15987), False, 'import time\n'), ((17328, 17369), 'pickle.loads', 'pickle.loads', (["results['model_checkpoint']"], {}), "(results['model_checkpoint'])\n", (17340, 17369), False, 'import pickle\n'), ((17438, 17466), 'copy.copy', 'copy.copy', (['self._fit_summary'], {}), '(self._fit_summary)\n', (17447, 17466), False, 'import copy\n'), ((17510, 17534), 'copy.copy', 'copy.copy', (['self._results'], {}), '(self._results)\n', 
(17519, 17534), False, 'import copy\n'), ((17596, 17624), 'gluoncv.auto.estimators.base_estimator.BaseEstimator.load', 'BaseEstimator.load', (['filename'], {}), '(filename)\n', (17614, 17624), False, 'from gluoncv.auto.estimators.base_estimator import BaseEstimator\n'), ((3861, 3883), 'os.path.isdir', 'os.path.isdir', (['log_dir'], {}), '(log_dir)\n', (3874, 3883), False, 'import os\n'), ((7073, 7100), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (7090, 7100), False, 'import logging\n'), ((10171, 10197), 'numpy.random.randint', 'np.random.randint', (['(32)', '(767)'], {}), '(32, 767)\n', (10188, 10197), True, 'import numpy as np\n'), ((13094, 13105), 'time.time', 'time.time', ([], {}), '()\n', (13103, 13105), False, 'import time\n'), ((14702, 14716), 'autogluon.core.scheduler.reporter.FakeReporter', 'FakeReporter', ([], {}), '()\n', (14714, 14716), False, 'from autogluon.core.scheduler.reporter import FakeReporter\n'), ((16797, 16840), 'pprint.pformat', 'pprint.pformat', (['self._fit_summary'], {'indent': '(2)'}), '(self._fit_summary, indent=2)\n', (16811, 16840), False, 'import pprint\n'), ((16886, 16938), 'shutil.rmtree', 'shutil.rmtree', (["config['log_dir']"], {'ignore_errors': '(True)'}), "(config['log_dir'], ignore_errors=True)\n", (16899, 16938), False, 'import shutil\n'), ((5597, 5617), 'json.dump', 'json.dump', (['result', 'f'], {}), '(result, f)\n', (5606, 5617), False, 'import json\n'), ((5826, 5847), 'json.dumps', 'json.dumps', (['trial_log'], {}), '(trial_log)\n', (5836, 5847), False, 'import json\n'), ((6171, 6261), 'logging.info', 'logging.info', (['"""Config and result in this trial have been saved to %s."""', 'json_file_name'], {}), "('Config and result in this trial have been saved to %s.',\n json_file_name)\n", (6183, 6261), False, 'import logging\n'), ((6324, 6346), 'traceback.format_exc', 'traceback.format_exc', ([], {}), '()\n', (6344, 6346), False, 'import traceback\n'), ((6506, 6529), 'pickle.dumps', 
'pickle.dumps', (['estimator'], {}), '(estimator)\n', (6518, 6529), False, 'import pickle\n'), ((9948, 9979), 'autogluon.core.Categorical', 'ag.Categorical', (['*valid_transfer'], {}), '(*valid_transfer)\n', (9962, 9979), True, 'import autogluon.core as ag\n'), ((13537, 13565), 'numpy.random.seed', 'np.random.seed', (['random_state'], {}), '(random_state)\n', (13551, 13565), True, 'import numpy as np\n'), ((14506, 14517), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (14515, 14517), False, 'import os\n'), ((5521, 5568), 'os.path.join', 'os.path.join', (['trial_log_dir', 'valid_summary_file'], {}), '(trial_log_dir, valid_summary_file)\n', (5533, 5568), False, 'import os\n'), ((5910, 5926), 'time.localtime', 'time.localtime', ([], {}), '()\n', (5924, 5926), False, 'import time\n'), ((6391, 6402), 'time.time', 'time.time', ([], {}), '()\n', (6400, 6402), False, 'import time\n'), ((14524, 14536), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (14534, 14536), False, 'import uuid\n'), ((14743, 14783), 'autogluon.core.searcher.LocalRandomSearcher', 'LocalRandomSearcher', ([], {'search_space': 'config'}), '(search_space=config)\n', (14762, 14783), False, 'from autogluon.core.searcher import LocalRandomSearcher\n'), ((4039, 4058), 'os.listdir', 'os.listdir', (['log_dir'], {}), '(log_dir)\n', (4049, 4058), False, 'import os\n'), ((17271, 17304), 'pprint.pformat', 'pprint.pformat', (['results'], {'indent': '(2)'}), '(results, indent=2)\n', (17285, 17304), False, 'import pprint\n'), ((3972, 3996), 'os.path.join', 'os.path.join', (['log_dir', 'd'], {}), '(log_dir, d)\n', (3984, 3996), False, 'import os\n'), ((4369, 4381), 'json.load', 'json.load', (['f'], {}), '(f)\n', (4378, 4381), False, 'import json\n'), ((15371, 15382), 'time.time', 'time.time', ([], {}), '()\n', (15380, 15382), False, 'import time\n'), ((16677, 16688), 'time.time', 'time.time', ([], {}), '()\n', (16686, 16688), False, 'import time\n'), ((4274, 4319), 'os.path.join', 'os.path.join', (['log_dir', 'dd', 
'valid_summary_file'], {}), '(log_dir, dd, valid_summary_file)\n', (4286, 4319), False, 'import os\n'), ((4610, 4658), 'os.path.join', 'os.path.join', (['log_dir', 'dd', '_BEST_CHECKPOINT_FILE'], {}), '(log_dir, dd, _BEST_CHECKPOINT_FILE)\n', (4622, 4658), False, 'import os\n'), ((4509, 4557), 'os.path.join', 'os.path.join', (['log_dir', 'dd', '_BEST_CHECKPOINT_FILE'], {}), '(log_dir, dd, _BEST_CHECKPOINT_FILE)\n', (4521, 4557), False, 'import os\n')] |
#coding:utf-8
import cv2
import numpy as np
import sys
import os
# Display colors used to paint label scribbles (B, G, R order under OpenCV):
COLOR_BG = (255,0,0)  # background strokes render blue
COLOR_FG = (0,255,0)  # foreground strokes render green
def mask2color(mask):
    """Render a GrabCut mask as a BGR visualization image.

    Pixels labelled 0 (certain background) or 2 (probable background) are
    painted COLOR_BG; pixels labelled 1 or 3 (certain/probable foreground)
    are painted COLOR_FG.
    """
    rows, cols = mask.shape[:2]
    color = np.zeros((rows, cols, 3), np.uint8)
    bg_pixels = (mask == 0) | (mask == 2)
    fg_pixels = (mask == 1) | (mask == 3)
    color[bg_pixels] = COLOR_BG
    color[fg_pixels] = COLOR_FG
    return color
def color2mask(color):
    """Invert mask2color: map COLOR_BG/COLOR_FG pixels back to labels 0/1."""
    rows, cols = color.shape[:2]
    mask = np.zeros((rows, cols), np.uint8)
    mask[(color == COLOR_BG).all(axis=2)] = 0
    mask[(color == COLOR_FG).all(axis=2)] = 1
    return mask
def on_mouse(event,x,y,flags,param):
    # cv2 mouse-callback trampoline: `param` is the
    # InteractiveImageSegmentation instance registered via setMouseCallback.
    param.mouse_cb(event,x,y,flags)
def nothing(x):
    # No-op trackbar callback required by cv2.createTrackbar.
    pass
class InteractiveImageSegmentation:
    """Interactive GrabCut-based foreground/background labelling tool.

    Shows an OpenCV window; the user scribbles hard/soft labels with the
    mouse (CTRL = background, SHIFT = foreground) and re-runs cv2.grabCut
    to refine the segmentation mask.
    """
    def __init__(self):
        self.winname = "InteractiveImageSegmentation"
        # Empty placeholders until process() receives an image.
        self.img = np.zeros((0))
        self.mask = np.zeros((0))
        self.left_mouse_down = False
        self.right_mouse_down = False
        self.radius = 3  # brush radius in pixels
        self.max_radius = 40
        self.use_prev_mask = False  # if True, reuse the previous mask as the GrabCut seed
        self.cur_mouse = (-1,-1)
        # Display mode: 0 = 50/50 blend, 1 = image only, 2 = mask colors only.
        self.draw_color = 0
        cv2.namedWindow(self.winname)
        # `self` is forwarded to the module-level on_mouse trampoline as `param`.
        cv2.setMouseCallback(self.winname, on_mouse, self)
        cv2.createTrackbar('brush size',self.winname,self.radius,self.max_radius,nothing)
    def mouse_cb(self,event,x,y,flags):
        """Track button state, paint label scribbles, and resize the brush."""
        self.cur_mouse = (x,y)
        if event == cv2.EVENT_LBUTTONDOWN:
            self.left_mouse_down = True
        elif event == cv2.EVENT_LBUTTONUP:
            self.left_mouse_down = False
        elif event == cv2.EVENT_RBUTTONDOWN:
            self.right_mouse_down = True
        elif event == cv2.EVENT_RBUTTONUP:
            self.right_mouse_down = False
        # Paint only while a button is held and an image/mask pair is loaded.
        if (self.left_mouse_down or self.right_mouse_down) and self.mask.size>0 and self.img.size>0:
            if flags & cv2.EVENT_FLAG_CTRLKEY:
                # CTRL paints background: left button = certain, right = probable
                # (the "probable" preview color is dimmed to 1/3 intensity).
                cv2.circle(self.img, (x,y), self.radius, (COLOR_BG if self.left_mouse_down else tuple([k/3 for k in COLOR_BG])), -1)
                cv2.circle(self.mask, (x,y), self.radius, (cv2.GC_BGD if self.left_mouse_down else cv2.GC_PR_BGD), -1)
            elif flags & cv2.EVENT_FLAG_SHIFTKEY:
                # SHIFT paints foreground with the same certain/probable scheme.
                cv2.circle(self.img, (x,y), self.radius, (COLOR_FG if self.left_mouse_down else tuple([k/3 for k in COLOR_FG])), -1)
                cv2.circle(self.mask, (x,y), self.radius, (cv2.GC_FGD if self.left_mouse_down else cv2.GC_PR_FGD), -1)
        if event == cv2.EVENT_MOUSEWHEEL:
            # Wheel adjusts brush radius by ~40% per notch, 1..5 px per step;
            # the scroll direction is encoded in the sign of `flags`.
            if flags<0:
                diff_k = int(np.clip(self.radius*0.4,1,5))
                self.radius+=diff_k
            elif flags>0:
                diff_k = int(np.clip(self.radius*0.4,1,5))
                self.radius-=diff_k
            self.radius = np.clip(self.radius, 1, self.max_radius)
            cv2.setTrackbarPos('brush size', self.winname, self.radius)
    def __init_mask(self, mask):
        # Seed everything as probable foreground except a thin top strip of
        # probable background, so GrabCut has both classes to start from.
        mask[:] = cv2.GC_PR_FGD
        mask[:10,:] = cv2.GC_PR_BGD
    def process(self, img):
        """Run the interactive loop on one image; return the key that ended it.

        Keys: 'c' clear scribbles, 'w' cycle display mode, 'a'/SPACE re-run
        GrabCut, and 'q'/ESC/'s'/'p'/'n'/ENTER exit the loop (the pressed key
        is returned so the caller can decide what to do next).
        """
        self.img = np.copy(img)
        if self.use_prev_mask==False or self.mask.shape[:2]!=self.img.shape[:2]:
            self.mask = np.zeros(img.shape[:2],'uint8')
            self.__init_mask(self.mask)
        # GrabCut's internal GMM state; must persist between iterations.
        self.bgdModel = np.zeros((1,65),np.float64)
        self.fgdModel = np.zeros((1,65),np.float64)
        cv2.grabCut(img, self.mask, None, self.bgdModel, self.fgdModel, 1, cv2.GC_INIT_WITH_MASK)
        while True:
            self.radius = cv2.getTrackbarPos('brush size',self.winname)
            color = mask2color(self.mask)
            alpha = 0.5 if self.draw_color==0 else (1 if self.draw_color==1 else 0)
            show_img = (self.img*alpha + color*(1-alpha)).astype('uint8')
            # Brush cursor: thicker circle while the left button is held.
            cv2.circle(show_img, self.cur_mouse, self.radius, (200,200,200), (2 if self.left_mouse_down else 1))
            cv2.imshow(self.winname,show_img)
            cv2.imshow('color',color)
            key = cv2.waitKey(100)
            if key == ord('c'):
                # Clear all scribbles and reset the mask seed.
                self.img = np.copy(img)
                self.__init_mask(self.mask)
            elif key == ord('q') or key == 27 or key==ord('s') or key==ord('p') or key==ord('n') or key == 10:
                break
            elif key == ord('w'):
                self.draw_color = (self.draw_color+1)%3
            elif key == ord('a') or key == 32:
                cv2.putText(show_img, 'segmenting...', (10,30), cv2.FONT_HERSHEY_SIMPLEX, 1, (0,255,255),2)
                cv2.imshow(self.winname,show_img)
                cv2.waitKey(1)
                cv2.grabCut(img, self.mask, None, self.bgdModel, self.fgdModel, 1, cv2.GC_INIT_WITH_MASK)
                # Drop the scribble overlay now that it has been consumed.
                self.img = np.copy(img)
        return key
if __name__ == '__main__':
    # if(len(sys.argv)!=3):
    #     print('Usage: interactive_image_segmentation.py [img_dir] [save_dir]')
    #     exit()
    img_dir = 'temp/images'
    save_dir = 'temp/images/out'
    if not os.path.exists(save_dir):
        os.makedirs(save_dir)
        print('%s not exists, create it.'%save_dir)
    print("================= Interactive Image Segmentation =================")
    print("CTRL+left mouse button: select certain background pixels ")
    print("SHIFT+left mouse button: select certain foreground pixels ")
    print("CTRL+right mouse button: select possible background pixels ")
    print("SHIFT+right mouse button: select possible foreground pixels ")
    print("'a'/SPACE: run sengementation again")
    print("'p': prev image 'n': next image")
    print("'s'/ENTER: save label 'q'/ESC: exit")
    iis = InteractiveImageSegmentation()
    iis.use_prev_mask = True
    fimglist = sorted([x for x in os.listdir(img_dir) if '.png' in x or '.jpg' in x])
    idx = 0
    # Skip images that already have a saved label.
    while idx<len(fimglist) and os.path.exists(os.path.join(save_dir,fimglist[idx])):
        idx += 1
    while idx<len(fimglist):
        fimg = fimglist[idx]
        print('process %s'%fimg)
        # Resume from an existing color label if one was saved earlier.
        if os.path.exists(os.path.join(save_dir,fimg)):
            iis.mask = color2mask(cv2.imread(os.path.join(save_dir,fimg)))
        key = iis.process(cv2.imread(os.path.join(img_dir,fimg)))
        if key == ord('s') or key == 10:
            # 's' or ENTER: save the current mask as a color label image.
            saveimg = os.path.join(save_dir, fimg)
            cv2.imwrite(saveimg,mask2color(iis.mask))
            print('save label %s.'%saveimg)
            idx += 1
        elif key == ord('p') and idx>0:
            idx -= 1
        elif key == ord('n') or key == 32:
            # NOTE(review): process() consumes SPACE (32) for re-segmentation
            # and never returns it, so only 'n' can reach this branch — confirm.
            idx += 1
        elif key == ord('q') or key == 27:
            break
        # Demote certain background back to "probable" so the reused mask
        # remains a valid GrabCut seed for the next image.
        iis.mask[np.where(iis.mask==cv2.GC_BGD)]=cv2.GC_PR_BGD
iis.mask[np.where(iis.mask==cv2.GC_FGD)]=cv2.GC_PR_FGD | [
"cv2.setMouseCallback",
"numpy.copy",
"os.path.exists",
"numpy.clip",
"os.listdir",
"os.makedirs",
"numpy.where",
"cv2.grabCut",
"cv2.setTrackbarPos",
"os.path.join",
"cv2.imshow",
"cv2.putText",
"numpy.zeros",
"cv2.circle",
"cv2.getTrackbarPos",
"cv2.waitKey",
"cv2.createTrackbar",
... | [((168, 197), 'numpy.zeros', 'np.zeros', (['(r, c, 3)', 'np.uint8'], {}), '((r, c, 3), np.uint8)\n', (176, 197), True, 'import numpy as np\n'), ((377, 403), 'numpy.zeros', 'np.zeros', (['(r, c)', 'np.uint8'], {}), '((r, c), np.uint8)\n', (385, 403), True, 'import numpy as np\n'), ((205, 240), 'numpy.where', 'np.where', (['((mask == 0) | (mask == 2))'], {}), '((mask == 0) | (mask == 2))\n', (213, 240), True, 'import numpy as np\n'), ((257, 292), 'numpy.where', 'np.where', (['((mask == 1) | (mask == 3))'], {}), '((mask == 1) | (mask == 3))\n', (265, 292), True, 'import numpy as np\n'), ((760, 771), 'numpy.zeros', 'np.zeros', (['(0)'], {}), '(0)\n', (768, 771), True, 'import numpy as np\n'), ((794, 805), 'numpy.zeros', 'np.zeros', (['(0)'], {}), '(0)\n', (802, 805), True, 'import numpy as np\n'), ((1040, 1069), 'cv2.namedWindow', 'cv2.namedWindow', (['self.winname'], {}), '(self.winname)\n', (1055, 1069), False, 'import cv2\n'), ((1078, 1128), 'cv2.setMouseCallback', 'cv2.setMouseCallback', (['self.winname', 'on_mouse', 'self'], {}), '(self.winname, on_mouse, self)\n', (1098, 1128), False, 'import cv2\n'), ((1137, 1226), 'cv2.createTrackbar', 'cv2.createTrackbar', (['"""brush size"""', 'self.winname', 'self.radius', 'self.max_radius', 'nothing'], {}), "('brush size', self.winname, self.radius, self.max_radius,\n nothing)\n", (1155, 1226), False, 'import cv2\n'), ((2902, 2914), 'numpy.copy', 'np.copy', (['img'], {}), '(img)\n', (2909, 2914), True, 'import numpy as np\n'), ((3116, 3145), 'numpy.zeros', 'np.zeros', (['(1, 65)', 'np.float64'], {}), '((1, 65), np.float64)\n', (3124, 3145), True, 'import numpy as np\n'), ((3168, 3197), 'numpy.zeros', 'np.zeros', (['(1, 65)', 'np.float64'], {}), '((1, 65), np.float64)\n', (3176, 3197), True, 'import numpy as np\n'), ((3204, 3298), 'cv2.grabCut', 'cv2.grabCut', (['img', 'self.mask', 'None', 'self.bgdModel', 'self.fgdModel', '(1)', 'cv2.GC_INIT_WITH_MASK'], {}), '(img, self.mask, None, self.bgdModel, self.fgdModel, 1, 
cv2.\n GC_INIT_WITH_MASK)\n', (3215, 3298), False, 'import cv2\n'), ((4787, 4811), 'os.path.exists', 'os.path.exists', (['save_dir'], {}), '(save_dir)\n', (4801, 4811), False, 'import os\n'), ((4821, 4842), 'os.makedirs', 'os.makedirs', (['save_dir'], {}), '(save_dir)\n', (4832, 4842), False, 'import os\n'), ((2639, 2679), 'numpy.clip', 'np.clip', (['self.radius', '(1)', 'self.max_radius'], {}), '(self.radius, 1, self.max_radius)\n', (2646, 2679), True, 'import numpy as np\n'), ((2692, 2751), 'cv2.setTrackbarPos', 'cv2.setTrackbarPos', (['"""brush size"""', 'self.winname', 'self.radius'], {}), "('brush size', self.winname, self.radius)\n", (2710, 2751), False, 'import cv2\n'), ((3020, 3052), 'numpy.zeros', 'np.zeros', (['img.shape[:2]', '"""uint8"""'], {}), "(img.shape[:2], 'uint8')\n", (3028, 3052), True, 'import numpy as np\n'), ((3341, 3387), 'cv2.getTrackbarPos', 'cv2.getTrackbarPos', (['"""brush size"""', 'self.winname'], {}), "('brush size', self.winname)\n", (3359, 3387), False, 'import cv2\n'), ((3599, 3703), 'cv2.circle', 'cv2.circle', (['show_img', 'self.cur_mouse', 'self.radius', '(200, 200, 200)', '(2 if self.left_mouse_down else 1)'], {}), '(show_img, self.cur_mouse, self.radius, (200, 200, 200), 2 if\n self.left_mouse_down else 1)\n', (3609, 3703), False, 'import cv2\n'), ((3712, 3746), 'cv2.imshow', 'cv2.imshow', (['self.winname', 'show_img'], {}), '(self.winname, show_img)\n', (3722, 3746), False, 'import cv2\n'), ((3758, 3784), 'cv2.imshow', 'cv2.imshow', (['"""color"""', 'color'], {}), "('color', color)\n", (3768, 3784), False, 'import cv2\n'), ((3802, 3818), 'cv2.waitKey', 'cv2.waitKey', (['(100)'], {}), '(100)\n', (3813, 3818), False, 'import cv2\n'), ((5638, 5675), 'os.path.join', 'os.path.join', (['save_dir', 'fimglist[idx]'], {}), '(save_dir, fimglist[idx])\n', (5650, 5675), False, 'import os\n'), ((5812, 5840), 'os.path.join', 'os.path.join', (['save_dir', 'fimg'], {}), '(save_dir, fimg)\n', (5824, 5840), False, 'import os\n'), ((6046, 
6074), 'os.path.join', 'os.path.join', (['save_dir', 'fimg'], {}), '(save_dir, fimg)\n', (6058, 6074), False, 'import os\n'), ((6397, 6429), 'numpy.where', 'np.where', (['(iis.mask == cv2.GC_BGD)'], {}), '(iis.mask == cv2.GC_BGD)\n', (6405, 6429), True, 'import numpy as np\n'), ((6460, 6492), 'numpy.where', 'np.where', (['(iis.mask == cv2.GC_FGD)'], {}), '(iis.mask == cv2.GC_FGD)\n', (6468, 6492), True, 'import numpy as np\n'), ((1926, 2032), 'cv2.circle', 'cv2.circle', (['self.mask', '(x, y)', 'self.radius', '(cv2.GC_BGD if self.left_mouse_down else cv2.GC_PR_BGD)', '(-1)'], {}), '(self.mask, (x, y), self.radius, cv2.GC_BGD if self.\n left_mouse_down else cv2.GC_PR_BGD, -1)\n', (1936, 2032), False, 'import cv2\n'), ((3878, 3890), 'numpy.copy', 'np.copy', (['img'], {}), '(img)\n', (3885, 3890), True, 'import numpy as np\n'), ((5527, 5546), 'os.listdir', 'os.listdir', (['img_dir'], {}), '(img_dir)\n', (5537, 5546), False, 'import os\n'), ((5954, 5981), 'os.path.join', 'os.path.join', (['img_dir', 'fimg'], {}), '(img_dir, fimg)\n', (5966, 5981), False, 'import os\n'), ((2228, 2334), 'cv2.circle', 'cv2.circle', (['self.mask', '(x, y)', 'self.radius', '(cv2.GC_FGD if self.left_mouse_down else cv2.GC_PR_FGD)', '(-1)'], {}), '(self.mask, (x, y), self.radius, cv2.GC_FGD if self.\n left_mouse_down else cv2.GC_PR_FGD, -1)\n', (2238, 2334), False, 'import cv2\n'), ((2426, 2458), 'numpy.clip', 'np.clip', (['(self.radius * 0.4)', '(1)', '(5)'], {}), '(self.radius * 0.4, 1, 5)\n', (2433, 2458), True, 'import numpy as np\n'), ((5887, 5915), 'os.path.join', 'os.path.join', (['save_dir', 'fimg'], {}), '(save_dir, fimg)\n', (5899, 5915), False, 'import os\n'), ((2547, 2579), 'numpy.clip', 'np.clip', (['(self.radius * 0.4)', '(1)', '(5)'], {}), '(self.radius * 0.4, 1, 5)\n', (2554, 2579), True, 'import numpy as np\n'), ((4221, 4321), 'cv2.putText', 'cv2.putText', (['show_img', '"""segmenting..."""', '(10, 30)', 'cv2.FONT_HERSHEY_SIMPLEX', '(1)', '(0, 255, 255)', '(2)'], {}), 
"(show_img, 'segmenting...', (10, 30), cv2.FONT_HERSHEY_SIMPLEX, \n 1, (0, 255, 255), 2)\n", (4232, 4321), False, 'import cv2\n'), ((4329, 4363), 'cv2.imshow', 'cv2.imshow', (['self.winname', 'show_img'], {}), '(self.winname, show_img)\n', (4339, 4363), False, 'import cv2\n'), ((4379, 4393), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (4390, 4393), False, 'import cv2\n'), ((4410, 4504), 'cv2.grabCut', 'cv2.grabCut', (['img', 'self.mask', 'None', 'self.bgdModel', 'self.fgdModel', '(1)', 'cv2.GC_INIT_WITH_MASK'], {}), '(img, self.mask, None, self.bgdModel, self.fgdModel, 1, cv2.\n GC_INIT_WITH_MASK)\n', (4421, 4504), False, 'import cv2\n'), ((4527, 4539), 'numpy.copy', 'np.copy', (['img'], {}), '(img)\n', (4534, 4539), True, 'import numpy as np\n')] |
import os
import cv2
import xml.etree.ElementTree as ET
import numpy as np
# This function is used to load data
# Path to image file, and xml_file are given as input, and it returns image, bounding_box, class as output
def read_sample(image_path, label_path):
    """Load one detection sample: an image plus its Pascal-VOC XML annotation.

    Args:
        image_path: path to the image file (trailing newlines are stripped).
        label_path: path to the VOC-style .xml annotation file.

    Returns:
        (image, bboxes, classes) where image is a float ndarray in BGR order,
        bboxes is an (N, 4) float ndarray of [xmin, ymin, xmax, ymax] and
        classes is a list of N object-name strings.

    Raises:
        AssertionError: if either file does not exist.
    """
    image_path = image_path.strip("\n")
    label_path = label_path.strip("\n")
    assert os.path.exists(image_path), "Image file does not exist."
    assert os.path.exists(label_path), "Label file does not exist."
    image = cv2.imread(image_path)  # read image in bgr format
    bboxes, classes = [], []
    xml_root = ET.parse(label_path).getroot()
    for obj in xml_root.findall("object"):
        name = obj.find("name").text
        bndbox = obj.find("bndbox")
        # the reason why we use float() is because some value in bndbox are float
        xmin = float(bndbox.find("xmin").text)
        ymin = float(bndbox.find("ymin").text)
        xmax = float(bndbox.find("xmax").text)
        ymax = float(bndbox.find("ymax").text)
        bboxes.append([xmin, ymin, xmax, ymax])
        classes.append(name)
    # np.float was removed in NumPy 1.24; the builtin float is the documented
    # replacement and still produces float64 arrays.
    return np.array(image, dtype=float), np.array(bboxes, dtype=float), classes
# This function creates and returns a list of strings, each string contains the path of image_file and xml_file
# It takes split file as input, which contain the names of image files in every new line
def generate_samples_from_split(split_file, images_dir, xml_dir):
    """Build "<image_path> <xml_path>" sample strings from a split file.

    The split file lists one file stem per line; each stem is resolved to a
    .jpg path under images_dir and a .xml path under xml_dir, joined with a
    single space.
    """
    assert os.path.isfile(split_file), "split_file does not exists."
    assert os.path.isdir(images_dir), "images_dir is not a directory."
    assert os.path.isdir(xml_dir), "xml_dir is not a directory."
    with open(split_file, "r") as handle:
        stems = [raw.strip("\n") for raw in handle.readlines()]
    return [
        f"{os.path.join(images_dir, stem + '.jpg')} {os.path.join(xml_dir, stem + '.xml')}"
        for stem in stems
    ]
| [
"os.path.exists",
"xml.etree.ElementTree.parse",
"os.path.isfile",
"numpy.array",
"os.path.isdir",
"cv2.imread"
] | [((353, 379), 'os.path.exists', 'os.path.exists', (['image_path'], {}), '(image_path)\n', (367, 379), False, 'import os\n'), ((421, 447), 'os.path.exists', 'os.path.exists', (['label_path'], {}), '(label_path)\n', (435, 447), False, 'import os\n'), ((491, 513), 'cv2.imread', 'cv2.imread', (['image_path'], {}), '(image_path)\n', (501, 513), False, 'import cv2\n'), ((1483, 1509), 'os.path.isfile', 'os.path.isfile', (['split_file'], {}), '(split_file)\n', (1497, 1509), False, 'import os\n'), ((1552, 1577), 'os.path.isdir', 'os.path.isdir', (['images_dir'], {}), '(images_dir)\n', (1565, 1577), False, 'import os\n'), ((1623, 1645), 'os.path.isdir', 'os.path.isdir', (['xml_dir'], {}), '(xml_dir)\n', (1636, 1645), False, 'import os\n'), ((1127, 1158), 'numpy.array', 'np.array', (['image'], {'dtype': 'np.float'}), '(image, dtype=np.float)\n', (1135, 1158), True, 'import numpy as np\n'), ((1160, 1192), 'numpy.array', 'np.array', (['bboxes'], {'dtype': 'np.float'}), '(bboxes, dtype=np.float)\n', (1168, 1192), True, 'import numpy as np\n'), ((586, 606), 'xml.etree.ElementTree.parse', 'ET.parse', (['label_path'], {}), '(label_path)\n', (594, 606), True, 'import xml.etree.ElementTree as ET\n')] |
#!/usr/bin/env python
"""Auxiliary numerical tools
"""
from math import acos, pi, sqrt
from warnings import warn
#from . import log as log
import anuga.utilities.log as anuga_log
import numpy as num
#After having migrated to numpy we should use the native NAN.
#num.seterr(divide='warn')
num.seterr(divide='ignore') # Ignore division error here for the time being
# Deliberate division by zero to manufacture a non-finite constant (the
# seterr call above silences the warning this would otherwise emit).
# NOTE(review): under modern NumPy, array([1])/0. evaluates to inf rather
# than NaN — confirm whether inf is the intended sentinel here.
NAN = (num.array([1])/0.)[0]
# Static variable used by get_machine_precision
# (module-level cache, computed lazily on first call).
machine_precision = None
def safe_acos(x):
    """Safely compute acos

    Protect against cases where input argument x is outside the allowed
    interval [-1.0, 1.0] by no more than machine precision: such values are
    clamped to +/-1.0. Values further outside raise ValueError.
    """
    error_msg = 'Input to acos is outside allowed domain [-1.0, 1.0].'+\
                'I got %.12f' %x
    # The previously computed warning message was dead code (warn() was never
    # called), so it has been removed.
    eps = get_machine_precision() # Machine precision
    if x < -1.0:
        if x < -1.0 - eps:
            raise ValueError(error_msg)
        else:
            x = -1.0
    if x > 1.0:
        if x > 1.0 + eps:
            raise ValueError(error_msg)
        else:
            x = 1.0
    return acos(x)
def sign(x):
    """Return -1, 0 or 1 according to the sign of x.

    Unordered inputs (e.g. NaN) fall through every comparison and yield None,
    matching the original behaviour.
    """
    if x == 0:
        return 0
    if x > 0:
        return 1
    if x < 0:
        return -1
def is_scalar(x):
    """True if x is a scalar (constant numeric value)
    """
    return isinstance(x, int) or isinstance(x, float)
def angle(v1, v2=None):
    """Compute angle between 2D vectors v1 and v2.
    If v2 is not specified it will default
    to e1 (the unit vector in the x-direction)
    The angle is measured as a number in [0, 2pi] from v2 to v1.

    Both vectors are normalised, so they must be non-zero (a zero vector
    would cause a division by zero here).
    """
    # Prepare two numeric vectors
    if v2 is None:
        v2 = [1.0, 0.0] # Unit vector along the x-axis
    v1 = ensure_numeric(v1, float)
    v2 = ensure_numeric(v2, float)
    # Normalise
    v1 = v1/num.sqrt(num.sum(v1**2))
    v2 = v2/num.sqrt(num.sum(v2**2))
    # Compute angle
    p = num.inner(v1, v2)
    c = num.inner(v1, normal_vector(v2)) # Projection onto normal
                                        # (negative cross product)
    theta = safe_acos(p)
    # Correct if v1 is in quadrant 3 or 4 with respect to v2 (as the x-axis)
    # If v2 was the unit vector [1,0] this would correspond to the test
    # if v1[1] < 0: theta = 2*pi-theta
    # In general we use the sign of the projection onto the normal.
    if c < 0:
        #Quadrant 3 or 4
        theta = 2*pi-theta
    return theta
def anglediff(v0, v1):
    """Compute difference between angle of vector v0 (x0, y0) and v1 (x1, y1).
    This is used for determining the ordering of vertices,
    e.g. for checking if they are counter clockwise.
    Always return a positive value
    """
    from math import pi
    a0 = angle(v0)
    a1 = angle(v1)
    #Ensure that difference will be positive
    if a0 < a1:
        a0 += 2*pi
    return a0-a1
def normal_vector(v):
    """Normal vector to v.
    Returns vector 90 degrees counter clockwise to and of same length as v
    """
    rotated = [-v[1], v[0]]
    return num.array(rotated, float)
#def crossproduct_length(v1, v2):
# return v1[0]*v2[1]-v2[0]*v1[1]
def mean(x):
    """Mean value of a vector
    """
    total = float(num.sum(x))
    return total / len(x)
def cov(x, y=None):
    """Covariance of vectors x and y.
    If y is None: return cov(x, x)

    Note: this is the population covariance (normalised by N, not N-1).
    Raises AssertionError if x and y have different lengths.
    """
    if y is None:
        y = x
    x = ensure_numeric(x)
    y = ensure_numeric(y)
    msg = 'Lengths must be equal: len(x) == %d, len(y) == %d' %(len(x), len(y))
    assert(len(x)==len(y)), msg
    N = len(x)
    # Centre both vectors before taking the inner product.
    cx = x - mean(x)
    cy = y - mean(y)
    p = num.inner(cx,cy)/N
    return(p)
def err(x, y=0, n=2, relative=True):
    """Relative error of ||x-y|| to ||y||
    n = 2: Two norm
    n = None: Max norm
    If denominator evaluates to zero or
    if y is omitted or
    if keyword relative is False,
    absolute error is returned
    If there is x and y, n=2 and relative=False, this will calc;
    sqrt(sum_over_x&y((xi - yi)^2))
    Given this value (err), to calc the root mean square deviation, do
    err/sqrt(n)
    where n is the number of elements,(len(x))
    """
    x = ensure_numeric(x)
    # NOTE(review): `if y:` uses truthiness — passing a multi-element ndarray
    # as y raises "truth value is ambiguous"; presumably y is expected to be a
    # scalar or plain sequence here. Confirm against callers.
    if y:
        y = ensure_numeric(y)
    if n == 2:
        err = norm(x-y)
        if relative is True:
            try:
                # Division by a zero denominator falls back to absolute error.
                err = err/norm(y)
            except:
                pass
    else:
        err = max(abs(x-y))
        if relative is True:
            try:
                err = err/max(abs(y))
            except:
                pass
    return err
def norm(x):
    """2-norm of x
    """
    flat = num.ravel(x)
    return num.sqrt(num.inner(flat, flat))
def corr(x, y=None):
    """Correlation of x and y
    If y is None return autocorrelation of x

    Returns 0 when either variance is zero (rather than dividing by zero).
    """
    from math import sqrt
    if y is None:
        y = x
    varx = cov(x)
    vary = cov(y)
    if varx == 0 or vary == 0:
        C = 0
    else:
        C = cov(x,y)/sqrt(varx * vary)
    return(C)
def ensure_numeric(A, typecode=None):
"""Ensure that sequence is a numeric array.
Inputs:
A: Sequence. If A is already a numeric array it will be returned
unaltered
If not, an attempt is made to convert it to a numeric
array
A: Scalar. Return 0-dimensional array containing that value. Note
that a 0-dim array DOES NOT HAVE A LENGTH UNDER numpy.
A: String. Array of ASCII values (numpy can't handle this)
A:None. Return None
typecode: numeric type. If specified, use this in the conversion.
If not, let numpy package decide.
typecode will always be one of float, int, etc.
Note that num.array(A, dtype) will sometimes copy. Use 'copy=False' to
copy only when required.
This function is necessary as array(A) can cause memory overflow.
"""
# if isinstance(A, basestring):
# msg = 'Sorry, cannot handle strings in ensure_numeric()'
# raise Exception, msg
if A is None:
return None
if typecode is None:
if isinstance(A, num.ndarray):
return num.ascontiguousarray(A)
else:
return num.ascontiguousarray(num.array(A))
else:
return num.ascontiguousarray(num.array(A, dtype=typecode, copy=False))
def histogram(a, bins, relative=False):
    """Standard histogram straight from the numeric manual
    If relative is True, values will be normalised againts the total and
    thus represent frequencies rather than counts.
    """
    # Positions of each bin edge within the sorted data, plus a final edge
    # at len(a) so the last bin is closed.
    edges = num.searchsorted(num.sort(a), bins)
    edges = num.concatenate([edges, [len(a)]], axis=0)  #??default#
    hist = edges[1:] - edges[:-1]
    if relative is True:
        hist = hist / float(num.sum(hist))
    return hist
def create_bins(data, number_of_bins = None):
    """Safely create bins for use with histogram
    If data contains only one point or is constant, one bin will be created.
    If number_of_bins in omitted 10 bins will be created
    """
    hi = max(data)
    lo = min(data)
    if hi == lo:
        # Constant data collapses to a single bin.
        return num.array([lo])
    if number_of_bins is None:
        number_of_bins = 10
    return num.arange(lo, hi, (hi - lo) / number_of_bins)
def get_machine_precision():
    """Calculate the machine precision for Floats
    Depends on static variable machine_precision in this module
    as this would otherwise require too much computation.
    """
    global machine_precision
    if machine_precision is None:
        # Halve epsilon until 1 + epsilon/2 is indistinguishable from 1;
        # the result is cached in the module-level variable.
        epsilon = 1.
        while epsilon/2 + 1. > 1.:
            epsilon /= 2
        machine_precision = epsilon
    return machine_precision
####################################################################
#Python versions of function that are also implemented in numerical_tools_ext.c
# FIXME (Ole): Delete these and update tests
#
def gradient_python(x0, y0, x1, y1, x2, y2, q0, q1, q2):
    """Gradient (a, b) of the plane through (x0,y0,q0), (x1,y1,q1), (x2,y2,q2).

    The three points must not be collinear (det would be zero).
    """
    det = (y2-y0)*(x1-x0) - (y1-y0)*(x2-x0)
    a = ((y2 - y0)*(q1 - q0) - (y1 - y0)*(q2 - q0)) / det
    b = ((x1 - x0)*(q2 - q0) - (x2 - x0)*(q1 - q0)) / det
    return a, b
def gradient2_python(x0, y0, x1, y1, q0, q1):
    """Compute gradient based on two points and enforce zero gradient
    in the direction orthogonal to (x1-x0), (y1-y0)

    Returns (0.0, 0.0) for coincident points. Previously `a` and `b` were
    left unbound when det == 0, so the return raised UnboundLocalError.
    """
    a = b = 0.0
    #Old code
    #det = x0*y1 - x1*y0
    #if det != 0.0:
    #    a = (y1*q0 - y0*q1)/det
    #    b = (x0*q1 - x1*q0)/det
    #Correct code (ON)
    det = (x1-x0)**2 + (y1-y0)**2
    if det != 0.0:
        a = (x1-x0)*(q1-q0)/det
        b = (y1-y0)*(q1-q0)/det
    return a, b
################################################################################
# Decision functions for numeric package objects.
# It is a little tricky to decide if a numpy thing is of type float.
# These functions hide numpy-specific details of how we do this.
################################################################################
def is_num_float(obj):
    '''Is an object a numeric package float object?'''
    try:
        typecode = obj.dtype.char
    except AttributeError:
        # Plain Python objects have no dtype and therefore do not qualify.
        return False
    return typecode in num.typecodes['Float']
def is_num_int(obj):
    '''Is an object a numeric package int object?'''
    try:
        typecode = obj.dtype.char
    except AttributeError:
        # Plain Python objects have no dtype and therefore do not qualify.
        return False
    return typecode in num.typecodes['Integer']
#-----------------
# Initialise module
from .util_ext import gradient, gradient2
# This module is a library of numerical helpers; running it directly is a no-op.
if __name__ == '__main__':
    pass
| [
"math.acos",
"numpy.sort",
"math.sqrt",
"numpy.inner",
"numpy.ascontiguousarray",
"numpy.array",
"numpy.sum",
"numpy.ravel",
"numpy.seterr",
"numpy.arange"
] | [((292, 319), 'numpy.seterr', 'num.seterr', ([], {'divide': '"""ignore"""'}), "(divide='ignore')\n", (302, 319), True, 'import numpy as num\n'), ((1147, 1154), 'math.acos', 'acos', (['x'], {}), '(x)\n', (1151, 1154), False, 'from math import acos, pi, sqrt\n'), ((1904, 1921), 'numpy.inner', 'num.inner', (['v1', 'v2'], {}), '(v1, v2)\n', (1913, 1921), True, 'import numpy as num\n'), ((2997, 3028), 'numpy.array', 'num.array', (['[-v[1], v[0]]', 'float'], {}), '([-v[1], v[0]], float)\n', (3006, 3028), True, 'import numpy as num\n'), ((4586, 4598), 'numpy.ravel', 'num.ravel', (['x'], {}), '(x)\n', (4595, 4598), True, 'import numpy as num\n'), ((375, 389), 'numpy.array', 'num.array', (['[1]'], {}), '([1])\n', (384, 389), True, 'import numpy as num\n'), ((3560, 3577), 'numpy.inner', 'num.inner', (['cx', 'cy'], {}), '(cx, cy)\n', (3569, 3577), True, 'import numpy as num\n'), ((4616, 4631), 'numpy.inner', 'num.inner', (['y', 'y'], {}), '(y, y)\n', (4625, 4631), True, 'import numpy as num\n'), ((6604, 6615), 'numpy.sort', 'num.sort', (['a'], {}), '(a)\n', (6612, 6615), True, 'import numpy as num\n'), ((7104, 7119), 'numpy.array', 'num.array', (['[mn]'], {}), '([mn])\n', (7113, 7119), True, 'import numpy as num\n'), ((7213, 7259), 'numpy.arange', 'num.arange', (['mn', 'mx', '((mx - mn) / number_of_bins)'], {}), '(mn, mx, (mx - mn) / number_of_bins)\n', (7223, 7259), True, 'import numpy as num\n'), ((1822, 1838), 'numpy.sum', 'num.sum', (['(v1 ** 2)'], {}), '(v1 ** 2)\n', (1829, 1838), True, 'import numpy as num\n'), ((1859, 1875), 'numpy.sum', 'num.sum', (['(v2 ** 2)'], {}), '(v2 ** 2)\n', (1866, 1875), True, 'import numpy as num\n'), ((3171, 3181), 'numpy.sum', 'num.sum', (['x'], {}), '(x)\n', (3178, 3181), True, 'import numpy as num\n'), ((4924, 4941), 'math.sqrt', 'sqrt', (['(varx * vary)'], {}), '(varx * vary)\n', (4928, 4941), False, 'from math import sqrt\n'), ((6161, 6185), 'numpy.ascontiguousarray', 'num.ascontiguousarray', (['A'], {}), '(A)\n', (6182, 6185), 
True, 'import numpy as num\n'), ((6302, 6342), 'numpy.array', 'num.array', (['A'], {'dtype': 'typecode', 'copy': '(False)'}), '(A, dtype=typecode, copy=False)\n', (6311, 6342), True, 'import numpy as num\n'), ((6241, 6253), 'numpy.array', 'num.array', (['A'], {}), '(A)\n', (6250, 6253), True, 'import numpy as num\n'), ((6762, 6775), 'numpy.sum', 'num.sum', (['hist'], {}), '(hist)\n', (6769, 6775), True, 'import numpy as num\n')] |
import cv2
import numpy
from PIL import Image
def LoadImage(path):
    """Read the image at *path* with PIL and return it as a BGR OpenCV array."""
    rgbArray = numpy.array(Image.open(path))
    # PIL yields RGB channel order; convert to OpenCV's BGR convention.
    return cv2.cvtColor(rgbArray, cv2.COLOR_RGB2BGR)
"numpy.array",
"PIL.Image.open",
"cv2.cvtColor"
] | [((83, 99), 'PIL.Image.open', 'Image.open', (['path'], {}), '(path)\n', (93, 99), False, 'from PIL import Image\n'), ((114, 135), 'numpy.array', 'numpy.array', (['pilImage'], {}), '(pilImage)\n', (125, 135), False, 'import numpy\n'), ((154, 194), 'cv2.cvtColor', 'cv2.cvtColor', (['npImage', 'cv2.COLOR_RGB2BGR'], {}), '(npImage, cv2.COLOR_RGB2BGR)\n', (166, 194), False, 'import cv2\n')] |
import numpy as np
import random
import time
import json
from model.model_wrapper import Models
from socketIO_client import SocketIO
from utils.model_dump import *
import os
import logging
import argparse
# Quieten the chatty socketIO client and fix the RNG seed for reproducibility.
logging.getLogger('socketIO-client').setLevel(logging.WARNING)
random.seed(2018)
# Per-day log directory, e.g. experiments/logs/0131; it must already exist.
datestr = time.strftime('%m%d')
log_dir = os.path.join('experiments', 'logs', datestr)
if not os.path.exists(log_dir):
    raise FileNotFoundError("{} not found".format(log_dir))
def load_json(filename):
    """Read *filename* and return its parsed JSON content."""
    with open(filename) as fp:
        return json.loads(fp.read())
class LocalModel(object):
    """Thin wrapper around a model from model_wrapper.Models for one client.

    Exposes weight get/set plus per-round training and evaluation so the
    federated client can treat every model type uniformly.
    """
    def __init__(self, task_config):
        """
        Inputs:
            model: should be a python class refering to pytorch model (torch.nn.Module)
            data_collected: a list with train/val/test dataset

        task_config must provide 'model_name' (an attribute of Models) and
        'local_epoch' (number of local epochs per federated round).
        """
        self.model_name = task_config['model_name']
        self.epoch = task_config['local_epoch']
        # Look up the model class by name and instantiate it with the config.
        self.model = getattr(Models, self.model_name)(task_config)
    def get_weights(self):
        # Delegate to the wrapped model.
        return self.model.get_weights()
    def set_weights(self, new_weights):
        # Delegate to the wrapped model.
        self.model.set_weights(new_weights)
    def train_one_round(self):
        """Train for self.epoch local epochs; return (weights, mean loss)."""
        losses = []
        for i in range(1, self.epoch + 1):
            loss = self.model.train_one_epoch()
            losses.append(loss)
        # total_loss, mAP, recall = self.model.eval(self.model.dataloader, self.model.yolo, test_num=1000)
        #return self.model.get_weights(), total_loss, mAP, recall
        return self.model.get_weights(), sum(losses) / len(losses)
    def evaluate(self):
        """Return (loss, accuracy/mAP, recall) from the wrapped model."""
        loss, acc, recall = self.model.evaluate()
        return loss, acc, recall
# A federated client is a process that can go to sleep / wake up intermittently
# it learns the global model by communication with the server;
# it contributes to the global model by sending its local gradients.
class FederatedClient(object):
    """Socket.IO client process for one federated-learning participant.

    Connects to the aggregation server, trains a LocalModel on request, and
    reports weights/metrics back. The constructor blocks in sio.wait().
    """
    MAX_DATASET_SIZE_KEPT = 6000
    def __init__(self, server_host, server_port, task_config_filename,
                 gpu, ignore_load):
        # Pin this client process to a single GPU.
        os.environ['CUDA_VISIBLE_DEVICES'] = '%d' % gpu
        self.task_config = load_json(task_config_filename)
        # self.data_path = self.task_config['data_path']
        print(self.task_config)
        self.ignore_load = ignore_load
        self.local_model = None
        self.dataset = None
        self.log_filename = self.task_config['log_filename']
        # logger
        self.logger = logging.getLogger("client")
        self.fh = logging.FileHandler(os.path.join(log_dir, os.path.basename(self.log_filename)))
        self.fh.setLevel(logging.INFO)
        # create console handler with a higher log level
        self.ch = logging.StreamHandler()
        self.ch.setLevel(logging.ERROR)
        # create formatter and add it to the handlers
        self.formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
        self.fh.setFormatter(self.formatter)
        self.ch.setFormatter(self.formatter)
        # add the handlers to the logger
        self.logger.addHandler(self.fh)
        self.logger.addHandler(self.ch)
        self.logger.info(self.task_config)
        # Connect, register callbacks, announce ourselves, then block forever
        # servicing server events.
        self.sio = SocketIO(server_host, server_port, None, {'timeout': 36000})
        self.register_handles()
        print("sent wakeup")
        self.sio.emit('client_wake_up')
        self.sio.wait()
    ########## Socket Event Handler ##########
    def on_init(self):
        """Server 'init' event: build the local model and signal readiness."""
        print('on init')
        self.local_model = LocalModel(self.task_config)
        print("local model initialized done.")
        # ready to be dispatched for training
        self.sio.emit('client_ready')
    def load_stat(self):
        """Return the 15-minute load average (string) from /proc/loadavg."""
        loadavg = {}
        with open("/proc/loadavg") as fin:
            con = fin.read().split()
            loadavg['lavg_1'] = con[0]
            loadavg['lavg_5'] = con[1]
            loadavg['lavg_15'] = con[2]
            loadavg['nr'] = con[3]
            loadavg['last_pid'] = con[4]
        return loadavg['lavg_15']
    def register_handles(self):
        """Wire all Socket.IO event callbacks onto self.sio."""
        ########## Socket IO messaging ##########
        def on_connect():
            print('connect')
        def on_disconnect():
            print('disconnect')
        def on_reconnect():
            print('reconnect')
        def on_request_update(*args):
            # Server asks for one round of local training on the given weights.
            req = args[0]
            print("update requested")
            cur_round = req['round_number']
            self.logger.info("### Round {} ###".format(cur_round))
            if cur_round == 0:
                self.logger.info("received initial model")
                print(req['current_weights'])
            weights = pickle_string_to_obj(req['current_weights'])
            self.local_model.set_weights(weights)
            my_weights, train_loss = self.local_model.train_one_round()
            print(train_loss)
            pickle_string_weights = obj_to_pickle_string(my_weights)
            resp = {
                'round_number': cur_round,
                'weights': pickle_string_weights,
                'train_size': self.local_model.model.train_size,
                'train_loss': train_loss
            }
            self.logger.info("client_train_loss {}".format(train_loss))
            if 'aggregation' in req and req['aggregation']: #NOTE: disabled from server side
                # Optionally attach local validation metrics to the update.
                client_test_loss, client_test_map, client_test_recall = self.local_model.evaluate()
                client_test_map = np.nan_to_num(client_test_map)
                client_test_recall = np.nan_to_num(client_test_recall)
                resp['client_test_loss'] = client_test_loss
                resp['client_test_map'] = client_test_map
                resp['client_test_recall'] = client_test_recall
                resp['client_test_size'] = self.local_model.model.valid_size
                self.logger.info("client_test_loss {}".format(client_test_loss))
                self.logger.info("client_test_map {}".format(client_test_map))
                self.logger.info("client_test_recall {}".format(client_test_recall))
            print("Emit client_update")
            self.sio.emit('client_update', resp)
            self.logger.info("sent trained model to server")
            print("Emited...")
        def on_stop_and_eval(*args):
            # Server sends the aggregated model for local evaluation and may
            # request shutdown via req['STOP'].
            self.logger.info("received aggregated model from server")
            req = args[0]
            cur_time = time.time()
            # NOTE(review): `weights` is only bound when weights_format is
            # 'pickle' — any other format would raise NameError below; confirm
            # the server always sends pickle.
            if req['weights_format'] == 'pickle':
                weights = pickle_string_to_obj(req['current_weights'])
            self.local_model.set_weights(weights)
            print('get weights')
            self.logger.info("receiving weight time is {}".format(time.time() - cur_time))
            server_loss, server_map, server_recall = self.local_model.evaluate()
            server_map = np.nan_to_num(server_map)
            server_recall = np.nan_to_num(server_recall)
            resp = {
                'test_size': self.local_model.model.valid_size,
                'test_loss': server_loss,
                'test_map': server_map,
                'test_recall': server_recall
            }
            print("Emit client_eval")
            self.sio.emit('client_eval', resp)
            if req['STOP']:
                print("Federated training finished ...")
                exit(0)
        def on_check_client_resource(*args):
            # Server probes whether this machine is idle enough to train.
            req = args[0]
            print("check client resource.")
            if self.ignore_load:
                load_average = 0.15
                print("Ignore load average")
            else:
                load_average = self.load_stat()
                print("Load average:", load_average)
            resp = {
                'round_number': req['round_number'],
                'load_rate': load_average
            }
            self.sio.emit('check_client_resource_done', resp)
        self.sio.on('connect', on_connect)
        self.sio.on('disconnect', on_disconnect)
        self.sio.on('reconnect', on_reconnect)
        self.sio.on('init', self.on_init)
        self.sio.on('request_update', on_request_update)
        self.sio.on('stop_and_eval', on_stop_and_eval)
        self.sio.on('check_client_resource', on_check_client_resource)
self.sio.on('check_client_resource', on_check_client_resource)
# TODO: later: simulate datagen for long-running train-serve service
# i.e. the local dataset can increase while training
# self.lock = threading.Lock()
# def simulate_data_gen(self):
# num_items = random.randint(10, FederatedClient.MAX_DATASET_SIZE_KEPT * 2)
# for _ in range(num_items):
# with self.lock:
# # (X, Y)
# self.collected_data_train += [self.datasource.sample_single_non_iid()]
# # throw away older data if size > MAX_DATASET_SIZE_KEPT
# self.collected_data_train = self.collected_data_train[-FederatedClient.MAX_DATASET_SIZE_KEPT:]
# print(self.collected_data_train[-1][1])
# self.intermittently_sleep(p=.2, low=1, high=3)
# threading.Thread(target=simulate_data_gen, args=(self,)).start()
def intermittently_sleep(self, p=.1, low=10, high=100):
    """With probability ``p``, sleep for a random whole number of seconds in [low, high]."""
    should_sleep = random.random() < p
    if should_sleep:
        time.sleep(random.randint(low, high))
# possible: use a low-latency pubsub system for gradient update, and do "gossip"
# e.g. Google cloud pubsub, Amazon SNS
# https://developers.google.com/nearby/connections/overview
# https://pypi.python.org/pypi/pyp2p
# class PeerToPeerClient(FederatedClient):
# def __init__(self):
# super(PushBasedClient, self).__init__()
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--gpu", type=int, required=True, help="which GPU to run")
parser.add_argument("--config_file", type=str, required=True, help="task config file")
parser.add_argument("--ignore_load", default=True, help="whether ignore load or not")
parser.add_argument("--port", type=int, required=True, help="server port")
opt = parser.parse_args()
print(opt)
if not os.path.exists(opt.config_file):
raise FileNotFoundError('{} does not exist'.format(opt.config_file))
print("client run on {}".format(opt.gpu))
try:
FederatedClient("127.0.0.1", opt.port, opt.config_file, opt.gpu, opt.ignore_load)
except ConnectionError:
print('The server is down. Try again later.')
| [
"logging.getLogger",
"os.path.exists",
"logging.StreamHandler",
"argparse.ArgumentParser",
"numpy.nan_to_num",
"logging.Formatter",
"time.strftime",
"os.path.join",
"random.seed",
"os.path.basename",
"json.load",
"random.random",
"time.time",
"random.randint",
"socketIO_client.SocketIO"
... | [((271, 288), 'random.seed', 'random.seed', (['(2018)'], {}), '(2018)\n', (282, 288), False, 'import random\n'), ((299, 320), 'time.strftime', 'time.strftime', (['"""%m%d"""'], {}), "('%m%d')\n", (312, 320), False, 'import time\n'), ((331, 375), 'os.path.join', 'os.path.join', (['"""experiments"""', '"""logs"""', 'datestr'], {}), "('experiments', 'logs', datestr)\n", (343, 375), False, 'import os\n'), ((383, 406), 'os.path.exists', 'os.path.exists', (['log_dir'], {}), '(log_dir)\n', (397, 406), False, 'import os\n'), ((9651, 9676), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (9674, 9676), False, 'import argparse\n'), ((208, 244), 'logging.getLogger', 'logging.getLogger', (['"""socketIO-client"""'], {}), "('socketIO-client')\n", (225, 244), False, 'import logging\n'), ((540, 552), 'json.load', 'json.load', (['f'], {}), '(f)\n', (549, 552), False, 'import json\n'), ((2444, 2471), 'logging.getLogger', 'logging.getLogger', (['"""client"""'], {}), "('client')\n", (2461, 2471), False, 'import logging\n'), ((2684, 2707), 'logging.StreamHandler', 'logging.StreamHandler', ([], {}), '()\n', (2705, 2707), False, 'import logging\n'), ((2827, 2900), 'logging.Formatter', 'logging.Formatter', (['"""%(asctime)s - %(name)s - %(levelname)s - %(message)s"""'], {}), "('%(asctime)s - %(name)s - %(levelname)s - %(message)s')\n", (2844, 2900), False, 'import logging\n'), ((3174, 3234), 'socketIO_client.SocketIO', 'SocketIO', (['server_host', 'server_port', 'None', "{'timeout': 36000}"], {}), "(server_host, server_port, None, {'timeout': 36000})\n", (3182, 3234), False, 'from socketIO_client import SocketIO\n'), ((10076, 10107), 'os.path.exists', 'os.path.exists', (['opt.config_file'], {}), '(opt.config_file)\n', (10090, 10107), False, 'import os\n'), ((6389, 6400), 'time.time', 'time.time', ([], {}), '()\n', (6398, 6400), False, 'import time\n'), ((6807, 6832), 'numpy.nan_to_num', 'np.nan_to_num', (['server_map'], {}), '(server_map)\n', (6820, 6832), 
True, 'import numpy as np\n'), ((6861, 6889), 'numpy.nan_to_num', 'np.nan_to_num', (['server_recall'], {}), '(server_recall)\n', (6874, 6889), True, 'import numpy as np\n'), ((9194, 9209), 'random.random', 'random.random', ([], {}), '()\n', (9207, 9209), False, 'import random\n'), ((2532, 2567), 'os.path.basename', 'os.path.basename', (['self.log_filename'], {}), '(self.log_filename)\n', (2548, 2567), False, 'import os\n'), ((5444, 5474), 'numpy.nan_to_num', 'np.nan_to_num', (['client_test_map'], {}), '(client_test_map)\n', (5457, 5474), True, 'import numpy as np\n'), ((5512, 5545), 'numpy.nan_to_num', 'np.nan_to_num', (['client_test_recall'], {}), '(client_test_recall)\n', (5525, 5545), True, 'import numpy as np\n'), ((9239, 9264), 'random.randint', 'random.randint', (['low', 'high'], {}), '(low, high)\n', (9253, 9264), False, 'import random\n'), ((6676, 6687), 'time.time', 'time.time', ([], {}), '()\n', (6685, 6687), False, 'import time\n')] |
"""Usage: run_K_maps.py <R_s>
Generates various TeslaMax models, using common paramaters and
the value of the external radius <R_s>, and optimizes them for several
ramp profile.
The results are saved in a file 'map_K_Rs_<R_s>.txt', with the argument
printed in mm. The first lines of this file are a string representation of the
dictionary of common parameters for all simulations; the remainder rows form a
table, with columns for the parameters;
the last columns is the cost function K.
"""
# coding: utf-8
from pathlib import Path
import os
from docopt import docopt
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
from scipy.optimize import minimize, differential_evolution, basinhopping
from pandas import Series, DataFrame
import teslamax
from teslamax import TeslaMaxGeometry, TeslaMaxPreDesign, TeslaMaxModel
OVERWRITE = True
args = docopt(__doc__,help=True)
print(args)
R_s = float(args["<R_s>"])
os.chdir(str(Path.home() / "code" / "TeslaMax"))
params_optimization_ref = {"R_i": 0.010,
"h_fc": 0.005,
"R_s": 1e-3*R_s,
"R_o": 40e-3,
"R_e": 0.3,
"n_II": 2,
"n_IV": 4,
"phi_C_II": 15,
"mu_r_II": 1.05,
"mu_r_IV": 1.05,
"linear_iron": 1,
"mu_r_iron": 5e3,
}
B_rem = 1.4
B_min = 0.0
field_fraction = 0.35
params_optimization_ref["F_M[%]"] = field_fraction*100
target_function = teslamax.calculate_ramp_profile
map_file_path = Path("map_K_Rs_%d.txt" %(R_s))
# #### Generate the results file
params_header_str = str(params_optimization_ref).replace(',',',\n') + '\n\n'
print(params_header_str)
if OVERWRITE:
map_file_path.write_text(params_header_str)
# ### Update the results file
# We define a range of values for the external radius and the
# iron-magnet separating angle and calculate the cost function.
phi_S_values = np.array([35,45,55])
B_max_min = 1.00
B_max_max = 1.20
B_max_step = 0.01
B_max_values = np.arange(B_max_min,B_max_max+B_max_step,B_max_step)
h_gap_min = 15
h_gap_max = 25
h_gap_step = 1
h_gap_values = 1e-3*np.arange(h_gap_min,h_gap_max + h_gap_step, h_gap_step)
params = params_optimization_ref.copy()
n = params["n_II"] + params["n_IV"]
params = teslamax.expand_parameters_from_remanence_array(
B_rem*np.ones(n),
params,
"B_rem")
COLUMNS_NAMES_STR = '\t'.join(['phi_S[deg]',
'h_gap[mm]',
'B_max[T]',
'K[]\n'])
print(COLUMNS_NAMES_STR)
if OVERWRITE:
with map_file_path.open(mode='a') as f:
f.write(COLUMNS_NAMES_STR)
for B_max in B_max_values:
target_args = (B_max,B_min,field_fraction)
for h_gap in h_gap_values:
params["h_gap"] = h_gap
for phi_S in phi_S_values:
params["phi_S_II"] = phi_S
params["phi_S_IV"] = phi_S
try:
tmpd = TeslaMaxPreDesign(params)
alpha_B_rem_g = tmpd.get_optimal_remanence_angles(
target_function,
target_args)
K = tmpd.calculate_functional_target(alpha_B_rem_g,
target_function,
target_args)
results_str = "%.1f\t%.3f\t%.3f\t%.6f" %(
phi_S,
1e3*h_gap,
B_max,
K)
results_str = results_str + "\n"
print(results_str)
with map_file_path.open(mode='a',buffering=1) as f:
f.write(results_str)
f.flush()
os.fsync(f.fileno()),
except:
continue
| [
"numpy.ones",
"pathlib.Path",
"pathlib.Path.home",
"numpy.array",
"teslamax.TeslaMaxPreDesign",
"docopt.docopt",
"numpy.arange"
] | [((871, 897), 'docopt.docopt', 'docopt', (['__doc__'], {'help': '(True)'}), '(__doc__, help=True)\n', (877, 897), False, 'from docopt import docopt\n'), ((1665, 1694), 'pathlib.Path', 'Path', (["('map_K_Rs_%d.txt' % R_s)"], {}), "('map_K_Rs_%d.txt' % R_s)\n", (1669, 1694), False, 'from pathlib import Path\n'), ((2072, 2094), 'numpy.array', 'np.array', (['[35, 45, 55]'], {}), '([35, 45, 55])\n', (2080, 2094), True, 'import numpy as np\n'), ((2164, 2220), 'numpy.arange', 'np.arange', (['B_max_min', '(B_max_max + B_max_step)', 'B_max_step'], {}), '(B_max_min, B_max_max + B_max_step, B_max_step)\n', (2173, 2220), True, 'import numpy as np\n'), ((2285, 2341), 'numpy.arange', 'np.arange', (['h_gap_min', '(h_gap_max + h_gap_step)', 'h_gap_step'], {}), '(h_gap_min, h_gap_max + h_gap_step, h_gap_step)\n', (2294, 2341), True, 'import numpy as np\n'), ((2488, 2498), 'numpy.ones', 'np.ones', (['n'], {}), '(n)\n', (2495, 2498), True, 'import numpy as np\n'), ((951, 962), 'pathlib.Path.home', 'Path.home', ([], {}), '()\n', (960, 962), False, 'from pathlib import Path\n'), ((3120, 3145), 'teslamax.TeslaMaxPreDesign', 'TeslaMaxPreDesign', (['params'], {}), '(params)\n', (3137, 3145), False, 'from teslamax import TeslaMaxGeometry, TeslaMaxPreDesign, TeslaMaxModel\n')] |
"""This file contains utilities for processing list files."""
from typing import List, Tuple
from numba import njit
import numpy as np
from .lst_to_crd import LST2CRD
def ascii_to_ndarray(
    data_list: List[str], fmt: LST2CRD.ASCIIFormat, channel: int, tag: int = None
) -> Tuple[np.ndarray, np.ndarray]:
    """Turn ASCII LST data to a numpy array.
    Fixme: Might want to break this into two routines, with and without tag
    Takes the whole data block and returns the data in a properly formatted numpy array.
    For speed, using numba JITing.
    :param data_list: Data, directly supplied from the TDC block.
    :param fmt: Format of the data
    :param channel: Channel the data is in
    :param tag: Channel the tag is in, or None if no tag
    :return: Data, Tag Data
    """
    # Pre-allocate one (sweep, time) row per input word; trimmed to the
    # actual ion count at the end.
    data_arr = np.empty((len(data_list), 2), dtype=np.uint32)
    data_arr_tag = None
    # initalize stuff for tags
    if tag is not None:
        data_arr_tag = np.empty(
            len(data_list), dtype=np.uint32
        )  # only sweep, not the channel
    # some helper variables for easy conversion
    # fmt.value is (total_bits, ((sweep_lo, sweep_hi), (time_lo, time_hi),
    # (channel_lo, channel_hi))) — inferred from the slicing below.
    binary_width = fmt.value[0]
    boundaries = fmt.value[1]
    # counter for ions in the right channel
    ion_counter = 0
    tag_counter = 0
    # transform to bin number with correct length
    for data in data_list:
        if data != "":
            # Hex word -> zero-padded binary string of fixed width.
            bin_tmp = f"{int(data, 16):{binary_width}b}".replace(" ", "0")
            # parse data
            tmp_channel = int(bin_tmp[boundaries[2][0] : boundaries[2][1]], 2)
            if tmp_channel == channel:
                swp_val, time_val = get_sweep_time_ascii(
                    bin_tmp, boundaries[0], boundaries[1]
                )
                data_arr[ion_counter][0] = swp_val
                data_arr[ion_counter][1] = time_val
                ion_counter += 1
            elif tmp_channel == tag:
                # Tag events: keep only the sweep number.
                swp_val, _ = get_sweep_time_ascii(bin_tmp, boundaries[0], boundaries[1])
                data_arr_tag[tag_counter] = swp_val
                tag_counter += 1
    # Drop the unused pre-allocated tail.
    data_arr = data_arr[:ion_counter]
    if tag is not None:
        data_arr_tag = data_arr_tag[:tag_counter]
    return data_arr, data_arr_tag
def get_sweep_time_ascii(
    data: str, sweep_b: Tuple[int, int], time_b: Tuple[int, int]
) -> Tuple[int, int]:
    """Decode the sweep and time fields from one binary-string data word.

    :param data: Data word as a string of '0'/'1' characters
    :param sweep_b: (start, stop) slice bounds of the sweep field
    :param time_b: (start, stop) slice bounds of the time field
    :return: (sweep, time) as integers
    """
    def field(bounds: Tuple[int, int]) -> int:
        # Interpret the selected slice as a base-2 integer.
        return int(data[bounds[0]:bounds[1]], 2)

    return field(sweep_b), field(time_b)
@njit
def transfer_lst_to_crd_data(
    data_in: np.ndarray, max_sweep: int, ion_range: int
) -> Tuple[np.ndarray, np.ndarray, bool]:  # pragma: nocover
    """Transfer lst file specific data to the crd format.
    :param data_in: Array: One ion per line, two entries: sweep first (shot), then time
    :param max_sweep: the maximum sweep that can be represented by data resolution
    :param ion_range: Valid range of the data in multiples of 100ps bins
    :return: Array of how many ions are in each shot, Array of all arrival times of
        these ions, and a bool if there are any ions out of range
    """
    data = data_in.copy()
    # go through and sort out max range issues
    # The raw sweep counter wraps at max_sweep; detect jumps larger than
    # half the range between consecutive entries and unwrap by adding
    # multiples of max_sweep.
    threshold = max_sweep // 2
    multiplier = 0
    last_shot = data[0][0]
    for it in range(1, data.shape[0]):
        curr_shot = data[it][0]
        if (
            curr_shot < threshold < last_shot and last_shot - curr_shot > threshold
        ):  # need to flip forward
            multiplier += 1
        elif (
            last_shot < threshold < curr_shot and curr_shot - last_shot > threshold
        ):  # flip back
            multiplier -= 1
        # modify data
        # NOTE: the wrap comparison always uses the raw (un-shifted) sweep
        # values; only the stored copy is unwrapped.
        adder = multiplier * max_sweep
        data[it][0] += adder
        last_shot = curr_shot
    # now sort the np array by (unwrapped) sweep number
    data_sort = data[data[:, 0].argsort()]
    # now create the shots and ions arrays and fill them
    shots = np.zeros(data_sort[:, 0].max(), dtype=np.uint32)
    ions = np.empty(
        len(data_sort[:, 1][np.where(data_sort[:, 1] <= ion_range)]), dtype=np.uint32
    )
    it = 0
    ions_out_of_range = False
    for shot, ion in data_sort:
        if ion <= ion_range:
            shots[shot - 1] += 1  # zero versus one based
            ions[it] = ion
            it += 1
        else:
            # Arrival time beyond the valid range: drop it, but flag it.
            ions_out_of_range = True
    return shots, ions, ions_out_of_range
| [
"numpy.where"
] | [((4190, 4228), 'numpy.where', 'np.where', (['(data_sort[:, 1] <= ion_range)'], {}), '(data_sort[:, 1] <= ion_range)\n', (4198, 4228), True, 'import numpy as np\n')] |
# ----------------------------------------------------------------------------
# Copyright (c) 2016--, Calour development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# ----------------------------------------------------------------------------
import numpy.testing as npt
from calour._testing import Tests
import calour as ca
class ExperimentTests(Tests):
    """Integration tests for MS1 feature filtering/merging on an mzmine2 table."""

    def setUp(self):
        super().setUp()
        # Shared amplicon fixture (not used by every test below).
        self.test1 = ca.read_amplicon(self.test1_biom, self.test1_samp,
                                        min_reads=1000, normalize=10000)

    def test_filter_mz_rt(self):
        """Filtering features by m/z and retention time, with tolerances and negation."""
        # load an mzmine2 metabolomics table, and associated gnps clusterinfo file
        exp = ca.read_ms(self.mzmine2_csv, sample_metadata_file=self.gnps_map,
                         data_file_type='mzmine2', use_gnps_id_from_AllFiles=False, normalize=None)
        # mz filtering
        res = exp.filter_mz_rt(100)
        self.assertEqual(len(res.feature_metadata), 1)
        self.assertEqual(res.feature_metadata['MZ'].values, [100])
        # 201 has no exact match, so only 100 survives
        res = exp.filter_mz_rt([100, 201])
        self.assertEqual(len(res.feature_metadata), 1)
        self.assertEqual(res.feature_metadata['MZ'].values, [100])
        # with tolerance 1, 201 matches the feature at 200
        res = exp.filter_mz_rt([100, 201], mz_tolerance=1)
        self.assertEqual(len(res.feature_metadata), 2)
        npt.assert_array_equal(res.feature_metadata['MZ'].values, [100, 200])
        # negate keeps the complement
        res = exp.filter_mz_rt([100, 201], negate=True)
        self.assertEqual(len(res.feature_metadata), 5)
        # rt filtering
        res = exp.filter_mz_rt(rt=[1, 2.5])
        self.assertEqual(len(res.feature_metadata), 1)
        self.assertEqual(res.feature_metadata['RT'].values, [1])
        res = exp.filter_mz_rt(rt=[1, 2.5], rt_tolerance=0.5)
        self.assertEqual(len(res.feature_metadata), 3)
        npt.assert_array_equal(res.feature_metadata['RT'].values, [1, 2, 3])
        # complex - both mz and rt
        res = exp.filter_mz_rt([101, 200, 400, 505], [1, 3, 4, 5], mz_tolerance=2)
        self.assertEqual(res.shape[1], 2)

    def test_get_spurious_duplicates(self):
        """Detecting correlated features close in m/z and RT (likely split peaks)."""
        # load an mzmine2 metabolomics table, and associated gnps clusterinfo file
        exp = ca.read_ms(self.mzmine2_csv, sample_metadata_file=self.gnps_map,
                         data_file_type='mzmine2', use_gnps_id_from_AllFiles=False, normalize=None)
        # get rid of the all 0s metabolite (to get rid of std=0 warning)
        exp = exp.filter_sum_abundance(0.1)
        res = exp.get_spurious_duplicates()
        # no samples filtered away
        self.assertEqual(res.shape[0], 6)
        # default parameters don't identify and suspicious features
        self.assertEqual(res.shape[1], 0)
        res = exp.get_spurious_duplicates(mz_tolerance=100, rt_tolerance=0.5)
        self.assertEqual(res.shape[1], 0)
        res = exp.get_spurious_duplicates(rt_tolerance=1)
        self.assertEqual(res.shape[1], 0)
        # both tolerances widened: two suspicious features appear
        res = exp.get_spurious_duplicates(mz_tolerance=100, rt_tolerance=1)
        self.assertEqual(res.shape[1], 2)
        # lowering the correlation threshold admits two more
        res = exp.get_spurious_duplicates(mz_tolerance=100, rt_tolerance=1, corr_thresh=0.2)
        self.assertEqual(res.shape[1], 4)

    def test_merge_similar_features(self):
        """Merging features within m/z / RT tolerance and tracking merged ids."""
        # load an mzmine2 metabolomics table, and associated gnps clusterinfo file
        exp = ca.read_ms(self.mzmine2_csv, sample_metadata_file=self.gnps_map,
                         data_file_type='mzmine2', use_gnps_id_from_AllFiles=False, normalize=None)
        # no merging since features are far away
        res = exp.merge_similar_features()
        self.assertEqual(res.shape[1], 6)
        # a little merging
        res = exp.merge_similar_features(mz_tolerance=100, rt_tolerance=1)
        self.assertEqual(res.shape[1], 3)
        self.assertEqual(res.feature_metadata.at[85022, '_calour_merge_ids'], '85022;93277')
        # a lot of merging
        res = exp.merge_similar_features(mz_tolerance=400, rt_tolerance=6)
        self.assertEqual(res.shape[1], 2)
        self.assertEqual(res.feature_metadata.at[121550, '_calour_merge_ids'], '121550')
| [
"calour.read_ms",
"calour.read_amplicon",
"numpy.testing.assert_array_equal"
] | [((532, 619), 'calour.read_amplicon', 'ca.read_amplicon', (['self.test1_biom', 'self.test1_samp'], {'min_reads': '(1000)', 'normalize': '(10000)'}), '(self.test1_biom, self.test1_samp, min_reads=1000,\n normalize=10000)\n', (548, 619), True, 'import calour as ca\n'), ((785, 928), 'calour.read_ms', 'ca.read_ms', (['self.mzmine2_csv'], {'sample_metadata_file': 'self.gnps_map', 'data_file_type': '"""mzmine2"""', 'use_gnps_id_from_AllFiles': '(False)', 'normalize': 'None'}), "(self.mzmine2_csv, sample_metadata_file=self.gnps_map,\n data_file_type='mzmine2', use_gnps_id_from_AllFiles=False, normalize=None)\n", (795, 928), True, 'import calour as ca\n'), ((1421, 1490), 'numpy.testing.assert_array_equal', 'npt.assert_array_equal', (["res.feature_metadata['MZ'].values", '[100, 200]'], {}), "(res.feature_metadata['MZ'].values, [100, 200])\n", (1443, 1490), True, 'import numpy.testing as npt\n'), ((1917, 1985), 'numpy.testing.assert_array_equal', 'npt.assert_array_equal', (["res.feature_metadata['RT'].values", '[1, 2, 3]'], {}), "(res.feature_metadata['RT'].values, [1, 2, 3])\n", (1939, 1985), True, 'import numpy.testing as npt\n'), ((2289, 2432), 'calour.read_ms', 'ca.read_ms', (['self.mzmine2_csv'], {'sample_metadata_file': 'self.gnps_map', 'data_file_type': '"""mzmine2"""', 'use_gnps_id_from_AllFiles': '(False)', 'normalize': 'None'}), "(self.mzmine2_csv, sample_metadata_file=self.gnps_map,\n data_file_type='mzmine2', use_gnps_id_from_AllFiles=False, normalize=None)\n", (2299, 2432), True, 'import calour as ca\n'), ((3421, 3564), 'calour.read_ms', 'ca.read_ms', (['self.mzmine2_csv'], {'sample_metadata_file': 'self.gnps_map', 'data_file_type': '"""mzmine2"""', 'use_gnps_id_from_AllFiles': '(False)', 'normalize': 'None'}), "(self.mzmine2_csv, sample_metadata_file=self.gnps_map,\n data_file_type='mzmine2', use_gnps_id_from_AllFiles=False, normalize=None)\n", (3431, 3564), True, 'import calour as ca\n')] |
import numpy as np
from . import _fastmath_ext
__all__ = ['polar_dec']
def polar_dec(matrices):
    """
    Batched polar decomposition of an array of stacked matrices,
    e.g. given matrices [M1, M2, ..., Mn], decomposes each matrix
    into rotation and skew-symmetric matrices.
    A single 2-D matrix is also accepted, in which case a single
    (rotation, stretch) pair is returned.
    >>> matrices = np.random.random((10, 3, 3))
    >>> rotations, stretches = polar_dec(matrices)
    >>> np.allclose([np.linalg.det(R) for R in rotations], 1.0)
    True
    """
    batch = np.asarray(matrices)
    # A lone 2-D matrix is promoted to a batch of one and unpacked on return.
    is_single = batch.ndim == 2
    if is_single:
        batch = batch[np.newaxis]
    rotations, stretches = _fastmath_ext.polar_dec(batch)
    if is_single:
        return rotations[0], stretches[0]
    return rotations, stretches
| [
"numpy.asarray"
] | [((482, 502), 'numpy.asarray', 'np.asarray', (['matrices'], {}), '(matrices)\n', (492, 502), True, 'import numpy as np\n')] |
import os
import re
import cv2
import numpy as np
from PIL import Image
import torch
from torch.utils.data import Dataset
class RealVideos(Dataset):
    """Dataset of 8-frame videos stored as one JPEG per frame.

    Frame files are named ``<video>_<t>.jpg`` where ``<video>`` is two
    digit-groups joined by '_' and ``t`` runs over 0..7.  Each item is the
    full video as a float tensor of shape (duration, ch, h, w), scaled
    to [-1, 1].
    """

    def __init__(self, img_dir='/shared/results/Skopia/videos_8frames'):
        """
        :param img_dir: directory containing the per-frame JPEG files.
            The default preserves the original hard-coded location.
        """
        self.img_dir = img_dir
        # channels, frames per video, frame height, frame width
        self.ch, self.duration, self.h, self.w = 3, 8, 1024, 1024
        self.video_names = self._load_videos()

    def _load_videos(self):
        """Scan img_dir and return the unique video-name prefixes found."""
        images = os.listdir(self.img_dir)
        # r'\.' fixes the original pattern where an unescaped '.' matched any char.
        img_name = re.compile(r'([0-9]+_[0-9]+)_[0-7]+\.jpg')
        matches = (img_name.search(img) for img in images)
        # Skip files that don't look like frames instead of crashing on them.
        return list({m.group(1) for m in matches if m is not None})

    def __len__(self):
        return len(self.video_names)

    def __getitem__(self, i):
        """Load video ``i`` and return it as a (duration, ch, h, w) float tensor in [-1, 1]."""
        video = np.empty((self.duration, self.ch, self.h, self.w), dtype=np.float32)
        video_name = self.video_names[i]
        for t in range(self.duration):
            img_path = self.img_dir + f'/{video_name}_{t}.jpg'
            # HWC uint8 image -> CHW float32
            img = np.asarray(Image.open(img_path), dtype=np.float32).transpose(2, 0, 1)
            video[t] = img
        # Per-video standardization was tried and disabled; plain [-1, 1]
        # scaling from 8-bit pixel values is used instead.
        video -= 127.5
        video /= 127.5
        return torch.as_tensor(video)
if __name__ == '__main__':
    # Smoke test: report how many videos are found in the default directory.
    real_videos = RealVideos()
    print(len(real_videos))
| [
"torch.as_tensor",
"os.listdir",
"PIL.Image.open",
"re.compile",
"numpy.empty"
] | [((398, 422), 'os.listdir', 'os.listdir', (['self.img_dir'], {}), '(self.img_dir)\n', (408, 422), False, 'import os\n'), ((442, 482), 're.compile', 're.compile', (['"""([0-9]+_[0-9]+)_[0-7]+.jpg"""'], {}), "('([0-9]+_[0-9]+)_[0-7]+.jpg')\n", (452, 482), False, 'import re\n'), ((699, 767), 'numpy.empty', 'np.empty', (['(self.duration, self.ch, self.h, self.w)'], {'dtype': 'np.float32'}), '((self.duration, self.ch, self.h, self.w), dtype=np.float32)\n', (707, 767), True, 'import numpy as np\n'), ((1244, 1266), 'torch.as_tensor', 'torch.as_tensor', (['video'], {}), '(video)\n', (1259, 1266), False, 'import torch\n'), ((940, 960), 'PIL.Image.open', 'Image.open', (['img_path'], {}), '(img_path)\n', (950, 960), False, 'from PIL import Image\n')] |
import numpy as np
from gam.clustering import KMedoids
from gam.spearman_distance import spearman_squared_distance
def test_kmedoids():
    """Run k-medoids (k=2) on four sample attributions and check the split."""
    kmedoids_2 = KMedoids(2, dist_func=spearman_squared_distance, max_iter=1000, tol=0.0001, init_medoids=None)
    # Two obvious pairs: two points near (0.1, 0.9) and two near (0.9, 0.1).
    attributions = np.array([(0.2, 0.8), (0.1, 0.9), (0.91, 0.09), (0.88, 0.12)])
    kmedoids_2.fit(attributions, verbose=False)
    # test that 2 attributions are in each cluster
    assert(sum(kmedoids_2.members) == 2)
| [
"gam.clustering.KMedoids",
"numpy.array"
] | [((202, 300), 'gam.clustering.KMedoids', 'KMedoids', (['(2)'], {'dist_func': 'spearman_squared_distance', 'max_iter': '(1000)', 'tol': '(0.0001)', 'init_medoids': 'None'}), '(2, dist_func=spearman_squared_distance, max_iter=1000, tol=0.0001,\n init_medoids=None)\n', (210, 300), False, 'from gam.clustering import KMedoids\n'), ((316, 378), 'numpy.array', 'np.array', (['[(0.2, 0.8), (0.1, 0.9), (0.91, 0.09), (0.88, 0.12)]'], {}), '([(0.2, 0.8), (0.1, 0.9), (0.91, 0.09), (0.88, 0.12)])\n', (324, 378), True, 'import numpy as np\n')] |
import copy
import numpy as np
import torch
class Memory:
    """Rehearsal buffer for continual learning.

    Stores exemplar inputs (``x``), labels (``y``) and task ids (``t``) as
    numpy arrays, under a total budget of ``memory_size`` samples.  With
    ``fixed=True`` the per-class budget is split over all eventual classes;
    otherwise it is split over the classes seen so far.
    """

    def __init__(self, memory_size, nb_total_classes, rehearsal, fixed=True):
        self.memory_size = memory_size
        self.nb_total_classes = nb_total_classes
        self.rehearsal = rehearsal
        self.fixed = fixed
        self.x = self.y = self.t = None
        self.nb_classes = 0

    @property
    def memory_per_class(self):
        """Current per-class exemplar budget."""
        if self.fixed:
            return self.memory_size // self.nb_total_classes
        if self.nb_classes > 0:
            return self.memory_size // self.nb_classes
        return self.memory_size

    def get_dataset(self, base_dataset):
        """Return a deep copy of ``base_dataset`` whose samples are the memory contents."""
        mem_dataset = copy.deepcopy(base_dataset)
        mem_dataset._x = self.x
        mem_dataset._y = self.y
        mem_dataset._t = self.t
        return mem_dataset

    def get(self):
        """Return the stored (x, y, t) arrays (all None while empty)."""
        return self.x, self.y, self.t

    def __len__(self):
        return 0 if self.x is None else len(self.x)

    def save(self, path):
        """Persist the buffer contents to an .npz archive."""
        np.savez(path, x=self.x, y=self.y, t=self.t)

    def load(self, path):
        """Restore buffer contents previously written by :meth:`save`."""
        archive = np.load(path)
        self.x = archive["x"]
        self.y = archive["y"]
        self.t = archive["t"]
        assert len(self) <= self.memory_size, len(self)
        self.nb_classes = len(np.unique(self.y))

    def reduce(self):
        """Trim every stored class down to the current per-class budget."""
        budget = self.memory_per_class
        kept_x, kept_y, kept_t = [], [], []
        for class_id in np.unique(self.y):
            keep = np.where(self.y == class_id)[0][:budget]
            kept_x.append(self.x[keep])
            kept_y.append(self.y[keep])
            kept_t.append(self.t[keep])
        self.x = np.concatenate(kept_x)
        self.y = np.concatenate(kept_y)
        self.t = np.concatenate(kept_t)

    def add(self, dataset, model, nb_new_classes):
        """Herd exemplars for the new classes and append them to the buffer."""
        self.nb_classes += nb_new_classes
        x, y, t = herd_samples(dataset, model, self.memory_per_class, self.rehearsal)
        if self.x is None:
            self.x, self.y, self.t = x, y, t
        else:
            if not self.fixed:
                # Shrinking budget: make room before appending.
                self.reduce()
            self.x = np.concatenate((self.x, x))
            self.y = np.concatenate((self.y, y))
            self.t = np.concatenate((self.t, t))
def herd_samples(dataset, model, memory_per_class, rehearsal):
    """Select up to ``memory_per_class`` exemplars per class from ``dataset``.

    :param dataset: continuum-style dataset exposing ``_x``, ``_y``, ``_t`` arrays
    :param model: model used for feature extraction (unused for 'random')
    :param memory_per_class: number of exemplars kept per class
    :param rehearsal: strategy name: 'random', or a string containing
        'closest' / 'furthest' / 'icarl' (a '*_token' variant restricts the
        features to the last token)
    :return: (x, y, t) arrays of the selected samples
    :raises ValueError: for an unknown rehearsal strategy
    """
    x, y, t = dataset._x, dataset._y, dataset._t

    if rehearsal == "random":
        # NOTE: np.random.choice samples WITH replacement, so a class with
        # fewer than memory_per_class samples yields duplicates.
        indexes = []
        for class_id in np.unique(y):
            class_indexes = np.where(y == class_id)[0]
            indexes.append(
                np.random.choice(class_indexes, size=memory_per_class)
            )
        indexes = np.concatenate(indexes)
        return x[indexes], y[indexes], t[indexes]
    elif "closest" in rehearsal:
        handling = 'last' if rehearsal == 'closest_token' else 'all'
        features, targets = extract_features(dataset, model, handling)
        indexes = []
        for class_id in np.unique(y):
            class_indexes = np.where(y == class_id)[0]
            class_features = features[class_indexes]
            class_mean = np.mean(class_features, axis=0, keepdims=True)
            # Squared euclidean distance to the class mean.
            distances = np.power(class_features - class_mean, 2).sum(-1)
            class_closest_indexes = np.argsort(distances)
            indexes.append(
                class_indexes[class_closest_indexes[:memory_per_class]]
            )
        indexes = np.concatenate(indexes)
        return x[indexes], y[indexes], t[indexes]
    elif "furthest" in rehearsal:
        handling = 'last' if rehearsal == 'furthest_token' else 'all'
        features, targets = extract_features(dataset, model, handling)
        indexes = []
        for class_id in np.unique(y):
            class_indexes = np.where(y == class_id)[0]
            class_features = features[class_indexes]
            class_mean = np.mean(class_features, axis=0, keepdims=True)
            distances = np.power(class_features - class_mean, 2).sum(-1)
            # Same as 'closest' but with the sort order reversed.
            class_furthest_indexes = np.argsort(distances)[::-1]
            indexes.append(
                class_indexes[class_furthest_indexes[:memory_per_class]]
            )
        indexes = np.concatenate(indexes)
        return x[indexes], y[indexes], t[indexes]
    elif "icarl" in rehearsal:
        # BUG FIX: this was `elif "icarl":` — a constant-true condition that
        # swallowed every unknown strategy and made the ValueError unreachable.
        handling = 'last' if rehearsal == 'icarl_token' else 'all'
        features, targets = extract_features(dataset, model, handling)
        indexes = []
        for class_id in np.unique(y):
            class_indexes = np.where(y == class_id)[0]
            class_features = features[class_indexes]
            indexes.append(
                class_indexes[icarl_selection(class_features, memory_per_class)]
            )
        indexes = np.concatenate(indexes)
        return x[indexes], y[indexes], t[indexes]
    else:
        raise ValueError(f"Unknown rehearsal method {rehearsal}!")
def extract_features(dataset, model, ensemble_handling='last'):
    """Run ``model`` over the whole ``dataset`` and return (features, targets) arrays.

    :param dataset: torch-compatible dataset yielding (x, y, task) triples
    :param model: model exposing ``forward_features`` (possibly wrapped in
        DataParallel/DistributedDataParallel, hence the ``module`` check);
        assumed to return a 3-tuple whose first item is the features —
        TODO confirm against the model definition
    :param ensemble_handling: how to combine multiple feature tensors:
        'last' keeps only the final one, 'all' concatenates/flattens them
    :return: (features, targets) as stacked numpy arrays
    :raises NotImplementedError: for an unknown ``ensemble_handling`` value
    """
    #transform = copy.deepcopy(dataset.trsf.transforms)
    #dataset.trsf = transforms.Compose(transform[-2:])
    loader = torch.utils.data.DataLoader(
        dataset,
        batch_size=128,
        num_workers=2,
        pin_memory=True,
        drop_last=False,
        shuffle=False
    )
    features, targets = [], []
    # Inference only: no gradients needed.
    with torch.no_grad():
        for x, y, _ in loader:
            if hasattr(model, 'module'):
                feats, _, _ = model.module.forward_features(x.cuda())
            else:
                feats, _, _ = model.forward_features(x.cuda())
            if isinstance(feats, list):
                # Ensemble of feature tensors.
                if ensemble_handling == 'last':
                    feats = feats[-1]
                elif ensemble_handling == 'all':
                    feats = torch.cat(feats, dim=1)
                else:
                    raise NotImplementedError(f'Unknown handdling of multiple features {ensemble_handling}')
            elif len(feats.shape) == 3:  # joint tokens
                # presumably (tokens, batch, dim) given the permute below — TODO confirm
                if ensemble_handling == 'last':
                    feats = feats[-1]
                elif ensemble_handling == 'all':
                    feats = feats.permute(1, 0, 2).view(len(x), -1)
                else:
                    raise NotImplementedError(f'Unknown handdling of multiple features {ensemble_handling}')
            feats = feats.cpu().numpy()
            y = y.numpy()
            features.append(feats)
            targets.append(y)
    features = np.concatenate(features)
    targets = np.concatenate(targets)
    #dataset.trsf = transforms.Compose(transform)
    return features, targets
def icarl_selection(features, nb_examplars):
    """iCaRL herding: greedily pick samples whose running mean tracks the class mean.

    :param features: (num_samples, dim) feature matrix for one class
    :param nb_examplars: number of exemplars to return
    :return: indices of the selected rows, in selection order
    """
    # Work with unit-normalized feature columns.
    normed = features.T
    normed = normed / (np.linalg.norm(normed, axis=0) + 1e-8)
    class_mean = np.mean(normed, axis=1)

    num_samples = features.shape[0]
    target = min(nb_examplars, num_samples)
    selection_rank = np.zeros((num_samples,))

    residual = class_mean
    rank = 0
    attempts = 0
    # Greedy loop with a hard cap so a degenerate class cannot spin forever.
    while np.sum(selection_rank != 0) != target and attempts < 1000:
        scores = np.dot(residual, normed)
        best = np.argmax(scores)
        attempts += 1
        if selection_rank[best] == 0:
            rank += 1
            selection_rank[best] = rank
        residual = residual + class_mean - normed[:, best]

    # Unselected samples sort last.
    selection_rank[np.where(selection_rank == 0)[0]] = 10000
    return selection_rank.argsort()[:nb_examplars]
def get_finetuning_dataset(dataset, memory, finetuning='balanced'):
    """Build the dataset used for the finetuning phase.

    :param dataset: the full current-task dataset
    :param memory: rehearsal buffer providing ``get() -> (x, y, t)``
    :param finetuning: 'balanced' returns a deep copy of ``dataset`` whose
        samples are the memory contents; 'all'/'none' return ``dataset``
        unchanged
    :raises NotImplementedError: for any other ``finetuning`` value
    """
    if finetuning in ('all', 'none'):
        return dataset
    if finetuning == 'balanced':
        x, y, t = memory.get()
        balanced = copy.deepcopy(dataset)
        balanced._x = x
        balanced._y = y
        balanced._t = t
        return balanced
    raise NotImplementedError(f'Unknown finetuning method {finetuning}')
| [
"numpy.mean",
"numpy.savez",
"numpy.unique",
"torch.utils.data.DataLoader",
"numpy.where",
"numpy.random.choice",
"numpy.power",
"numpy.linalg.norm",
"numpy.argmax",
"numpy.argsort",
"numpy.sum",
"numpy.zeros",
"numpy.dot",
"numpy.concatenate",
"copy.deepcopy",
"torch.no_grad",
"nump... | [((5237, 5357), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['dataset'], {'batch_size': '(128)', 'num_workers': '(2)', 'pin_memory': '(True)', 'drop_last': '(False)', 'shuffle': '(False)'}), '(dataset, batch_size=128, num_workers=2,\n pin_memory=True, drop_last=False, shuffle=False)\n', (5264, 5357), False, 'import torch\n'), ((6588, 6612), 'numpy.concatenate', 'np.concatenate', (['features'], {}), '(features)\n', (6602, 6612), True, 'import numpy as np\n'), ((6627, 6650), 'numpy.concatenate', 'np.concatenate', (['targets'], {}), '(targets)\n', (6641, 6650), True, 'import numpy as np\n'), ((6853, 6871), 'numpy.mean', 'np.mean', (['D'], {'axis': '(1)'}), '(D, axis=1)\n', (6860, 6871), True, 'import numpy as np\n'), ((6893, 6923), 'numpy.zeros', 'np.zeros', (['(features.shape[0],)'], {}), '((features.shape[0],))\n', (6901, 6923), True, 'import numpy as np\n'), ((646, 673), 'copy.deepcopy', 'copy.deepcopy', (['base_dataset'], {}), '(base_dataset)\n', (659, 673), False, 'import copy\n'), ((955, 999), 'numpy.savez', 'np.savez', (['path'], {'x': 'self.x', 'y': 'self.y', 't': 'self.t'}), '(path, x=self.x, y=self.y, t=self.t)\n', (963, 999), True, 'import numpy as np\n'), ((1076, 1089), 'numpy.load', 'np.load', (['path'], {}), '(path)\n', (1083, 1089), True, 'import numpy as np\n'), ((1353, 1370), 'numpy.unique', 'np.unique', (['self.y'], {}), '(self.y)\n', (1362, 1370), True, 'import numpy as np\n'), ((1630, 1647), 'numpy.concatenate', 'np.concatenate', (['x'], {}), '(x)\n', (1644, 1647), True, 'import numpy as np\n'), ((1665, 1682), 'numpy.concatenate', 'np.concatenate', (['y'], {}), '(y)\n', (1679, 1682), True, 'import numpy as np\n'), ((1700, 1717), 'numpy.concatenate', 'np.concatenate', (['t'], {}), '(t)\n', (1714, 1717), True, 'import numpy as np\n'), ((2498, 2510), 'numpy.unique', 'np.unique', (['y'], {}), '(y)\n', (2507, 2510), True, 'import numpy as np\n'), ((2698, 2721), 'numpy.concatenate', 'np.concatenate', (['indexes'], {}), 
'(indexes)\n', (2712, 2721), True, 'import numpy as np\n'), ((5450, 5465), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (5463, 5465), False, 'import torch\n'), ((7124, 7138), 'numpy.dot', 'np.dot', (['w_t', 'D'], {}), '(w_t, D)\n', (7130, 7138), True, 'import numpy as np\n'), ((7157, 7173), 'numpy.argmax', 'np.argmax', (['tmp_t'], {}), '(tmp_t)\n', (7166, 7173), True, 'import numpy as np\n'), ((7640, 7662), 'copy.deepcopy', 'copy.deepcopy', (['dataset'], {}), '(dataset)\n', (7653, 7662), False, 'import copy\n'), ((1258, 1275), 'numpy.unique', 'np.unique', (['self.y'], {}), '(self.y)\n', (1267, 1275), True, 'import numpy as np\n'), ((2182, 2209), 'numpy.concatenate', 'np.concatenate', (['(self.x, x)'], {}), '((self.x, x))\n', (2196, 2209), True, 'import numpy as np\n'), ((2231, 2258), 'numpy.concatenate', 'np.concatenate', (['(self.y, y)'], {}), '((self.y, y))\n', (2245, 2258), True, 'import numpy as np\n'), ((2280, 2307), 'numpy.concatenate', 'np.concatenate', (['(self.t, t)'], {}), '((self.t, t))\n', (2294, 2307), True, 'import numpy as np\n'), ((3038, 3050), 'numpy.unique', 'np.unique', (['y'], {}), '(y)\n', (3047, 3050), True, 'import numpy as np\n'), ((3498, 3521), 'numpy.concatenate', 'np.concatenate', (['indexes'], {}), '(indexes)\n', (3512, 3521), True, 'import numpy as np\n'), ((6810, 6835), 'numpy.linalg.norm', 'np.linalg.norm', (['D'], {'axis': '(0)'}), '(D, axis=0)\n', (6824, 6835), True, 'import numpy as np\n'), ((7390, 7419), 'numpy.where', 'np.where', (['(herding_matrix == 0)'], {}), '(herding_matrix == 0)\n', (7398, 7419), True, 'import numpy as np\n'), ((1394, 1422), 'numpy.where', 'np.where', (['(self.y == class_id)'], {}), '(self.y == class_id)\n', (1402, 1422), True, 'import numpy as np\n'), ((2540, 2563), 'numpy.where', 'np.where', (['(y == class_id)'], {}), '(y == class_id)\n', (2548, 2563), True, 'import numpy as np\n'), ((2611, 2665), 'numpy.random.choice', 'np.random.choice', (['class_indexes'], {'size': 'memory_per_class'}), 
'(class_indexes, size=memory_per_class)\n', (2627, 2665), True, 'import numpy as np\n'), ((3186, 3232), 'numpy.mean', 'np.mean', (['class_features'], {'axis': '(0)', 'keepdims': '(True)'}), '(class_features, axis=0, keepdims=True)\n', (3193, 3232), True, 'import numpy as np\n'), ((3342, 3363), 'numpy.argsort', 'np.argsort', (['distances'], {}), '(distances)\n', (3352, 3363), True, 'import numpy as np\n'), ((3839, 3851), 'numpy.unique', 'np.unique', (['y'], {}), '(y)\n', (3848, 3851), True, 'import numpy as np\n'), ((4307, 4330), 'numpy.concatenate', 'np.concatenate', (['indexes'], {}), '(indexes)\n', (4321, 4330), True, 'import numpy as np\n'), ((7005, 7032), 'numpy.sum', 'np.sum', (['(herding_matrix != 0)'], {}), '(herding_matrix != 0)\n', (7011, 7032), True, 'import numpy as np\n'), ((3080, 3103), 'numpy.where', 'np.where', (['(y == class_id)'], {}), '(y == class_id)\n', (3088, 3103), True, 'import numpy as np\n'), ((3987, 4033), 'numpy.mean', 'np.mean', (['class_features'], {'axis': '(0)', 'keepdims': '(True)'}), '(class_features, axis=0, keepdims=True)\n', (3994, 4033), True, 'import numpy as np\n'), ((4629, 4641), 'numpy.unique', 'np.unique', (['y'], {}), '(y)\n', (4638, 4641), True, 'import numpy as np\n'), ((4894, 4917), 'numpy.concatenate', 'np.concatenate', (['indexes'], {}), '(indexes)\n', (4908, 4917), True, 'import numpy as np\n'), ((3257, 3297), 'numpy.power', 'np.power', (['(class_features - class_mean)', '(2)'], {}), '(class_features - class_mean, 2)\n', (3265, 3297), True, 'import numpy as np\n'), ((3881, 3904), 'numpy.where', 'np.where', (['(y == class_id)'], {}), '(y == class_id)\n', (3889, 3904), True, 'import numpy as np\n'), ((4144, 4165), 'numpy.argsort', 'np.argsort', (['distances'], {}), '(distances)\n', (4154, 4165), True, 'import numpy as np\n'), ((5894, 5917), 'torch.cat', 'torch.cat', (['feats'], {'dim': '(1)'}), '(feats, dim=1)\n', (5903, 5917), False, 'import torch\n'), ((4058, 4098), 'numpy.power', 'np.power', (['(class_features - 
class_mean)', '(2)'], {}), '(class_features - class_mean, 2)\n', (4066, 4098), True, 'import numpy as np\n'), ((4671, 4694), 'numpy.where', 'np.where', (['(y == class_id)'], {}), '(y == class_id)\n', (4679, 4694), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat Apr 10 19:54:46 2021
@author: marina
"""
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
from os import listdir
# Root Mean Squared Error calculation
def rmse_error(df, str_1, str_2):
    """Compute, print and return the root-mean-squared error between two columns.

    :param df: dataframe holding both columns
    :param str_1: name of the first column (also used in the printed label)
    :param str_2: name of the second column
    :return: RMSE between df[str_1] and df[str_2]
    """
    residuals = df[str_1] - df[str_2]
    rmse = np.sqrt(sum(residuals**2) / df.shape[0])
    print("[RMSE " + str_1 + "]: " + str(rmse))
    return rmse
#--> Specify file to be analyzed (path prefix + date + time make up the CSV name)
path = '/home/marina/Pi_Git/ros_radar_mine/record_radar_data/all_'
date = '2021-05-07'
time = '09-58-09'
csvfile = path + date + '-' + time + '.csv'
# Constant offset [m] between the OptiTrack reference and the radar measurement
OPTI_offset = 0.42 #0.42 #0.51
#--> Import data as dataframe -> df and fix OptiTrack offset
df = pd.read_csv(csvfile)
df["range_op"] -= OPTI_offset
# PLOT: two stacked subplots -- range on top, motor command below
fig, axs = plt.subplots(2)
### SUBPLOT 1: OptiTrack range vs. reference height over time ###
#ax1 = df.plot(x="time", y=["range_flt", "range_op", "h_ref"], grid=True,
#              title="Range vs. Time", style=['-','-','--'])
ax1 = df.plot(x="time", y=["range_op", "h_ref"], grid=True,
              title="Range vs. Time", style=['-','--'], ax=axs[0])
#ax1.set_xlim(x_min,x_max)
#ax1.set_ylim(y_min,y_max)
ax1.set_xlabel("Time [s]")
ax1.set_ylabel("Height [m]")
# NOTE(review): three legend labels are supplied but only two series are
# plotted ("range_op", "h_ref") -- confirm the intended label/series pairing.
ax1.legend(["$r_{measured}$", "$r_{OptiTrack}$", "$h_{ref}$"], loc='center left', bbox_to_anchor=(1, 0.5))
#ax1.legend(["$r_{measured}$", "$r_{OptiTrack}$", "$h_{ref}$"], loc='center left', bbox_to_anchor=(1, 0.5))
### SUBPLOT 2: DC motor command over time ###
ax2 = df.plot(x="time", y=["dcmotor"], grid=True,
              title="Motor command vs. Time", style=['-'], ax=axs[1])
#ax1.set_xlim(x_min,x_max)
#ax1.set_ylim(y_min,y_max)
ax2.set_xlabel("Time [s]")
ax2.set_ylabel("Motor command")
#ax1.legend(["$r_{measured}$", "$r_{OptiTrack}$", "$h_{ref}$"], loc='center left', bbox_to_anchor=(1, 0.5))
plt.tight_layout()
plt.show()
# Report the RMSE between the reference height and the (offset-corrected) OptiTrack range
mse = rmse_error(df, "h_ref", "range_op")
"numpy.sqrt",
"pandas.read_csv",
"matplotlib.pyplot.tight_layout",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.show"
] | [((700, 720), 'pandas.read_csv', 'pd.read_csv', (['csvfile'], {}), '(csvfile)\n', (711, 720), True, 'import pandas as pd\n'), ((785, 800), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(2)'], {}), '(2)\n', (797, 800), True, 'import matplotlib.pyplot as plt\n'), ((1767, 1785), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (1783, 1785), True, 'import matplotlib.pyplot as plt\n'), ((1786, 1796), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1794, 1796), True, 'import matplotlib.pyplot as plt\n'), ((340, 352), 'numpy.sqrt', 'np.sqrt', (['mse'], {}), '(mse)\n', (347, 352), True, 'import numpy as np\n')] |
from scipy.optimize import linear_sum_assignment
import numpy as np
import matplotlib.pyplot as plt
import os
import pickle
from sklearn.neighbors import NearestNeighbors as KNN # http://scikit-learn.org/stable/modules/generated/sklearn.neighbors.KNeighborsClassifier.html
from sklearn.decomposition import PCA
import warnings
# NOTE(review): blanket suppression hides deprecation/numerical warnings from
# sklearn and numpy as well; consider narrowing the filter to specific categories.
warnings.filterwarnings("ignore")
class My_Sammon_mapping:
    """Sammon mapping restricted to K nearest neighbours, optimized with a
    quasi-Newton (diagonal-Hessian) update rule.
    The embedding minimizes the normalized squared mismatch between pairwise
    KNN distances of the input data and of the embedded data.  Intermediate
    embeddings, plots and the objective history are pickled under
    ``path_save_base`` so an interrupted run can be resumed.
    """
    def __init__(self, X, embedding_dimensionality, n_neighbors=None, dataset_can_be_plotted=False,
                 embedded_data_can_be_plotted=False, max_iterations=100, learning_rate=0.1, init_type="PCA",
                 color_meshgrid=None, colormap=plt.cm.brg):
        # X: samples are put column-wise in matrix (n_dimensions x n_samples)
        self.X = X
        self.n_dimensions = X.shape[0]
        self.n_samples = X.shape[1]
        self.embedding_dimensionality = embedding_dimensionality
        if n_neighbors is None:
            # default: every other sample is a neighbour --> classic (global) Sammon mapping
            self.n_neighbors = self.n_samples - 1
        else:
            self.n_neighbors = n_neighbors
        self.dataset_can_be_plotted = dataset_can_be_plotted
        self.embedded_data_can_be_plotted = embedded_data_can_be_plotted
        self.max_iterations = max_iterations
        self.learning_rate = learning_rate
        self.init_type = init_type #--> PCA, random
        self.color_meshgrid = color_meshgrid
        self.colormap = colormap
    def fit_transform(self, continue_from_previous_run=False, which_iteration_to_load=0):
        """Run the optimization and return the embedded data (column-wise samples).
        :param continue_from_previous_run: resume from previously pickled state
        :param which_iteration_to_load: iteration index to resume from
        """
        X_low_dim = self.Quasi_Newton_optimization(X=self.X, max_iterations=self.max_iterations,
                                                   path_save_base="./algorithm_files/dim_reduction/Sammon/",
                                                   continue_from_previous_run=continue_from_previous_run, which_iteration_to_load=which_iteration_to_load)
        return X_low_dim
    def Quasi_Newton_optimization(self, X, max_iterations=100, save_each_how_many_epochs=5, path_save_base="./algorithm_files/dim_reduction/Sammon/", continue_from_previous_run=False, which_iteration_to_load=0):
        """Move every embedded coordinate along -|1/Hessian| * gradient of the
        Sammon stress, iterating for ``max_iterations`` epochs.
        Checkpoints the embedding and objective every ``save_each_how_many_epochs``.
        """
        # X: column-wise samples
        if not continue_from_previous_run:
            self.save_scatter_of_data(data_=X, data_name="X", path_save_numpy=path_save_base, path_save_plot=path_save_base, color_map=self.colormap, color_meshgrid=self.color_meshgrid, do_plot=self.dataset_can_be_plotted)
            iteration_start = 0
            objective_function_toSave = []
            if self.init_type == "random":
                X_low_dim = np.random.rand(self.embedding_dimensionality, self.n_samples) # --> rand in [0,1)
            elif self.init_type == "PCA":
                pca = PCA(n_components=self.embedding_dimensionality)
                X_low_dim = (pca.fit_transform(X.T)).T
        else:
            # resume: reload the dataset, the embedding and the objective history
            X = self.load_variable(name_of_variable="X", path=path_save_base)
            X_low_dim = self.load_variable(name_of_variable="X_low_dim_iteration_"+str(which_iteration_to_load), path=path_save_base+"iterations_numpy/")
            iteration_start = which_iteration_to_load
            objective_function_toSave = self.load_variable(name_of_variable="objective_function", path=path_save_base)
        # input-space distances are computed once and kept fixed during optimization
        KNN_distance_matrix_initial, neighbors_indices = self.find_KNN_distance_matrix(X=X, n_neighbors=self.n_neighbors)
        normalization_factor = sum(sum(KNN_distance_matrix_initial))
        for iteration_index in range(iteration_start, max_iterations):
            print("Performing quasi Newton, iteration " + str(iteration_index))
            All_NN_distance_matrix, _ = self.find_KNN_distance_matrix(X=X_low_dim, n_neighbors=self.n_samples-1)
            for sample_index in range(self.n_samples):
                for dimension_index in range(self.embedding_dimensionality):
                    # --- calculate gradient and second derivative of gradient (Hessian):
                    gradient_term = 0.0
                    Hessian_term = 0.0
                    for neighbor_index in range(self.n_neighbors):
                        neighbor_index_in_dataset = neighbors_indices[sample_index, neighbor_index]
                        d = All_NN_distance_matrix[sample_index, neighbor_index_in_dataset]
                        d_initial = KNN_distance_matrix_initial[sample_index, neighbor_index_in_dataset]
                        gradient_term += ((d - d_initial) / (d * d_initial)) * (X_low_dim[dimension_index, sample_index] - X_low_dim[dimension_index, neighbor_index_in_dataset])
                        Hessian_term += ((d - d_initial) / (d * d_initial)) - ((X_low_dim[dimension_index, sample_index] - X_low_dim[dimension_index, neighbor_index_in_dataset])**2 / d**3)
                    gradient_term *= (1 / normalization_factor)
                    Hessian_term *= (1 / normalization_factor)
                    gradient_ = gradient_term
                    Hessian_ = Hessian_term
                    # --- update solution (|1/Hessian| keeps the step along the descent direction):
                    X_low_dim[dimension_index, sample_index] = X_low_dim[dimension_index, sample_index] - (self.learning_rate * abs(1/Hessian_) * gradient_)
            # calculate the objective function (Sammon stress over the KNN pairs):
            objective_function_distance_part = 0.0
            for sample_index in range(self.n_samples):
                temp_ = 0.0
                for neighbor_index in range(self.n_neighbors):
                    neighbor_index_in_dataset = neighbors_indices[sample_index, neighbor_index]
                    d = All_NN_distance_matrix[sample_index, neighbor_index_in_dataset]
                    d_initial = KNN_distance_matrix_initial[sample_index, neighbor_index_in_dataset]
                    temp_ += (d - d_initial)**2 / d_initial
                objective_function_distance_part += (1 / normalization_factor) * temp_
            objective_function = 0.5 * objective_function_distance_part
            objective_function_toSave.append(objective_function)
            print("iteration " + str(iteration_index) + ": objective cost = " + str(objective_function))
            if (iteration_index % save_each_how_many_epochs) == 0:
                # periodically checkpoint the embedding (with and without outliers) and the objective history
                self.save_scatter_of_data(data_=X_low_dim, data_name="X_low_dim_iteration_"+str(iteration_index), path_save_numpy=path_save_base+"iterations_numpy/", path_save_plot=path_save_base+"iterations_plot/", color_map=self.colormap, color_meshgrid=self.color_meshgrid, do_plot=self.embedded_data_can_be_plotted)
                X_low_dim_outliersRemoved, color_meshgrid_outliersRemoved = self.remove_outliers(data_=X_low_dim, color_meshgrid=self.color_meshgrid)
                self.save_scatter_of_data(data_=X_low_dim_outliersRemoved, data_name="X_low_dim_iteration_"+str(iteration_index), path_save_numpy=path_save_base+"iterations_numpy_noOutliers/", path_save_plot=path_save_base+"iterations_plot_noOutliers/", color_map=self.colormap, color_meshgrid=color_meshgrid_outliersRemoved, do_plot=self.embedded_data_can_be_plotted)
                self.save_variable(variable=np.asarray(objective_function_toSave), name_of_variable="objective_function", path_to_save=path_save_base)
                self.save_np_array_to_txt(variable=np.column_stack((np.array([i for i in range(iteration_index+1)]).T, np.asarray(objective_function_toSave).T)), name_of_variable="objective_function", path_to_save=path_save_base)
        return X_low_dim
    def save_scatter_of_data(self, data_, data_name, path_save_numpy, path_save_plot, color_map, color_meshgrid, do_plot=True):
        """Pickle ``data_`` and, optionally, save a 2D/3D scatter plot of it as PNG."""
        self.save_variable(variable=data_, name_of_variable=data_name, path_to_save=path_save_numpy)
        if do_plot:
            if not os.path.exists(path_save_plot):
                os.makedirs(path_save_plot)
            if data_.shape[0] == 2:
                ax = plt.subplot()
                ax.scatter(data_[0, :], data_[1, :], c=color_meshgrid, cmap=color_map, edgecolors='k')
            elif data_.shape[0] == 3:
                ax = plt.subplot(projection='3d')
                ax.scatter(data_[0, :], data_[1, :], data_[2, :], c=color_meshgrid, cmap=color_map, edgecolors='k')
            # plt.show()
            plt.savefig(path_save_plot + data_name + ".png")
            plt.clf()
            # plt.close()
    def find_KNN_distance_matrix(self, X, n_neighbors):
        # X: column-wise samples
        # returns KNN_distance_matrix: row-wise --> shape: (n_samples, n_samples) where zero for not neighbors
        # returns neighbors_indices: row-wise --> shape: (n_samples, n_neighbors)
        knn = KNN(n_neighbors=n_neighbors+1, algorithm='kd_tree', n_jobs=-1) #+1 because the point itself is also counted
        knn.fit(X=X.T)
        # kneighbors_graph returns an n_samples x n_samples sparse matrix, with zeros on
        # the diagonal and for points not directly connected in the KNN graph
        # (if K=n_samples, only the diagonal is zero)
        Euclidean_distance_matrix = knn.kneighbors_graph(X=X.T, n_neighbors=n_neighbors+1, mode='distance') #--> gives Euclidean distances
        KNN_distance_matrix = Euclidean_distance_matrix.toarray()
        neighbors_indices = np.zeros((KNN_distance_matrix.shape[0], n_neighbors))
        for sample_index in range(KNN_distance_matrix.shape[0]):
            # the non-zero entries of a row are exactly the K neighbours of that sample
            neighbors_indices[sample_index, :] = np.ravel(np.asarray(np.where(KNN_distance_matrix[sample_index, :] != 0)))
        neighbors_indices = neighbors_indices.astype(int)
        return KNN_distance_matrix, neighbors_indices
    def remove_outliers(self, data_, color_meshgrid):
        """Drop samples lying more than 3 standard deviations from the mean in any
        dimension; the colour array is filtered consistently with the data.
        NOTE(review): per-dimension limits are computed from the ORIGINAL (unfiltered)
        data while the filtering is applied cumulatively -- verify this is intended.
        """
        # data_: column-wise samples
        data_outliers_removed = data_.copy()
        color_meshgrid_outliers_removed = color_meshgrid.copy()
        for dimension_index in range(data_.shape[0]):
            data_dimension = data_[dimension_index, :].ravel()
            # Set upper and lower limit to 3 standard deviation
            data_dimension_std = np.std(data_dimension)
            data_dimension_mean = np.mean(data_dimension)
            anomaly_cut_off = data_dimension_std * 3
            lower_limit = data_dimension_mean - anomaly_cut_off
            upper_limit = data_dimension_mean + anomaly_cut_off
            samples_to_keep = []
            for sample_index in range(data_outliers_removed.shape[1]):
                sample_ = data_outliers_removed[:, sample_index]
                if sample_[dimension_index] > upper_limit or sample_[dimension_index] < lower_limit:
                    samples_to_keep.append(False)
                else:
                    samples_to_keep.append(True)
            data_outliers_removed = data_outliers_removed.compress(samples_to_keep, axis=1)
            color_meshgrid_outliers_removed = color_meshgrid_outliers_removed.compress(samples_to_keep)
        return data_outliers_removed, color_meshgrid_outliers_removed
    def save_variable(self, variable, name_of_variable, path_to_save='./'):
        """Pickle ``variable`` as <path_to_save>/<name_of_variable>.pckl (dirs created on demand)."""
        if not os.path.exists(path_to_save):
            os.makedirs(path_to_save)
        file_address = path_to_save + name_of_variable + '.pckl'
        f = open(file_address, 'wb')
        pickle.dump(variable, f)
        f.close()
    def load_variable(self, name_of_variable, path='./'):
        """Load and return a variable previously pickled by ``save_variable``."""
        file_address = path + name_of_variable + '.pckl'
        f = open(file_address, 'rb')
        variable = pickle.load(f)
        f.close()
        return variable
    def save_np_array_to_txt(self, variable, name_of_variable, path_to_save='./'):
        """Write ``variable`` (list or numpy array) as a human-readable .txt file."""
        if type(variable) is list:
            variable = np.asarray(variable)
        if not os.path.exists(path_to_save):
            os.makedirs(path_to_save)
        file_address = path_to_save + name_of_variable + '.txt'
        np.set_printoptions(threshold=np.inf, linewidth=np.inf)  # turn off summarization, line-wrapping
        with open(file_address, 'w') as f:
            f.write(np.array2string(variable, separator=', '))
"numpy.mean",
"os.path.exists",
"pickle.dump",
"matplotlib.pyplot.savefig",
"os.makedirs",
"numpy.random.rand",
"sklearn.decomposition.PCA",
"numpy.array2string",
"numpy.where",
"matplotlib.pyplot.clf",
"pickle.load",
"numpy.asarray",
"numpy.zeros",
"sklearn.neighbors.NearestNeighbors",
... | [((330, 363), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {}), "('ignore')\n", (353, 363), False, 'import warnings\n'), ((9097, 9161), 'sklearn.neighbors.NearestNeighbors', 'KNN', ([], {'n_neighbors': '(n_neighbors + 1)', 'algorithm': '"""kd_tree"""', 'n_jobs': '(-1)'}), "(n_neighbors=n_neighbors + 1, algorithm='kd_tree', n_jobs=-1)\n", (9100, 9161), True, 'from sklearn.neighbors import NearestNeighbors as KNN\n'), ((9816, 9869), 'numpy.zeros', 'np.zeros', (['(KNN_distance_matrix.shape[0], n_neighbors)'], {}), '((KNN_distance_matrix.shape[0], n_neighbors))\n', (9824, 9869), True, 'import numpy as np\n'), ((11979, 12003), 'pickle.dump', 'pickle.dump', (['variable', 'f'], {}), '(variable, f)\n', (11990, 12003), False, 'import pickle\n'), ((12303, 12317), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (12314, 12317), False, 'import pickle\n'), ((12871, 12926), 'numpy.set_printoptions', 'np.set_printoptions', ([], {'threshold': 'np.inf', 'linewidth': 'np.inf'}), '(threshold=np.inf, linewidth=np.inf)\n', (12890, 12926), True, 'import numpy as np\n'), ((8703, 8751), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(path_save_plot + data_name + '.png')"], {}), "(path_save_plot + data_name + '.png')\n", (8714, 8751), True, 'import matplotlib.pyplot as plt\n'), ((8764, 8773), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (8771, 8773), True, 'import matplotlib.pyplot as plt\n'), ((10585, 10607), 'numpy.std', 'np.std', (['data_dimension'], {}), '(data_dimension)\n', (10591, 10607), True, 'import numpy as np\n'), ((10642, 10665), 'numpy.mean', 'np.mean', (['data_dimension'], {}), '(data_dimension)\n', (10649, 10665), True, 'import numpy as np\n'), ((11705, 11733), 'os.path.exists', 'os.path.exists', (['path_to_save'], {}), '(path_to_save)\n', (11719, 11733), False, 'import os\n'), ((11843, 11868), 'os.makedirs', 'os.makedirs', (['path_to_save'], {}), '(path_to_save)\n', (11854, 11868), False, 'import os\n'), ((12502, 12522), 
'numpy.asarray', 'np.asarray', (['variable'], {}), '(variable)\n', (12512, 12522), True, 'import numpy as np\n'), ((12635, 12663), 'os.path.exists', 'os.path.exists', (['path_to_save'], {}), '(path_to_save)\n', (12649, 12663), False, 'import os\n'), ((12773, 12798), 'os.makedirs', 'os.makedirs', (['path_to_save'], {}), '(path_to_save)\n', (12784, 12798), False, 'import os\n'), ((2687, 2748), 'numpy.random.rand', 'np.random.rand', (['self.embedding_dimensionality', 'self.n_samples'], {}), '(self.embedding_dimensionality, self.n_samples)\n', (2701, 2748), True, 'import numpy as np\n'), ((8212, 8242), 'os.path.exists', 'os.path.exists', (['path_save_plot'], {}), '(path_save_plot)\n', (8226, 8242), False, 'import os\n'), ((8260, 8287), 'os.makedirs', 'os.makedirs', (['path_save_plot'], {}), '(path_save_plot)\n', (8271, 8287), False, 'import os\n'), ((8345, 8358), 'matplotlib.pyplot.subplot', 'plt.subplot', ([], {}), '()\n', (8356, 8358), True, 'import matplotlib.pyplot as plt\n'), ((13031, 13072), 'numpy.array2string', 'np.array2string', (['variable'], {'separator': '""", """'}), "(variable, separator=', ')\n", (13046, 13072), True, 'import numpy as np\n'), ((2834, 2881), 'sklearn.decomposition.PCA', 'PCA', ([], {'n_components': 'self.embedding_dimensionality'}), '(n_components=self.embedding_dimensionality)\n', (2837, 2881), False, 'from sklearn.decomposition import PCA\n'), ((8521, 8549), 'matplotlib.pyplot.subplot', 'plt.subplot', ([], {'projection': '"""3d"""'}), "(projection='3d')\n", (8532, 8549), True, 'import matplotlib.pyplot as plt\n'), ((10004, 10055), 'numpy.where', 'np.where', (['(KNN_distance_matrix[sample_index, :] != 0)'], {}), '(KNN_distance_matrix[sample_index, :] != 0)\n', (10012, 10055), True, 'import numpy as np\n'), ((7581, 7618), 'numpy.asarray', 'np.asarray', (['objective_function_toSave'], {}), '(objective_function_toSave)\n', (7591, 7618), True, 'import numpy as np\n'), ((7807, 7844), 'numpy.asarray', 'np.asarray', 
(['objective_function_toSave'], {}), '(objective_function_toSave)\n', (7817, 7844), True, 'import numpy as np\n')] |
import pandas as pd
import numpy as np
from swmmtoolbox import swmmtoolbox as swmm
# Build "type,item,variable" label strings for every node and extract them
filename = 'frutal.out'
c = pd.DataFrame(swmm.catalog(filename))
# Keep only the catalog rows describing nodes
item_names = c[c[0] == 'node'][1].values
# For each node request variables 0 (depth), 1 (head) and 4 (inflow)
labels = ['node,{},{}'.format(name, var) for name in item_names for var in (0, 1, 4)]
# Extract data
result = swmm.fast_extract(filename, *labels)
| [
"swmmtoolbox.swmmtoolbox.catalog",
"swmmtoolbox.swmmtoolbox.fast_extract",
"pandas.DataFrame",
"numpy.repeat"
] | [((141, 163), 'swmmtoolbox.swmmtoolbox.catalog', 'swmm.catalog', (['filename'], {}), '(filename)\n', (153, 163), True, 'from swmmtoolbox import swmmtoolbox as swmm\n'), ((168, 183), 'pandas.DataFrame', 'pd.DataFrame', (['c'], {}), '(c)\n', (180, 183), True, 'import pandas as pd\n'), ((588, 624), 'swmmtoolbox.swmmtoolbox.fast_extract', 'swmm.fast_extract', (['filename', '*labels'], {}), '(filename, *labels)\n', (605, 624), True, 'from swmmtoolbox import swmmtoolbox as swmm\n'), ((260, 284), 'numpy.repeat', 'np.repeat', (['item_names', '(3)'], {}), '(item_names, 3)\n', (269, 284), True, 'import numpy as np\n')] |
from climpy.utils.file_path_utils import get_root_storage_path_on_hpc
import netCDF4
from climpy.utils.diag_decorators import time_interval_selection
# from climpy.utils.time_utils import process_time_range_impl
from climpy.utils.netcdf_utils import generate_netcdf_uniform_time_data
import numpy as np
__author__ = '<NAME> <<EMAIL>>'
@time_interval_selection
def prepare_avhrr_aod(zonal_mean=False):
    """Read AVHRR AOT (variable 'aot1') from the 1989-1992 NetCDF file.

    :param zonal_mean: read the zonally averaged product instead of the full
                       lat/lon gridded one
    :return: dict with 'data', 'time', 'lat' and 'lon' arrays
    """
    if zonal_mean:
        file_name = 'aot_avhrr_1989-1992_zonal_mean'
        lat_name, lon_name = 'lat', 'lon'
    else:
        file_name = 'aot_avhrr_1989-1992'
        lat_name, lon_name = 'latitude', 'longitude'
    file_path = get_root_storage_path_on_hpc() + 'Data/AVHRR/AOT/' + file_name + '.nc'
    nc = netCDF4.Dataset(file_path)
    time_data = generate_netcdf_uniform_time_data(nc.variables['time'])
    aod_data = nc.variables['aot1'][:]
    if zonal_mean:
        # the zonal-mean file carries degenerate singleton dimensions; drop them
        aod_data = np.squeeze(aod_data)
    vo = {}
    vo['data'] = aod_data
    vo['time'] = time_data
    vo['lat'] = nc.variables[lat_name][:]
    vo['lon'] = nc.variables[lon_name][:]
    return vo
| [
"climpy.utils.netcdf_utils.generate_netcdf_uniform_time_data",
"netCDF4.Dataset",
"climpy.utils.file_path_utils.get_root_storage_path_on_hpc",
"numpy.squeeze"
] | [((697, 723), 'netCDF4.Dataset', 'netCDF4.Dataset', (['file_path'], {}), '(file_path)\n', (712, 723), False, 'import netCDF4\n'), ((741, 796), 'climpy.utils.netcdf_utils.generate_netcdf_uniform_time_data', 'generate_netcdf_uniform_time_data', (["nc.variables['time']"], {}), "(nc.variables['time'])\n", (774, 796), False, 'from climpy.utils.netcdf_utils import generate_netcdf_uniform_time_data\n'), ((968, 988), 'numpy.squeeze', 'np.squeeze', (['aod_data'], {}), '(aod_data)\n', (978, 988), True, 'import numpy as np\n'), ((617, 647), 'climpy.utils.file_path_utils.get_root_storage_path_on_hpc', 'get_root_storage_path_on_hpc', ([], {}), '()\n', (645, 647), False, 'from climpy.utils.file_path_utils import get_root_storage_path_on_hpc\n')] |
from math import log, exp, sqrt, tanh, sin, cos, tan, atan2, ceil, pi
import numpy as np
from OpenGL.GL import *
from PyEngine3D.Common import logger
from PyEngine3D.App import CoreManager
from PyEngine3D.OpenGLContext import CreateTexture, Texture2D, Texture2DArray, Texture3D, FrameBuffer
from PyEngine3D.Render import RenderTarget, ScreenQuad, Plane
from PyEngine3D.Utilities import *
from .Constants import *
def sqr(x):
    """Return x squared."""
    return x ** 2
def omega(k):
    """Gravity-capillary dispersion relation: angular frequency for wavenumber k
    (km is the capillary-wave cutoff wavenumber from Constants).

    Fix: this module only does ``from math import sqrt`` (there is no
    ``import math``), so the previous ``math.sqrt(...)`` raised NameError at
    runtime; use the imported ``sqrt`` instead.
    """
    return sqrt(9.81 * k * (1.0 + sqr(k / km)))
def frandom(seed_data):
    """Map a 31-bit LCG state to a float in [0, 1) using its top 24 bits."""
    top_bits = seed_data >> (31 - 24)
    return top_bits / float(1 << 24)
def bitReverse(i, N):
    """Return i with its log2(N) low-order bits reversed (FFT input reordering)."""
    i = int(i)
    N = int(N)
    result = 0
    weight = 1
    mask = int(N / 2)
    while mask != 0:
        # test one bit of i, from the most significant down to bit 0
        bit_is_set = 1 if (i & mask) else 0
        result += bit_is_set * weight
        weight *= 2
        mask = int(mask / 2)
    return int(result)
def computeWeight(N, k):
    """Return the (cos, sin) twiddle factor of the k-th N-point FFT root of unity."""
    angle = 2.0 * pi * k / float(N)
    return cos(angle), sin(angle)
class Ocean:
DEFAULT_FFT_SEED = 1234
    def __init__(self, **object_data):
        """Create the ocean object from serialized scene data (all keys optional)."""
        self.name = object_data.get('name', 'ocean')
        self.height = object_data.get('height', 0.0)
        # physical spectrum parameters; defaults WIND/OMEGA/AMPLITUDE come from Constants
        self.wind = object_data.get('wind', WIND)
        self.omega = object_data.get('omega', OMEGA)
        self.amplitude = object_data.get('amplitude', AMPLITUDE)
        # runtime tweaks applied on top of the precomputed spectrum
        self.simulation_wind = object_data.get('simulation_wind', 1.0)
        self.simulation_amplitude = object_data.get('simulation_amplitude', 3.0)
        self.simulation_scale = object_data.get('simulation_scale', 1.0)
        self.is_render_ocean = object_data.get('is_render_ocean', True)
        self.attributes = Attributes()
        self.acc_time = 0.0
        # deterministic seed so the generated spectrum textures are reproducible
        self.fft_seed = Ocean.DEFAULT_FFT_SEED
        self.simulation_size = GRID_SIZES * self.simulation_scale
        self.renderer = CoreManager.instance().renderer
        self.scene_manager = CoreManager.instance().scene_manager
        self.resource_manager = CoreManager.instance().resource_manager
        # materials and textures are loaded lazily in initialize()
        self.fft_init = None
        self.fft_x = None
        self.fft_y = None
        self.fft_render = None
        self.fft_variance = None
        self.texture_spectrum_1_2 = None
        self.texture_spectrum_3_4 = None
        self.texture_slope_variance = None
        self.texture_butterfly = None
        self.quad = None
        self.fft_grid = None
        self.caustic_index = 0
        self.texture_caustics = []
        self.texture_foam = None
        self.texture_noise = None
    def initialize(self):
        """Load shaders/textures, build the FFT grid mesh and regenerate the
        precomputed spectrum textures if any of them is missing."""
        self.fft_seed = Ocean.DEFAULT_FFT_SEED
        self.fft_init = self.resource_manager.get_material_instance('fft_ocean.init')
        self.fft_x = self.resource_manager.get_material_instance('fft_ocean.fft_x')
        self.fft_y = self.resource_manager.get_material_instance('fft_ocean.fft_y')
        self.fft_render = self.resource_manager.get_material_instance('fft_ocean.render')
        self.fft_variance = self.resource_manager.get_material_instance('fft_ocean.fft_variance')
        # default_texture=False: returns None when the texture is not cached yet
        self.texture_spectrum_1_2 = self.resource_manager.get_texture("fft_ocean.spectrum_1_2", default_texture=False)
        self.texture_spectrum_3_4 = self.resource_manager.get_texture("fft_ocean.spectrum_3_4", default_texture=False)
        self.texture_slope_variance = self.resource_manager.get_texture("fft_ocean.slope_variance", default_texture=False)
        self.texture_butterfly = self.resource_manager.get_texture("fft_ocean.butterfly", default_texture=False)
        self.quad = ScreenQuad.get_vertex_array_buffer()
        self.fft_grid = Plane("FFT_Grid", mode=GL_QUADS, width=GRID_VERTEX_COUNT, height=GRID_VERTEX_COUNT, xz_plane=False)
        # any missing precomputed texture forces a full regeneration
        if None in (self.texture_spectrum_1_2, self.texture_spectrum_3_4, self.texture_slope_variance, self.texture_butterfly):
            self.generate_texture()
        self.caustic_index = 0
        self.texture_caustics = []
        i = 0
        while True:
            # collect every numbered caustic texture until the sequence breaks
            resource_name = "common.water_caustic_%02d" % i
            if self.resource_manager.texture_loader.hasResource(resource_name):
                self.texture_caustics.append(self.resource_manager.get_texture(resource_name))
                i += 1
                continue
            break
        self.texture_foam = self.resource_manager.get_texture("common.water_foam")
        self.texture_noise = self.resource_manager.get_texture("common.noise")
def get_attribute(self):
self.attributes.set_attribute('is_render_ocean', self.is_render_ocean)
self.attributes.set_attribute('height', self.height)
self.attributes.set_attribute('wind', self.wind)
self.attributes.set_attribute('omega', self.omega)
self.attributes.set_attribute('amplitude', self.amplitude)
self.attributes.set_attribute('simulation_wind', self.simulation_wind)
self.attributes.set_attribute('simulation_amplitude', self.simulation_amplitude)
self.attributes.set_attribute('simulation_scale', self.simulation_scale)
return self.attributes
def set_attribute(self, attribute_name, attribute_value, item_info_history, attribute_index):
if hasattr(self, attribute_name):
setattr(self, attribute_name, attribute_value)
# recreate resources
if attribute_name in ('amplitude', 'wind', 'omega'):
self.generate_texture()
elif attribute_name == 'simulation_scale':
self.simulation_size = GRID_SIZES * self.simulation_scale
return self.attributes
def get_save_data(self):
save_data = dict(
is_render_ocean=self.is_render_ocean,
texture_type=self.__class__.__name__,
height=self.height,
wind=self.wind,
omega=self.omega,
amplitude=self.amplitude,
)
return save_data
def getSlopeVariance(self, kx, ky, spectrumSample0, spectrumSample1):
kSquare = kx * kx + ky * ky
real = spectrumSample0
img = spectrumSample1
hSquare = real * real + img * img
return kSquare * hSquare * 2.0
    def spectrum(self, kx, ky, omnispectrum=False):
        """Evaluate the directional wave spectrum at wave vector (kx, ky).
        Appears to follow the Elfouhaily et al. unified wind-wave spectrum
        (long-wave part Bl + short-wave part Bh with a directional spreading
        factor) -- TODO confirm against the reference paper.
        :param omnispectrum: if True, return the omnidirectional spectrum instead
        """
        U10 = max(0.001, self.wind)  # wind speed, clamped to avoid division by zero
        Omega = self.omega
        Amp = self.amplitude
        k = sqrt(kx * kx + ky * ky)
        c = omega(k) / k  # phase speed
        # spectral peak
        kp = 9.81 * sqr(Omega / U10)
        cp = omega(kp) / kp
        # friction velocity
        z0 = 3.7e-5 * sqr(U10) / 9.81 * pow(U10 / cp, 0.9)
        u_star = 0.41 * U10 / log(10.0 / z0)
        # long-wave (gravity) curvature spectrum Bl
        Lpm = exp(- 5.0 / 4.0 * sqr(kp / k))
        gamma = 1.7 if Omega < 1.0 else 1.7 + 6.0 * log(Omega)
        sigma = 0.08 * (1.0 + 4.0 / pow(Omega, 3.0))
        Gamma = exp(-1.0 / (2.0 * sqr(sigma)) * sqr(sqrt(k / kp) - 1.0))
        Jp = pow(gamma, Gamma)
        Fp = Lpm * Jp * exp(- Omega / sqrt(10.0) * (sqrt(k / kp) - 1.0))
        alphap = 0.006 * sqrt(Omega)
        Bl = 0.5 * alphap * cp / c * Fp
        # short-wave (capillary) curvature spectrum Bh; cm and km come from Constants
        alpham = 0.01
        if u_star < cm:
            alpham *= (1.0 + log(u_star / cm))
        else:
            alpham *= (1.0 + 3.0 * log(u_star / cm))
        Fm = exp(-0.25 * sqr(k / km - 1.0))
        Bh = 0.5 * alpham * cm / c * Fm * Lpm
        if omnispectrum:
            return Amp * (Bl + Bh) / (k * sqr(k))
        # directional spreading factor
        a0 = log(2.0) / 4.0
        ap = 4.0
        am = 0.13 * u_star / cm
        Delta = tanh(a0 + ap * pow(c / cp, 2.5) + am * pow(cm / c, 2.5))
        phi = atan2(ky, kx)  # wave direction
        # only waves travelling with positive kx are kept (doubled to conserve energy)
        if kx < 0.0:
            return 0.0
        else:
            Bl *= 2.0
            Bh *= 2.0
        return Amp * (Bl + Bh) * (1.0 + Delta * cos(2.0 * phi)) / (2.0 * pi * sqr(sqr(k)))
def getSpectrumSample(self, i, j, lengthScale, kMin):
    """Sample the wave spectrum at grid indices (i, j) for one cascade length scale.

    Returns the (re, im) complex amplitude of the spectrum sample with a
    deterministic pseudo-random phase driven by self.fft_seed.  Wavenumbers
    below the kMin cutoff (already covered by a coarser grid) are zeroed.
    """
    dk = 2.0 * pi / lengthScale
    kx, ky = i * dk, j * dk
    if abs(kx) < kMin and abs(ky) < kMin:
        return 0.0, 0.0
    amplitude = sqrt(self.spectrum(kx, ky) / 2.0) * dk
    # Linear congruential generator step keeps the phase reproducible per seed.
    self.fft_seed = (self.fft_seed * 1103515245 + 12345) & 0x7FFFFFFF
    phase = frandom(self.fft_seed) * 2.0 * pi
    return amplitude * cos(phase), amplitude * sin(phase)
def computeButterflyLookupTexture(self, butterfly_data):
    """Fill *butterfly_data* (flat RGBA float array, FFT_SIZE x PASSES texels)
    with the FFT butterfly lookup table: for each pass, the two input texel
    coordinates and the complex twiddle-factor weight of each output element."""
    for i in range(PASSES):
        nBlocks = int(pow(2.0, float(PASSES - 1 - i)))
        nHInputs = int(pow(2.0, float(i)))
        for j in range(nBlocks):
            for k in range(nHInputs):
                i1, i2, j1, j2 = 0, 0, 0, 0
                if i == 0:
                    # First pass reads its inputs in bit-reversed order.
                    i1 = j * nHInputs * 2 + k
                    i2 = j * nHInputs * 2 + nHInputs + k
                    j1 = bitReverse(i1, FFT_SIZE)
                    j2 = bitReverse(i2, FFT_SIZE)
                else:
                    i1 = j * nHInputs * 2 + k
                    i2 = j * nHInputs * 2 + nHInputs + k
                    j1 = i1
                    j2 = i2
                wr, wi = computeWeight(FFT_SIZE, k * nBlocks)
                # Even output element stores +weight; 0.5 centers the texel coordinate.
                offset1 = 4 * (i1 + i * FFT_SIZE)
                butterfly_data[offset1 + 0] = (j1 + 0.5) / FFT_SIZE
                butterfly_data[offset1 + 1] = (j2 + 0.5) / FFT_SIZE
                butterfly_data[offset1 + 2] = wr
                butterfly_data[offset1 + 3] = wi
                # Odd output element stores the negated weight.
                offset2 = 4 * (i2 + i * FFT_SIZE)
                butterfly_data[offset2 + 0] = (j1 + 0.5) / FFT_SIZE
                butterfly_data[offset2 + 1] = (j2 + 0.5) / FFT_SIZE
                butterfly_data[offset2 + 2] = -wr
                butterfly_data[offset2 + 3] = -wi
def generateWavesSpectrum(self, spectrum12_data, spectrum34_data):
    """Fill the two spectrum buffers (flat RGBA float arrays, FFT_SIZE^2 texels)
    with spectrum samples for the four cascaded grid sizes; each cascade's kMin
    cutoff excludes wavenumbers already covered by the coarser grid."""
    for y in range(FFT_SIZE):
        for x in range(FFT_SIZE):
            offset = 4 * (x + y * FFT_SIZE)
            # Wrap texel indices into signed frequencies centered on zero.
            i = (x - FFT_SIZE) if (x >= FFT_SIZE / 2) else x
            j = (y - FFT_SIZE) if (y >= FFT_SIZE / 2) else y
            s12_0, s12_1 = self.getSpectrumSample(i, j, GRID1_SIZE, pi / GRID1_SIZE)
            s12_2, s12_3 = self.getSpectrumSample(i, j, GRID2_SIZE, pi * FFT_SIZE / GRID1_SIZE)
            s34_0, s34_1 = self.getSpectrumSample(i, j, GRID3_SIZE, pi * FFT_SIZE / GRID2_SIZE)
            s34_2, s34_3 = self.getSpectrumSample(i, j, GRID4_SIZE, pi * FFT_SIZE / GRID3_SIZE)
            spectrum12_data[offset: offset+4] = s12_0, s12_1, s12_2, s12_3
            spectrum34_data[offset: offset+4] = s34_0, s34_1, s34_2, s34_3
def computeSlopeVarianceTex(self, spectrum12_data, spectrum34_data):
    """Render the slope-variance 3D lookup texture from the sampled wave spectra.

    The difference between the theoretical total slope variance (numerical
    integral of the omnidirectional spectrum) and the variance actually
    captured by the discrete FFT cascades is passed to the shader as
    'slopeVarianceDelta' so it can compensate.
    """
    # Integrate k^2 * S_omni(k) with multiplicative (quasi-logarithmic) stepping.
    theoreticSlopeVariance = 0.0
    k = 5e-3
    while k < 1e3:
        nextK = k * 1.001
        theoreticSlopeVariance += k * k * self.spectrum(k, 0, True) * (nextK - k)
        k = nextK
    # Accumulate the variance represented by the discrete spectra of all four grids.
    totalSlopeVariance = 0.0
    for y in range(FFT_SIZE):
        for x in range(FFT_SIZE):
            offset = 4 * (x + y * FFT_SIZE)
            i = 2.0 * pi * ((x - FFT_SIZE) if (x >= FFT_SIZE / 2) else x)
            j = 2.0 * pi * ((y - FFT_SIZE) if (y >= FFT_SIZE / 2) else y)
            s12_0, s12_1, s12_2, s12_3 = spectrum12_data[offset: offset + 4]
            s34_0, s34_1, s34_2, s34_3 = spectrum34_data[offset: offset + 4]
            totalSlopeVariance += self.getSlopeVariance(i/GRID1_SIZE, j/GRID1_SIZE, s12_0, s12_1)
            totalSlopeVariance += self.getSlopeVariance(i/GRID2_SIZE, j/GRID2_SIZE, s12_2, s12_3)
            totalSlopeVariance += self.getSlopeVariance(i/GRID3_SIZE, j/GRID3_SIZE, s34_0, s34_1)
            totalSlopeVariance += self.getSlopeVariance(i/GRID4_SIZE, j/GRID4_SIZE, s34_2, s34_3)
    # Render one slice of the 3D slope-variance texture per layer.
    self.fft_variance.use_program()
    self.fft_variance.bind_uniform_data("GRID_SIZES", GRID_SIZES)
    self.fft_variance.bind_uniform_data("slopeVarianceDelta", (theoreticSlopeVariance - totalSlopeVariance) * 0.5)
    self.fft_variance.bind_uniform_data("N_SLOPE_VARIANCE", N_SLOPE_VARIANCE)
    self.fft_variance.bind_uniform_data("spectrum_1_2_Sampler", self.texture_spectrum_1_2)
    self.fft_variance.bind_uniform_data("spectrum_3_4_Sampler", self.texture_spectrum_3_4)
    self.fft_variance.bind_uniform_data("FFT_SIZE", FFT_SIZE)
    for layer in range(N_SLOPE_VARIANCE):
        self.renderer.framebuffer_manager.bind_framebuffer(self.texture_slope_variance, target_layer=layer)
        self.fft_variance.bind_uniform_data("c", layer)
        self.quad.draw_elements()
def save_texture(self, texture):
    """Persist *texture* through the resource manager, replacing any previously stored data."""
    loader = self.resource_manager.texture_loader
    resource = loader.get_resource(texture.name)
    if resource is None:
        # First save: register a new resource and write it out.
        new_resource = loader.create_resource(texture.name, texture)
        loader.save_resource(new_resource.name)
    else:
        # Already registered: release the old texture before swapping in the new one.
        previous_texture = resource.get_data()
        previous_texture.delete()
        resource.set_data(texture)
def generate_texture(self):
    """(Re)build all FFT ocean resources: wave spectra, butterfly lookup and
    slope-variance textures, then persist them via the resource manager."""
    # Fixed-function GL state for the offscreen generation passes.
    glPolygonMode(GL_FRONT_AND_BACK, GL_FILL)
    glDepthFunc(GL_LEQUAL)
    glEnable(GL_CULL_FACE)
    glFrontFace(GL_CCW)
    glEnable(GL_DEPTH_TEST)
    glDepthMask(True)
    glDisable(GL_BLEND)
    glClearColor(0.0, 0.0, 0.0, 1.0)
    glClearDepth(1.0)
    # CPU-side staging buffers for the generated texture data (RGBA float).
    spectrum12_data = np.zeros(FFT_SIZE * FFT_SIZE * 4, dtype=np.float32)
    spectrum34_data = np.zeros(FFT_SIZE * FFT_SIZE * 4, dtype=np.float32)
    butterfly_data = np.zeros(FFT_SIZE * PASSES * 4, dtype=np.float32)
    self.generate
WavesSpectrum(spectrum12_data, spectrum34_data) if False else None
    # create render targets
    self.texture_spectrum_1_2 = CreateTexture(
        name='fft_ocean.spectrum_1_2',
        texture_type=Texture2D,
        image_mode='RGBA',
        width=FFT_SIZE,
        height=FFT_SIZE,
        internal_format=GL_RGBA16F,
        texture_format=GL_RGBA,
        min_filter=GL_NEAREST,
        mag_filter=GL_NEAREST,
        data_type=GL_FLOAT,
        wrap=GL_REPEAT,
        data=spectrum12_data,
    )
def update(self, delta):
    """Advance the simulation clock and select the caustic animation frame for this tick."""
    self.acc_time = self.acc_time + delta
    frame_count = len(self.texture_caustics)
    # 20 caustic frames per second, wrapped to the available textures.
    self.caustic_index = int(self.acc_time * 20.0 % frame_count)
def simulateFFTWaves(self):
    """Run the GPU FFT passes that turn the stored spectra into this frame's wave field.

    Evaluates the time-evolved spectrum into FFT_A, then ping-pongs between the
    FFT_A and FFT_B render targets: horizontal butterfly passes first, then
    vertical ones.  FFT_A is mip-mapped at the end for sampling by the renderer.
    """
    framebuffer_manager = CoreManager.instance().renderer.framebuffer_manager
    RenderTargets = RenderTarget.RenderTargets
    fft_a_framebuffer = framebuffer_manager.get_framebuffer(RenderTargets.FFT_A,
                                                            RenderTargets.FFT_A,
                                                            RenderTargets.FFT_A,
                                                            RenderTargets.FFT_A,
                                                            RenderTargets.FFT_A)
    fft_b_framebuffer = framebuffer_manager.get_framebuffer(RenderTargets.FFT_B,
                                                            RenderTargets.FFT_B,
                                                            RenderTargets.FFT_B,
                                                            RenderTargets.FFT_B,
                                                            RenderTargets.FFT_B)
    # initialize: write the time-evolved spectrum into FFT_A
    fft_a_framebuffer.bind_framebuffer()
    glClear(GL_COLOR_BUFFER_BIT)
    self.fft_init.use_program()
    self.fft_init.bind_uniform_data("FFT_SIZE", FFT_SIZE)
    self.fft_init.bind_uniform_data("INVERSE_GRID_SIZES", INVERSE_GRID_SIZES)
    self.fft_init.bind_uniform_data("spectrum_1_2_Sampler", self.texture_spectrum_1_2)
    self.fft_init.bind_uniform_data("spectrum_3_4_Sampler", self.texture_spectrum_3_4)
    self.fft_init.bind_uniform_data("t", self.acc_time * self.simulation_wind)
    self.quad.draw_elements()
    # horizontal fft passes (ping-pong: even passes read A/write B, odd the reverse)
    self.fft_x.use_program()
    self.fft_x.bind_uniform_data("butterflySampler", self.texture_butterfly)
    for i in range(PASSES):
        self.fft_x.bind_uniform_data("pass", float(i + 0.5) / PASSES)
        if i % 2 == 0:
            self.fft_x.bind_uniform_data("imgSampler", RenderTargets.FFT_A)
            fft_b_framebuffer.bind_framebuffer()
        else:
            self.fft_x.bind_uniform_data("imgSampler", RenderTargets.FFT_B)
            fft_a_framebuffer.bind_framebuffer()
        self.quad.draw_elements()
    # vertical fft passes (pass index continues so the ping-pong parity carries over)
    self.fft_y.use_program()
    self.fft_y.bind_uniform_data("butterflySampler", self.texture_butterfly)
    for i in range(PASSES, PASSES * 2, 1):
        self.fft_y.bind_uniform_data("pass", float(i - PASSES + 0.5) / PASSES)
        if i % 2 == 0:
            self.fft_y.bind_uniform_data("imgSampler", RenderTargets.FFT_A)
            fft_b_framebuffer.bind_framebuffer()
        else:
            self.fft_y.bind_uniform_data("imgSampler", RenderTargets.FFT_B)
            fft_a_framebuffer.bind_framebuffer()
        self.quad.draw_elements()
    RenderTargets.FFT_A.generate_mipmap()
def render_ocean(self, atmosphere, texture_scene, texture_linear_depth, texture_probe, texture_shadow):
    """Bind all uniforms and textures for the ocean surface shader and draw the grid.

    Assumes simulateFFTWaves() already produced this frame's wave field in
    RenderTargets.FFT_A — verify call order in the renderer.
    """
    self.fft_render.use_program()
    self.fft_render.bind_material_instance()
    # Simulation parameters.
    self.fft_render.bind_uniform_data("height", self.height)
    self.fft_render.bind_uniform_data("simulation_wind", self.simulation_wind)
    self.fft_render.bind_uniform_data("simulation_amplitude", self.simulation_amplitude)
    self.fft_render.bind_uniform_data("simulation_size", self.simulation_size)
    self.fft_render.bind_uniform_data("cell_size", GRID_CELL_SIZE)
    self.fft_render.bind_uniform_data("t", self.acc_time * self.simulation_wind)
    # Wave field and shading inputs.
    self.fft_render.bind_uniform_data("fftWavesSampler", RenderTarget.RenderTargets.FFT_A)
    self.fft_render.bind_uniform_data("slopeVarianceSampler", self.texture_slope_variance)
    self.fft_render.bind_uniform_data('texture_scene', texture_scene)
    self.fft_render.bind_uniform_data('texture_linear_depth', texture_linear_depth)
    self.fft_render.bind_uniform_data('texture_probe', texture_probe)
    self.fft_render.bind_uniform_data('texture_shadow', texture_shadow)
    self.fft_render.bind_uniform_data('texture_noise', self.texture_noise)
    self.fft_render.bind_uniform_data('texture_caustic', self.texture_caustics[self.caustic_index])
    self.fft_render.bind_uniform_data('texture_foam', self.texture_foam)
    # Bind Atmosphere
    atmosphere.bind_precomputed_atmosphere(self.fft_render)
    self.fft_grid.get_geometry().draw_elements()
| [
"PyEngine3D.App.CoreManager.instance",
"math.sqrt",
"PyEngine3D.Render.ScreenQuad.get_vertex_array_buffer",
"math.log",
"math.cos",
"numpy.zeros",
"math.sin",
"math.atan2",
"PyEngine3D.Render.Plane",
"PyEngine3D.OpenGLContext.CreateTexture"
] | [((3477, 3513), 'PyEngine3D.Render.ScreenQuad.get_vertex_array_buffer', 'ScreenQuad.get_vertex_array_buffer', ([], {}), '()\n', (3511, 3513), False, 'from PyEngine3D.Render import RenderTarget, ScreenQuad, Plane\n'), ((3538, 3642), 'PyEngine3D.Render.Plane', 'Plane', (['"""FFT_Grid"""'], {'mode': 'GL_QUADS', 'width': 'GRID_VERTEX_COUNT', 'height': 'GRID_VERTEX_COUNT', 'xz_plane': '(False)'}), "('FFT_Grid', mode=GL_QUADS, width=GRID_VERTEX_COUNT, height=\n GRID_VERTEX_COUNT, xz_plane=False)\n", (3543, 3642), False, 'from PyEngine3D.Render import RenderTarget, ScreenQuad, Plane\n'), ((6229, 6252), 'math.sqrt', 'sqrt', (['(kx * kx + ky * ky)'], {}), '(kx * kx + ky * ky)\n', (6233, 6252), False, 'from math import log, exp, sqrt, tanh, sin, cos, tan, atan2, ceil, pi\n'), ((7409, 7422), 'math.atan2', 'atan2', (['ky', 'kx'], {}), '(ky, kx)\n', (7414, 7422), False, 'from math import log, exp, sqrt, tanh, sin, cos, tan, atan2, ceil, pi\n'), ((13221, 13272), 'numpy.zeros', 'np.zeros', (['(FFT_SIZE * FFT_SIZE * 4)'], {'dtype': 'np.float32'}), '(FFT_SIZE * FFT_SIZE * 4, dtype=np.float32)\n', (13229, 13272), True, 'import numpy as np\n'), ((13299, 13350), 'numpy.zeros', 'np.zeros', (['(FFT_SIZE * FFT_SIZE * 4)'], {'dtype': 'np.float32'}), '(FFT_SIZE * FFT_SIZE * 4, dtype=np.float32)\n', (13307, 13350), True, 'import numpy as np\n'), ((13376, 13425), 'numpy.zeros', 'np.zeros', (['(FFT_SIZE * PASSES * 4)'], {'dtype': 'np.float32'}), '(FFT_SIZE * PASSES * 4, dtype=np.float32)\n', (13384, 13425), True, 'import numpy as np\n'), ((13624, 13914), 'PyEngine3D.OpenGLContext.CreateTexture', 'CreateTexture', ([], {'name': '"""fft_ocean.spectrum_1_2"""', 'texture_type': 'Texture2D', 'image_mode': '"""RGBA"""', 'width': 'FFT_SIZE', 'height': 'FFT_SIZE', 'internal_format': 'GL_RGBA16F', 'texture_format': 'GL_RGBA', 'min_filter': 'GL_NEAREST', 'mag_filter': 'GL_NEAREST', 'data_type': 'GL_FLOAT', 'wrap': 'GL_REPEAT', 'data': 'spectrum12_data'}), "(name='fft_ocean.spectrum_1_2', 
texture_type=Texture2D,\n image_mode='RGBA', width=FFT_SIZE, height=FFT_SIZE, internal_format=\n GL_RGBA16F, texture_format=GL_RGBA, min_filter=GL_NEAREST, mag_filter=\n GL_NEAREST, data_type=GL_FLOAT, wrap=GL_REPEAT, data=spectrum12_data)\n", (13637, 13914), False, 'from PyEngine3D.OpenGLContext import CreateTexture, Texture2D, Texture2DArray, Texture3D, FrameBuffer\n'), ((14093, 14383), 'PyEngine3D.OpenGLContext.CreateTexture', 'CreateTexture', ([], {'name': '"""fft_ocean.spectrum_3_4"""', 'texture_type': 'Texture2D', 'image_mode': '"""RGBA"""', 'width': 'FFT_SIZE', 'height': 'FFT_SIZE', 'internal_format': 'GL_RGBA16F', 'texture_format': 'GL_RGBA', 'min_filter': 'GL_NEAREST', 'mag_filter': 'GL_NEAREST', 'data_type': 'GL_FLOAT', 'wrap': 'GL_REPEAT', 'data': 'spectrum34_data'}), "(name='fft_ocean.spectrum_3_4', texture_type=Texture2D,\n image_mode='RGBA', width=FFT_SIZE, height=FFT_SIZE, internal_format=\n GL_RGBA16F, texture_format=GL_RGBA, min_filter=GL_NEAREST, mag_filter=\n GL_NEAREST, data_type=GL_FLOAT, wrap=GL_REPEAT, data=spectrum34_data)\n", (14106, 14383), False, 'from PyEngine3D.OpenGLContext import CreateTexture, Texture2D, Texture2DArray, Texture3D, FrameBuffer\n'), ((14564, 14883), 'PyEngine3D.OpenGLContext.CreateTexture', 'CreateTexture', ([], {'name': '"""fft_ocean.slope_variance"""', 'texture_type': 'Texture3D', 'image_mode': '"""RGBA"""', 'width': 'N_SLOPE_VARIANCE', 'height': 'N_SLOPE_VARIANCE', 'depth': 'N_SLOPE_VARIANCE', 'internal_format': 'GL_RGBA16F', 'texture_format': 'GL_RGBA', 'min_filter': 'GL_LINEAR', 'mag_filter': 'GL_LINEAR', 'wrap': 'GL_CLAMP_TO_EDGE', 'data_type': 'GL_FLOAT'}), "(name='fft_ocean.slope_variance', texture_type=Texture3D,\n image_mode='RGBA', width=N_SLOPE_VARIANCE, height=N_SLOPE_VARIANCE,\n depth=N_SLOPE_VARIANCE, internal_format=GL_RGBA16F, texture_format=\n GL_RGBA, min_filter=GL_LINEAR, mag_filter=GL_LINEAR, wrap=\n GL_CLAMP_TO_EDGE, data_type=GL_FLOAT)\n", (14577, 14883), False, 'from PyEngine3D.OpenGLContext 
import CreateTexture, Texture2D, Texture2DArray, Texture3D, FrameBuffer\n'), ((15055, 15346), 'PyEngine3D.OpenGLContext.CreateTexture', 'CreateTexture', ([], {'name': '"""fft_ocean.butterfly"""', 'texture_type': 'Texture2D', 'image_mode': '"""RGBA"""', 'width': 'FFT_SIZE', 'height': 'PASSES', 'internal_format': 'GL_RGBA16F', 'texture_format': 'GL_RGBA', 'min_filter': 'GL_NEAREST', 'mag_filter': 'GL_NEAREST', 'wrap': 'GL_CLAMP_TO_EDGE', 'data_type': 'GL_FLOAT', 'data': 'butterfly_data'}), "(name='fft_ocean.butterfly', texture_type=Texture2D,\n image_mode='RGBA', width=FFT_SIZE, height=PASSES, internal_format=\n GL_RGBA16F, texture_format=GL_RGBA, min_filter=GL_NEAREST, mag_filter=\n GL_NEAREST, wrap=GL_CLAMP_TO_EDGE, data_type=GL_FLOAT, data=butterfly_data)\n", (15068, 15346), False, 'from PyEngine3D.OpenGLContext import CreateTexture, Texture2D, Texture2DArray, Texture3D, FrameBuffer\n'), ((1795, 1817), 'PyEngine3D.App.CoreManager.instance', 'CoreManager.instance', ([], {}), '()\n', (1815, 1817), False, 'from PyEngine3D.App import CoreManager\n'), ((1856, 1878), 'PyEngine3D.App.CoreManager.instance', 'CoreManager.instance', ([], {}), '()\n', (1876, 1878), False, 'from PyEngine3D.App import CoreManager\n'), ((1925, 1947), 'PyEngine3D.App.CoreManager.instance', 'CoreManager.instance', ([], {}), '()\n', (1945, 1947), False, 'from PyEngine3D.App import CoreManager\n'), ((6486, 6500), 'math.log', 'log', (['(10.0 / z0)'], {}), '(10.0 / z0)\n', (6489, 6500), False, 'from math import log, exp, sqrt, tanh, sin, cos, tan, atan2, ceil, pi\n'), ((6865, 6876), 'math.sqrt', 'sqrt', (['Omega'], {}), '(Omega)\n', (6869, 6876), False, 'from math import log, exp, sqrt, tanh, sin, cos, tan, atan2, ceil, pi\n'), ((7258, 7266), 'math.log', 'log', (['(2.0)'], {}), '(2.0)\n', (7261, 7266), False, 'from math import log, exp, sqrt, tanh, sin, cos, tan, atan2, ceil, pi\n'), ((6993, 7009), 'math.log', 'log', (['(u_star / cm)'], {}), '(u_star / cm)\n', (6996, 7009), False, 'from math import 
log, exp, sqrt, tanh, sin, cos, tan, atan2, ceil, pi\n'), ((7894, 7907), 'math.sqrt', 'sqrt', (['(S / 2.0)'], {}), '(S / 2.0)\n', (7898, 7907), False, 'from math import log, exp, sqrt, tanh, sin, cos, tan, atan2, ceil, pi\n'), ((15982, 16004), 'PyEngine3D.App.CoreManager.instance', 'CoreManager.instance', ([], {}), '()\n', (16002, 16004), False, 'from PyEngine3D.App import CoreManager\n'), ((6599, 6609), 'math.log', 'log', (['Omega'], {}), '(Omega)\n', (6602, 6609), False, 'from math import log, exp, sqrt, tanh, sin, cos, tan, atan2, ceil, pi\n'), ((7060, 7076), 'math.log', 'log', (['(u_star / cm)'], {}), '(u_star / cm)\n', (7063, 7076), False, 'from math import log, exp, sqrt, tanh, sin, cos, tan, atan2, ceil, pi\n'), ((8066, 8074), 'math.cos', 'cos', (['phi'], {}), '(phi)\n', (8069, 8074), False, 'from math import log, exp, sqrt, tanh, sin, cos, tan, atan2, ceil, pi\n'), ((8080, 8088), 'math.sin', 'sin', (['phi'], {}), '(phi)\n', (8083, 8088), False, 'from math import log, exp, sqrt, tanh, sin, cos, tan, atan2, ceil, pi\n'), ((6715, 6727), 'math.sqrt', 'sqrt', (['(k / kp)'], {}), '(k / kp)\n', (6719, 6727), False, 'from math import log, exp, sqrt, tanh, sin, cos, tan, atan2, ceil, pi\n'), ((6805, 6815), 'math.sqrt', 'sqrt', (['(10.0)'], {}), '(10.0)\n', (6809, 6815), False, 'from math import log, exp, sqrt, tanh, sin, cos, tan, atan2, ceil, pi\n'), ((6819, 6831), 'math.sqrt', 'sqrt', (['(k / kp)'], {}), '(k / kp)\n', (6823, 6831), False, 'from math import log, exp, sqrt, tanh, sin, cos, tan, atan2, ceil, pi\n'), ((7574, 7588), 'math.cos', 'cos', (['(2.0 * phi)'], {}), '(2.0 * phi)\n', (7577, 7588), False, 'from math import log, exp, sqrt, tanh, sin, cos, tan, atan2, ceil, pi\n')] |
import numpy as np
import warnings
from time import time
import pandas as pd
from itertools import count, product
from copy import deepcopy
from scipy import optimize
from sklearn import linear_model as LM
from sklearn.svm import SVC, LinearSVC
from sklearn.neural_network import MLPClassifier
from sklearn.exceptions import ConvergenceWarning
from baselines.fairlearn.classred import expgrad
from baselines.fairlearn import moments
import baselines.fair_classification.utils as fc_ut
import baselines.fair_classification.loss_funcs as fc_lf
from baselines.fair_robust.unlabeled import UnlabeledFairRobust
# SeldonianML imports
from datasets import brazil_with_race as brazil
from core.base.sc import SeldonianClassifier
from utils.argsweep import ArgumentSweeper
from utils.rvs import ConstraintManager, get_parser, get_classification_cm
from utils.constraints import make_constraints
from utils.experiments import launcher
from utils.experiments import demographic_shift as ds
import utils
# Suppress sklearn FutureWarnings (SGD) and ConvergenceWarnings globally.
warnings.simplefilter(action='ignore', category=FutureWarning)
warnings.simplefilter(action='ignore', category=ConvergenceWarning)
########################
# Model Evaluators #
########################
# Wrapper for training each model and evaluating it under antagonistic resampling
def _evaluate_model(dataset, trainf, mp):
    """Train a model via *trainf* on a resample of *dataset* and score it under
    antagonistic demographic shift.

    Args:
        dataset: population dataset exposing resample() and resample_n_train.
        trainf:  callable (dataset, mp) -> (predictf, is_nsf) training routine.
        mp:      model-parameter dict (constraints, split ratio, shift options).

    Returns:
        dict with accuracy/constraint values on the original and antagonistically
        shifted distributions, the no-solution-found flag, and training runtime
        in seconds.
    """
    # NOTE(review): the original computed cm = get_classification_cm(mp['constraints'])
    # here but never used it; the unused local has been removed.
    # Resample the base dataset uniformly to obtain a training dataset, split
    # between candidate selection and safety checking.
    n_train = dataset.resample_n_train
    n_candidate = np.floor(mp['r_cand_v_safe'] * n_train).astype(int)
    n_safety = n_train - n_candidate
    dataset0 = dataset.resample(n_candidate=n_candidate, n_safety=n_safety, n_test=0)
    t = time()
    predictf, is_nsf = trainf(dataset0, mp)
    t = time() - t
    dshift_opts = {k: mp[k] for k in ['demographic_variable', 'demographic_variable_values', 'demographic_marginals', 'known_demographic_terms']}
    acc_orig, g_orig, acc_ant, g_ant = ds.evaluate_antagonistic_demographic_shift(predictf, mp['constraints'], dataset, dshift_opts)
    return {
        'original_nsf': is_nsf,
        'original_acc': acc_orig,
        'original_g': g_orig,
        'antagonist_acc': acc_ant,
        'antagonist_g': g_ant,
        'runtime': t
    }
# Methods to train models
def _get_fairlearn(dataset, mp):
    """Train a fairness-constrained classifier with fairlearn's exponentiated-gradient
    reduction (expgrad) over a linear SVC base learner.

    Returns (predictf, is_nsf) where predictf maps features to {-1, 1} labels and
    is_nsf is always False (this baseline always returns a solution).
    """
    # Train the model
    # Load the dataset and convert it to a pandas dataframe
    split = dataset.training_splits()
    Xt, Yt, Tt = split['X'], split['Y'], split['S']
    Xt = pd.DataFrame(Xt)
    # Convert Y to be in {0,1} instead of {-1,1} for compatibility with fairlearn
    Yt[Yt==-1] = 0
    Yt = pd.Series(Yt)
    Tt = pd.Series(Tt)
    # Use expgrad with a linear SVC
    # Note that this fairlearn implementation only supports DemographicParity and EqualOpportunity
    # When other definitions are requested, we enforce DP or EO based on which is most reasonable
    defs = {
        'demographicparity' : moments.DP,
        'disparateimpact' : moments.EO,
        'equalizedodds' : moments.EO,
        'equalopportunity' : moments.EO,
        'predictiveequality' : moments.EO }
    cons = defs[mp['definition'].lower()]()
    # Train fairlearn using expgrad with a linear SVC
    base_model = LinearSVC(loss=mp['loss'], penalty=mp['penalty'], fit_intercept=mp['fit_intercept'])
    results, hs = expgrad(Xt, Tt, Yt, base_model, cons=cons, eps=mp['fl_e'])
    def predictf(X, results=results):
        # Round the randomized classifier's output to hard labels, then map {0,1} -> {-1,1}.
        Yp = np.array(np.round(results.best_classifier(X)))
        try:
            Yp[Yp==0] = -1
        except TypeError:
            # Scalar prediction (single sample): not indexable, remap manually.
            Yp = Yp if Yp == 1 else -1
        return Yp
    return predictf, False
def _get_fair_constraints(dataset, mp):
    """Train Zafar et al.'s fairness-constrained logistic regression baseline.

    FairConstraints is constructed to simultaneously enforce disparate impact and
    disparate treatment, thus the training process is the same regardless of the
    actual definition we're evaluating.

    Returns (predictf, is_nsf); predictf maps features to {-1, 1} and is_nsf is
    always False (this baseline always returns a solution).
    """
    # Configure the constraints and weights.
    apply_fairness_constraints = 1
    apply_accuracy_constraint = 0
    sep_constraint = 0
    gamma = None
    # NOTE(review): the original computed e = -mp['e']*100 here but never used it; removed.
    # Train subject to covariance thresholds on each sensitive attribute.
    split = dataset.training_splits()
    X, Y, S, R = split['X'], split['Y'], split['S'], split['R']
    x_control = {'S': S.astype(np.int64), 'R': R.astype(np.int64)}
    sensitive_attrs = ['S', 'R']
    sensitive_attrs_to_cov_thresh = {'S': 0.1,
                                     'R': {v: 0.01 for v in dataset._unique_values['R']}}
    w = fc_ut.train_model(X, Y, x_control, fc_lf._logistic_loss, apply_fairness_constraints, apply_accuracy_constraint, sep_constraint, sensitive_attrs, sensitive_attrs_to_cov_thresh, gamma)
    def predictf(_X):
        Yp = np.sign(np.dot(_X, w))
        # np.sign yields 0 on exact zeros; map those to the negative class.
        try:
            Yp[Yp == 0] = -1
        except TypeError:
            # Single-sample input: Yp is a scalar and not indexable.
            # (Was a bare `except:`; narrowed to match _get_fairlearn's handling.)
            pass
        return Yp
    return predictf, False
def _get_hoeff_sc(dataset, mp, enforce_robustness=False):
    """Train a Seldonian classifier using Hoeffding-style confidence intervals.

    Zafar et al.'s fairness-constrained logistic regression provides the initial
    iterate theta0 for candidate selection.

    Args:
        dataset: training dataset exposing training_splits().
        mp: model-parameter dict (constraints, deltas, optimizer settings, ...).
        enforce_robustness: when True, copy the demographic-shift options into
            the model so training/safety checking account for distribution shift.

    Returns:
        (predictf, is_nsf): prediction function and whether the safety test
        rejected the candidate (No Solution Found).
    """
    model_params = {
        'verbose'     : False,
        'shape_error' : True,
        'model_type'  : mp['model_type'],
        'ci_mode'     : 'hoeffding',
        'cs_scale'    : mp['cs_scale'],
        'robust_loss' : False}
    if enforce_robustness:
        for k in ['demographic_variable', 'demographic_variable_values', 'demographic_marginals', 'known_demographic_terms', 'robust_loss']:
            model_params[k] = mp[k]
    # Warm start: fairness-constrained logistic regression weights.
    # NOTE(review): the original also computed an unused e = -mp['e']*100; removed.
    apply_fairness_constraints = 1
    apply_accuracy_constraint = 0
    sep_constraint = 0
    gamma = None
    split = dataset.training_splits()
    X, Y, S, R = split['X'], split['Y'], split['S'], split['R']
    x_control = {'S': S.astype(np.int64), 'R': R.astype(np.int64)}
    sensitive_attrs = ['S', 'R']
    sensitive_attrs_to_cov_thresh = {'S': 0.1,
                                     'R': {v: 0.01 for v in dataset._unique_values['R']}}
    w = fc_ut.train_model(X, Y, x_control, fc_lf._logistic_loss, apply_fairness_constraints, apply_accuracy_constraint, sep_constraint, sensitive_attrs, sensitive_attrs_to_cov_thresh, gamma)
    model = SeldonianClassifier(mp['constraints'], mp['deltas'], **model_params)
    accept = model.fit(dataset, n_iters=mp['n_iters'], optimizer_name=mp['optimizer'], theta0=w)
    return model.predict, ~accept
def _get_ttest_sc(dataset, mp, enforce_robustness=False):
    """Train a Seldonian classifier using Student's t-test confidence intervals.

    Identical to _get_hoeff_sc except for ci_mode='ttest'.  (The original
    carried a copy-pasted "hoeffding" comment here; corrected.)  Zafar et al.'s
    fairness-constrained logistic regression provides the initial iterate
    theta0 for candidate selection.

    Args:
        dataset: training dataset exposing training_splits().
        mp: model-parameter dict (constraints, deltas, optimizer settings, ...).
        enforce_robustness: when True, copy the demographic-shift options into
            the model so training/safety checking account for distribution shift.

    Returns:
        (predictf, is_nsf): prediction function and whether the safety test
        rejected the candidate (No Solution Found).
    """
    model_params = {
        'verbose'     : False,
        'shape_error' : True,
        'model_type'  : mp['model_type'],
        'ci_mode'     : 'ttest',
        'cs_scale'    : mp['cs_scale'],
        'robust_loss' : False}
    if enforce_robustness:
        for k in ['demographic_variable', 'demographic_variable_values', 'demographic_marginals', 'known_demographic_terms', 'robust_loss']:
            model_params[k] = mp[k]
    # Warm start: fairness-constrained logistic regression weights.
    # NOTE(review): the original also computed an unused e = -mp['e']*100; removed.
    apply_fairness_constraints = 1
    apply_accuracy_constraint = 0
    sep_constraint = 0
    gamma = None
    split = dataset.training_splits()
    X, Y, S, R = split['X'], split['Y'], split['S'], split['R']
    x_control = {'S': S.astype(np.int64), 'R': R.astype(np.int64)}
    sensitive_attrs = ['S', 'R']
    sensitive_attrs_to_cov_thresh = {'S': 0.1,
                                     'R': {v: 0.01 for v in dataset._unique_values['R']}}
    w = fc_ut.train_model(X, Y, x_control, fc_lf._logistic_loss, apply_fairness_constraints, apply_accuracy_constraint, sep_constraint, sensitive_attrs, sensitive_attrs_to_cov_thresh, gamma)
    model = SeldonianClassifier(mp['constraints'], mp['deltas'], **model_params)
    accept = model.fit(dataset, n_iters=mp['n_iters'], optimizer_name=mp['optimizer'], theta0=w)
    return model.predict, ~accept
def _get_sgd(dataset, mp):
    """Train an unconstrained linear baseline: logistic regression for 'log' loss,
    otherwise an SGD classifier with the configured loss/penalty."""
    split = dataset.training_splits()
    features, labels = split['X'], split['Y']
    if mp['loss'] == 'log':
        classifier = LM.LogisticRegression(fit_intercept=mp['fit_intercept'])
    else:
        classifier = LM.SGDClassifier(loss=mp['loss'], penalty=mp['penalty'], fit_intercept=mp['fit_intercept'], max_iter=1000, alpha=0.000001)
    classifier.fit(features, labels)
    return classifier.predict, False
def _get_svc(dataset, mp):
    """Train an unconstrained kernel SVM baseline."""
    split = dataset.training_splits()
    features, labels = split['X'], split['Y']
    classifier = SVC(gamma=mp['gamma'], C=mp['C'], kernel=mp['kernel'])
    classifier.fit(features, labels)
    return classifier.predict, False
def _get_linsvc(dataset, mp):
    """Train an unconstrained linear SVM baseline."""
    split = dataset.training_splits()
    features, labels = split['X'], split['Y']
    classifier = LinearSVC(loss=mp['loss'], penalty=mp['penalty'], fit_intercept=mp['fit_intercept'])
    classifier.fit(features, labels)
    return classifier.predict, False
def _get_fair_robust(dataset, mp):
    """Train the fair-robust baseline.

    Labels are converted to {0, 1} for training and the returned prediction
    function maps the model's {0, 1} outputs back to the {-1, 1} convention
    used by the rest of the harness.
    """
    split = dataset.training_splits()
    features, labels, groups = split['X'], split['Y'], split['S']
    labels = 1.0 * (labels == 1)
    classifier = UnlabeledFairRobust()
    classifier.fit(features, labels, groups)
    def predictf(X, model=classifier):
        raw = model.predict(X)
        return 1 * (raw == 1) - 1 * (raw == 0)
    return predictf, False
# Actual evaluation functions.
# Thin wrappers pairing each training routine with the shared evaluation harness;
# they are selected by name via the model_evaluators dict in __main__.
def eval_fairlearn(dataset, mp):
    return _evaluate_model(dataset, _get_fairlearn, mp)
def eval_fair_constraints(dataset, mp):
    return _evaluate_model(dataset, _get_fair_constraints, mp)
def eval_hoeff_sc(dataset, mp):
    trainf = lambda dataset, mp: _get_hoeff_sc(dataset, mp, enforce_robustness=False)
    return _evaluate_model(dataset, trainf, mp)
def eval_hoeff_sc_robust(dataset, mp):
    trainf = lambda dataset, mp: _get_hoeff_sc(dataset, mp, enforce_robustness=True)
    return _evaluate_model(dataset, trainf, mp)
def eval_ttest_sc(dataset, mp):
    trainf = lambda dataset, mp: _get_ttest_sc(dataset, mp, enforce_robustness=False)
    return _evaluate_model(dataset, trainf, mp)
def eval_ttest_sc_robust(dataset, mp):
    trainf = lambda dataset, mp: _get_ttest_sc(dataset, mp, enforce_robustness=True)
    return _evaluate_model(dataset, trainf, mp)
def eval_sgd(dataset, mp):
    return _evaluate_model(dataset, _get_sgd, mp)
def eval_svc(dataset, mp):
    return _evaluate_model(dataset, _get_svc, mp)
def eval_linsvc(dataset, mp):
    return _evaluate_model(dataset, _get_linsvc, mp)
def eval_fair_robust(dataset, mp):
    return _evaluate_model(dataset, _get_fair_robust, mp)
######################
# Dataset Loader #
######################
def load_dataset(tparams, seed):
    """Load the Brazil-with-race dataset configured by *tparams*, seeded for reproducibility."""
    dataset = brazil.load(
        r_train=1.0,
        include_intercept=True,
        include_R=tparams['include_R'],
        include_S=tparams['include_S'],
        use_pct=1.0,
        seed=seed,
        gpa_cutoff=tparams['gpa_cutoff'],
        standardize=tparams['standardize'],
    )
    # Record the per-trial training-set size for later resampling.
    dataset.resample_n_train = tparams['n_train']
    return dataset
############
# Main #
############
if __name__ == '__main__':
    # Note: This script computes experiments for the cross product of all values given for the
    #       sweepable arguments.
    # Note: Sweepable arguments allow inputs of the form, <start>:<end>:<increment>, which are then
    #       expanded into ranges via np.arange(<start>, <end>, <increment>).
    with ArgumentSweeper() as parser:
        parser.add_argument('base_path', type=str)
        parser.add_argument('--gpa_cutoff', type=float, default=3.5, help='Cutoff for defining "good" GPA.')
        parser.add_argument('--include_R', action='store_true', help='Whether or not to include race as a predictive feature.')
        parser.add_argument('--include_S', action='store_true', help='Whether or not to include sex as a predictive feature.')
        parser.add_argument('--standardize', action='store_true', help='Whether or not to standardize input features.')
        parser.add_argument('--n_jobs', type=int, default=4, help='Number of processes to use.')
        parser.add_argument('--n_trials', type=int, default=10, help='Number of trials to run.')
        parser.add_argument('--n_iters', type=int, default=10, help='Number of SMLA training iterations.')
        parser.add_argument('--optimizer', type=str, default='cmaes', help='Choice of optimizer to use.')
        parser.add_argument('--definition', type=str, default='DisparateImpact', help='Choice of safety definition to enforce.')
        parser.add_argument('--e', type=float, default=0.05, help='Value for epsilon.')
        parser.add_argument('--d', type=float, default=0.05, help='Value for delta.')
        parser.add_argument('--robust_loss', action='store_true', help='Causes the loss function to estimate post-demographic shift loss.')
        parser.add_sweepable_argument('--n_train', type=int, default=10000, nargs='*', help='Number of samples to draw from the population for training.')
        parser.add_sweepable_argument('--r_train_v_test', type=float, default=0.4, nargs='*', help='Ratio of data used for training vs testing.')
        parser.add_argument('--r_cand_v_safe', type=float, default=0.4, help='Ratio of training data used for candidate selection vs safety checking. (SMLA only)')
        parser.add_sweepable_argument('--model_type', type=str, default='linear', nargs='*', help='Base model type to use for SMLAs.')
        parser.add_argument('--fixed_dist', action='store_true', help='Fixed the distribution post-deployment (only works when dshift_var=race.')
        parser.add_argument('--dshift_var', type=str, default='race', help='Choice of variable to evaluate demographic shift for.')
        parser.add_argument('--dshift_alpha', type=float, default=0.0, help='Width of intervals around true marginals representing valid demographic shifts.')
        parser.add_argument('--cs_scale', type=float, default=1.0, help='Scaling factor for predicted confidence intervals during candidate selection.')
        args = parser.parse_args()
        args_dict = dict(args.__dict__)
        # Generate the constraints and deltas.
        # Constraints are defined over the sensitive attribute that is NOT shifting.
        # NOTE(review): if dshift_var starts with neither 's' nor 'r', `constraints`
        # is never assigned and the next line raises NameError (the explicit
        # RuntimeError check further below comes too late) — worth confirming/fixing.
        population = brazil.load()
        if args.dshift_var.lower()[0] == 's':
            constraints = make_constraints(args.definition, 'R', np.unique(population._R), args.e)
        if args.dshift_var.lower()[0] == 'r':
            constraints = make_constraints(args.definition, 'S', np.unique(population._S), args.e)
        deltas = [ args.d for _ in constraints ]
        print()
        print(args.definition,':')
        print('   Interpreting constraint string \'%s\'' % constraints[0])
        print('   as \'%r\'.' % get_parser().parse(constraints[0]))
        smla_names = ['SC', 'QSC', 'SRC', 'QSRC']
        model_evaluators = {
            'SC' : eval_hoeff_sc,
            'QSC' : eval_ttest_sc,
            'SRC' : eval_hoeff_sc_robust,
            'QSRC' : eval_ttest_sc_robust,
            # 'SGD' : eval_sgd,
            # 'LinSVC' : eval_linsvc,
            # 'SVC' : eval_svc
            'FairConst' : eval_fair_constraints,
            'FairlearnSVC' : eval_fairlearn,
            'FairRobust' : eval_fair_robust
        }
        # Store task parameters:
        tparams = {k:args_dict[k] for k in ['n_jobs', 'base_path', 'r_train_v_test', 'include_R', 'include_S', 'gpa_cutoff', 'standardize', 'n_train']}
        # Generate options for enforcing robustness constraints
        if args.dshift_var.lower() == 'sex':
            D = get_parser(mode='inner').parse('S')
            D_values = population._S
        elif args.dshift_var.lower() == 'race':
            D = get_parser(mode='inner').parse('R')
            D_values = population._R
        else:
            raise RuntimeError('This experiment does not support demographic shift for the variable \'%s\'' % args.dshift_var)
        unique_D_values = np.unique(D_values)
        Pr_D = np.array([ (D_values==d).mean() for d in unique_D_values ])
        if args.fixed_dist:
            # Fixed post-deployment marginals (hard-coded; only meaningful for dshift_var=race).
            smla_dshift_opts = {
                'demographic_variable' : D,
                'demographic_variable_values' : unique_D_values,
                'demographic_marginals' : np.array([0.3 , 0.6 , 0.05 , 0.048, 0.002]),
                'known_demographic_terms' : ds.get_population_conditionals(population.all_sets(), constraints, D)
            }
        else:
            # Interval-valued marginals of width dshift_alpha around the true marginals.
            assert (args.dshift_alpha >= 0) and (args.dshift_alpha <= 1.0), 'Demographic shift alpha value must be between 0 and 1.'
            smla_dshift_opts = {
                'demographic_variable' : D,
                'demographic_variable_values' : unique_D_values,
                'demographic_marginals' : ds.make_intervals(Pr_D, args.dshift_alpha, epsilon=1e-3),
                'known_demographic_terms' : ds.get_population_conditionals(population.all_sets(), constraints, D)
            }
        # Fill in parameter dictionaries for each model
        srl_mparam_names = ['n_iters','optimizer','model_type', 'definition', 'e', 'cs_scale', 'robust_loss']
        bsln_mparam_names = ['definition', 'e']
        mparams = {}
        for name in model_evaluators.keys():
            if name in smla_names:
                mparams[name] = {k:args_dict[k] for k in srl_mparam_names}
            else:
                mparams[name] = {k:args_dict[k] for k in bsln_mparam_names}
            mparams[name]['constraints'] = constraints
            mparams[name]['deltas'] = deltas
            mparams[name]['dshift_alpha'] = args.dshift_alpha
            mparams[name]['dshift_var'] = args.dshift_var
            mparams[name]['r_cand_v_safe'] = args.r_cand_v_safe
            mparams[name].update(smla_dshift_opts)
        # mparams['SGD'].update(loss=['hinge','log','perceptron'], penalty='l2', fit_intercept=False)
        # mparams['SVC'].update(kernel=['rbf'], gamma=2, C=1)
        # mparams['LinSVC'].update(loss=['hinge'], penalty='l2', fit_intercept=False)
        mparams['FairConst'].update(cov=[0.01])
        mparams['FairlearnSVC'].update(loss=['hinge'], penalty='l2', fit_intercept=False, fl_e=[0.01, 0.1])
        # Expand the parameter sets into a set of configurations
        args_to_expand = parser._sweep_argnames + ['loss', 'kernel', 'cov', 'fl_e', 'n_train']
        tparams, mparams = launcher.make_parameters(tparams, mparams, expand=args_to_expand)
        # print()
        # # Create a results file and directory
        # save_path = launcher.prepare_paths(args.base_path, tparams, mparams, smla_names, root='results', filename=None)
        # print()
        # # Run the experiment
        # launcher.run(args.n_trials, save_path, model_evaluators, load_dataset, tparams, mparams, n_workers=args.n_jobs, seed=None)
| [
"core.base.sc.SeldonianClassifier",
"numpy.array",
"baselines.fair_robust.unlabeled.UnlabeledFairRobust",
"sklearn.linear_model.SGDClassifier",
"utils.experiments.demographic_shift.make_intervals",
"numpy.dot",
"pandas.DataFrame",
"warnings.simplefilter",
"baselines.fair_classification.utils.train_m... | [((1053, 1115), 'warnings.simplefilter', 'warnings.simplefilter', ([], {'action': '"""ignore"""', 'category': 'FutureWarning'}), "(action='ignore', category=FutureWarning)\n", (1074, 1115), False, 'import warnings\n'), ((1116, 1183), 'warnings.simplefilter', 'warnings.simplefilter', ([], {'action': '"""ignore"""', 'category': 'ConvergenceWarning'}), "(action='ignore', category=ConvergenceWarning)\n", (1137, 1183), False, 'import warnings\n'), ((1393, 1433), 'utils.rvs.get_classification_cm', 'get_classification_cm', (["mp['constraints']"], {}), "(mp['constraints'])\n", (1414, 1433), False, 'from utils.rvs import ConstraintManager, get_parser, get_classification_cm\n'), ((1731, 1737), 'time.time', 'time', ([], {}), '()\n', (1735, 1737), False, 'from time import time\n'), ((1971, 2068), 'utils.experiments.demographic_shift.evaluate_antagonistic_demographic_shift', 'ds.evaluate_antagonistic_demographic_shift', (['predictf', "mp['constraints']", 'dataset', 'dshift_opts'], {}), "(predictf, mp['constraints'],\n dataset, dshift_opts)\n", (2013, 2068), True, 'from utils.experiments import demographic_shift as ds\n'), ((2477, 2493), 'pandas.DataFrame', 'pd.DataFrame', (['Xt'], {}), '(Xt)\n', (2489, 2493), True, 'import pandas as pd\n'), ((2595, 2608), 'pandas.Series', 'pd.Series', (['Yt'], {}), '(Yt)\n', (2604, 2608), True, 'import pandas as pd\n'), ((2615, 2628), 'pandas.Series', 'pd.Series', (['Tt'], {}), '(Tt)\n', (2624, 2628), True, 'import pandas as pd\n'), ((3158, 3247), 'sklearn.svm.LinearSVC', 'LinearSVC', ([], {'loss': "mp['loss']", 'penalty': "mp['penalty']", 'fit_intercept': "mp['fit_intercept']"}), "(loss=mp['loss'], penalty=mp['penalty'], fit_intercept=mp[\n 'fit_intercept'])\n", (3167, 3247), False, 'from sklearn.svm import SVC, LinearSVC\n'), ((3258, 3316), 'baselines.fairlearn.classred.expgrad', 'expgrad', (['Xt', 'Tt', 'Yt', 'base_model'], {'cons': 'cons', 'eps': "mp['fl_e']"}), "(Xt, Tt, Yt, base_model, 
cons=cons, eps=mp['fl_e'])\n", (3265, 3316), False, 'from baselines.fairlearn.classred import expgrad\n'), ((4278, 4468), 'baselines.fair_classification.utils.train_model', 'fc_ut.train_model', (['X', 'Y', 'x_control', 'fc_lf._logistic_loss', 'apply_fairness_constraints', 'apply_accuracy_constraint', 'sep_constraint', 'sensitive_attrs', 'sensitive_attrs_to_cov_thresh', 'gamma'], {}), '(X, Y, x_control, fc_lf._logistic_loss,\n apply_fairness_constraints, apply_accuracy_constraint, sep_constraint,\n sensitive_attrs, sensitive_attrs_to_cov_thresh, gamma)\n', (4295, 4468), True, 'import baselines.fair_classification.utils as fc_ut\n'), ((5483, 5673), 'baselines.fair_classification.utils.train_model', 'fc_ut.train_model', (['X', 'Y', 'x_control', 'fc_lf._logistic_loss', 'apply_fairness_constraints', 'apply_accuracy_constraint', 'sep_constraint', 'sensitive_attrs', 'sensitive_attrs_to_cov_thresh', 'gamma'], {}), '(X, Y, x_control, fc_lf._logistic_loss,\n apply_fairness_constraints, apply_accuracy_constraint, sep_constraint,\n sensitive_attrs, sensitive_attrs_to_cov_thresh, gamma)\n', (5500, 5673), True, 'import baselines.fair_classification.utils as fc_ut\n'), ((5676, 5744), 'core.base.sc.SeldonianClassifier', 'SeldonianClassifier', (["mp['constraints']", "mp['deltas']"], {}), "(mp['constraints'], mp['deltas'], **model_params)\n", (5695, 5744), False, 'from core.base.sc import SeldonianClassifier\n'), ((6760, 6950), 'baselines.fair_classification.utils.train_model', 'fc_ut.train_model', (['X', 'Y', 'x_control', 'fc_lf._logistic_loss', 'apply_fairness_constraints', 'apply_accuracy_constraint', 'sep_constraint', 'sensitive_attrs', 'sensitive_attrs_to_cov_thresh', 'gamma'], {}), '(X, Y, x_control, fc_lf._logistic_loss,\n apply_fairness_constraints, apply_accuracy_constraint, sep_constraint,\n sensitive_attrs, sensitive_attrs_to_cov_thresh, gamma)\n', (6777, 6950), True, 'import baselines.fair_classification.utils as fc_ut\n'), ((6953, 7021), 
'core.base.sc.SeldonianClassifier', 'SeldonianClassifier', (["mp['constraints']", "mp['deltas']"], {}), "(mp['constraints'], mp['deltas'], **model_params)\n", (6972, 7021), False, 'from core.base.sc import SeldonianClassifier\n'), ((7626, 7680), 'sklearn.svm.SVC', 'SVC', ([], {'gamma': "mp['gamma']", 'C': "mp['C']", 'kernel': "mp['kernel']"}), "(gamma=mp['gamma'], C=mp['C'], kernel=mp['kernel'])\n", (7629, 7680), False, 'from sklearn.svm import SVC, LinearSVC\n'), ((7837, 7926), 'sklearn.svm.LinearSVC', 'LinearSVC', ([], {'loss': "mp['loss']", 'penalty': "mp['penalty']", 'fit_intercept': "mp['fit_intercept']"}), "(loss=mp['loss'], penalty=mp['penalty'], fit_intercept=mp[\n 'fit_intercept'])\n", (7846, 7926), False, 'from sklearn.svm import SVC, LinearSVC\n'), ((8120, 8141), 'baselines.fair_robust.unlabeled.UnlabeledFairRobust', 'UnlabeledFairRobust', ([], {}), '()\n', (8139, 8141), False, 'from baselines.fair_robust.unlabeled import UnlabeledFairRobust\n'), ((9872, 9896), 'datasets.brazil_with_race.load', 'brazil.load', ([], {}), '(**dset_args)\n', (9883, 9896), True, 'from datasets import brazil_with_race as brazil\n'), ((1784, 1790), 'time.time', 'time', ([], {}), '()\n', (1788, 1790), False, 'from time import time\n'), ((7276, 7332), 'sklearn.linear_model.LogisticRegression', 'LM.LogisticRegression', ([], {'fit_intercept': "mp['fit_intercept']"}), "(fit_intercept=mp['fit_intercept'])\n", (7297, 7332), True, 'from sklearn import linear_model as LM\n'), ((7350, 7474), 'sklearn.linear_model.SGDClassifier', 'LM.SGDClassifier', ([], {'loss': "mp['loss']", 'penalty': "mp['penalty']", 'fit_intercept': "mp['fit_intercept']", 'max_iter': '(1000)', 'alpha': '(1e-06)'}), "(loss=mp['loss'], penalty=mp['penalty'], fit_intercept=mp[\n 'fit_intercept'], max_iter=1000, alpha=1e-06)\n", (7366, 7474), True, 'from sklearn import linear_model as LM\n'), ((10332, 10349), 'utils.argsweep.ArgumentSweeper', 'ArgumentSweeper', ([], {}), '()\n', (10347, 10349), False, 'from 
utils.argsweep import ArgumentSweeper\n'), ((13036, 13049), 'datasets.brazil_with_race.load', 'brazil.load', ([], {}), '()\n', (13047, 13049), True, 'from datasets import brazil_with_race as brazil\n'), ((14602, 14621), 'numpy.unique', 'np.unique', (['D_values'], {}), '(D_values)\n', (14611, 14621), True, 'import numpy as np\n'), ((16724, 16789), 'utils.experiments.launcher.make_parameters', 'launcher.make_parameters', (['tparams', 'mparams'], {'expand': 'args_to_expand'}), '(tparams, mparams, expand=args_to_expand)\n', (16748, 16789), False, 'from utils.experiments import launcher\n'), ((1555, 1594), 'numpy.floor', 'np.floor', (["(mp['r_cand_v_safe'] * n_train)"], {}), "(mp['r_cand_v_safe'] * n_train)\n", (1563, 1594), True, 'import numpy as np\n'), ((4495, 4508), 'numpy.dot', 'np.dot', (['_X', 'w'], {}), '(_X, w)\n', (4501, 4508), True, 'import numpy as np\n'), ((13146, 13170), 'numpy.unique', 'np.unique', (['population._R'], {}), '(population._R)\n', (13155, 13170), True, 'import numpy as np\n'), ((13276, 13300), 'numpy.unique', 'np.unique', (['population._S'], {}), '(population._S)\n', (13285, 13300), True, 'import numpy as np\n'), ((14865, 14905), 'numpy.array', 'np.array', (['[0.3, 0.6, 0.05, 0.048, 0.002]'], {}), '([0.3, 0.6, 0.05, 0.048, 0.002])\n', (14873, 14905), True, 'import numpy as np\n'), ((15307, 15364), 'utils.experiments.demographic_shift.make_intervals', 'ds.make_intervals', (['Pr_D', 'args.dshift_alpha'], {'epsilon': '(0.001)'}), '(Pr_D, args.dshift_alpha, epsilon=0.001)\n', (15324, 15364), True, 'from utils.experiments import demographic_shift as ds\n'), ((14279, 14303), 'utils.rvs.get_parser', 'get_parser', ([], {'mode': '"""inner"""'}), "(mode='inner')\n", (14289, 14303), False, 'from utils.rvs import ConstraintManager, get_parser, get_classification_cm\n'), ((13518, 13530), 'utils.rvs.get_parser', 'get_parser', ([], {}), '()\n', (13528, 13530), False, 'from utils.rvs import ConstraintManager, get_parser, get_classification_cm\n'), ((14392, 
14416), 'utils.rvs.get_parser', 'get_parser', ([], {'mode': '"""inner"""'}), "(mode='inner')\n", (14402, 14416), False, 'from utils.rvs import ConstraintManager, get_parser, get_classification_cm\n')] |
# Program 21d: Animation of a JJ limit cycle bifurcation.
# See Figure 21.9.
from matplotlib import pyplot as plt
from matplotlib.animation import ArtistAnimation
import numpy as np
from scipy.integrate import odeint

fig = plt.figure()
myimages = []
bj = 1.2     # junction damping coefficient
tmax = 100   # integration end time


def jj_ode(x, t, kappa):
    """Josephson-junction ODE right-hand side.

    x = (phi, dphi/dt); t is unused but required by odeint.
    kappa (the swept bias parameter) is passed explicitly via odeint's
    ``args`` instead of being read from a module-level global, so the
    function no longer depends on late binding of the loop variable.
    """
    return [x[1], kappa - bj * x[1] - np.sin(x[0])]


time = np.arange(0, tmax, 0.1)
x0 = [0.1, 0.1]
# Sweep the bias parameter and record one phase-portrait frame per value.
for kappa in np.arange(0.1, 2, 0.1):
    xs = odeint(jj_ode, x0, time, args=(kappa,))
    imgplot = plt.plot(np.sin(xs[:, 0]), xs[:, 1], 'r-')
    myimages.append(imgplot)

my_anim = ArtistAnimation(fig, myimages, interval=100, blit=False, repeat_delay=100)
plt.show()
| [
"scipy.integrate.odeint",
"matplotlib.animation.ArtistAnimation",
"matplotlib.pyplot.figure",
"numpy.sin",
"numpy.arange",
"matplotlib.pyplot.show"
] | [((225, 237), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (235, 237), True, 'from matplotlib import pyplot as plt\n'), ((350, 373), 'numpy.arange', 'np.arange', (['(0)', 'tmax', '(0.1)'], {}), '(0, tmax, 0.1)\n', (359, 373), True, 'import numpy as np\n'), ((403, 425), 'numpy.arange', 'np.arange', (['(0.1)', '(2)', '(0.1)'], {}), '(0.1, 2, 0.1)\n', (412, 425), True, 'import numpy as np\n'), ((558, 632), 'matplotlib.animation.ArtistAnimation', 'ArtistAnimation', (['fig', 'myimages'], {'interval': '(100)', 'blit': '(False)', 'repeat_delay': '(100)'}), '(fig, myimages, interval=100, blit=False, repeat_delay=100)\n', (573, 632), False, 'from matplotlib.animation import ArtistAnimation\n'), ((633, 643), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (641, 643), True, 'from matplotlib import pyplot as plt\n'), ((436, 460), 'scipy.integrate.odeint', 'odeint', (['jj_ode', 'x0', 'time'], {}), '(jj_ode, x0, time)\n', (442, 460), False, 'from scipy.integrate import odeint\n'), ((484, 500), 'numpy.sin', 'np.sin', (['xs[:, 0]'], {}), '(xs[:, 0])\n', (490, 500), True, 'import numpy as np\n'), ((328, 340), 'numpy.sin', 'np.sin', (['x[0]'], {}), '(x[0])\n', (334, 340), True, 'import numpy as np\n')] |
import sys
import numpy as np
import math
import librosa
import soundfile as sf
import json
from librosa.core.spectrum import power_to_db
import scipy

# Load the waveform named on the command line.
wav_path = sys.argv[1]
data, samplerate = sf.read(wav_path)

# Front-end parameters come from the JSON configuration file.
with open("MfccConfig.json", "r") as cfg_file:
    config = json.load(cfg_file)

frame_size = config['frame_size']
frame_step = config['frame_step']
n_fft = config['n_fft']
n_mels = config['mfcc_bank_cnt']
fmin = config['fmin']
fmax = config['fmax']
dtype = config.get('dtype', "int")
high_prec = config.get('use_high_prec', False) or dtype == "fix32_scal"
use_power = False
# True when n_fft/2 is an exact power of 4 (radix-4 FFT friendly).
rad4 = math.log(n_fft//2, 4) == round(math.log(n_fft//2, 4))
ndct = config.get('n_dct', False)

from librosa.filters import get_window
from librosa import util

# Hann analysis window, zero-padded out to the FFT length.
librosa_fft_window = get_window("hann", frame_size, fftbins=True)
librosa_fft_window = util.pad_center(librosa_fft_window, n_fft)

# Magnitude (or power) spectrogram -> mel projection -> log -> DCT-II.
stft = librosa.core.spectrum.stft(data, n_fft, frame_step, frame_size, center=False, pad_mode="constant")
spect = np.abs(stft) ** (2 if use_power else 1)
mel_basis = librosa.filters.mel(samplerate, n_fft, n_mels, fmin, fmax)
mel_spect = np.dot(mel_basis, spect)
logmel = power_to_db(mel_spect, top_db=None)
mfcc = scipy.fftpack.dct(logmel, axis=0, type=2, norm=None)

# Emit the coefficients (frame-major) as a C array for the test harness.
with open("ground_truth.h", "w") as out:
    out.write("float ground_truth[] = {\n")
    for coeff in mfcc.T.flatten():
        out.write(f"{coeff}, ")
    out.write("};\n")
| [
"numpy.abs",
"librosa.filters.get_window",
"librosa.core.spectrum.stft",
"math.log",
"librosa.util.pad_center",
"numpy.dot",
"scipy.fftpack.dct",
"librosa.filters.mel",
"librosa.core.spectrum.power_to_db",
"json.load",
"soundfile.read"
] | [((195, 213), 'soundfile.read', 'sf.read', (['file_path'], {}), '(file_path)\n', (202, 213), True, 'import soundfile as sf\n'), ((788, 832), 'librosa.filters.get_window', 'get_window', (['"""hann"""', 'frame_size'], {'fftbins': '(True)'}), "('hann', frame_size, fftbins=True)\n", (798, 832), False, 'from librosa.filters import get_window\n'), ((889, 931), 'librosa.util.pad_center', 'util.pad_center', (['librosa_fft_window', 'n_fft'], {}), '(librosa_fft_window, n_fft)\n', (904, 931), False, 'from librosa import util\n'), ((940, 1043), 'librosa.core.spectrum.stft', 'librosa.core.spectrum.stft', (['data', 'n_fft', 'frame_step', 'frame_size'], {'center': '(False)', 'pad_mode': '"""constant"""'}), "(data, n_fft, frame_step, frame_size, center=\n False, pad_mode='constant')\n", (966, 1043), False, 'import librosa\n'), ((1103, 1161), 'librosa.filters.mel', 'librosa.filters.mel', (['samplerate', 'n_fft', 'n_mels', 'fmin', 'fmax'], {}), '(samplerate, n_fft, n_mels, fmin, fmax)\n', (1122, 1161), False, 'import librosa\n'), ((1174, 1198), 'numpy.dot', 'np.dot', (['mel_basis', 'spect'], {}), '(mel_basis, spect)\n', (1180, 1198), True, 'import numpy as np\n'), ((1208, 1243), 'librosa.core.spectrum.power_to_db', 'power_to_db', (['mel_spect'], {'top_db': 'None'}), '(mel_spect, top_db=None)\n', (1219, 1243), False, 'from librosa.core.spectrum import power_to_db\n'), ((1251, 1303), 'scipy.fftpack.dct', 'scipy.fftpack.dct', (['logmel'], {'axis': '(0)', 'type': '(2)', 'norm': 'None'}), '(logmel, axis=0, type=2, norm=None)\n', (1268, 1303), False, 'import scipy\n'), ((299, 311), 'json.load', 'json.load', (['f'], {}), '(f)\n', (308, 311), False, 'import json\n'), ((646, 669), 'math.log', 'math.log', (['(n_fft // 2)', '(4)'], {}), '(n_fft // 2, 4)\n', (654, 669), False, 'import math\n'), ((1047, 1059), 'numpy.abs', 'np.abs', (['stft'], {}), '(stft)\n', (1053, 1059), True, 'import numpy as np\n'), ((620, 643), 'math.log', 'math.log', (['(n_fft // 2)', '(4)'], {}), '(n_fft // 2, 4)\n', 
(628, 643), False, 'import math\n')] |
# Copyright 2019-21 by <NAME>. All rights reserved.
# This file is part of the Biopython distribution and governed by your
# choice of the "Biopython License Agreement" or the "BSD 3-Clause License".
# Please see the LICENSE file that should have been included as part of this
# package.
"""SCADIO: write OpenSCAD program to create protein structure 3D model.
3D printing a protein structure is a non-trivial exercise due to the
overall complexity and the general requirement for supporting overhang regions
while printing. This software is a path to generating a model for printing
(e.g. an STL file), and does not address the issues around converting the
model to a physical product. OpenSCAD <http://www.openscad.org/> can create
a printable model from the script this software produces. MeshMixer
<http://www.meshmixer.com/>, various slicer software, and the 3D printer
technology available to you provide options for addressing the problems around
physically rendering the model.
The model generated here consists of OpenSCAD primitives, e.g. spheres and
cylinders, representing individual atoms and bonds in an explicit model of a
protein structure. The benefit is that individual atoms/bonds may be selected
for specific print customizations relevant to 3D printing (such as rotatable
bond mechanisms or hydrogen bond magnets). Alternatively, use e.g. Chimera to
render a structure as ribbons or similar for printing as a single object.
I suggest generating your initial model using the OpenSCAD script provided
here, then modifying that script according to your needs. Changing the
atomScale and bondRadius values can simplify the model by removing gaps and
the corresponding need for supports, or you may wish to modify the
hedronDispatch() routine to select residues or chain sections for printing
separately and subsequently joining with rotatable bonds. During this
development phase you will likely have your version include only the data
matrices generated here, by using the `includeCode=False` option to
write_SCAD(). An example project using rotatable backbone and magnetic
hydrogen bonds is at <https://www.thingiverse.com/thing:3957471>.
"""
# import re
from Bio.File import as_handle
from Bio.PDB.PDBExceptions import PDBException
from Bio.PDB.internal_coords import IC_Residue, IC_Chain
# from Bio.PDB.Structure import Structure
# from Bio.PDB.Residue import Residue
from Bio.PDB.vectors import homog_scale_mtx
import numpy as np # type: ignore
def _scale_residue(res, scale, scaleMtx):
    """Apply a homogeneous scaling matrix to one residue's internal coords.

    No-op when the residue has no internal_coord. When gly_Cbeta is set,
    the numeric scale factor is additionally recorded on the internal_coord
    (presumably so later C-beta generation uses scaled lengths — the
    consumer of ``.scale`` is outside this module).
    """
    ric = res.internal_coord
    if not ric:
        return
    ric.applyMtx(scaleMtx)
    if ric.gly_Cbeta:
        ric.scale = scale
def write_SCAD(
    entity,
    file,
    scale=None,
    pdbid=None,
    backboneOnly=False,
    includeCode=True,
    maxPeptideBond=None,
    start=None,
    fin=None,
    handle="protein",
):
    """Write hedron assembly to file as OpenSCAD matrices.

    This routine calls both :meth:`.IC_Chain.internal_to_atom_coordinates` and
    :meth:`.IC_Chain.atom_to_internal_coordinates` due to requirements for
    scaling, explicit bonds around rings, and setting the coordinate space of
    the output model.

    Output data format is primarily:

    - matrix for each hedron:
        len1, angle2, len3, atom covalent bond class, flags to indicate
        atom/bond represented in previous hedron (OpenSCAD very slow with
        redundant overlapping elements), flags for bond features
    - transform matrices to assemble each hedron into residue dihedra sets
    - transform matrices for each residue to position in chain

    OpenSCAD software is included in this Python file to process these
    matrices into a model suitable for a 3D printing project.

    :param entity: Biopython PDB :class:`.Structure` entity
        structure data to export
    :param file: Biopython :func:`.as_handle` filename or open file pointer
        file to write data to
    :param float scale:
        units (usually mm) per angstrom for STL output, written in output
    :param str pdbid:
        PDB idcode, written in output. Defaults to '0PDB' if not supplied
        and no 'idcode' set in entity
    :param bool backboneOnly: default False.
        Do not output side chain data past Cbeta if True
    :param bool includeCode: default True.
        Include OpenSCAD software (inline below) so output file can be loaded
        into OpenSCAD; if False, output data matrices only
    :param float maxPeptideBond: Optional default None.
        Override the cut-off in IC_Chain class (default 1.4) for detecting
        chain breaks. If your target has chain breaks, pass a large number
        here to create a very long 'bond' spanning the break.
    :param int start,fin: default None
        Parameters for internal_to_atom_coords() to limit chain segment.
    :param str handle: default 'protein'
        name for top level of generated OpenSCAD matrix structure.
        NOTE: the inline OpenSCAD code (includeCode=True) references the
        name ``protein``; use a non-default handle only with
        includeCode=False or a correspondingly edited script.

    See :meth:`.IC_Residue.set_flexible` to set flags for specific residues to
    have rotatable bonds, and :meth:`.IC_Residue.set_hbond` to include cavities
    for small magnets to work as hydrogen bonds.
    See <https://www.thingiverse.com/thing:3957471> for implementation example.

    The OpenSCAD code explicitly creates spheres and cylinders to
    represent atoms and bonds in a 3D model. Options are available
    to support rotatable bonds and magnetic hydrogen bonds.

    Matrices are written to link, enumerate and describe residues,
    dihedra, hedra, and chains, mirroring contents of the relevant IC_*
    data structures.

    The OpenSCAD matrix of hedra has additional information as follows:

    * the atom and bond state (single, double, resonance) are logged
      so that covalent radii may be used for atom spheres in the 3D models
    * bonds and atoms are tracked so that each is only created once
    * bond options for rotation and magnet holders for hydrogen bonds
      may be specified (see :meth:`.IC_Residue.set_flexible` and
      :meth:`.IC_Residue.set_hbond` )

    Note the application of :data:`Bio.PDB.internal_coords.IC_Chain.MaxPeptideBond`
    : missing residues may be linked (joining chain segments with arbitrarily
    long bonds) by setting this to a large value.

    Note this uses the serial assembly per residue, placing each residue at
    the origin and supplying the coordinate space transform to OpenSCAD

    All ALTLOC (disordered) residues and atoms are written to the output
    model. (see :data:`Bio.PDB.internal_coords.IC_Residue.no_altloc`)
    """
    if maxPeptideBond is not None:
        # Temporarily override the class-level chain-break cutoff;
        # restored from mpbStash before returning.
        mpbStash = IC_Chain.MaxPeptideBond
        IC_Chain.MaxPeptideBond = float(maxPeptideBond)
    # Step one: IC_Residue atom_coords must be loaded in order to scale,
    # so if there are no internal_coords, initialise from Atom coordinates.
    added_IC_Atoms = False
    if "S" == entity.level or "M" == entity.level:
        for chn in entity.get_chains():
            if not chn.internal_coord:
                chn.internal_coord = IC_Chain(chn)
                added_IC_Atoms = True
    elif "C" == entity.level:
        if not entity.internal_coord:
            entity.internal_coord = IC_Chain(entity)
            added_IC_Atoms = True
    else:
        raise PDBException("level not S, M or C: " + str(entity.level))
    if added_IC_Atoms:
        # Loaded from PDB coordinates: generate internal coords / atomArray.
        entity.atom_to_internal_coordinates()
    else:
        # Loaded from internal coords (.pic): generate atom coordinates.
        entity.internal_to_atom_coordinates(None)
    if scale is not None:
        # Scale the cartesian coordinates in place; internal coordinates
        # are regenerated from them below.
        scaleMtx = homog_scale_mtx(scale)
        if "C" == entity.level:
            entity.internal_coord.atomArray = np.dot(
                entity.internal_coord.atomArray[:], scaleMtx
            )
            entity.internal_coord.hAtoms_needs_update[:] = True
            entity.internal_coord.scale = scale
        else:
            for chn in entity.get_chains():
                if hasattr(chn.internal_coord, "atomArray"):
                    chn.internal_coord.atomArray = np.dot(
                        chn.internal_coord.atomArray[:], scaleMtx
                    )
                    chn.internal_coord.hAtoms_needs_update[:] = True
                    chn.internal_coord.scale = scale
    # Generate internal coords for the scaled entity (hedron bond lengths
    # have changed if scaled). Even without scaling, regeneration is needed
    # to create explicit hedra covering all bonds around sidechain rings:
    # _AllBonds is a class attribute directing
    # IC_Residue.atom_to_internal_coordinates to do so.
    allBondsStash = IC_Residue._AllBonds
    IC_Residue._AllBonds = True
    # Trigger rebuild of hedra for AllBonds.
    if "C" == entity.level:
        entity.internal_coord.ordered_aa_ic_list[0].hedra = {}
        delattr(entity.internal_coord, "hAtoms_needs_update")
        delattr(entity.internal_coord, "hedraLen")
    else:
        for chn in entity.get_chains():
            chn.internal_coord.ordered_aa_ic_list[0].hedra = {}
            delattr(chn.internal_coord, "hAtoms_needs_update")
            delattr(chn.internal_coord, "hedraLen")
    entity.atom_to_internal_coordinates()
    IC_Residue._AllBonds = allBondsStash
    # Rebuild atom coordinates now with the chain starting at the origin:
    # in the OpenSCAD code each residue model is transformed to its N-Ca-C
    # start position instead of updating a transform matrix along the chain.
    entity.internal_to_atom_coordinates()
    with as_handle(file, "w") as fp:
        if includeCode:
            fp.write(peptide_scad)
        if not pdbid and hasattr(entity, "header"):
            pdbid = entity.header.get("idcode", None)
        if pdbid is None or "" == pdbid:
            pdbid = "0PDB"
        # Use the requested top-level matrix name; previously this was
        # hard-coded to 'protein' and the handle parameter was ignored.
        fp.write(
            handle + ' = [ "' + pdbid + '", ' + str(scale) + ", // ID, protein_scale\n"
        )
        if "S" == entity.level or "M" == entity.level:
            for chn in entity.get_chains():
                fp.write(" [\n")
                chn.internal_coord._write_SCAD(
                    fp, backboneOnly=backboneOnly, start=start, fin=fin
                )
                fp.write(" ]\n")
        elif "C" == entity.level:
            fp.write(" [\n")
            entity.internal_coord._write_SCAD(
                fp, backboneOnly=backboneOnly, start=start, fin=fin
            )
            fp.write(" ]\n")
        elif "R" == entity.level:
            raise NotImplementedError("writescad single residue not yet implemented.")
        fp.write("\n];\n")
    if maxPeptideBond is not None:
        # Restore the original class-level cutoff.
        IC_Chain.MaxPeptideBond = mpbStash
peptide_scad = """
/*
//
// peptide.scad
// Copyright (c) 2019 <NAME>. All rights reserved.
// This file is part of the Biopython distribution and governed by your
// choice of the "Biopython License Agreement" or the "BSD 3-Clause License".
// Please see the LICENSE file that should have been included as part of this
// package.
//
// This is the support file to build an OpenSCAD (http://www.openscad.org/) model
// of a protein from internal coordinates. The resulting model may be constructed
// on a 3D printer.
//
// data matrices should be appended below to form a program ready to load into
// the OpenSCAD application.
//
// The protein_scale value used throughout is the second element of the
// protein[] array appended below.
// This is the value supplied when generating the data for build units per
// PDB angstrom.
// You may wish to modify it here to adjust the appearance of the model in
// terms of atom sphere or bond cylinder diameter, however the bond lengths
// are fixed with the supplied value when the data matrices are generated.
// Atom sphere and bond cylinder radii may be individually adjusted below as
// well.
//
// $fn (fragment number) is an OpenSCAD parameter controlling the smoothness
// of the model surface. Smaller values will render faster, but yield more
// 'blocky' models.
//
// This is intended to be a working example, you are encouraged to modify the
// OpenSCAD subroutines below to generate a model to your liking. For more
// information, start with http://www.openscad.org/cheatsheet/index.html
//
// Note especially the hedronDispatch() subroutine below: here you may select
// hedra based on residue, sequence position, and class (hedron atoms) for
// special handling. Also see the per hedron render options in the hedra[]
// array.
//
// If you modify this file, you may find it useful to generate the data
// matrices without this OpenSCAD code by calling write_SCAD() with the
// includeCode=False option, then use the OpenSCAD 'include <>' facility at
// the end of your modified OpenSCAD program.
*/
rotate([-90,0,0]) // convenient for default location (no N-Ca-C start coordinates)
chain(protein); // this is the main subroutine call to build the structure
// top-level OpenSCAD $fn for visible surfaces. Rotatable bonds use $fn=8
// inside, regardless of this setting.
$fn = 0; // 0 yields OpenSCAD default of 30. $n=8 should print with minimal support
tubes=false; // style: render atoms and bonds as constant diameter cylinders, preferred for rotatable bonds / h-bonds
support=false; // enable print-in-place internal support for rotatable bonds
// N.B. rotatable bonds must be parallel to build plate for internal support
// structures to be generated correctly by slicer
// output parameters
atomScale=1.0; // 0.8 better for rotatable bonds
defaultAtomRadius = 0.77; // used if tubes = true
bondRadius = (tubes ? defaultAtomRadius * atomScale : 0.4);
jBondRadius = defaultAtomRadius * atomScale; // radius for rotatable bonds
// general printer, slicer, print settings
layerHeight=0.15; // must match slicer setting for print-in-place support
clearance=0.3; // sliding clearance - can be smaller (0.2) if not doing print-in-place
pClearance=0.2; // press-fit clearance (magnets for h-bonds)
shim=0.05; // extra to make OpenSCAD surfaces distinct in difference()
nozzleDiameter=0.4;
// need one magnet for each side of hydrogen bond, suggest 3mm x 5mm e.g. from eBay
// use compass to identify poles if you care, North pointing (red) repels compass North pointing
magR=3/2; // magnet radius
magL=5; // magnet length
// for $fn=8 which works nice on fdm printer
oRot = 22.5; // 45/2, rotate to make fn=8 spheres and cylinders flat on build plate
apmFac = cos(180/8); // apothem factor - multiply by radius for center to octagon side distance
octSide = 2* tan(180/8); // multiply by radius to get length of octagon side
// for values of $fn:
fnRot = ($fn ? 90-(180/$fn) : 90-(180/30));
bondLenFac = 0.6; // fraction of bond length to extend from atom for each arm of hedron in join
hblen = 1.97; // hydrogen bond length
wall = 3*nozzleDiameter;
joinerStep = 1; // radius difference between rotatable bond axle and end knob inside bond cylinder
caTop = false; // only make top of N_C-alpha_C hedron plus C-beta (see hedron() and hedron_dispatch() examples)
/*
//
// Generate a sphere to represent an atom.
// Colour and size determined for the atom covalent radius specified by the
// parameter 'a' by lookup in the atomData table below, then scaled by the
// supplied parameter 'scal'.
//
// scal : protein_scale
// clr : additional radius if used to create clearance for rotatable bonds
//
*/
module atom(a,scal,clr=0)
{
ad = atomData[search([a],atomData)[0]];
color(ad[1]) {
rotate([0,0,fnRot]) sphere(r=((ad[2]*atomScale)*scal)+clr);
}
}
/*
//
// a hedron (below) may be 'reversed' in terms of the order of its two bonds;
// this function fixes the ordering
//
*/
function hFlip(h,rev) =
// yes reversed : not reversed
// 0 1 2 3 4 5 6 7 : 0 1 2 3 4 5 6 7
// len1 len3 atom1 atom3 a1 a2 a1-a2 a2-a3 len1 len3 atom1 atom3 a1 a3 a1-a2 a2-a3
(rev ? [ h[2], h[0], h[5], h[3], h[8], h[6], h[10], h[9] ] : [ h[0], h[2], h[3], h[5], h[6], h[8], h[9], h[10] ]);
// h[1] = angle2 for both cases
/*
//
// generate the male or female interior cylinders of a rotating bond
//
*/
module joinUnit(cOuterLen, cOuterRad, cInnerLen, cInnerRad, male=false) {
if (male) {
rotate([0,0,oRot]) {
cylinder(h=cInnerLen, r=cInnerRad, center=false, $fn=8);
cylinder(h=cOuterLen, r=cOuterRad, center=false, $fn=8);
}
} else {
rotate([0,0,fnRot]) {
cylinder(h=cInnerLen, r=cInnerRad, center=false, $fn=30);
cylinder(h=cOuterLen, r=cOuterRad, center=false, $fn=30);
}
}
}
/*
//
// create a rotatable bond
//
// supportSel : 0 for no support, 1 or 2 for support on top or bottom (needed
// for reversed hedra)
//
*/
module joiner(bondlen, scal, male=0, ver=0, supportSelect=0) { // ver = differentiate joiner part lengths to guide assembly, but not used
lenfac = bondLenFac;
jClr = clearance+0.05; // male/female radial clearance
cOuterRad = (jBondRadius * scal) - (2*wall + (male ? jClr/2 : -jClr/2));
cInnerRad = cOuterRad - joinerStep; // m/f jClr already in cOuterRad; - (male ? 0 : -0*jClr/2);
hArmLen = (bondlen * lenfac);
lenClr = 0.6*jClr; // length clearance applied to male and female both, so effective clearance is 2x this value
cOuterLen = hArmLen * lenfac + (ver ? 0.5 : - 0.5) - (wall+ (male ? lenClr*2 : -lenClr*2 ));
joinerOffset = (hArmLen * (1 - lenfac)) + (male ? lenClr : -lenClr) - (ver ? 1 : 0);
i=supportSelect-1; // 0 or 1 -> selects which side (top/bottom) gets support
oside = cOuterRad*octSide; // octagon side length at the outer radius
wid = oside+2*wall+4*jClr+1;
if (male) {
// male: flip and shift so the joiner sits at the far end of the bond
rotate([0,180,0])
translate([0,0,-(bondlen-joinerOffset)]) {
difference() {
joinUnit(cOuterLen, cOuterRad, bondlen, cInnerRad, male=true);
if (supportSelect) {
// slot for the removable print-in-place support layer
rotate([0,0,i*180]) {
translate([0,(cOuterRad*apmFac)-0.5*layerHeight,cOuterLen/2]) {
cube([oside+2*shim,layerHeight+shim,cOuterLen+2*shim],center=true);
}
}
}
}
if (supportSelect) {
// angled sacrificial ribs bridging the support slot
rotate([0,0,i*180]) {
translate([0,(cOuterRad*apmFac)-0.5*layerHeight,cOuterLen/2]) {
for (j=[0:1]) {
rotate([0,(j?60:-60),0])
cube([wid,layerHeight,2*nozzleDiameter],center=true);
}
}
}
}
}
} else {
translate([0,0,joinerOffset]) {
joinUnit(cOuterLen, cOuterRad, bondlen, cInnerRad);
if (supportSelect) { // extra gap top and bottom because filament sags
supHeight = max(5*layerHeight,2*(cOuterRad-cOuterRad*apmFac)); // double because center=true below
for(j=[0:1]) {
rotate([0,0,j*180]) {
translate([0,(cOuterRad*apmFac),cOuterLen/2]) {
cube([oside+2*shim,supHeight+shim,cOuterLen+2*shim],center=true);
}
}
}
}
}
}
}
/*
//
// create bond with different options (regular, skinny, h-bond atom, rotatable
// male or female
//
// parameters:
// bl : bond length
// br : bond radius
// scal : protein_scale
// key : option symbols defined below
// atm : atomic element symbol, used for color and radius by atom() routine above
// ver : make rotatable bonds slightly different based on value; currently unused
// supporSel : enable print-in-place support for rotatable bonds
//
*/
// option symbols - these names generated in BioPython code so avoid changing without thought
StdBond = 1; // plain fixed bond cylinder
FemaleJoinBond = 2; // female half of a rotatable bond (see joiner())
MaleJoinBond = 3; // male half of a rotatable bond (see joiner())
SkinnyBond = 4; // Calpha - Cbeta bond cylinder needs to be skinny for clearance with rotating bonds
HBond = 5; // make room inside atom/bond to insert magnet to appropriate depth
module bond(bl, br, scal, key, atm, ver, supportSel=0) {
// adjust radius/length first according to the bond kind (key)
br = (key == FemaleJoinBond ? jBondRadius * scal : br) * (key == SkinnyBond ? 0.65 : 1); // bond radius smaller for skinnyBond
bl = (key == FemaleJoinBond ? bl * bondLenFac : bl); // make female joiner shorter
if (key == MaleJoinBond) { // male join is direct solid, others need difference()
joiner(bl, scal, male = true, ver = ver, supportSelect=supportSel);
} else { // regular bond / skinny / h-bond / female join
// bhblen: bond length extended by half a (scaled) hydrogen-bond length
bhblen = bl +(hblen/2 * scal);
rotate([0,0,fnRot]) {
difference() {
union() {
cylinder(h=bl,r=br,center=false);
if (key == HBond) { // make extension collar for h-bond magnet
rotate([0,0,oRot-fnRot]) cylinder(h=bhblen-1,r=(magR + clearance +wall),center=false, $fn=8);
}
}
atom(atm,scal,-clearance); // remove overlap with atom to clear area for female join
if (key == HBond) { // make space to insert magnet inside bond cylinder
translate([0,0,(bhblen-magL)-pClearance])
cylinder(h=magL+pClearance+shim, r=magR+pClearance, center=false, $fn=8);
}
}
}
}
}
/*
//
// Generate a 'hedron', one plane of 3 points, consisting of 3 atoms joined by
// two bonds.
// Defined as bond length - bond angle - bond length
//
// In some cases the sequence of atoms in the h[] array is reversed (rev flag),
// as detailed in the comments.
//
// other parameters:
//
// h = hedron array data according to rev flag:
// yes reversed : not reversed
// 0 1 2 3 4 5 6 7 : 0 1 2 3 4 5 6 7
// len1 len3 atom1 atom3 a1 a2 a1-a2 a2-a3 len1 len3 atom1 atom3 a1 a3 a1-a2 a2-a3
//
// split: chop half of the hedron - to selectively print parts of a rotating
// bond to be glued together. top or bottom half selected by global caTop
// (C-alpha top) variable, undef by default so bottom half.
//
// supporSel: enable support structure inside rotatable bond to print in place.
// Please note the bond needs to be exactly parallel to the buildplate and the
// layerHeight global variable above needs to be set correctly for the
// structure to be correctly created by your slicer software.
//
*/
module hedron(h,rev=0,scal,split=0, supportSel) {
// build atom2 at the origin, atom3 up +z, then atom1 rotated by angle2 about Y;
// see the block comment above for the h[] layout and the split/caTop semantics
newh = hFlip(h, rev); // make a consistent hedron array regardless of rev flag
bondRad = bondRadius * scal;
difference() {
union(){
if (h[7]) {
// central atom at 0,0,0
atom(h[4],scal);
}
if (newh[5] && newh[7] != FemaleJoinBond) { // not female join
// comments for non-reversed case
// atom 3 is len3 up on +z
translate([0,0,newh[1]])
difference() {
atom(newh[3],scal * (newh[7] == SkinnyBond ? 0.7 : 1)); // if skinny bond make atom (C-beta) same diameter as bond
if (newh[7] == HBond) { // make room for hbond magnet through atom - this branch not used for backbone N,O
translate([0,0,scal*hblen/2-magL-pClearance])
cylinder(h=magL+pClearance,r=magR+pClearance,$fn=8);
}
}
}
if (newh[7]) {
// atom 2 - atom 3 bond from origin up +z distance len3
bond(newh[1], bondRad, scal, newh[7], h[4], ver=1, supportSel=supportSel);
}
rotate([0, h[1], 0]) { // rotate following elements by angle2 about Y
if (newh[6]) {
bond(newh[0], bondRad, scal, newh[6], h[4], ver=1, supportSel=supportSel); // h[4] is center atom (atom 2)
}
if (newh[4] && newh[6] != FemaleJoinBond) { // if draw atom 2 and atom1-atom2 not joiner
translate([0,0,newh[0]]) {
difference() {
atom(newh[2],scal * (newh[6] == SkinnyBond ? 0.7 : 1)); // put atom1 sphere len1 away on Z
if (newh[6] == HBond) { // make room for hbond magnet through atom
translate([0,0,scal*hblen/2-magL-pClearance])
cylinder(h=magL+pClearance,r=magR+pClearance,$fn=8);
}
}
}
}
}
}
if (split) {
// top / bottom half cutter
thick = 2*bondRadius * scal;
Zdim = newh[0];
Xdim = newh[1];
// cutter cube edge sized from the default atom radius, with clearance sign chosen by caTop
cside = 7* defaultAtomRadius * atomScale * scal / 12 + (caTop ? pClearance : -pClearance);
difference() {
translate([-Xdim,((rev || caTop) ? 0 : -thick),-Zdim]) {
cube([2*Xdim,thick,2*Zdim]);
}
if (!caTop) {
rotate([0,(rev ? h[1] : 0),0])
rotate([45,0,0])
cube([cside, cside, cside],center=true);
}
}
if (caTop) {
//translate([tx+cside,0,tx+cside])
rotate([0,(rev ? h[1] : 0),0])
rotate([45,0,0])
cube([cside, cside, cside], center=true);
}
}
// female joins are subtracted after the union above so the socket stays clear
if (newh[7] == FemaleJoinBond) { // female join
joiner(newh[1], scal, male=false, ver=1, supportSelect=supportSel);
}
if (newh[6] == FemaleJoinBond) { // female join
rotate([0, h[1], 0]) { // rotate following elements by angle2 about Y
joiner(newh[0], scal, male=false, ver=1, supportSelect=supportSel);
translate([0,0,newh[0]])
atom(newh[2],scal+0.5,clearance); // clearance for atom against join outer cylinder
}
}
if (newh[7] == FemaleJoinBond || newh[6] == FemaleJoinBond) { // female join both hedron arms
translate([0,0,newh[1]]) atom(newh[3],scal+0.5,clearance); // clearance for atom against join outer cylinder
}
}
}
/*
//
// Hook to call custom routines for specific hedra.
//
// Residue is h[h_residue]
// Sequence position is h[h_seqpos]
//
*/
module hedronDispatch(h,rev=0,scal) {
// default action is just to pass to hedron()
// (split=0; supportSel follows the global 'support' flag)
hedron(h, rev, scal, 0, (support ? 1 : 0));
/*
// Some examples for special handling for specific hedra below:
// note use of h_seqpos, h_residue, h_class for selecting hedra
// bool flag caTop (for rotatable bond part) needs to be a global variable
// so hedron() above can see it.
caBase1 = false; // only make bottom of N_C-alpha_C hedron
caBase2 = false; // same as caBase1 but for case of reversed hedron (for testing, should be identical to caBase1 result)
amideOnly = false; // make only the first amide
if (caTop) {
// these examples select a specific sequence position (h[h_seqpos] == n)
if (h[h_seqpos] == 1) {
if (h[h_class] == "NCAC") {
hedron(h, rev, scal, 1);
} else if (h[h_class] == "CBCAC") {
color("yellow") { // ca-cb
hedron(h, rev, scal);
}
}
}
} else if (caBase1) {
if (h[h_seqpos] == 1 && (h[h_class] == "NCAC")) {
hedron(h, rev, scal, true, (support ? 1 : 0));
}
} else if (caBase2) {
if (h[h_seqpos] == 5 && (h[h_class] == "NCAC")) {
hedron(h, rev, scal, true, (support ? 1 : 0));
}
} else if (amideOnly) {
if (h[h_seqpos] == 1) {
if (h[h_class] == "CACN") {
color("darkgray") {
hedron(h, rev, scal);
}
} else if (h[h_class] == "CACO") {
color("red") { // c=o
hedron(h, rev, scal);
}
} else if (h[h_class] == "CNCA") {
color("cyan") { // h=n
hedron(h, rev, scal);
}
}
} else if ((h[h_seqpos] == 2) && (h[h_class] == "HNCA")) {
color("cyan") { // h=n
hedron(h, rev, scal);
}
}
// actions above select out only a single hedron
} else {
// actions below will process hedra all but handle selected ones differently
if (h[h_class] == "NCAC") {
if (h[h_seqpos] == 1) {
if (! CCap && NCap) { // make split rotatable bond for terminal NH3
hedron(h, rev, scal, true, (support ? 1 : 0));
}
} else if (h[h_seqpos] == 5) { // make split rotatable bond for terminal COOH
hedron(h, rev, scal, true, (support ? 2 : 0)); // note supportSel = 2
} else {
hedron(h, rev, scal, 0, (support ? 2 : 0));
}
} else if (h[h_class] == "CBCAC") {
color("yellow") { // ca-cb -- color yellow in OpenSCAD renderer
if (h[h_seqpos] == 1 ) { // don't make here for N-term
} else if (h[h_seqpos] == 5 ) { // don't make here for C-term
} else {
hedron(h, rev, scal); // otherwise do make here
}
}
} else if (h[h_class] == "HNCA") {
color("cyan") { // color h-n in OenSCAD renderer
if (h[h_seqpos] == 1) {
if (NCap) { // only make at N term if variable NCap is true
hedron(h, rev, scal, 0, (support ? 1 : 0));
}
} else {
hedron(h, rev, scal, 0, (support ? 1 : 0));
}
}
} else if (h[h_residue] == "P") {
color("darkgray") // highlight Prolines in OpenSCAD renderer
hedron(h, rev, scal);
} else {
echo("unrecognised hedron", h[h_class]);
color("pink")
hedron(h, rev, scal, 0, (support ? 1 : 0));
}
}
*/
}
/*
//
// Generate a hedron rotated to specific angle d
//
*/
module d2(d,hedra,scal)
{
// place the dihedral's second hedron (h2), reversed, at the dihedral angle
tz = (d[d_reversed] ? hedra[d[d_h2ndx]][2] : hedra[d[d_h2ndx]][0]); // get h2 len1 depending on reversed
rotate(d[d_dangle1]) { // 4. rotate h2 to specified dihedral angle
translate([0,0,tz]) { // 3. translate h2 h2:len1 up +z
rotate([180, 0, 0]) { // 2. rotate h2r about X so h2:a3 in +z and h2:a1 in -z
hedronDispatch(hedra[d[d_h2ndx]],(!d[d_reversed]),scal); // 1. reverse hedron 2 orientation = h2r
}
}
}
}
/*
//
// Generate two hedra at specified dihedral angle d
//
*/
module dihedron(d,hedra,scal)
{
// emit each hedron only if flagged new (avoids duplicating shared hedra)
if (d[d_h1new])
hedronDispatch(hedra[d[d_h1ndx]],d[d_reversed],scal); // reverse h1 if dihedral reversed
if (d[d_h2new])
d2(d,hedra,scal); // h2 is positioned at the dihedral angle by d2()
}
/*
//
// Generate a residue consisting of the set of dihedra in the parameter 'r',
// referring to hedra the table specified in the parameter 'hedra'.
//
*/
module residue(r,hedra, scal)
{
// each dihedron carries its own placement transform within the residue
for (d = r) {
multmatrix(d[d_dihedralTransform]) {
dihedron(d, hedra, scal);
}
}
}
/*
//
// Generate a chain of residues, each positioned by a supplied
// rotation/translation matrix.
//
*/
module chain(protein)
{
// unpack the chain-level tables (see the index constants below)
chnD = protein[p_chainData];
c = chnD[c_residues];
dihedra = chnD[c_dihedra];
hedra = chnD[c_hedra];
// each residue entry supplies its own world transform
for (r = c) {
multmatrix(r[r_resTransform]) {
residue(dihedra[r[r_resNdx]],hedra, protein[p_proteinScale]);
}
}
}
/*
//
// OpenSCAD array indices to reference protein data - tied to BioPython code
//
*/
// protein base level
p_pdbid = 0;
p_proteinScale = 1;
p_chainData = 2;
// chain level data
c_chainID = 0;
c_dihedra = 1;
c_hedra = 2;
c_residues = 3;
// hedra definitions
h_len1 = 0;
h_angle2 = 1;
h_len3 = 2;
h_atom1class = 3;
h_atom2class = 4;
h_atom3class = 5;
h_atom1state = 6;
h_atom2state = 7;
h_atom3state = 8;
h_bond1state = 9;
h_bond2state = 10;
h_residue = 11;
h_seqpos = 12; // residue sequence position for first atom in hedra
h_class = 13;
// dihedra specifications for each residue in sequence, dihedral array
d_dangle1 = 0;
d_h1ndx = 1;
d_h2ndx = 2;
d_reversed = 3;
d_h1new = 4;
d_h2new = 5;
d_dihedralTransform = 6;
// residueSet: world transform for each residue in sequence array
r_resNdx = 0;
r_resID = 1;
r_resTransform = 2;
// use single default atom radius for all atoms if tubes = true, else use
// covalent radii from literature
atomData = ( tubes ?
[ ["Csb","green" , defaultAtomRadius], ["Cres","green" , defaultAtomRadius], ["Cdb","green" , defaultAtomRadius],
["Osb","red" , defaultAtomRadius], ["Ores","red" , defaultAtomRadius], ["Odb","red" , defaultAtomRadius],
["Nsb","blue" , defaultAtomRadius], ["Nres","blue" , defaultAtomRadius], ["Ndb","blue" , defaultAtomRadius],
["Hsb","gray" , defaultAtomRadius],
["Ssb","yellow" , defaultAtomRadius] ]
:
// covalent radii from <NAME> : 'Atomic Structures of all the Twenty
// Essential Amino Acids and a Tripeptide, with Bond Lengths as Sums of Atomic
// Covalent Radii' https://arxiv.org/pdf/0804.2488.pdf
[ ["Csb","green" , 0.77], ["Cres","green" , 0.72], ["Cdb","green" , 0.67],
["Osb","red" , 0.67], ["Ores","red" , 0.635], ["Odb","red" , 0.60],
["Nsb","blue" , 0.70], ["Nres","blue" , 0.66], ["Ndb","blue" , 0.62],
["Hsb","gray" , 0.37],
["Ssb","yellow" , 1.04] ]
);
// optionally include protein array data here [ write_SCAD(includeCode=False) ], e.g.:
// include <1rtm.scad>;
// or paste below
""" # noqa
| [
"Bio.PDB.vectors.homog_scale_mtx",
"numpy.dot",
"Bio.File.as_handle",
"Bio.PDB.internal_coords.IC_Chain"
] | [((7667, 7689), 'Bio.PDB.vectors.homog_scale_mtx', 'homog_scale_mtx', (['scale'], {}), '(scale)\n', (7682, 7689), False, 'from Bio.PDB.vectors import homog_scale_mtx\n'), ((9573, 9593), 'Bio.File.as_handle', 'as_handle', (['file', '"""w"""'], {}), "(file, 'w')\n", (9582, 9593), False, 'from Bio.File import as_handle\n'), ((7769, 7821), 'numpy.dot', 'np.dot', (['entity.internal_coord.atomArray[:]', 'scaleMtx'], {}), '(entity.internal_coord.atomArray[:], scaleMtx)\n', (7775, 7821), True, 'import numpy as np\n'), ((7044, 7057), 'Bio.PDB.internal_coords.IC_Chain', 'IC_Chain', (['chn'], {}), '(chn)\n', (7052, 7057), False, 'from Bio.PDB.internal_coords import IC_Residue, IC_Chain\n'), ((7226, 7242), 'Bio.PDB.internal_coords.IC_Chain', 'IC_Chain', (['entity'], {}), '(entity)\n', (7234, 7242), False, 'from Bio.PDB.internal_coords import IC_Residue, IC_Chain\n'), ((8134, 8183), 'numpy.dot', 'np.dot', (['chn.internal_coord.atomArray[:]', 'scaleMtx'], {}), '(chn.internal_coord.atomArray[:], scaleMtx)\n', (8140, 8183), True, 'import numpy as np\n')] |
import numpy as np
import random
import torch.nn.functional as F
import torch
import argparse
import json
def parse_args():
    """Build and parse the command-line options for embedding evaluation."""
    parser = argparse.ArgumentParser(
        description="Evaluation embedding based on link prediction"
    )
    parser.add_argument("--embedding_path", default=None)
    parser.add_argument("--edgelist_path", default=None)
    parser.add_argument("--idmap_path", default=None)
    parser.add_argument(
        "--file_format",
        default="word2vec",
        help="File format, choose word2vec or numpy",
    )
    return parser.parse_args()
def square_distance(matrix1, matrix2):
    """Return the squared Euclidean distance between two embeddings.

    Args:
        matrix1, matrix2: numpy arrays of identical shape. 1-D inputs yield
            a single scalar; 2-D inputs yield one distance per row.

    Returns:
        float (1-D inputs) or np.ndarray of per-row squared distances.
    """
    diff = matrix1 - matrix2
    if diff.ndim == 1:
        return np.sum(diff ** 2)
    # BUG FIX: np.sum takes `axis=`, not `dim=` (a torch keyword); the
    # original raised TypeError for any 2-D input.
    return np.sum(diff ** 2, axis=1)
def cosine_distance(matrix1, matrix2):
    """Map row-wise cosine similarity to a distance in [0, 1]."""
    t1 = torch.FloatTensor(matrix1)
    t2 = torch.FloatTensor(matrix2)
    similarity = F.cosine_similarity(t1, t2)
    return (1.0 - similarity) / 2
def statistic_on_distance(embedding, edgelist):
    """Average cosine distance between the endpoint embeddings of all edges."""
    sources = edgelist[:, 0]
    targets = edgelist[:, 1]
    distances = cosine_distance(embedding[sources], embedding[targets])
    return torch.mean(distances).item()
def link_prediction(embedding, threshold_distance, edgelist):
    """Predict links from embedding distances and score against ground truth.

    Every node pair (node, i), for the first up-to-50 nodes, whose cosine
    distance falls below ``threshold_distance`` is predicted as an edge, then
    compared with the true ``edgelist``.

    Args:
        embedding: (num_nodes, dim) array-like of node embeddings.
        threshold_distance: distance below which a pair is predicted linked.
        edgelist: (num_edges, 2) array of ground-truth edges.

    Returns:
        (precision, recall) tuple; 0.0 when the respective denominator is empty.
    """
    # BUG FIX: the original deduplicated with a list comprehension whose
    # condition referenced the still-empty result list, so duplicates were
    # never removed. Frozensets in a set give correct, O(1) deduplication
    # (edges are undirected, so {a, b} == {b, a}).
    true_edges = {frozenset(ele) for ele in edgelist}
    embedding = torch.FloatTensor(embedding)
    predicted = set()
    for node in range(min(len(embedding), 50)):
        print("Start finding neighbors for node: ", node)
        node_vec = embedding[node]
        cos_distances = cosine_distance(
            torch.stack([node_vec] * len(embedding)), embedding
        )
        for i, dist in enumerate(cos_distances):
            if dist < threshold_distance:
                predicted.add(frozenset([node, i]))
    print("start evaluate")
    count_true = sum(1 for edge in predicted if edge in true_edges)
    # Guard against empty sets to avoid ZeroDivisionError.
    precision = count_true / len(predicted) if predicted else 0.0
    recall = count_true / len(true_edges) if true_edges else 0.0
    return precision, recall
def load_embedding(embed_file, id_map, file_format):
    """Load node embeddings from a word2vec-format text file or a numpy file.

    The word2vec text format is: a header line "num_nodes dim" followed by
    one "name v1 v2 ... vdim" line per node.

    Args:
        embed_file: path to the embedding file.
        id_map: dict mapping node names (str) to integer row indices.
        file_format: "word2vec" for the text format; anything else is
            treated as a numpy .npy file.

    Returns:
        (num_nodes, dim) numpy array, or None if the word2vec file is
        missing or malformed.
    """
    if file_format != "word2vec":
        return np.load(embed_file)
    try:
        with open(embed_file) as fp:
            descriptions = fp.readline().split()
            if len(descriptions) != 2:
                raise Exception("Wrong format")
            num_nodes = int(descriptions[0])
            dim = int(descriptions[1])
            embeddings = np.zeros((num_nodes, dim))
            for line in fp:
                tokens = line.split()
                if len(tokens) != dim + 1:
                    raise Exception("Wrong format")
                # tokens[0] is the node name; the rest are vector entries.
                embeddings[id_map[tokens[0]]] = np.array(
                    [float(t) for t in tokens[1:]]
                )
            # NOTE: the original also called fp.close() here, which is
            # redundant inside a `with` block and has been removed.
    except Exception as e:
        print(e)
        print("The format might be wrong, consider trying --file_format flag with 'numpy' value")
        embeddings = None
    return embeddings
def load_edgelist(edgelist_file, id_map, file_format):
    """Load an edge list; word2vec format maps node names through id_map."""
    if file_format != "word2vec":
        return np.load(edgelist_file)
    edges = []
    with open(edgelist_file) as fp:
        for line in fp:
            tokens = line.split()
            edges.append([id_map[tokens[0]], id_map[tokens[1]]])
    return np.array(edges)
if __name__ == '__main__':
    args = parse_args()
    print(args)
    # FIX: read the id-map through a context manager; the original used
    # json.loads(open(...).read()) and leaked the open file handle.
    with open(args.idmap_path, "r") as fp:
        id_map = json.loads(fp.read())
    embedding = load_embedding(args.embedding_path, id_map, args.file_format)
    edgelist = load_edgelist(args.edgelist_path, id_map, args.file_format)
    if embedding is not None:
        # Mean observed edge distance doubles as the prediction threshold.
        cosine_similarity = statistic_on_distance(embedding, edgelist)
        precision, recall = link_prediction(embedding, cosine_similarity, edgelist)
        print("Mean distance between node: ", cosine_similarity)
        print("precision: ", precision)
        print('recall: ', recall)
| [
"argparse.ArgumentParser",
"torch.nn.functional.cosine_similarity",
"numpy.sum",
"numpy.array",
"numpy.zeros",
"numpy.load",
"torch.FloatTensor"
] | [((142, 231), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Evaluation embedding based on link prediction"""'}), "(description=\n 'Evaluation embedding based on link prediction')\n", (165, 231), False, 'import argparse\n'), ((675, 714), 'numpy.sum', 'np.sum', (['((matrix1 - matrix2) ** 2)'], {'dim': '(1)'}), '((matrix1 - matrix2) ** 2, dim=1)\n', (681, 714), True, 'import numpy as np\n'), ((767, 793), 'torch.FloatTensor', 'torch.FloatTensor', (['matrix1'], {}), '(matrix1)\n', (784, 793), False, 'import torch\n'), ((808, 834), 'torch.FloatTensor', 'torch.FloatTensor', (['matrix2'], {}), '(matrix2)\n', (825, 834), False, 'import torch\n'), ((1419, 1447), 'torch.FloatTensor', 'torch.FloatTensor', (['embedding'], {}), '(embedding)\n', (1436, 1447), False, 'import torch\n'), ((3616, 3638), 'numpy.load', 'np.load', (['edgelist_file'], {}), '(edgelist_file)\n', (3623, 3638), True, 'import numpy as np\n'), ((633, 665), 'numpy.sum', 'np.sum', (['((matrix1 - matrix2) ** 2)'], {}), '((matrix1 - matrix2) ** 2)\n', (639, 665), True, 'import numpy as np\n'), ((3211, 3230), 'numpy.load', 'np.load', (['embed_file'], {}), '(embed_file)\n', (3218, 3230), True, 'import numpy as np\n'), ((3581, 3604), 'numpy.array', 'np.array', (['all_instances'], {}), '(all_instances)\n', (3589, 3604), True, 'import numpy as np\n'), ((857, 894), 'torch.nn.functional.cosine_similarity', 'F.cosine_similarity', (['matrix1', 'matrix2'], {}), '(matrix1, matrix2)\n', (876, 894), True, 'import torch.nn.functional as F\n'), ((2563, 2589), 'numpy.zeros', 'np.zeros', (['(num_nodes, dim)'], {}), '((num_nodes, dim))\n', (2571, 2589), True, 'import numpy as np\n'), ((2797, 2810), 'numpy.zeros', 'np.zeros', (['dim'], {}), '(dim)\n', (2805, 2810), True, 'import numpy as np\n')] |
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
from collections import OrderedDict
import os
import warnings
import numpy as np
from typing import Any, Callable, Dict, List, Tuple, Union
from pathlib import Path
import matplotlib.pyplot as plt
import torch.cuda as cuda
from sklearn.metrics import accuracy_score
try:
from apex import amp
AMP_AVAILABLE = True
except ModuleNotFoundError:
AMP_AVAILABLE = False
import torch
import torch.nn as nn
import torch.optim as optim
import torchvision
# this
from collections import deque
import io
import decord
import IPython.display
from time import sleep, time
from PIL import Image
from threading import Thread
from torchvision.transforms import Compose
from utils_cv.action_recognition.dataset import get_transforms
from ..common.gpu import torch_device, num_devices
from .dataset import VideoDataset
from .references.metrics import accuracy, AverageMeter
# These paramaters are set so that we can use torch hub to download pretrained
# models from the specified repo
# Torch Hub repository hosting the pretrained R(2+1)D video models.
TORCH_R2PLUS1D = "moabitcoin/ig65m-pytorch" # From https://github.com/moabitcoin/ig65m-pytorch
# Supported hub model names mapped to the class count of their pretrained head.
MODELS = {
    # Model name followed by the number of output classes.
    "r2plus1d_34_32_ig65m": 359,
    "r2plus1d_34_32_kinetics": 400,
    "r2plus1d_34_8_ig65m": 487,
    "r2plus1d_34_8_kinetics": 400,
}
class VideoLearner(object):
""" Video recognition learner object that handles training loop and evaluation. """
def __init__(
self,
dataset: VideoDataset = None,
num_classes: int = None, # ie 51 for hmdb51
base_model: str = "ig65m", # or "kinetics"
sample_length: int = None,
) -> None:
""" By default, the Video Learner will use a R2plus1D model. Pass in
a dataset of type Video Dataset and the Video Learner will intialize
the model.
Args:
dataset: the datset to use for this model
num_class: the number of actions/classifications
base_model: the R2plus1D model is based on either ig65m or
kinetics. By default it will use the weights from ig65m since it
tends attain higher results.
"""
# set empty - populated when fit is called
self.results = []
# set num classes
self.num_classes = num_classes
if dataset:
self.dataset = dataset
self.sample_length = self.dataset.sample_length
else:
assert sample_length == 8 or sample_length == 32
self.sample_length = sample_length
self.model, self.model_name = self.init_model(
self.sample_length, base_model, num_classes,
)
@staticmethod
def init_model(
sample_length: int, base_model: str, num_classes: int = None
) -> torchvision.models.video.resnet.VideoResNet:
"""
Initializes the model by loading it using torch's `hub.load`
functionality. Uses the model from TORCH_R2PLUS1D.
Args:
sample_length: Number of consecutive frames to sample from a video (i.e. clip length).
base_model: the R2plus1D model is based on either ig65m or kinetics.
num_classes: the number of classes/actions
Returns:
Load a model from a github repo, with pretrained weights
"""
if base_model not in ("ig65m", "kinetics"):
raise ValueError(
f"Not supported model {base_model}. Should be 'ig65m' or 'kinetics'"
)
# Decide if to use pre-trained weights for DNN trained using 8 or for 32 frames
model_name = f"r2plus1d_34_{sample_length}_{base_model}"
print(f"Loading {model_name} model")
model = torch.hub.load(
TORCH_R2PLUS1D,
model_name,
num_classes=MODELS[model_name],
pretrained=True,
)
# Replace head
if num_classes is not None:
model.fc = nn.Linear(model.fc.in_features, num_classes)
return model, model_name
def freeze(self) -> None:
"""Freeze model except the last layer"""
self._set_requires_grad(False)
for param in self.model.fc.parameters():
param.requires_grad = True
def unfreeze(self) -> None:
"""Unfreeze all layers in model"""
self._set_requires_grad(True)
def _set_requires_grad(self, requires_grad=True) -> None:
""" sets requires grad """
for param in self.model.parameters():
param.requires_grad = requires_grad
def fit(
self,
lr: float,
epochs: int,
model_dir: str = "checkpoints",
model_name: str = None,
momentum: float = 0.95,
weight_decay: float = 0.0001,
mixed_prec: bool = False,
use_one_cycle_policy: bool = False,
warmup_pct: float = 0.3,
lr_gamma: float = 0.1,
lr_step_size: float = None,
grad_steps: int = 2,
save_model: bool = False,
) -> None:
""" The primary fit function """
# set epochs
self.epochs = epochs
# set lr_step_size based on epochs
if lr_step_size is None:
lr_step_size = np.ceil(2 / 3 * self.epochs)
# set model name
if model_name is None:
model_name = self.model_name
os.makedirs(model_dir, exist_ok=True)
data_loaders = {}
data_loaders["train"] = self.dataset.train_dl
data_loaders["valid"] = self.dataset.test_dl
# Move model to gpu before constructing optimizers and amp.initialize
device = torch_device()
self.model.to(device)
count_devices = num_devices()
torch.backends.cudnn.benchmark = True
named_params_to_update = {}
total_params = 0
for name, param in self.model.named_parameters():
total_params += 1
if param.requires_grad:
named_params_to_update[name] = param
print("Params to learn:")
if len(named_params_to_update) == total_params:
print("\tfull network")
else:
for name in named_params_to_update:
print(f"\t{name}")
# create optimizer
optimizer = optim.SGD(
list(named_params_to_update.values()),
lr=lr,
momentum=momentum,
weight_decay=weight_decay,
)
# Use mixed-precision if available
# Currently, only O1 works with DataParallel: See issues https://github.com/NVIDIA/apex/issues/227
if mixed_prec:
# break if not AMP_AVAILABLE
assert AMP_AVAILABLE
# 'O0': Full FP32, 'O1': Conservative, 'O2': Standard, 'O3': Full FP16
self.model, optimizer = amp.initialize(
self.model,
optimizer,
opt_level="O1",
loss_scale="dynamic",
# keep_batchnorm_fp32=True doesn't work on 'O1'
)
# Learning rate scheduler
if use_one_cycle_policy:
# Use warmup with the one-cycle policy
scheduler = torch.optim.lr_scheduler.OneCycleLR(
optimizer,
max_lr=lr,
total_steps=self.epochs,
pct_start=warmup_pct,
base_momentum=0.9 * momentum,
max_momentum=momentum,
)
else:
# Simple step-decay
scheduler = torch.optim.lr_scheduler.StepLR(
optimizer, step_size=lr_step_size, gamma=lr_gamma,
)
# DataParallel after amp.initialize
model = (
nn.DataParallel(self.model) if count_devices > 1 else self.model
)
criterion = nn.CrossEntropyLoss().to(device)
# set num classes
topk = 5
if topk >= self.num_classes:
topk = self.num_classes
for e in range(1, self.epochs + 1):
print(
f"Epoch {e} ========================================================="
)
print(f"lr={scheduler.get_lr()}")
self.results.append(
self.train_an_epoch(
model,
data_loaders,
device,
criterion,
optimizer,
grad_steps=grad_steps,
mixed_prec=mixed_prec,
topk=topk,
)
)
scheduler.step()
if save_model:
self.save(
os.path.join(
model_dir,
"{model_name}_{self.epoch}.pt".format(
model_name=model_name, epoch=str(e).zfill(3),
),
)
)
self.plot_precision_loss_curves()
    @staticmethod
    def train_an_epoch(
        model,
        data_loaders,
        device,
        criterion,
        optimizer,
        grad_steps: int = 1,
        mixed_prec: bool = False,
        topk: int = 5,
    ) -> Dict[str, Any]:
        """Train / validate a model for one epoch.
        Args:
            model: the model to use to train
            data_loaders: dict {'train': train_dl, 'valid': valid_dl}
            device: gpu or not
            criterion: loss function applied to (outputs, target)
            optimizer: optimizer stepped every `grad_steps` batches
            grad_steps: If > 1, use gradient accumulation. Useful for larger batching
            mixed_prec: If True, use FP16 + FP32 mixed precision via NVIDIA apex.amp
            topk: top k classes
        Return:
            dict {
                'train/time': batch_time.avg,
                'train/loss': losses.avg,
                'train/top1': top1.avg,
                'train/top5': top5.avg,
                'valid/time': ...
            }
        """
        if mixed_prec and not AMP_AVAILABLE:
            warnings.warn(
                """
                NVIDIA apex module is not installed. Cannot use
                mixed-precision. Turning off mixed-precision.
                """
            )
            mixed_prec = False
        result = OrderedDict()
        # Run a full pass over the train loader, then over the validation loader.
        for phase in ["train", "valid"]:
            # switch mode
            if phase == "train":
                model.train()
            else:
                model.eval()
            # set loader
            dl = data_loaders[phase]
            # collect metrics
            batch_time = AverageMeter()
            losses = AverageMeter()
            top1 = AverageMeter()
            top5 = AverageMeter()
            end = time()
            for step, (inputs, target) in enumerate(dl, start=1):
                if step % 10 == 0:
                    print(f" Phase {phase}: batch {step} of {len(dl)}")
                inputs = inputs.to(device, non_blocking=True)
                target = target.to(device, non_blocking=True)
                # gradients are only tracked during the training phase
                with torch.set_grad_enabled(phase == "train"):
                    # compute output
                    outputs = model(inputs)
                    loss = criterion(outputs, target)
                    # measure accuracy and record loss
                    prec1, prec5 = accuracy(outputs, target, topk=(1, topk))
                    losses.update(loss.item(), inputs.size(0))
                    top1.update(prec1[0], inputs.size(0))
                    top5.update(prec5[0], inputs.size(0))
                    if phase == "train":
                        # make the accumulated gradient to be the same scale as without the accumulation
                        loss = loss / grad_steps
                        if mixed_prec:
                            with amp.scale_loss(
                                loss, optimizer
                            ) as scaled_loss:
                                scaled_loss.backward()
                        else:
                            loss.backward()
                        # one optimizer step per grad_steps batches
                        # (gradient accumulation); gradients are cleared
                        # only after the step
                        if step % grad_steps == 0:
                            optimizer.step()
                            optimizer.zero_grad()
                # measure elapsed time
                batch_time.update(time() - end)
                end = time()
            print(f"{phase} took {batch_time.sum:.2f} sec ", end="| ")
            print(f"loss = {losses.avg:.4f} ", end="| ")
            print(f"top1_acc = {top1.avg:.4f} ", end=" ")
            if topk >= 5:
                print(f"| top5_acc = {top5.avg:.4f}", end="")
            print()
            result[f"{phase}/time"] = batch_time.sum
            result[f"{phase}/loss"] = losses.avg
            result[f"{phase}/top1"] = top1.avg
            result[f"{phase}/top5"] = top5.avg
        return result
def plot_precision_loss_curves(
self, figsize: Tuple[int, int] = (10, 5)
) -> None:
""" Plot training loss and accuracy from calling `fit` on the test set. """
assert len(self.results) > 0
fig = plt.figure(figsize=figsize)
valid_losses = [dic["valid/loss"] for dic in self.results]
valid_top1 = [float(dic["valid/top1"]) for dic in self.results]
ax1 = fig.add_subplot(1, 1, 1)
ax1.set_xlim([0, self.epochs - 1])
ax1.set_xticks(range(0, self.epochs))
ax1.set_xlabel("epochs")
ax1.set_ylabel("loss", color="g")
ax1.plot(valid_losses, "g-")
ax2 = ax1.twinx()
ax2.set_ylabel("top1 %acc", color="b")
ax2.plot(valid_top1, "b-")
fig.suptitle("Loss and Average Precision (AP) over Epochs")
def evaluate(
self,
num_samples: int = 10,
report_every: int = 100,
train_or_test: str = "test",
) -> None:
""" eval code for validation/test set and saves the evaluation results in self.results.
Args:
num_samples: number of samples (clips) of the validation set to test
report_every: print line of results every n times
train_or_test: use train or test set
"""
# asset train or test valid
assert train_or_test in ["train", "test"]
# set device and num_gpus
num_gpus = num_devices()
device = torch_device()
torch.backends.cudnn.benchmark = True if cuda.is_available() else False
# init model with gpu (or not)
self.model.to(device)
if num_gpus > 1:
self.model = nn.DataParallel(model)
self.model.eval()
# set train or test
ds = (
self.dataset.test_ds
if train_or_test == "test"
else self.dataset.train_ds
)
# set num_samples
ds.dataset.num_samples = num_samples
print(
f"{len(self.dataset.test_ds)} samples of {self.dataset.test_ds[0][0][0].shape}"
)
# Loop over all examples in the test set and compute accuracies
ret = dict(
infer_times=[],
video_preds=[],
video_trues=[],
clip_preds=[],
clip_trues=[],
)
report_every = 100
# inference
with torch.no_grad():
for i in range(
1, len(ds)
): # [::10]: # Skip some examples to speed up accuracy computation
if i % report_every == 0:
print(
f"Processsing {i} of {len(self.dataset.test_ds)} samples.."
)
# Get model inputs
inputs, label = ds[i]
inputs = inputs.to(device, non_blocking=True)
# Run inference
start_time = time()
outputs = self.model(inputs)
outputs = outputs.cpu().numpy()
infer_time = time() - start_time
ret["infer_times"].append(infer_time)
# Store results
ret["video_preds"].append(outputs.sum(axis=0).argmax())
ret["video_trues"].append(label)
ret["clip_preds"].extend(outputs.argmax(axis=1))
ret["clip_trues"].extend([label] * num_samples)
print(
f"Avg. inference time per video ({len(ds)} clips) =",
round(np.array(ret["infer_times"]).mean() * 1000, 2),
"ms",
)
print(
"Video prediction accuracy =",
round(accuracy_score(ret["video_trues"], ret["video_preds"]), 2),
)
print(
"Clip prediction accuracy =",
round(accuracy_score(ret["clip_trues"], ret["clip_preds"]), 2),
)
return ret
def _predict(self, frames, transform):
"""Runs prediction on frames applying transforms before predictions."""
clip = torch.from_numpy(np.array(frames))
# Transform frames and append batch dim
sample = torch.unsqueeze(transform(clip), 0)
sample = sample.to(torch_device())
output = self.model(sample)
scores = nn.functional.softmax(output, dim=1).data.cpu().numpy()[0]
return scores
def _filter_labels(
self,
id_score_dict: dict,
labels: List[str],
threshold: float = 0.0,
target_labels: List[str] = None,
filter_labels: List[str] = None,
) -> Dict[str, int]:
""" Given the predictions, filter out the noise based on threshold,
target labels and filter labels.
Arg:
id_score_dict: dictionary of predictions
labels: all labels
threshold: the min threshold to keep prediction
target_labels: exclude any labels not in target labels
filter_labels: exclude any labels in filter labels
Returns
A dictionary of labels and scores
"""
# Show only interested actions (target_labels) with a confidence score >= threshold
result = {}
for i, s in id_score_dict.items():
label = labels[i]
if (
(s < threshold)
or (target_labels is not None and label not in target_labels)
or (filter_labels is not None and label in filter_labels)
):
continue
if label in result:
result[label] += s
else:
result[label] = s
return result
def predict_frames(
self,
window: deque,
scores_cache: deque,
scores_sum: np.ndarray,
is_ready: list,
averaging_size: int,
score_threshold: float,
labels: List[str],
target_labels: List[str],
transforms: Compose,
update_println: Callable,
) -> None:
""" Predicts frames """
# set model device and to eval mode
self.model.to(torch_device())
self.model.eval()
# score
t = time()
scores = self._predict(window, transforms)
dur = time() - t
# Averaging scores across clips (dense prediction)
scores_cache.append(scores)
scores_sum += scores
if len(scores_cache) == averaging_size:
scores_avg = scores_sum / averaging_size
if len(labels) >= 5:
num_labels = 5
else:
num_labels = len(labels) - 1
top5_id_score_dict = {
i: scores_avg[i]
for i in (-scores_avg).argpartition(num_labels - 1)[
:num_labels
]
}
top5_label_score_dict = self._filter_labels(
top5_id_score_dict,
labels,
threshold=score_threshold,
target_labels=target_labels,
)
top5 = sorted(top5_label_score_dict.items(), key=lambda kv: -kv[1])
# fps and preds
println = (
f"{1 // dur} fps"
+ "<p style='font-size:20px'>"
+ "<br>".join([f"{k} ({v:.3f})" for k, v in top5])
+ "</p>"
)
# Plot final results nicely
update_println(println)
scores_sum -= scores_cache.popleft()
# Inference done. Ready to run on the next frames.
window.popleft()
if is_ready:
is_ready[0] = True
def predict_video(
self,
video_fpath: str,
labels: List[str] = None,
averaging_size: int = 5,
score_threshold: float = 0.025,
target_labels: List[str] = None,
transforms: Compose = None,
) -> None:
"""Load video and show frames and inference results while displaying the results
"""
# set up video reader
video_reader = decord.VideoReader(video_fpath)
print(f"Total frames = {len(video_reader)}")
# set up ipython jupyter display
d_video = IPython.display.display("", display_id=1)
d_caption = IPython.display.display("Preparing...", display_id=2)
# set vars
is_ready = [True]
window = deque()
scores_cache = deque()
# use labels if given, else see if we have labels from our dataset
if not labels:
if self.dataset.classes:
labels = self.dataset.classes
else:
raise ("No labels found, add labels argument.")
scores_sum = np.zeros(len(labels))
# set up transforms
if not transforms:
transforms = get_transforms(train=False)
# set up print function
def update_println(println):
d_caption.update(IPython.display.HTML(println))
while True:
try:
frame = video_reader.next().asnumpy()
if len(frame.shape) != 3:
break
# Start an inference thread when ready
if is_ready[0]:
window.append(frame)
if len(window) == self.sample_length:
is_ready[0] = False
Thread(
target=self.predict_frames,
args=(
window,
scores_cache,
scores_sum,
is_ready,
averaging_size,
score_threshold,
labels,
target_labels,
transforms,
update_println,
),
).start()
# Show video preview
f = io.BytesIO()
im = Image.fromarray(frame)
im.save(f, "jpeg")
# resize frames to avoid flicker for windows
w, h = frame.shape[0], frame.shape[1]
scale = 300.0 / max(w, h)
w = round(w * scale)
h = round(h * scale)
im = im.resize((h, w))
d_video.update(IPython.display.Image(data=f.getvalue()))
sleep(0.03)
except Exception:
break
def save(self, model_path: Union[Path, str]) -> None:
""" Save the model to a path on disk. """
torch.save(self.model.state_dict(), model_path)
def load(self, model_name: str, model_dir: str = "checkpoints") -> None:
"""
TODO accept epoch. If None, load the latest model.
:param model_name: Model name format should be 'name_0EE' where E is the epoch
:param model_dir: By default, 'checkpoints'
:return:
"""
self.model.load_state_dict(
torch.load(os.path.join(model_dir, f"{model_name}.pt"))
)
| [
"apex.amp.scale_loss",
"torch.nn.CrossEntropyLoss",
"io.BytesIO",
"time.sleep",
"numpy.array",
"apex.amp.initialize",
"torch.cuda.is_available",
"torch.nn.functional.softmax",
"utils_cv.action_recognition.dataset.get_transforms",
"collections.deque",
"torch.set_grad_enabled",
"decord.VideoRead... | [((3786, 3881), 'torch.hub.load', 'torch.hub.load', (['TORCH_R2PLUS1D', 'model_name'], {'num_classes': 'MODELS[model_name]', 'pretrained': '(True)'}), '(TORCH_R2PLUS1D, model_name, num_classes=MODELS[model_name],\n pretrained=True)\n', (3800, 3881), False, 'import torch\n'), ((5409, 5446), 'os.makedirs', 'os.makedirs', (['model_dir'], {'exist_ok': '(True)'}), '(model_dir, exist_ok=True)\n', (5420, 5446), False, 'import os\n'), ((10244, 10257), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (10255, 10257), False, 'from collections import OrderedDict\n'), ((13044, 13071), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': 'figsize'}), '(figsize=figsize)\n', (13054, 13071), True, 'import matplotlib.pyplot as plt\n'), ((18930, 18936), 'time.time', 'time', ([], {}), '()\n', (18934, 18936), False, 'from time import sleep, time\n'), ((20789, 20820), 'decord.VideoReader', 'decord.VideoReader', (['video_fpath'], {}), '(video_fpath)\n', (20807, 20820), False, 'import decord\n'), ((21113, 21120), 'collections.deque', 'deque', ([], {}), '()\n', (21118, 21120), False, 'from collections import deque\n'), ((21144, 21151), 'collections.deque', 'deque', ([], {}), '()\n', (21149, 21151), False, 'from collections import deque\n'), ((4020, 4064), 'torch.nn.Linear', 'nn.Linear', (['model.fc.in_features', 'num_classes'], {}), '(model.fc.in_features, num_classes)\n', (4029, 4064), True, 'import torch.nn as nn\n'), ((5273, 5301), 'numpy.ceil', 'np.ceil', (['(2 / 3 * self.epochs)'], {}), '(2 / 3 * self.epochs)\n', (5280, 5301), True, 'import numpy as np\n'), ((6845, 6920), 'apex.amp.initialize', 'amp.initialize', (['self.model', 'optimizer'], {'opt_level': '"""O1"""', 'loss_scale': '"""dynamic"""'}), "(self.model, optimizer, opt_level='O1', loss_scale='dynamic')\n", (6859, 6920), False, 'from apex import amp\n'), ((7207, 7373), 'torch.optim.lr_scheduler.OneCycleLR', 'torch.optim.lr_scheduler.OneCycleLR', (['optimizer'], {'max_lr': 'lr', 
'total_steps': 'self.epochs', 'pct_start': 'warmup_pct', 'base_momentum': '(0.9 * momentum)', 'max_momentum': 'momentum'}), '(optimizer, max_lr=lr, total_steps=self.\n epochs, pct_start=warmup_pct, base_momentum=0.9 * momentum,\n max_momentum=momentum)\n', (7242, 7373), False, 'import torch\n'), ((7546, 7633), 'torch.optim.lr_scheduler.StepLR', 'torch.optim.lr_scheduler.StepLR', (['optimizer'], {'step_size': 'lr_step_size', 'gamma': 'lr_gamma'}), '(optimizer, step_size=lr_step_size, gamma=\n lr_gamma)\n', (7577, 7633), False, 'import torch\n'), ((7735, 7762), 'torch.nn.DataParallel', 'nn.DataParallel', (['self.model'], {}), '(self.model)\n', (7750, 7762), True, 'import torch.nn as nn\n'), ((10000, 10174), 'warnings.warn', 'warnings.warn', (['"""\n NVIDIA apex module is not installed. Cannot use\n mixed-precision. Turning off mixed-precision.\n """'], {}), '(\n """\n NVIDIA apex module is not installed. Cannot use\n mixed-precision. Turning off mixed-precision.\n """\n )\n', (10013, 10174), False, 'import warnings\n'), ((10692, 10698), 'time.time', 'time', ([], {}), '()\n', (10696, 10698), False, 'from time import sleep, time\n'), ((14327, 14346), 'torch.cuda.is_available', 'cuda.is_available', ([], {}), '()\n', (14344, 14346), True, 'import torch.cuda as cuda\n'), ((14478, 14500), 'torch.nn.DataParallel', 'nn.DataParallel', (['model'], {}), '(model)\n', (14493, 14500), True, 'import torch.nn as nn\n'), ((15183, 15198), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (15196, 15198), False, 'import torch\n'), ((16835, 16851), 'numpy.array', 'np.array', (['frames'], {}), '(frames)\n', (16843, 16851), True, 'import numpy as np\n'), ((19002, 19008), 'time.time', 'time', ([], {}), '()\n', (19006, 19008), False, 'from time import sleep, time\n'), ((21540, 21567), 'utils_cv.action_recognition.dataset.get_transforms', 'get_transforms', ([], {'train': '(False)'}), '(train=False)\n', (21554, 21567), False, 'from utils_cv.action_recognition.dataset import 
get_transforms\n'), ((7831, 7852), 'torch.nn.CrossEntropyLoss', 'nn.CrossEntropyLoss', ([], {}), '()\n', (7850, 7852), True, 'import torch.nn as nn\n'), ((15709, 15715), 'time.time', 'time', ([], {}), '()\n', (15713, 15715), False, 'from time import sleep, time\n'), ((16447, 16501), 'sklearn.metrics.accuracy_score', 'accuracy_score', (["ret['video_trues']", "ret['video_preds']"], {}), "(ret['video_trues'], ret['video_preds'])\n", (16461, 16501), False, 'from sklearn.metrics import accuracy_score\n'), ((16592, 16644), 'sklearn.metrics.accuracy_score', 'accuracy_score', (["ret['clip_trues']", "ret['clip_preds']"], {}), "(ret['clip_trues'], ret['clip_preds'])\n", (16606, 16644), False, 'from sklearn.metrics import accuracy_score\n'), ((22783, 22795), 'io.BytesIO', 'io.BytesIO', ([], {}), '()\n', (22793, 22795), False, 'import io\n'), ((22817, 22839), 'PIL.Image.fromarray', 'Image.fromarray', (['frame'], {}), '(frame)\n', (22832, 22839), False, 'from PIL import Image\n'), ((23236, 23247), 'time.sleep', 'sleep', (['(0.03)'], {}), '(0.03)\n', (23241, 23247), False, 'from time import sleep, time\n'), ((23841, 23884), 'os.path.join', 'os.path.join', (['model_dir', 'f"""{model_name}.pt"""'], {}), "(model_dir, f'{model_name}.pt')\n", (23853, 23884), False, 'import os\n'), ((11018, 11058), 'torch.set_grad_enabled', 'torch.set_grad_enabled', (["(phase == 'train')"], {}), "(phase == 'train')\n", (11040, 11058), False, 'import torch\n'), ((12285, 12291), 'time.time', 'time', ([], {}), '()\n', (12289, 12291), False, 'from time import sleep, time\n'), ((15838, 15844), 'time.time', 'time', ([], {}), '()\n', (15842, 15844), False, 'from time import sleep, time\n'), ((12245, 12251), 'time.time', 'time', ([], {}), '()\n', (12249, 12251), False, 'from time import sleep, time\n'), ((16295, 16323), 'numpy.array', 'np.array', (["ret['infer_times']"], {}), "(ret['infer_times'])\n", (16303, 16323), True, 'import numpy as np\n'), ((11777, 11808), 'apex.amp.scale_loss', 'amp.scale_loss', 
(['loss', 'optimizer'], {}), '(loss, optimizer)\n', (11791, 11808), False, 'from apex import amp\n'), ((17050, 17086), 'torch.nn.functional.softmax', 'nn.functional.softmax', (['output'], {'dim': '(1)'}), '(output, dim=1)\n', (17071, 17086), True, 'import torch.nn as nn\n'), ((22113, 22290), 'threading.Thread', 'Thread', ([], {'target': 'self.predict_frames', 'args': '(window, scores_cache, scores_sum, is_ready, averaging_size,\n score_threshold, labels, target_labels, transforms, update_println)'}), '(target=self.predict_frames, args=(window, scores_cache, scores_sum,\n is_ready, averaging_size, score_threshold, labels, target_labels,\n transforms, update_println))\n', (22119, 22290), False, 'from threading import Thread\n')] |
import numpy as np
from .geometry import Point, Rectangle, Circle
from typing import Union
import copy
class Entity:
def __init__(self, center: Point, heading: float, movable: bool = True, friction: float = 0):
self.center = center # this is x, y
self.heading = heading
self.movable = movable
self.color = 'ghost white'
self.collidable = True
if movable:
self.friction = friction
self.velocity = Point(0,0) # this is xp, yp
self.acceleration = 0 # this is vp (or speedp)
self.angular_velocity = 0 # this is headingp
self.inputSteering = 0
self.inputAcceleration = 0
self.max_speed = np.inf
self.min_speed = 0
@property
def speed(self) -> float:
return self.velocity.norm(p = 2) if self.movable else 0
def set_control(self, inputSteering: float, inputAcceleration: float):
self.inputSteering = inputSteering
self.inputAcceleration = inputAcceleration
def tick(self, dt: float):
if self.movable:
speed = self.speed
heading = self.heading
new_angular_velocity = speed * self.inputSteering
new_acceleration = self.inputAcceleration - self.friction * speed
new_heading = heading + (self.angular_velocity + new_angular_velocity) * dt / 2.
new_speed = np.clip(speed + (self.acceleration + new_acceleration) * dt / 2., self.min_speed, self.max_speed)
new_velocity = Point(((speed + new_speed) / 2.) * np.cos((new_heading + heading) / 2.),
((speed + new_speed) / 2.) * np.sin((new_heading + heading) / 2.))
new_center = self.center + (self.velocity + new_velocity) * dt / 2.
self.center = new_center
self.heading = new_heading
self.velocity = new_velocity
self.acceleration = new_acceleration
self.angular_velocity = new_angular_velocity
self.buildGeometry()
def collidesWith(self, other) -> bool:
raise NotImplementedError
def buildGeometry(self): # builds the obj
raise NotImplementedError
def collidesWith(self, other: Union['Point','Entity']) -> bool:
if isinstance(other, Entity):
return self.obj.intersectsWith(other.obj)
elif isinstance(other, Point):
return self.obj.intersectsWith(other)
else:
raise NotImplementedError
def distanceTo(self, other: Union['Point','Entity']) -> float:
if isinstance(other, Entity):
return self.obj.distanceTo(other.obj)
elif isinstance(other, Point):
return self.obj.distanceTo(other)
else:
raise NotImplementedError
def copy(self):
return copy.deepcopy(self)
@property
def x(self):
return self.center.x
@property
def y(self):
return self.center.y
@property
def xp(self):
return self.velocity.x
@property
def yp(self):
return self.velocity.y
class RectangleEntity(Entity):
def __init__(self, center: Point, heading: float, size: Point, movable: bool = True, friction: float = 0):
super(RectangleEntity, self).__init__(center, heading, movable, friction)
self.size = size
self.buildGeometry()
@property
def edge_centers(self):
edge_centers = np.zeros((4,2), dtype=np.float32)
x = self.center.x
y = self.center.y
w = self.size.x
h = self.size.y
edge_centers[0] = [x + w / 2. * np.cos(self.heading), y + w / 2. * np.sin(self.heading)]
edge_centers[1] = [x - h / 2. * np.sin(self.heading), y + h / 2. * np.cos(self.heading)]
edge_centers[2] = [x - w / 2. * np.cos(self.heading), y - w / 2. * np.sin(self.heading)]
edge_centers[3] = [x + h / 2. * np.sin(self.heading), y - h / 2. * np.cos(self.heading)]
return edge_centers
@property
def corners(self):
ec = self.edge_centers
c = np.array([self.center.x, self.center.y])
corners = []
corners.append(Point(*(ec[1] + ec[0] - c)))
corners.append(Point(*(ec[2] + ec[1] - c)))
corners.append(Point(*(ec[3] + ec[2] - c)))
corners.append(Point(*(ec[0] + ec[3] - c)))
return corners
def buildGeometry(self):
C = self.corners
self.obj = Rectangle(*C[:-1])
class CircleEntity(Entity):
def __init__(self, center: Point, heading: float, radius: float, movable: bool = True, friction: float = 0):
super(CircleEntity, self).__init__(center, heading, movable, friction)
self.radius = radius
self.buildGeometry()
def buildGeometry(self):
self.obj = Circle(self.center, self.radius)
| [
"numpy.clip",
"numpy.array",
"numpy.zeros",
"numpy.cos",
"copy.deepcopy",
"numpy.sin"
] | [((2457, 2476), 'copy.deepcopy', 'copy.deepcopy', (['self'], {}), '(self)\n', (2470, 2476), False, 'import copy\n'), ((2997, 3031), 'numpy.zeros', 'np.zeros', (['(4, 2)'], {'dtype': 'np.float32'}), '((4, 2), dtype=np.float32)\n', (3005, 3031), True, 'import numpy as np\n'), ((3558, 3598), 'numpy.array', 'np.array', (['[self.center.x, self.center.y]'], {}), '([self.center.x, self.center.y])\n', (3566, 3598), True, 'import numpy as np\n'), ((1227, 1330), 'numpy.clip', 'np.clip', (['(speed + (self.acceleration + new_acceleration) * dt / 2.0)', 'self.min_speed', 'self.max_speed'], {}), '(speed + (self.acceleration + new_acceleration) * dt / 2.0, self.\n min_speed, self.max_speed)\n', (1234, 1330), True, 'import numpy as np\n'), ((1382, 1419), 'numpy.cos', 'np.cos', (['((new_heading + heading) / 2.0)'], {}), '((new_heading + heading) / 2.0)\n', (1388, 1419), True, 'import numpy as np\n'), ((1458, 1495), 'numpy.sin', 'np.sin', (['((new_heading + heading) / 2.0)'], {}), '((new_heading + heading) / 2.0)\n', (1464, 1495), True, 'import numpy as np\n'), ((3141, 3161), 'numpy.cos', 'np.cos', (['self.heading'], {}), '(self.heading)\n', (3147, 3161), True, 'import numpy as np\n'), ((3176, 3196), 'numpy.sin', 'np.sin', (['self.heading'], {}), '(self.heading)\n', (3182, 3196), True, 'import numpy as np\n'), ((3232, 3252), 'numpy.sin', 'np.sin', (['self.heading'], {}), '(self.heading)\n', (3238, 3252), True, 'import numpy as np\n'), ((3267, 3287), 'numpy.cos', 'np.cos', (['self.heading'], {}), '(self.heading)\n', (3273, 3287), True, 'import numpy as np\n'), ((3323, 3343), 'numpy.cos', 'np.cos', (['self.heading'], {}), '(self.heading)\n', (3329, 3343), True, 'import numpy as np\n'), ((3358, 3378), 'numpy.sin', 'np.sin', (['self.heading'], {}), '(self.heading)\n', (3364, 3378), True, 'import numpy as np\n'), ((3414, 3434), 'numpy.sin', 'np.sin', (['self.heading'], {}), '(self.heading)\n', (3420, 3434), True, 'import numpy as np\n'), ((3449, 3469), 'numpy.cos', 'np.cos', 
(['self.heading'], {}), '(self.heading)\n', (3455, 3469), True, 'import numpy as np\n')] |
import unittest
import tlpy.defect
import tlpy.host
import numpy as np
from unittest.mock import Mock
class DefectTestCase( unittest.TestCase ):
"""Test for `defect.py`"""
def setUp( self ):
elemental_energies = { 'Ge' : -4.48604,
'P' : -5.18405,
'O' : -4.54934575 }
self.host = tlpy.host.Host( energy = -2884.79313425,
vbm = 0.4657,
cbm = 4.0154,
elemental_energies = elemental_energies,
correction_scaling = 0.099720981 )
self.name = 'V_O1'
self.stoichiometry = { 'O' : -1 }
self.site = 'O'
self.defect = tlpy.defect.Defect( self.name, self.stoichiometry, self.host, self.site )
qs0 = self.defect.add_charge_state( 0, -2876.05861202 )
qs1 = self.defect.add_charge_state( +1, -2877.36415986 )
qs2 = self.defect.add_charge_state( +2, -2880.33856625 )
self.charge_states = [ qs0, qs1, qs2 ]
def test_is_defect_initialised( self ):
"""Checking Defect object is correctly initialised"""
self.assertEqual( self.defect.name, self.name )
self.assertEqual( self.defect.stoichiometry, self.stoichiometry )
self.assertEqual( self.defect.host, self.host )
self.assertEqual( self.defect.site, self.site )
def test_add_charge_state( self ):
"""Checking adding a Defect_Charge_State object to a Defect object"""
q = 0
energy = -2876.05861202
self.defect.add_charge_state( q, energy )
self.assertEqual( self.defect.charge_state[ q ].charge, q )
self.assertEqual( self.defect.charge_state[ q ].energy, energy )
def test_charge_state_at_fermi_energy( self ):
"""Lowest formation energy charge state at a specific Fermi energy"""
self.assertEqual( self.defect.charge_state_at_fermi_energy( 0.0 ), self.charge_states[ 2 ] )
self.assertEqual( self.defect.charge_state_at_fermi_energy( 3.0 ), self.charge_states[ 0 ] )
def test_charge_state_list( self ):
"""List of charge states returned"""
self.assertEqual( self.defect.charge_state_list(), [ cs.charge for cs in self.charge_states ] )
def test_transition_level( self ):
"""Calculate the transition level for two charge states"""
tl = self.defect.transition_level( 0, 2, delta_mu = { 'O' : 0 } )
self.assertAlmostEqual( tl[0], 1.474835153 )
self.assertAlmostEqual( tl[1], 4.185176480 )
def test_transition_level_missing_mu( self ):
"""Raise KeyError if the correct chemical potential is missing in a transition level calculation"""
self.assertRaises( KeyError, self.defect.transition_level, 0, 2, { 'Ti' : 0 } )
def test_transition_level_profile( self ):
"""Calculate the set of points that give the transition level plot for this defect"""
tl_profile = self.defect.tl_profile( { 'O' : 0 }, 0.0, 3.0 )
expected_profile = [ [ 0.0, 1.23550617],
[ 1.47483515, 4.18517648],
[ 3.0, 4.18517648] ]
self.assertTrue( np.allclose( tl_profile, expected_profile ) )
def test_xmgrace_output_generated_correctly( self ):
self.defect.name = 'name'
self.defect.tl_profile = Mock( return_value = np.array( [[1,2],[3,4],[5,6]] ) )
xmgrace_output = self.defect.xmgrace_output( delta_mu = { 'O' : 0 } )
self.assertEqual( xmgrace_output, "# name\n1 2\n3 4\n5 6\n" )
def test_xmgrace_calls_tl_profile_correctly( self ):
self.defect.tl_profile = Mock( return_value = np.array( [[1,2],[3,4],[5,6]] ) )
self.defect.host.fundamental_gap = 1.0
self.defect.xmgrace_output( delta_mu = { 'O' : 0 } )
self.defect.tl_profile.assert_called_with( { 'O' : 0 }, 0.0, 1.0 )
def test_xmgrace_calls_tl_profile_correctly_with_optional_arguments( self ):
self.defect.tl_profile = Mock( return_value = np.array( [[1,2],[3,4],[5,6]] ) )
self.defect.xmgrace_output( delta_mu = { 'O' : 0 }, ef_min = 0.5, ef_max = 1.5 )
self.defect.tl_profile.assert_called_with( { 'O' : 0 }, 0.5, 1.5 )
# Allow running this test module directly from the command line.
if __name__ == '__main__':
    unittest.main()
| [
"unittest.main",
"numpy.array",
"numpy.allclose"
] | [((4321, 4336), 'unittest.main', 'unittest.main', ([], {}), '()\n', (4334, 4336), False, 'import unittest\n'), ((3252, 3293), 'numpy.allclose', 'np.allclose', (['tl_profile', 'expected_profile'], {}), '(tl_profile, expected_profile)\n', (3263, 3293), True, 'import numpy as np\n'), ((3444, 3478), 'numpy.array', 'np.array', (['[[1, 2], [3, 4], [5, 6]]'], {}), '([[1, 2], [3, 4], [5, 6]])\n', (3452, 3478), True, 'import numpy as np\n'), ((3738, 3772), 'numpy.array', 'np.array', (['[[1, 2], [3, 4], [5, 6]]'], {}), '([[1, 2], [3, 4], [5, 6]])\n', (3746, 3772), True, 'import numpy as np\n'), ((4091, 4125), 'numpy.array', 'np.array', (['[[1, 2], [3, 4], [5, 6]]'], {}), '([[1, 2], [3, 4], [5, 6]])\n', (4099, 4125), True, 'import numpy as np\n')] |
import numpy as np
def spiral_data(points, classes):
# https://cs231n.github.io/neural-networks-case-study/
X = np.zeros((points*classes, 2))
y = np.zeros(points*classes, dtype='uint8')
for class_number in range(classes):
ix = range(points*class_number, points*(class_number+1))
r = np.linspace(0.0, 1, points) # radius
t = np.linspace(class_number*4, (class_number+1)*4,
points) + np.random.randn(points)*0.2
X[ix] = np.c_[r*np.sin(t*2.5), r*np.cos(t*2.5)]
y[ix] = class_number
return X, y
'''
import matplotlib.pyplot as plt
x, y = spiral_data(1000, 3)
plt.scatter(x[:, 0], x[:, 1], c=y)
plt.show()
'''
| [
"numpy.zeros",
"numpy.linspace",
"numpy.cos",
"numpy.sin",
"numpy.random.randn"
] | [((127, 158), 'numpy.zeros', 'np.zeros', (['(points * classes, 2)'], {}), '((points * classes, 2))\n', (135, 158), True, 'import numpy as np\n'), ((166, 207), 'numpy.zeros', 'np.zeros', (['(points * classes)'], {'dtype': '"""uint8"""'}), "(points * classes, dtype='uint8')\n", (174, 207), True, 'import numpy as np\n'), ((326, 353), 'numpy.linspace', 'np.linspace', (['(0.0)', '(1)', 'points'], {}), '(0.0, 1, points)\n', (337, 353), True, 'import numpy as np\n'), ((377, 438), 'numpy.linspace', 'np.linspace', (['(class_number * 4)', '((class_number + 1) * 4)', 'points'], {}), '(class_number * 4, (class_number + 1) * 4, points)\n', (388, 438), True, 'import numpy as np\n'), ((460, 483), 'numpy.random.randn', 'np.random.randn', (['points'], {}), '(points)\n', (475, 483), True, 'import numpy as np\n'), ((513, 528), 'numpy.sin', 'np.sin', (['(t * 2.5)'], {}), '(t * 2.5)\n', (519, 528), True, 'import numpy as np\n'), ((530, 545), 'numpy.cos', 'np.cos', (['(t * 2.5)'], {}), '(t * 2.5)\n', (536, 545), True, 'import numpy as np\n')] |
import numpy as np
import matplotlib.pyplot as plt
from utils import *
import lr_funcs as lr
XTrain_Orig, YTrain, XTest_Orig, YTest, classes = load_datasets(False)
XTrain = lr.reshape_features(XTrain_Orig)/255.
XTest = lr.reshape_features(XTest_Orig)/255.
# print_stats(XTrain_Orig, YTrain, XTest_Orig, YTest)
def test_learning_rates():
learning_rates = [0.01, 0.001, 0.0001,.005]
for i in (learning_rates):
x = lr.model(XTrain,YTrain, XTest, YTest, num_iterations=5000 , learning_rate=i, print_cost=False)
plt.plot(np.squeeze(x['costs']), label=str(i))
print(f'Learning Rate : {i}, Train Accuracy : {x["train_acc"]}, Test Accuracy : {x["test_acc"]}')
plt.ylabel('cost')
plt.xlabel('iterations (hundreds)')
legend = plt.legend(loc='upper center', shadow=True)
frame = legend.get_frame()
frame.set_facecolor('0.90')
plt.show()
# d = lr.model(XTrain,YTrain, XTest, YTest, num_iterations=2000 , learning_rate=.005, print_cost=True)
# costs = np.squeeze(d['costs'])
# plt.plot(costs)
# plt.ylabel('Cost')
# plt.xlabel('Iterations [per 100')
# plt.title(f'Learning Rate : {d["learning_rate"]}, # of iterations : {d["num_iterations"]}')
# plt.show()
test_learning_rates() | [
"lr_funcs.reshape_features",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"numpy.squeeze",
"lr_funcs.model",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.show"
] | [((176, 208), 'lr_funcs.reshape_features', 'lr.reshape_features', (['XTrain_Orig'], {}), '(XTrain_Orig)\n', (195, 208), True, 'import lr_funcs as lr\n'), ((222, 253), 'lr_funcs.reshape_features', 'lr.reshape_features', (['XTest_Orig'], {}), '(XTest_Orig)\n', (241, 253), True, 'import lr_funcs as lr\n'), ((694, 712), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""cost"""'], {}), "('cost')\n", (704, 712), True, 'import matplotlib.pyplot as plt\n'), ((717, 752), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""iterations (hundreds)"""'], {}), "('iterations (hundreds)')\n", (727, 752), True, 'import matplotlib.pyplot as plt\n'), ((767, 810), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""upper center"""', 'shadow': '(True)'}), "(loc='upper center', shadow=True)\n", (777, 810), True, 'import matplotlib.pyplot as plt\n'), ((878, 888), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (886, 888), True, 'import matplotlib.pyplot as plt\n'), ((434, 532), 'lr_funcs.model', 'lr.model', (['XTrain', 'YTrain', 'XTest', 'YTest'], {'num_iterations': '(5000)', 'learning_rate': 'i', 'print_cost': '(False)'}), '(XTrain, YTrain, XTest, YTest, num_iterations=5000, learning_rate=i,\n print_cost=False)\n', (442, 532), True, 'import lr_funcs as lr\n'), ((546, 568), 'numpy.squeeze', 'np.squeeze', (["x['costs']"], {}), "(x['costs'])\n", (556, 568), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
"""
Created on Wed Jun 26 18:29:41 2019
@author: <NAME>
"""
import cv2
from PIL import Image
import matplotlib.pyplot as plt
import tools
import numpy as np
from scipy import ndimage
#from keras.preprocessing.image import ImageDataGenerator, array_to_img, img_to_array, load_img
#%%
#images
#PSP_001414_1780_RED_img_row_33792_col_12288_w_1024_h_1024_x_0_y_0
#PSP_001414_1780_RED_img_row_32768_col_15360_w_1024_h_1024_x_0_y_0
#PSP_001414_1780_RED_img_row_32768_col_14336_w_1024_h_1024_x_0_y_0
#PSP_001414_1780_RED_img_row_32768_col_13312_w_1024_h_1024_x_0_y_0
#PSP_001414_1780_RED_img_row_9216_col_11264_w_1024_h_1024_x_0_y_0
#chameleon
#parachute
path = "C:/Users/<NAME>/Documents/Unterlagen/SoSe2019/mars/python/1024x1024/"
img = cv2.imread('rocks.jpg')
#im = Image.open('rocks.jpg')
#np_im = np.array(im)
sharpened = tools.sharp(img, 3)
stretched = tools.stretch_8bit(img)
enhanced1 = tools.stretch_8bit(sharpened)
enhanced2 = tools.sharp(stretched, 3)
plt.imshow(enhanced1)
plt.show()
plt.imshow(enhanced2)
plt.show()
compare = tools.concatenate([img, sharpened, stretched, enhanced1, enhanced2]) #they are img type
plt.imshow(compare)
plt.show()
print(type(compare))
#cv2.imwrite('land_sharp3.jpg', compare)
compare = tools.concatenate([img, enhanced1, enhanced2])
plt.imshow(compare)
plt.show()
#compare.save('land_orgfinal_sharp3.jpg')
#cv2.imwrite('output/enhanced.jpg', enhanced1)
#cv2.imwrite('output/stretched.jpg', stretched)
#v2.imwrite('output/sharpened.jpg', sharpened)
#%%
img = cv2.imread('output/techno-signature_augmentation/parachute.jpg')
#simple = tools.augment_simple(img)
#augmentations = tools.augment_random(simple[3], generations = 8)
#augmentations = [flipped, rolled, rotated90, rotated180]
#cv2.imwrite('flipped.jpg', simple[0])
#cv2.imwrite('rolled.jpg', simple[1])
#cv2.imwrite('rotated90.jpg', simple[2])
#cv2.imwrite('rotated180.jpg', simple[3])
lista = [cv2.imread('aug_00.jpg'),cv2.imread('aug_01.jpg'),cv2.imread('aug_02.jpg'),cv2.imread('aug_03.jpg'),
cv2.imread('aug_04.jpg'),cv2.imread('aug_05.jpg'),cv2.imread('aug_06.jpg'),cv2.imread('aug_07.jpg')]
#lista2 = [cv2.imread('aug_08.jpg'),cv2.imread('aug_09.jpg'),cv2.imread('aug_10.jpg'),cv2.imread('aug_11.jpg'),
#cv2.imread('aug_12.jpg'),cv2.imread('aug_13.jpg'),cv2.imread('aug_14.jpg'),cv2.imread('aug_15.jpg')]
#lista3 = [cv2.imread('aug_16.jpg'),cv2.imread('aug_17.jpg'),cv2.imread('aug_18.jpg'),cv2.imread('aug_19.jpg'),
#cv2.imread('aug_20.jpg'),cv2.imread('aug_21.jpg'),cv2.imread('aug_22.jpg'),cv2.imread('aug_23.jpg')]
#%%
concatenated = tools.concatenate(lista)
plt.imshow(concatenated)
plt.show()
concatenated.save('comb5.jpg')
#%%
#_________________ create function with this _________________________ DONE
list_im = ['output/original.jpg','output/sharpened.jpg','output/stretched.jpg','output/enhanced.jpg']
imgs = [ Image.open(i) for i in list_im ]
# pick the image which is the smallest, and resize the others to match it (can be arbitrary image shape here)
min_shape = sorted( [(np.sum(i.size), i.size ) for i in imgs])[0][1]
imgs_comb = np.hstack( (np.asarray( i.resize(min_shape) ) for i in imgs ) )
# save that beautiful picture
imgs_comb = Image.fromarray( imgs_comb)
#imgs_comb.save( 'test_hor.jpg' )
# for a vertical stacking it is simple: use vstack
imgs_comb = np.vstack( (np.asarray( i.resize(min_shape) ) for i in imgs ) )
imgs_comb = Image.fromarray( imgs_comb)
#imgs_comb.save( 'test_ver.jpg' )
#_______________________________________________________________________
#%%
def concatenate(imgflnames): #file name, Image.fromarray for cv2 or numpy. Error: ValueError: cannot resize an array that references or is referenced
#by another array in this way.
#Use the np.resize function or refcheck=False
images = [cv2.imread(i) for i in imgflnames] #for loop one line for lists
print("\n", type(images), "\n")
print("lenght: ", len(images))
print("dimension 0: ", images[0].ndim)
print("dimension 1: ", images[1].ndim)
min_shape = sorted( [(np.sum(i.shape), i.shape ) for i in images])[0][1]
print(min_shape)
imgs_comb = np.hstack( (np.asarray(cv2.resize(i,(min_shape[0], min_shape[1]))) for i in images ) )
#res = cv2.resize(img_np, dsize=(2048, 2048), interpolation=cv2.INTER_CUBIC)
imgs_comb = Image.fromarray( imgs_comb)
return imgs_comb
def concatenate2(imgflnames): #file name, dimensionality problem: all the input arrays must have same number of dimensions. Could be fix with a resize function
images = [Image.open(i) for i in imgflnames] #for loop one line for lists
print("\n", type(images), "\n")
print("lenght: ", len(images))
print("dimension 0: ", images[0].size)
print("dimension 1: ", images[1].size)
min_shape = sorted( [(np.sum(i.size), i.size ) for i in images])[0][1]
print(min_shape)
imgs_comb = np.hstack( (np.asarray( i.resize(min_shape) ) for i in images ) )
imgs_comb = Image.fromarray( imgs_comb)
return imgs_comb
#%%
# Demo: resize the two enhancement outputs to the smaller one and join them.
list_im = ['output/enhancement/original.jpg','output/enhancement/enhanced.jpg']
imgs = [Image.open(i) for i in list_im]
# pick the image which is the smallest, and resize the others to match it (can be arbitrary image shape here)
min_shape = sorted([(np.sum(i.size), i.size) for i in imgs])[0][1]
# Bug fix: np.hstack requires a real sequence — a generator expression is
# rejected by recent NumPy releases — so build a list comprehension instead.
imgs_comb = np.hstack([np.asarray(i.resize(min_shape)) for i in imgs])
# save that beautiful picture
imgs_comb = Image.fromarray(imgs_comb)
plt.imshow(imgs_comb)
plt.show()
two = concatenate2(list_im)
plt.imshow(two)
plt.show()
#imgs_comb.save( 'orginal_final.jpg' )
#tools.augment_random(img, 20)
#augmented = tools.augment_simple(img)
#cv2.imwrite("output/chameleon.jpg", img)
#cv2.imwrite("output/flipped.jpg", augmented[0])
#cv2.imwrite("output/rolled.jpg", augmented[1])
#cv2.imwrite("output/rotated90.jpg", augmented[2])
#cv2.imwrite("output/rotated180.jpg", augmented[3])
#%% register_image(img)
#try with chameleon and rotate 27.5
#try resize again
# Load a pre-resized test image, build a registration template for it with
# the project helper, and save the template for later runs.
img = cv2.imread('resized.jpg')
plt.imshow(img)
plt.show()
#img = cv2.imread('resized.jpg')
ref = tools.generate_template(img)
plt.imshow(ref)
plt.show()
cv2.imwrite('refresized.jpg',ref)
#ref = tools.generate_template(img, [255,0,0])
#plt.imshow(ref)
#plt.show()
#%%
# Concatenate two pre-made image series with the project helper (the second
# positional argument is passed as True — presumably a resize/crop flag;
# verify against tools.concatenate).
type2_list = ['type2.jpg','reftype2.jpg','translation_type2.jpg','rigid_body_type2.jpg','scale_type2.jpg','affine_type2.jpg','bilatelar_type2.jpg']
resized_list = ['resized.jpg','align_and_crop_before.jpg','refresized.jpg','translation.jpg','rigid_body.jpg','scaled_rotation.jpg','affine.jpg','bilinear.jpg']
conc1 = tools.concatenate(type2_list, True)
plt.imshow(conc1)
plt.show()
conc2 = tools.concatenate(resized_list, True)
plt.imshow(conc2)
plt.show()
#%%
# Exercise the local concatenate() on a mixed list; the trailing comment
# records that mixing grayscale/RGB inputs breaks np.hstack.
img_list = ['output/enhancement/original.jpg','output/enhancement/enhanced.jpg', 'bilinear_template.jpg'] #dimensionality problem
images = [Image.open(i) for i in img_list]
for i in images:
    print (i.size)
    print (type(i))
concatenated = concatenate(img_list)
plt.imshow(concatenated)
plt.show()
#%% concatenation test detailed
# Compare how PIL (Image.open -> .size as (w, h)) and OpenCV
# (cv2.imread -> .shape as (h, w, c)) report each test image.
list_im1 = 'output/enhancement/original.jpg'
imgs_1 = Image.open(list_im1)
imgs2_1 = cv2.imread(list_im1)
print(imgs_1.size)
print("PIL Image type: ", type(imgs_1))
print(imgs2_1.shape)
print("CV2read imgs2 type: ", type(imgs2_1))
list_im2 = 'output/enhancement/enhanced.jpg'
imgs_2 = Image.open(list_im2)
imgs2_2 = cv2.imread(list_im2)
print("\n",imgs_2.size)
print("PIL Image type: ", type(imgs_2))
print(imgs2_2.shape)
print("CV2read imgs2 type: ", type(imgs2_2))
list_im3 = 'bilinear_template.jpg'
imgs_3 = Image.open(list_im3)
imgs2_3 = cv2.imread(list_im3)
print("\n",imgs_3.size)
print("PIL Image type: ", type(imgs_3))
print(imgs2_3.shape)
print("CV2read imgs2 type: ", type(imgs2_3))
result = tools.concatenate([list_im3, list_im2, list_im1])
plt.imshow(result)
plt.show()
#%%
#img_rotated = ndimage.rotate(img, 27)
#cv2.imwrite('output/rotated_chameleon27.jpg', img_rotated)
#transformations = tools.register_image(img, ref = 'bilinear_template.jpg') #best result so far
# NOTE(review): the first two register_image results are immediately
# overwritten — only the last call (with the bilinear template) is kept.
transformations = tools.register_image(img)
transformations = tools.register_image(img ,'solid')
transformations = tools.register_image(img, ref = 'bilinear_template.jpg') #homography function could have the same
#%%
# Persist the five registration outputs (translation, rotation,
# scaled rotation, affine, bilinear — in that index order).
cv2.imwrite('output/translation_resized_bilinear.jpg', transformations[0])
cv2.imwrite('output/rotation_resized_bilinear.jpg', transformations[1])
cv2.imwrite('output/scaled_rotation_resized_bilinear.jpg', transformations[2])
cv2.imwrite('output/affine_resized_bilinear.jpg', transformations[3])
cv2.imwrite('output/bilinear_resized_bilinear.jpg', transformations[4])
#%%
def random_color(low=5, high=250):
    """Return a random RGB colour as a three-element list.

    Parameters
    ----------
    low, high : int
        Inclusive lower / exclusive upper bound for each channel.  The
        defaults reproduce the original 5..249 range.

    Returns
    -------
    list
        Three channel values drawn uniformly from ``[low, high)``.
    """
    # Bug fix: the original ignored ``low``/``high`` and hard-coded 5/250.
    return [np.random.randint(low, high) for _ in range(3)]
#%%
def generate_template():
    """Build a random-colour registration template matching the global ``img``.

    A black border of ``margin`` pixels (one tenth of the smaller image side)
    is kept around a core filled with random RGB values.

    Returns
    -------
    np.ndarray
        uint8 array of shape ``(img.shape[0], img.shape[1], 3)``.
    """
    ref = np.zeros((img.shape[0], img.shape[1], 3), dtype='uint8')
    margin = int(min(img.shape[0], img.shape[1]) / 10)
    # Bug fixes vs. the original: the row index must be driven by the row
    # count (shape[0]) and the column index by the column count (shape[1]) —
    # the two were swapped, which over- or under-ran the array for
    # non-square images.  The stray ``i += 1`` / ``j += 1`` statements were
    # no-ops inside the for loops (except for shifting the row index by one)
    # and have been dropped.
    for row in range(img.shape[0] - 2 * margin):
        for col in range(img.shape[1] - 2 * margin):
            ref[margin + row, margin + col, :] = random_color()
    return ref
#%%
# Preview the locally generated template (global ``ref`` from above).
plt.imshow(ref)
plt.show()
cv2.imwrite('test_template.jpg', ref) | [
"matplotlib.pyplot.imshow",
"tools.generate_template",
"PIL.Image.fromarray",
"cv2.imwrite",
"PIL.Image.open",
"tools.concatenate",
"numpy.sum",
"numpy.zeros",
"numpy.random.randint",
"tools.sharp",
"tools.register_image",
"cv2.resize",
"cv2.imread",
"tools.stretch_8bit",
"matplotlib.pyp... | [((758, 781), 'cv2.imread', 'cv2.imread', (['"""rocks.jpg"""'], {}), "('rocks.jpg')\n", (768, 781), False, 'import cv2\n'), ((847, 866), 'tools.sharp', 'tools.sharp', (['img', '(3)'], {}), '(img, 3)\n', (858, 866), False, 'import tools\n'), ((879, 902), 'tools.stretch_8bit', 'tools.stretch_8bit', (['img'], {}), '(img)\n', (897, 902), False, 'import tools\n'), ((916, 945), 'tools.stretch_8bit', 'tools.stretch_8bit', (['sharpened'], {}), '(sharpened)\n', (934, 945), False, 'import tools\n'), ((958, 983), 'tools.sharp', 'tools.sharp', (['stretched', '(3)'], {}), '(stretched, 3)\n', (969, 983), False, 'import tools\n'), ((985, 1006), 'matplotlib.pyplot.imshow', 'plt.imshow', (['enhanced1'], {}), '(enhanced1)\n', (995, 1006), True, 'import matplotlib.pyplot as plt\n'), ((1007, 1017), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1015, 1017), True, 'import matplotlib.pyplot as plt\n'), ((1019, 1040), 'matplotlib.pyplot.imshow', 'plt.imshow', (['enhanced2'], {}), '(enhanced2)\n', (1029, 1040), True, 'import matplotlib.pyplot as plt\n'), ((1041, 1051), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1049, 1051), True, 'import matplotlib.pyplot as plt\n'), ((1063, 1131), 'tools.concatenate', 'tools.concatenate', (['[img, sharpened, stretched, enhanced1, enhanced2]'], {}), '([img, sharpened, stretched, enhanced1, enhanced2])\n', (1080, 1131), False, 'import tools\n'), ((1151, 1170), 'matplotlib.pyplot.imshow', 'plt.imshow', (['compare'], {}), '(compare)\n', (1161, 1170), True, 'import matplotlib.pyplot as plt\n'), ((1171, 1181), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1179, 1181), True, 'import matplotlib.pyplot as plt\n'), ((1256, 1302), 'tools.concatenate', 'tools.concatenate', (['[img, enhanced1, enhanced2]'], {}), '([img, enhanced1, enhanced2])\n', (1273, 1302), False, 'import tools\n'), ((1303, 1322), 'matplotlib.pyplot.imshow', 'plt.imshow', (['compare'], {}), '(compare)\n', (1313, 1322), True, 'import 
matplotlib.pyplot as plt\n'), ((1323, 1333), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1331, 1333), True, 'import matplotlib.pyplot as plt\n'), ((1531, 1595), 'cv2.imread', 'cv2.imread', (['"""output/techno-signature_augmentation/parachute.jpg"""'], {}), "('output/techno-signature_augmentation/parachute.jpg')\n", (1541, 1595), False, 'import cv2\n'), ((2577, 2601), 'tools.concatenate', 'tools.concatenate', (['lista'], {}), '(lista)\n', (2594, 2601), False, 'import tools\n'), ((2603, 2627), 'matplotlib.pyplot.imshow', 'plt.imshow', (['concatenated'], {}), '(concatenated)\n', (2613, 2627), True, 'import matplotlib.pyplot as plt\n'), ((2628, 2638), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2636, 2638), True, 'import matplotlib.pyplot as plt\n'), ((3197, 3223), 'PIL.Image.fromarray', 'Image.fromarray', (['imgs_comb'], {}), '(imgs_comb)\n', (3212, 3223), False, 'from PIL import Image\n'), ((3403, 3429), 'PIL.Image.fromarray', 'Image.fromarray', (['imgs_comb'], {}), '(imgs_comb)\n', (3418, 3429), False, 'from PIL import Image\n'), ((5484, 5510), 'PIL.Image.fromarray', 'Image.fromarray', (['imgs_comb'], {}), '(imgs_comb)\n', (5499, 5510), False, 'from PIL import Image\n'), ((5512, 5533), 'matplotlib.pyplot.imshow', 'plt.imshow', (['imgs_comb'], {}), '(imgs_comb)\n', (5522, 5533), True, 'import matplotlib.pyplot as plt\n'), ((5534, 5544), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5542, 5544), True, 'import matplotlib.pyplot as plt\n'), ((5574, 5589), 'matplotlib.pyplot.imshow', 'plt.imshow', (['two'], {}), '(two)\n', (5584, 5589), True, 'import matplotlib.pyplot as plt\n'), ((5590, 5600), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5598, 5600), True, 'import matplotlib.pyplot as plt\n'), ((6038, 6063), 'cv2.imread', 'cv2.imread', (['"""resized.jpg"""'], {}), "('resized.jpg')\n", (6048, 6063), False, 'import cv2\n'), ((6064, 6079), 'matplotlib.pyplot.imshow', 'plt.imshow', (['img'], {}), '(img)\n', (6074, 6079), 
True, 'import matplotlib.pyplot as plt\n'), ((6080, 6090), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (6088, 6090), True, 'import matplotlib.pyplot as plt\n'), ((6131, 6159), 'tools.generate_template', 'tools.generate_template', (['img'], {}), '(img)\n', (6154, 6159), False, 'import tools\n'), ((6160, 6175), 'matplotlib.pyplot.imshow', 'plt.imshow', (['ref'], {}), '(ref)\n', (6170, 6175), True, 'import matplotlib.pyplot as plt\n'), ((6176, 6186), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (6184, 6186), True, 'import matplotlib.pyplot as plt\n'), ((6188, 6222), 'cv2.imwrite', 'cv2.imwrite', (['"""refresized.jpg"""', 'ref'], {}), "('refresized.jpg', ref)\n", (6199, 6222), False, 'import cv2\n'), ((6620, 6655), 'tools.concatenate', 'tools.concatenate', (['type2_list', '(True)'], {}), '(type2_list, True)\n', (6637, 6655), False, 'import tools\n'), ((6656, 6673), 'matplotlib.pyplot.imshow', 'plt.imshow', (['conc1'], {}), '(conc1)\n', (6666, 6673), True, 'import matplotlib.pyplot as plt\n'), ((6674, 6684), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (6682, 6684), True, 'import matplotlib.pyplot as plt\n'), ((6693, 6730), 'tools.concatenate', 'tools.concatenate', (['resized_list', '(True)'], {}), '(resized_list, True)\n', (6710, 6730), False, 'import tools\n'), ((6731, 6748), 'matplotlib.pyplot.imshow', 'plt.imshow', (['conc2'], {}), '(conc2)\n', (6741, 6748), True, 'import matplotlib.pyplot as plt\n'), ((6749, 6759), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (6757, 6759), True, 'import matplotlib.pyplot as plt\n'), ((7031, 7055), 'matplotlib.pyplot.imshow', 'plt.imshow', (['concatenated'], {}), '(concatenated)\n', (7041, 7055), True, 'import matplotlib.pyplot as plt\n'), ((7056, 7066), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (7064, 7066), True, 'import matplotlib.pyplot as plt\n'), ((7157, 7177), 'PIL.Image.open', 'Image.open', (['list_im1'], {}), '(list_im1)\n', (7167, 7177), False, 'from PIL import 
Image\n'), ((7188, 7208), 'cv2.imread', 'cv2.imread', (['list_im1'], {}), '(list_im1)\n', (7198, 7208), False, 'import cv2\n'), ((7392, 7412), 'PIL.Image.open', 'Image.open', (['list_im2'], {}), '(list_im2)\n', (7402, 7412), False, 'from PIL import Image\n'), ((7423, 7443), 'cv2.imread', 'cv2.imread', (['list_im2'], {}), '(list_im2)\n', (7433, 7443), False, 'import cv2\n'), ((7622, 7642), 'PIL.Image.open', 'Image.open', (['list_im3'], {}), '(list_im3)\n', (7632, 7642), False, 'from PIL import Image\n'), ((7653, 7673), 'cv2.imread', 'cv2.imread', (['list_im3'], {}), '(list_im3)\n', (7663, 7673), False, 'import cv2\n'), ((7814, 7863), 'tools.concatenate', 'tools.concatenate', (['[list_im3, list_im2, list_im1]'], {}), '([list_im3, list_im2, list_im1])\n', (7831, 7863), False, 'import tools\n'), ((7864, 7882), 'matplotlib.pyplot.imshow', 'plt.imshow', (['result'], {}), '(result)\n', (7874, 7882), True, 'import matplotlib.pyplot as plt\n'), ((7883, 7893), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (7891, 7893), True, 'import matplotlib.pyplot as plt\n'), ((8113, 8138), 'tools.register_image', 'tools.register_image', (['img'], {}), '(img)\n', (8133, 8138), False, 'import tools\n'), ((8157, 8191), 'tools.register_image', 'tools.register_image', (['img', '"""solid"""'], {}), "(img, 'solid')\n", (8177, 8191), False, 'import tools\n'), ((8210, 8264), 'tools.register_image', 'tools.register_image', (['img'], {'ref': '"""bilinear_template.jpg"""'}), "(img, ref='bilinear_template.jpg')\n", (8230, 8264), False, 'import tools\n'), ((8312, 8386), 'cv2.imwrite', 'cv2.imwrite', (['"""output/translation_resized_bilinear.jpg"""', 'transformations[0]'], {}), "('output/translation_resized_bilinear.jpg', transformations[0])\n", (8323, 8386), False, 'import cv2\n'), ((8387, 8458), 'cv2.imwrite', 'cv2.imwrite', (['"""output/rotation_resized_bilinear.jpg"""', 'transformations[1]'], {}), "('output/rotation_resized_bilinear.jpg', transformations[1])\n", (8398, 8458), False, 
'import cv2\n'), ((8459, 8537), 'cv2.imwrite', 'cv2.imwrite', (['"""output/scaled_rotation_resized_bilinear.jpg"""', 'transformations[2]'], {}), "('output/scaled_rotation_resized_bilinear.jpg', transformations[2])\n", (8470, 8537), False, 'import cv2\n'), ((8538, 8607), 'cv2.imwrite', 'cv2.imwrite', (['"""output/affine_resized_bilinear.jpg"""', 'transformations[3]'], {}), "('output/affine_resized_bilinear.jpg', transformations[3])\n", (8549, 8607), False, 'import cv2\n'), ((8608, 8679), 'cv2.imwrite', 'cv2.imwrite', (['"""output/bilinear_resized_bilinear.jpg"""', 'transformations[4]'], {}), "('output/bilinear_resized_bilinear.jpg', transformations[4])\n", (8619, 8679), False, 'import cv2\n'), ((9244, 9259), 'matplotlib.pyplot.imshow', 'plt.imshow', (['ref'], {}), '(ref)\n', (9254, 9259), True, 'import matplotlib.pyplot as plt\n'), ((9260, 9270), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (9268, 9270), True, 'import matplotlib.pyplot as plt\n'), ((9271, 9308), 'cv2.imwrite', 'cv2.imwrite', (['"""test_template.jpg"""', 'ref'], {}), "('test_template.jpg', ref)\n", (9282, 9308), False, 'import cv2\n'), ((1926, 1950), 'cv2.imread', 'cv2.imread', (['"""aug_00.jpg"""'], {}), "('aug_00.jpg')\n", (1936, 1950), False, 'import cv2\n'), ((1951, 1975), 'cv2.imread', 'cv2.imread', (['"""aug_01.jpg"""'], {}), "('aug_01.jpg')\n", (1961, 1975), False, 'import cv2\n'), ((1976, 2000), 'cv2.imread', 'cv2.imread', (['"""aug_02.jpg"""'], {}), "('aug_02.jpg')\n", (1986, 2000), False, 'import cv2\n'), ((2001, 2025), 'cv2.imread', 'cv2.imread', (['"""aug_03.jpg"""'], {}), "('aug_03.jpg')\n", (2011, 2025), False, 'import cv2\n'), ((2027, 2051), 'cv2.imread', 'cv2.imread', (['"""aug_04.jpg"""'], {}), "('aug_04.jpg')\n", (2037, 2051), False, 'import cv2\n'), ((2052, 2076), 'cv2.imread', 'cv2.imread', (['"""aug_05.jpg"""'], {}), "('aug_05.jpg')\n", (2062, 2076), False, 'import cv2\n'), ((2077, 2101), 'cv2.imread', 'cv2.imread', (['"""aug_06.jpg"""'], {}), "('aug_06.jpg')\n", 
(2087, 2101), False, 'import cv2\n'), ((2102, 2126), 'cv2.imread', 'cv2.imread', (['"""aug_07.jpg"""'], {}), "('aug_07.jpg')\n", (2112, 2126), False, 'import cv2\n'), ((2866, 2879), 'PIL.Image.open', 'Image.open', (['i'], {}), '(i)\n', (2876, 2879), False, 'from PIL import Image\n'), ((4372, 4398), 'PIL.Image.fromarray', 'Image.fromarray', (['imgs_comb'], {}), '(imgs_comb)\n', (4387, 4398), False, 'from PIL import Image\n'), ((5012, 5038), 'PIL.Image.fromarray', 'Image.fromarray', (['imgs_comb'], {}), '(imgs_comb)\n', (5027, 5038), False, 'from PIL import Image\n'), ((5157, 5170), 'PIL.Image.open', 'Image.open', (['i'], {}), '(i)\n', (5167, 5170), False, 'from PIL import Image\n'), ((6904, 6917), 'PIL.Image.open', 'Image.open', (['i'], {}), '(i)\n', (6914, 6917), False, 'from PIL import Image\n'), ((8865, 8921), 'numpy.zeros', 'np.zeros', (['(img.shape[0], img.shape[1], 3)'], {'dtype': '"""uint8"""'}), "((img.shape[0], img.shape[1], 3), dtype='uint8')\n", (8873, 8921), True, 'import numpy as np\n'), ((3849, 3862), 'cv2.imread', 'cv2.imread', (['i'], {}), '(i)\n', (3859, 3862), False, 'import cv2\n'), ((4596, 4609), 'PIL.Image.open', 'Image.open', (['i'], {}), '(i)\n', (4606, 4609), False, 'from PIL import Image\n'), ((8733, 8758), 'numpy.random.randint', 'np.random.randint', (['(5)', '(250)'], {}), '(5, 250)\n', (8750, 8758), True, 'import numpy as np\n'), ((8758, 8783), 'numpy.random.randint', 'np.random.randint', (['(5)', '(250)'], {}), '(5, 250)\n', (8775, 8783), True, 'import numpy as np\n'), ((8783, 8808), 'numpy.random.randint', 'np.random.randint', (['(5)', '(250)'], {}), '(5, 250)\n', (8800, 8808), True, 'import numpy as np\n'), ((4207, 4250), 'cv2.resize', 'cv2.resize', (['i', '(min_shape[0], min_shape[1])'], {}), '(i, (min_shape[0], min_shape[1]))\n', (4217, 4250), False, 'import cv2\n'), ((3031, 3045), 'numpy.sum', 'np.sum', (['i.size'], {}), '(i.size)\n', (3037, 3045), True, 'import numpy as np\n'), ((5322, 5336), 'numpy.sum', 'np.sum', (['i.size'], 
{}), '(i.size)\n', (5328, 5336), True, 'import numpy as np\n'), ((4096, 4111), 'numpy.sum', 'np.sum', (['i.shape'], {}), '(i.shape)\n', (4102, 4111), True, 'import numpy as np\n'), ((4843, 4857), 'numpy.sum', 'np.sum', (['i.size'], {}), '(i.size)\n', (4849, 4857), True, 'import numpy as np\n')] |
from .tprofile import TemperatureProfile
import numpy as np
class TemperatureArray(TemperatureProfile):
    """Temperature profile defined by an explicit array of layer temperatures.

    If the number of supplied points differs from the number of atmospheric
    layers, the values are linearly interpolated onto the layer grid.
    """

    def __init__(self, tp_array=(2000, 1000)):
        # A tuple default avoids the mutable-default-argument pitfall; the
        # values are copied into a fresh ndarray either way, so any sequence
        # (list, tuple, ndarray) is accepted as before.
        super().__init__(self.__class__.__name__)
        self._tp_profile = np.array(tp_array)

    @property
    def profile(self):
        """Temperature profile resampled onto the model layers.

        Returns: :obj:np.array(float)
            temperature profile with ``self.nlayers`` entries
        """
        if self._tp_profile.shape[0] == self.nlayers:
            # Already on the layer grid — return as-is.
            return self._tp_profile
        else:
            # Linearly interpolate the supplied points onto ``nlayers``
            # equally spaced positions in [0, 1].
            interp_temp = np.linspace(0.0, 1.0, self._tp_profile.shape[0])
            interp_array = np.linspace(0.0, 1.0, self.nlayers)
            return np.interp(interp_array, interp_temp, self._tp_profile)

    def write(self, output):
        """Serialise via the parent writer and store the raw input array."""
        temperature = super().write(output)
        temperature.write_scalar('tp_array', self._tp_profile)
        return temperature
| [
"numpy.array",
"numpy.linspace",
"numpy.interp"
] | [((292, 310), 'numpy.array', 'np.array', (['tp_array'], {}), '(tp_array)\n', (300, 310), True, 'import numpy as np\n'), ((616, 664), 'numpy.linspace', 'np.linspace', (['(0.0)', '(1.0)', 'self._tp_profile.shape[0]'], {}), '(0.0, 1.0, self._tp_profile.shape[0])\n', (627, 664), True, 'import numpy as np\n'), ((692, 727), 'numpy.linspace', 'np.linspace', (['(0.0)', '(1.0)', 'self.nlayers'], {}), '(0.0, 1.0, self.nlayers)\n', (703, 727), True, 'import numpy as np\n'), ((748, 802), 'numpy.interp', 'np.interp', (['interp_array', 'interp_temp', 'self._tp_profile'], {}), '(interp_array, interp_temp, self._tp_profile)\n', (757, 802), True, 'import numpy as np\n')] |
#!/usr/bin/env python
import os
import subprocess
import argparse
import sys
import nibabel as nib
from builtins import str
import matplotlib.pyplot as plt
import numpy as np
import nipype.algorithms.confounds as npalg
import nilearn.plotting as nlp
import nilearn.image as nimg
import nilearn.signal as sgn
from nilearn import datasets
from nilearn.image import index_img
import nilearn.masking as nbmsk
from PIL import Image, ImageDraw, ImageFont
# Command-line interface: paths of the two EPI images, the anatomical
# background, optional panel titles and figure resolution.
parser = argparse.ArgumentParser(description='Save QA check Plots')
# Required options
reqoptions = parser.add_argument_group('Required arguments')
reqoptions.add_argument('-o', '-out', dest="outDir", required=True, help='Directory where images are to be saved' )
reqoptions.add_argument('-i', '-in', dest="inDir", required=True, help='Dir where EPI + masks are stored [MNI SPACE]' )
reqoptions.add_argument('-a', '-im1', dest="im1", required=True, help='First Image' )
reqoptions.add_argument('-b', '-im2', dest="im2", required=True, help='Second Image' )
reqoptions.add_argument('-c', '-bg_im', dest="bg_im", required=True, help='Anatomical Image' )
# NOTE(review): the following three are optional (required=False) despite
# living in the "Required arguments" group.
reqoptions.add_argument('-x', '-msg1', dest="msg1", required=False, default=None, help='Title for image 1' )
reqoptions.add_argument('-y', '-msg2', dest="msg2", required=False, default=None, help='Title for image 2' )
reqoptions.add_argument('-d', '-dpi', dest="dpi", required=False, default=120, help='Saved figure DPI' )
args = parser.parse_args()
# Unpack the CLI arguments into working variables.
outDir = args.outDir
inDir = args.inDir
im1 = args.im1
im2 = args.im2
bgImg = args.bg_im
msg1 = args.msg1
msg2 = args.msg2
# PNG resolution of the saved file
figDpi = int(args.dpi)
# Font size and weight for ALL plots
plt.rcParams.update({'font.size': 20, 'font.weight': 'bold'})
# Derive the output file names from the extension-stripped input names.
im1Name = im1.split('.')
im2Name = im2.split('.')
outSD1 = outDir + '/' + im1Name[0] + '.png'
outSD2 = outDir + '/' + im2Name[0] + '.png'
outSDD = outDir + '/' + im1Name[0] + '_to_' + im2Name[0] + '.gif'
outG1 = outDir + '/' + im1Name[0] + '_GP.png'
outG2 = outDir + '/' + im2Name[0] + '_GP.png'
outGDD = outDir + '/' + im1Name[0] + '_to_' + im2Name[0] + '_GP.gif'
# Default the panel titles to the image base names.
# Fix: compare against None with ``is``, not ``==`` (PEP 8 / E711).
if msg1 is None:
    msg1 = im1Name[0]
if msg2 is None:
    msg2 = im2Name[0]
# Load the two EPI images and the anatomical background.
# NOTE(review): get_data() is deprecated in newer nibabel in favour of
# get_fdata() — confirm the installed version before upgrading.
nii1 = nib.load(inDir +'/' + im1)
nii2 = nib.load(inDir +'/' + im2)
bgNii = nib.load(inDir +'/' + bgImg)
data1 = np.array(nii1.get_data())
data2 = np.array(nii2.get_data())
dataBg = np.array(bgNii.get_data())
#bgImg = np.mean(data1, axis=3)
# NOTE(review): ``bgImg`` is rebound here from the CLI file name string to a
# Nifti image object.
bgImg = nimg.new_img_like(bgNii,dataBg)
# x, y, z are not used later in this script — presumably leftover debugging.
x,y,z = bgImg.shape
fig = plt.figure(figsize=(18,14), dpi=figDpi, facecolor='w', edgecolor='k')
nSteps = 50
nRow = 7
# Query the image geometry through AFNI's 3dinfo (external dependency):
# voxel origins and the spatial extents along each axis.
origX = float(subprocess.check_output(['3dinfo', '-oi', inDir +'/' + im1]))
origY = float(subprocess.check_output(['3dinfo', '-oj', inDir +'/' + im1]))
origZ = float(subprocess.check_output(['3dinfo', '-ok', inDir +'/' + im1]))
minPoint = float(subprocess.check_output(['3dinfo', '-Iextent', inDir +'/' + im1]))# + origZ
maxPoint = float(subprocess.check_output(['3dinfo', '-Sextent', inDir +'/' + im1]))# + origZ
minX = float(subprocess.check_output(['3dinfo', '-Rextent', inDir +'/' + im1]))# + origX
# NOTE(review): maxX is read with '-Sextent' (superior) while minX uses
# '-Rextent' — this looks like a copy/paste slip (expected '-Lextent');
# confirm against the AFNI 3dinfo documentation.
maxX = float(subprocess.check_output(['3dinfo', '-Sextent', inDir +'/' + im1]))#+ origX
minY = float(subprocess.check_output(['3dinfo', '-Aextent', inDir +'/' + im1]))# + origY
maxY = float(subprocess.check_output(['3dinfo', '-Pextent', inDir +'/' + im1]))# + origY
# Shift the slice window: the z-range drops a quarter of its span, the x/y
# ranges drop half.  NOTE(review): oz divides by 4 while oy/ox divide by 2 —
# confirm this asymmetry is intentional.
oz = (maxPoint-minPoint)/4.
oy = (maxX-minX)/2.
ox = (maxY-minY)/2.
minPoint -= oz
maxPoint -= oz
maxX -= ox
minX -= ox
maxY -= oy
minY -= oy
# nSteps axial slice positions, laid out as nRow rows of the figure.
zSlices = np.linspace(minPoint, maxPoint, nSteps)
rowPlace = np.linspace(0, 1, nRow + 1)
step= int( np.floor( nSteps/nRow ) )
t0 = 0
tf = step
# Collapse 4D data to a mean volume; 3D data is plotted as-is.
if len(data1.shape)>3:
    sd1 = np.mean(data1, axis=3)
else:
    sd1 = data1
sd1 = nimg.new_img_like(nii1,sd1)
# Marker position in world coordinates (fractions chosen empirically).
xPos = minX * 0.375
yPos = maxY * 0.2
# One mosaic row per iteration: plot the EPI slices, overlay the anatomical
# edges in blue, and drop a red marker on every slice.
for i in range(nRow):
	display = nlp.plot_epi(sd1, display_mode='z', figure=fig, draw_cross=False, vmin=300, vmax=2200, cmap='gray', cut_coords=zSlices[t0:tf], axes=(0,rowPlace[i],1,1/nRow), colorbar=True, black_bg=True)
	display.add_edges(bgNii, color=(.1,.1,.75))
	nSl = tf-t0
	for j in range(nSl):
		display.add_markers([(xPos,yPos,zSlices[t0+j])], marker_color='r', marker_size=75)
	t0 = t0 + step
	tf = tf + step
plt.savefig(outSD1)
# Second mosaic, same layout for the second image.  Differences from the
# first loop: no black_bg on plot_epi and white (not red) slice markers.
t0 = 0
tf = step
fig = plt.figure(figsize=(18,14), dpi=figDpi, facecolor='w', edgecolor='k')
if len(data2.shape)>3:
    sd2 = np.mean(data2, axis=3)
else:
    sd2 = data2
#sd2 = np.mean(data2, axis=3)
sd2 = nimg.new_img_like(nii2,sd2)
for i in range(nRow):
	display = nlp.plot_epi(sd2, display_mode='z', figure=fig, draw_cross=False, vmin=300, vmax=2200, cmap='gray', cut_coords=zSlices[t0:tf], axes=(0,rowPlace[i],1,1/nRow), colorbar=True)
	display.add_edges(bgNii, color=(.1,.1,.75))
	nSl = tf-t0
	for j in range(nSl):
		display.add_markers([(xPos,yPos,zSlices[t0+j])], marker_color='w', marker_size=75)
	t0 = t0 + step
	tf = tf + step
plt.savefig(outSD2)
# Combine the two mosaics into an animated GIF that flips between them,
# then delete the intermediate PNGs.
# NOTE(review): ``im1``/``im2`` are rebound here from the CLI file name
# strings to PIL image objects.
im1 = Image.open(outSD1)
im2 = Image.open(outSD2)
w, h = im1.size
# Hard-coded font path — breaks on any other machine; consider bundling the
# font relative to the script.
fnt = ImageFont.truetype('/home/fsluser/Documents/rs_proc/QC_funcs/fonts/DAYPBL__.ttf', 25)
# Stamp each frame with its title, roughly centred horizontally.
draw = ImageDraw.Draw(im1)
draw.text( ((w*0.85)/2,0), msg1, fill=(255,255,255), font=fnt)
del draw
draw = ImageDraw.Draw(im2)
draw.text( ((w*0.85)/2,0), msg2, fill=(255,255,255), font=fnt )
del draw
ims = []
ims.append(im1)
ims.append(im2)
# One second per frame, looping forever.
ims[0].save(outSDD, format='GIF',
            append_images=ims[1:], save_all=True, duration=1000, loop=0)
os.remove(outSD1)
os.remove(outSD2)
| [
"nilearn.image.new_img_like",
"subprocess.check_output",
"numpy.mean",
"PIL.Image.open",
"matplotlib.pyplot.savefig",
"argparse.ArgumentParser",
"nibabel.load",
"numpy.floor",
"PIL.ImageFont.truetype",
"nilearn.plotting.plot_epi",
"matplotlib.pyplot.rcParams.update",
"matplotlib.pyplot.figure"... | [((470, 528), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Save QA check Plots"""'}), "(description='Save QA check Plots')\n", (493, 528), False, 'import argparse\n'), ((1722, 1783), 'matplotlib.pyplot.rcParams.update', 'plt.rcParams.update', (["{'font.size': 20, 'font.weight': 'bold'}"], {}), "({'font.size': 20, 'font.weight': 'bold'})\n", (1741, 1783), True, 'import matplotlib.pyplot as plt\n'), ((2244, 2271), 'nibabel.load', 'nib.load', (["(inDir + '/' + im1)"], {}), "(inDir + '/' + im1)\n", (2252, 2271), True, 'import nibabel as nib\n'), ((2278, 2305), 'nibabel.load', 'nib.load', (["(inDir + '/' + im2)"], {}), "(inDir + '/' + im2)\n", (2286, 2305), True, 'import nibabel as nib\n'), ((2314, 2343), 'nibabel.load', 'nib.load', (["(inDir + '/' + bgImg)"], {}), "(inDir + '/' + bgImg)\n", (2322, 2343), True, 'import nibabel as nib\n'), ((2490, 2522), 'nilearn.image.new_img_like', 'nimg.new_img_like', (['bgNii', 'dataBg'], {}), '(bgNii, dataBg)\n', (2507, 2522), True, 'import nilearn.image as nimg\n'), ((2551, 2621), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(18, 14)', 'dpi': 'figDpi', 'facecolor': '"""w"""', 'edgecolor': '"""k"""'}), "(figsize=(18, 14), dpi=figDpi, facecolor='w', edgecolor='k')\n", (2561, 2621), True, 'import matplotlib.pyplot as plt\n'), ((3577, 3616), 'numpy.linspace', 'np.linspace', (['minPoint', 'maxPoint', 'nSteps'], {}), '(minPoint, maxPoint, nSteps)\n', (3588, 3616), True, 'import numpy as np\n'), ((3631, 3658), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', '(nRow + 1)'], {}), '(0, 1, nRow + 1)\n', (3642, 3658), True, 'import numpy as np\n'), ((3795, 3823), 'nilearn.image.new_img_like', 'nimg.new_img_like', (['nii1', 'sd1'], {}), '(nii1, sd1)\n', (3812, 3823), True, 'import nilearn.image as nimg\n'), ((4292, 4311), 'matplotlib.pyplot.savefig', 'plt.savefig', (['outSD1'], {}), '(outSD1)\n', (4303, 4311), True, 'import matplotlib.pyplot as plt\n'), ((4337, 
4407), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(18, 14)', 'dpi': 'figDpi', 'facecolor': '"""w"""', 'edgecolor': '"""k"""'}), "(figsize=(18, 14), dpi=figDpi, facecolor='w', edgecolor='k')\n", (4347, 4407), True, 'import matplotlib.pyplot as plt\n'), ((4516, 4544), 'nilearn.image.new_img_like', 'nimg.new_img_like', (['nii2', 'sd2'], {}), '(nii2, sd2)\n', (4533, 4544), True, 'import nilearn.image as nimg\n'), ((4955, 4974), 'matplotlib.pyplot.savefig', 'plt.savefig', (['outSD2'], {}), '(outSD2)\n', (4966, 4974), True, 'import matplotlib.pyplot as plt\n'), ((4983, 5001), 'PIL.Image.open', 'Image.open', (['outSD1'], {}), '(outSD1)\n', (4993, 5001), False, 'from PIL import Image, ImageDraw, ImageFont\n'), ((5008, 5026), 'PIL.Image.open', 'Image.open', (['outSD2'], {}), '(outSD2)\n', (5018, 5026), False, 'from PIL import Image, ImageDraw, ImageFont\n'), ((5051, 5141), 'PIL.ImageFont.truetype', 'ImageFont.truetype', (['"""/home/fsluser/Documents/rs_proc/QC_funcs/fonts/DAYPBL__.ttf"""', '(25)'], {}), "(\n '/home/fsluser/Documents/rs_proc/QC_funcs/fonts/DAYPBL__.ttf', 25)\n", (5069, 5141), False, 'from PIL import Image, ImageDraw, ImageFont\n'), ((5146, 5165), 'PIL.ImageDraw.Draw', 'ImageDraw.Draw', (['im1'], {}), '(im1)\n', (5160, 5165), False, 'from PIL import Image, ImageDraw, ImageFont\n'), ((5250, 5269), 'PIL.ImageDraw.Draw', 'ImageDraw.Draw', (['im2'], {}), '(im2)\n', (5264, 5269), False, 'from PIL import Image, ImageDraw, ImageFont\n'), ((5492, 5509), 'os.remove', 'os.remove', (['outSD1'], {}), '(outSD1)\n', (5501, 5509), False, 'import os\n'), ((5510, 5527), 'os.remove', 'os.remove', (['outSD2'], {}), '(outSD2)\n', (5519, 5527), False, 'import os\n'), ((2658, 2719), 'subprocess.check_output', 'subprocess.check_output', (["['3dinfo', '-oi', inDir + '/' + im1]"], {}), "(['3dinfo', '-oi', inDir + '/' + im1])\n", (2681, 2719), False, 'import subprocess\n'), ((2734, 2795), 'subprocess.check_output', 'subprocess.check_output', (["['3dinfo', '-oj', inDir 
+ '/' + im1]"], {}), "(['3dinfo', '-oj', inDir + '/' + im1])\n", (2757, 2795), False, 'import subprocess\n'), ((2810, 2871), 'subprocess.check_output', 'subprocess.check_output', (["['3dinfo', '-ok', inDir + '/' + im1]"], {}), "(['3dinfo', '-ok', inDir + '/' + im1])\n", (2833, 2871), False, 'import subprocess\n'), ((2892, 2958), 'subprocess.check_output', 'subprocess.check_output', (["['3dinfo', '-Iextent', inDir + '/' + im1]"], {}), "(['3dinfo', '-Iextent', inDir + '/' + im1])\n", (2915, 2958), False, 'import subprocess\n'), ((2985, 3051), 'subprocess.check_output', 'subprocess.check_output', (["['3dinfo', '-Sextent', inDir + '/' + im1]"], {}), "(['3dinfo', '-Sextent', inDir + '/' + im1])\n", (3008, 3051), False, 'import subprocess\n'), ((3075, 3141), 'subprocess.check_output', 'subprocess.check_output', (["['3dinfo', '-Rextent', inDir + '/' + im1]"], {}), "(['3dinfo', '-Rextent', inDir + '/' + im1])\n", (3098, 3141), False, 'import subprocess\n'), ((3164, 3230), 'subprocess.check_output', 'subprocess.check_output', (["['3dinfo', '-Sextent', inDir + '/' + im1]"], {}), "(['3dinfo', '-Sextent', inDir + '/' + im1])\n", (3187, 3230), False, 'import subprocess\n'), ((3254, 3320), 'subprocess.check_output', 'subprocess.check_output', (["['3dinfo', '-Aextent', inDir + '/' + im1]"], {}), "(['3dinfo', '-Aextent', inDir + '/' + im1])\n", (3277, 3320), False, 'import subprocess\n'), ((3343, 3409), 'subprocess.check_output', 'subprocess.check_output', (["['3dinfo', '-Pextent', inDir + '/' + im1]"], {}), "(['3dinfo', '-Pextent', inDir + '/' + im1])\n", (3366, 3409), False, 'import subprocess\n'), ((3670, 3693), 'numpy.floor', 'np.floor', (['(nSteps / nRow)'], {}), '(nSteps / nRow)\n', (3678, 3693), True, 'import numpy as np\n'), ((3747, 3769), 'numpy.mean', 'np.mean', (['data1'], {'axis': '(3)'}), '(data1, axis=3)\n', (3754, 3769), True, 'import numpy as np\n'), ((3898, 4098), 'nilearn.plotting.plot_epi', 'nlp.plot_epi', (['sd1'], {'display_mode': '"""z"""', 'figure': 'fig', 
'draw_cross': '(False)', 'vmin': '(300)', 'vmax': '(2200)', 'cmap': '"""gray"""', 'cut_coords': 'zSlices[t0:tf]', 'axes': '(0, rowPlace[i], 1, 1 / nRow)', 'colorbar': '(True)', 'black_bg': '(True)'}), "(sd1, display_mode='z', figure=fig, draw_cross=False, vmin=300,\n vmax=2200, cmap='gray', cut_coords=zSlices[t0:tf], axes=(0, rowPlace[i],\n 1, 1 / nRow), colorbar=True, black_bg=True)\n", (3910, 4098), True, 'import nilearn.plotting as nlp\n'), ((4438, 4460), 'numpy.mean', 'np.mean', (['data2'], {'axis': '(3)'}), '(data2, axis=3)\n', (4445, 4460), True, 'import numpy as np\n'), ((4579, 4764), 'nilearn.plotting.plot_epi', 'nlp.plot_epi', (['sd2'], {'display_mode': '"""z"""', 'figure': 'fig', 'draw_cross': '(False)', 'vmin': '(300)', 'vmax': '(2200)', 'cmap': '"""gray"""', 'cut_coords': 'zSlices[t0:tf]', 'axes': '(0, rowPlace[i], 1, 1 / nRow)', 'colorbar': '(True)'}), "(sd2, display_mode='z', figure=fig, draw_cross=False, vmin=300,\n vmax=2200, cmap='gray', cut_coords=zSlices[t0:tf], axes=(0, rowPlace[i],\n 1, 1 / nRow), colorbar=True)\n", (4591, 4764), True, 'import nilearn.plotting as nlp\n')] |
import bitarray
import numpy as np
import subprocess as sp
from qubit_interaction import to_qubits, to_cbits
from qiskit import QuantumCircuit, execute, Aer
from qiskit.visualization import plot_histogram, plot_bloch_multivector
def compare_bases(bases1, bases2, bits):
    """Sift a raw key: keep only the bits measured in matching bases.

    Parameters
    ----------
    bases1, bases2 : sequence
        The two parties' basis choices, aligned position by position.
    bits : sequence
        Raw bit values, aligned with the basis sequences.

    Returns
    -------
    list
        The bits at every position where the two basis choices agree.
    """
    # zip pairs the choices positionally, replacing the index-based loop.
    return [bit for b1, b2, bit in zip(bases1, bases2, bits) if b1 == b2]
def run_simul(is_eve):
    """Run an interactive, terminal-based walkthrough of the BB84 protocol.

    Prints each protocol stage and pauses for user input between stages.

    Args:
        is_eve: if True, simulate an eavesdropper (Eve) who intercepts and
            measures the qubits before forwarding them to Bob.
    """
    # 'clear' wipes the terminal between stages (works on POSIX shells).
    tmp = sp.call('clear', shell=True)
    if is_eve:
        print("You know that Eve is listening, but Alice and Bob don't know this.\n")
    else:
        print("You know that Eve is not listening, but Alice and Bob don't know this.\n")
    print("To check if Eve is listening, Alice is going to send a message to Bob over the quantum channel.")
    alice_message = input("Choose a message for Alice to send (between 20 and 100 characters): ") or "Need a smol message smh"
    tmp = sp.call('clear', shell=True)
    # Out-of-range messages are silently replaced with a fixed fallback string.
    if(len(alice_message)>100 or len(alice_message)<20):
        alice_message = "Smh the code will just use this string instead"
    # Encode the message as a flat 0/1 int array (8 bits per UTF-8 byte).
    alice_bits = bitarray.bitarray()
    alice_bits.frombytes(alice_message.encode('utf-8'))
    alice_bits = np.array(list(alice_bits)).astype(int)
    print("Alice is going to send this message:\n", alice_bits)
    input("\nPress any key to continue...")
    tmp = sp.call('clear', shell=True)
    print("Alice chooses to encode the bits on the standard basis(Z) or the signed basis(X) at random and sends the encoded message to Bob.")
    # Fixed seed makes every run of the demo reproducible.
    np.random.seed(seed=0)
    alice_bases = np.random.randint(2, size=(len(alice_bits)))
    encoded_message = to_qubits(alice_bits, alice_bases)
    input("\nPress any key to continue...")
    tmp = sp.call('clear', shell=True)
    if is_eve:
        print("Oops! Eve has intercepted the encoded message and measured the qubits. Alice and Bob don't know yet that this happened. Eve sends the qubits she received to Bob.")
        # Eve measures in random bases of her own, disturbing the qubit states.
        eve_bases = np.random.randint(2, size=(len(alice_bits)))
        eve_bits = to_cbits(encoded_message, eve_bases)
        eve_bits = np.array(eve_bits)
        input("\nPress any key to continue...")
        tmp = sp.call('clear', shell=True)
    print("Bob chooses to measure the bits he received on the standard basis(Z) or the signed basis(X) at random.")
    input("\nPress any key to continue...")
    tmp = sp.call('clear', shell=True)
    bob_bases = np.random.randint(2, size=(len(alice_bits)))
    print("Bob measures the qubits using the bases he chose and obtains this message:")
    bob_bits = to_cbits(encoded_message, bob_bases)
    bob_bits = np.array(bob_bits)
    print(bob_bits)
    input("\nPress any key to continue...")
    tmp = sp.call('clear', shell=True)
    print("Alice and Bob send the bases they used to each other. If they chose the same base to encode and decode a bit, they keep that bit. They drop that bit if they used different bases to encode and decode it.")
    # Sifting: keep only positions where Alice's and Bob's bases agree.
    alice_key = compare_bases(alice_bases, bob_bases, alice_bits)
    alice_key = np.array(alice_key)
    bob_key = compare_bases(alice_bases, bob_bases, bob_bits)
    bob_key = np.array(bob_key)
    input("\nPress any key to continue...")
    tmp = sp.call('clear', shell=True)
    print("Alice and Bob now choose a small part of the bits they end up with and send it to each other.")
    # Publicly compare the last third of the sifted key to detect eavesdropping.
    sample_size = int(len(alice_key)/3)
    alice_sample = alice_key[-sample_size:]
    bob_sample = bob_key[-sample_size:]
    print("Alice sent:".ljust(12), alice_sample)
    print("Bob sent:".ljust(12), bob_sample)
    input("\nPress any key to continue...")
    tmp = sp.call('clear', shell=True)
    # Matching samples => channel believed secure; mismatch => Eve detected.
    if np.array_equal(alice_sample, bob_sample):
        print("Alice and Bob believe that Eve has not intercepted their key, and their communication channel is secure.")
        if is_eve:
            print("However, they are wrong! Eve has intercepted their message. You knew all along didn't you?")
        else:
            print("They are right! Eve has not intercepted their message. You knew all along didn't you?")
    else:
        print("Alice and Bob believe that Eve has intercepted their key, and their communication channel is not secure.")
        if is_eve:
            print("They are right! Eve has intercepted their message. You knew all along didn't you?")
        else:
            print("However, they are wrong! Eve has not intercepted their message. You knew all along didn't you?")
return | [
"numpy.array",
"numpy.array_equal",
"numpy.random.seed",
"subprocess.call",
"qubit_interaction.to_qubits",
"qubit_interaction.to_cbits",
"bitarray.bitarray"
] | [((459, 487), 'subprocess.call', 'sp.call', (['"""clear"""'], {'shell': '(True)'}), "('clear', shell=True)\n", (466, 487), True, 'import subprocess as sp\n'), ((942, 970), 'subprocess.call', 'sp.call', (['"""clear"""'], {'shell': '(True)'}), "('clear', shell=True)\n", (949, 970), True, 'import subprocess as sp\n'), ((1120, 1139), 'bitarray.bitarray', 'bitarray.bitarray', ([], {}), '()\n', (1137, 1139), False, 'import bitarray\n'), ((1372, 1400), 'subprocess.call', 'sp.call', (['"""clear"""'], {'shell': '(True)'}), "('clear', shell=True)\n", (1379, 1400), True, 'import subprocess as sp\n'), ((1549, 1571), 'numpy.random.seed', 'np.random.seed', ([], {'seed': '(0)'}), '(seed=0)\n', (1563, 1571), True, 'import numpy as np\n'), ((1658, 1692), 'qubit_interaction.to_qubits', 'to_qubits', (['alice_bits', 'alice_bases'], {}), '(alice_bits, alice_bases)\n', (1667, 1692), False, 'from qubit_interaction import to_qubits, to_cbits\n'), ((1748, 1776), 'subprocess.call', 'sp.call', (['"""clear"""'], {'shell': '(True)'}), "('clear', shell=True)\n", (1755, 1776), True, 'import subprocess as sp\n'), ((2411, 2439), 'subprocess.call', 'sp.call', (['"""clear"""'], {'shell': '(True)'}), "('clear', shell=True)\n", (2418, 2439), True, 'import subprocess as sp\n'), ((2606, 2642), 'qubit_interaction.to_cbits', 'to_cbits', (['encoded_message', 'bob_bases'], {}), '(encoded_message, bob_bases)\n', (2614, 2642), False, 'from qubit_interaction import to_qubits, to_cbits\n'), ((2658, 2676), 'numpy.array', 'np.array', (['bob_bits'], {}), '(bob_bits)\n', (2666, 2676), True, 'import numpy as np\n'), ((2752, 2780), 'subprocess.call', 'sp.call', (['"""clear"""'], {'shell': '(True)'}), "('clear', shell=True)\n", (2759, 2780), True, 'import subprocess as sp\n'), ((3081, 3100), 'numpy.array', 'np.array', (['alice_key'], {}), '(alice_key)\n', (3089, 3100), True, 'import numpy as np\n'), ((3177, 3194), 'numpy.array', 'np.array', (['bob_key'], {}), '(bob_key)\n', (3185, 3194), True, 'import numpy as 
np\n'), ((3250, 3278), 'subprocess.call', 'sp.call', (['"""clear"""'], {'shell': '(True)'}), "('clear', shell=True)\n", (3257, 3278), True, 'import subprocess as sp\n'), ((3663, 3691), 'subprocess.call', 'sp.call', (['"""clear"""'], {'shell': '(True)'}), "('clear', shell=True)\n", (3670, 3691), True, 'import subprocess as sp\n'), ((3700, 3740), 'numpy.array_equal', 'np.array_equal', (['alice_sample', 'bob_sample'], {}), '(alice_sample, bob_sample)\n', (3714, 3740), True, 'import numpy as np\n'), ((2065, 2101), 'qubit_interaction.to_cbits', 'to_cbits', (['encoded_message', 'eve_bases'], {}), '(encoded_message, eve_bases)\n', (2073, 2101), False, 'from qubit_interaction import to_qubits, to_cbits\n'), ((2121, 2139), 'numpy.array', 'np.array', (['eve_bits'], {}), '(eve_bits)\n', (2129, 2139), True, 'import numpy as np\n'), ((2211, 2239), 'subprocess.call', 'sp.call', (['"""clear"""'], {'shell': '(True)'}), "('clear', shell=True)\n", (2218, 2239), True, 'import subprocess as sp\n')] |
import random
import os
import pickle
import librosa as lb
import numpy as np
import musdb
import yaml
# ignore warning about unsafe loaders in pyYAML 5.1 (used in musdb)
# https://github.com/yaml/pyyaml/wiki/PyYAML-yaml.load(input)-Deprecation
# NOTE: this silences the warning process-wide, not just inside musdb.
yaml.warnings({'YAMLLoadWarning': False})
def musdb_pre_processing(path_to_musdb, path_to_save_data, target_sr,
                         frame_length):
    """
    Split all MUSDB accompaniment tracks into fixed-length frames, downsample
    them to ``target_sr``, convert them to mono and save each frame as a
    .npy file.  The original training partition is randomly split into a
    training (80 tracks) and a validation (20 tracks) set; the test
    partition (50 tracks) is processed as-is.

    Args:
        path_to_musdb: root directory of the MUSDB18 corpus.
        path_to_save_data: output directory; 'train', 'val' and 'test'
            sub-directories are created below it.
        target_sr: target sampling rate in Hz.
        frame_length: frame length in samples (also used as hop length,
            i.e. frames do not overlap).
    """
    path_to_save_train_set = os.path.join(path_to_save_data, 'train')
    path_to_save_val_set = os.path.join(path_to_save_data, 'val')
    path_to_save_test_set = os.path.join(path_to_save_data, 'test')
    for directory in (path_to_save_data, path_to_save_train_set,
                      path_to_save_val_set, path_to_save_test_set):
        if not os.path.exists(directory):
            os.makedirs(directory)
    # load the musdb train and test partitions with the musdb parser
    # (https://github.com/sigsep/sigsep-mus-db)
    musdb_corpus = musdb.DB(root_dir=path_to_musdb)
    training_tracks = musdb_corpus.load_mus_tracks(subsets=['train'])
    test_tracks = musdb_corpus.load_mus_tracks(subsets=['test'])
    # randomly select 20 of the 100 training tracks for the validation set
    all_idx = list(np.arange(0, 100))
    random.seed(1)  # fixed seed -> reproducible train/val split
    val_idx = random.sample(population=all_idx, k=20)
    train_idx = [idx for idx in all_idx if idx not in val_idx]
    # The three partitions only differ in track selection and output
    # directory, so a single helper handles them all.
    _process_partition([training_tracks[i] for i in train_idx],
                       path_to_save_train_set, 'train', target_sr, frame_length)
    _process_partition([training_tracks[i] for i in val_idx],
                       path_to_save_val_set, 'val', target_sr, frame_length)
    _process_partition(test_tracks[:50],
                       path_to_save_test_set, 'test', target_sr, frame_length)


def _process_partition(tracks, save_dir, partition_name, target_sr, frame_length):
    """Frame, resample and save one partition; pickle its file list.

    Args:
        tracks: iterable of musdb track objects.
        save_dir: directory the .npy frames and pickle are written to.
        partition_name: 'train', 'val' or 'test'; used in the pickle name.
        target_sr: target sampling rate in Hz.
        frame_length: frame (and hop) length in samples.
    """
    file_list = []
    for track in tracks:
        # Build a short, filesystem-safe name: 6 chars of artist + 5 of title.
        name_parts = track.name.split('-')
        track_name = name_parts[0][0:6] + "_" + name_parts[1][1:6]
        track_name = track_name.replace(" ", "_")
        track_audio = track.targets['accompaniment'].audio
        audio_mono = lb.to_mono(track_audio.T)
        audio_resampled = lb.core.resample(audio_mono, track.rate, target_sr)
        # hop_length == frame_length -> non-overlapping frames
        frames = lb.util.frame(y=audio_resampled, frame_length=frame_length,
                              hop_length=frame_length)
        for n in range(frames.shape[1]):
            file_name = track_name + '_{}.npy'.format(n)
            np.save(os.path.join(save_dir, file_name), frames[:, n])
            file_list.append(file_name)
    # Persist the ordered list of frame files for the data loader;
    # the context manager guarantees the handle is closed.
    pickle_path = os.path.join(save_dir, "{}_file_list.pickle".format(partition_name))
    with open(pickle_path, "wb") as pickle_out:
        pickle.dump(file_list, pickle_out)
if __name__ == '__main__':
    # Script entry point: convert the MUSDB18 corpus into fixed-length,
    # mono, 16 kHz accompaniment frames stored as .npy files.
    path_to_musdb = '../Datasets/MUSDB18'
    path_to_save_data = '../Datasets/MUSDB_accompaniments'
    target_sr = 16000  # target sampling rate in Hz
    frame_length = 131584  # samples per frame (~8.2 s at 16 kHz)
    musdb_pre_processing(path_to_musdb, path_to_save_data, target_sr=target_sr, frame_length=frame_length)
| [
"os.path.exists",
"random.sample",
"librosa.util.frame",
"pickle.dump",
"os.makedirs",
"yaml.warnings",
"librosa.to_mono",
"os.path.join",
"random.seed",
"musdb.DB",
"librosa.core.resample",
"numpy.arange"
] | [((248, 289), 'yaml.warnings', 'yaml.warnings', (["{'YAMLLoadWarning': False}"], {}), "({'YAMLLoadWarning': False})\n", (261, 289), False, 'import yaml\n'), ((731, 771), 'os.path.join', 'os.path.join', (['path_to_save_data', '"""train"""'], {}), "(path_to_save_data, 'train')\n", (743, 771), False, 'import os\n'), ((799, 837), 'os.path.join', 'os.path.join', (['path_to_save_data', '"""val"""'], {}), "(path_to_save_data, 'val')\n", (811, 837), False, 'import os\n'), ((866, 905), 'os.path.join', 'os.path.join', (['path_to_save_data', '"""test"""'], {}), "(path_to_save_data, 'test')\n", (878, 905), False, 'import os\n'), ((1401, 1433), 'musdb.DB', 'musdb.DB', ([], {'root_dir': 'path_to_musdb'}), '(root_dir=path_to_musdb)\n', (1409, 1433), False, 'import musdb\n'), ((1704, 1718), 'random.seed', 'random.seed', (['(1)'], {}), '(1)\n', (1715, 1718), False, 'import random\n'), ((1733, 1772), 'random.sample', 'random.sample', ([], {'population': 'all_idx', 'k': '(20)'}), '(population=all_idx, k=20)\n', (1746, 1772), False, 'import random\n'), ((2897, 2937), 'pickle.dump', 'pickle.dump', (['train_file_list', 'pickle_out'], {}), '(train_file_list, pickle_out)\n', (2908, 2937), False, 'import pickle\n'), ((3930, 3968), 'pickle.dump', 'pickle.dump', (['val_file_list', 'pickle_out'], {}), '(val_file_list, pickle_out)\n', (3941, 3968), False, 'import pickle\n'), ((4958, 4997), 'pickle.dump', 'pickle.dump', (['test_file_list', 'pickle_out'], {}), '(test_file_list, pickle_out)\n', (4969, 4997), False, 'import pickle\n'), ((918, 951), 'os.path.exists', 'os.path.exists', (['path_to_save_data'], {}), '(path_to_save_data)\n', (932, 951), False, 'import os\n'), ((961, 991), 'os.makedirs', 'os.makedirs', (['path_to_save_data'], {}), '(path_to_save_data)\n', (972, 991), False, 'import os\n'), ((1003, 1041), 'os.path.exists', 'os.path.exists', (['path_to_save_train_set'], {}), '(path_to_save_train_set)\n', (1017, 1041), False, 'import os\n'), ((1051, 1086), 'os.makedirs', 'os.makedirs', 
(['path_to_save_train_set'], {}), '(path_to_save_train_set)\n', (1062, 1086), False, 'import os\n'), ((1098, 1134), 'os.path.exists', 'os.path.exists', (['path_to_save_val_set'], {}), '(path_to_save_val_set)\n', (1112, 1134), False, 'import os\n'), ((1144, 1177), 'os.makedirs', 'os.makedirs', (['path_to_save_val_set'], {}), '(path_to_save_val_set)\n', (1155, 1177), False, 'import os\n'), ((1189, 1226), 'os.path.exists', 'os.path.exists', (['path_to_save_test_set'], {}), '(path_to_save_test_set)\n', (1203, 1226), False, 'import os\n'), ((1236, 1270), 'os.makedirs', 'os.makedirs', (['path_to_save_test_set'], {}), '(path_to_save_test_set)\n', (1247, 1270), False, 'import os\n'), ((1681, 1698), 'numpy.arange', 'np.arange', (['(0)', '(100)'], {}), '(0, 100)\n', (1690, 1698), True, 'import numpy as np\n'), ((2292, 2317), 'librosa.to_mono', 'lb.to_mono', (['track_audio.T'], {}), '(track_audio.T)\n', (2302, 2317), True, 'import librosa as lb\n'), ((2355, 2412), 'librosa.core.resample', 'lb.core.resample', (['track_audio_mono', 'track.rate', 'target_sr'], {}), '(track_audio_mono, track.rate, target_sr)\n', (2371, 2412), True, 'import librosa as lb\n'), ((2431, 2530), 'librosa.util.frame', 'lb.util.frame', ([], {'y': 'track_audio_mono_resampled', 'frame_length': 'frame_length', 'hop_length': 'frame_length'}), '(y=track_audio_mono_resampled, frame_length=frame_length,\n hop_length=frame_length)\n', (2444, 2530), True, 'import librosa as lb\n'), ((2823, 2885), 'os.path.join', 'os.path.join', (['path_to_save_train_set', '"""train_file_list.pickle"""'], {}), "(path_to_save_train_set, 'train_file_list.pickle')\n", (2835, 2885), False, 'import os\n'), ((3333, 3358), 'librosa.to_mono', 'lb.to_mono', (['track_audio.T'], {}), '(track_audio.T)\n', (3343, 3358), True, 'import librosa as lb\n'), ((3396, 3453), 'librosa.core.resample', 'lb.core.resample', (['track_audio_mono', 'track.rate', 'target_sr'], {}), '(track_audio_mono, track.rate, target_sr)\n', (3412, 3453), True, 'import 
librosa as lb\n'), ((3472, 3571), 'librosa.util.frame', 'lb.util.frame', ([], {'y': 'track_audio_mono_resampled', 'frame_length': 'frame_length', 'hop_length': 'frame_length'}), '(y=track_audio_mono_resampled, frame_length=frame_length,\n hop_length=frame_length)\n', (3485, 3571), True, 'import librosa as lb\n'), ((3860, 3918), 'os.path.join', 'os.path.join', (['path_to_save_val_set', '"""val_file_list.pickle"""'], {}), "(path_to_save_val_set, 'val_file_list.pickle')\n", (3872, 3918), False, 'import os\n'), ((4357, 4382), 'librosa.to_mono', 'lb.to_mono', (['track_audio.T'], {}), '(track_audio.T)\n', (4367, 4382), True, 'import librosa as lb\n'), ((4420, 4477), 'librosa.core.resample', 'lb.core.resample', (['track_audio_mono', 'track.rate', 'target_sr'], {}), '(track_audio_mono, track.rate, target_sr)\n', (4436, 4477), True, 'import librosa as lb\n'), ((4496, 4595), 'librosa.util.frame', 'lb.util.frame', ([], {'y': 'track_audio_mono_resampled', 'frame_length': 'frame_length', 'hop_length': 'frame_length'}), '(y=track_audio_mono_resampled, frame_length=frame_length,\n hop_length=frame_length)\n', (4509, 4595), True, 'import librosa as lb\n'), ((4886, 4946), 'os.path.join', 'os.path.join', (['path_to_save_test_set', '"""test_file_list.pickle"""'], {}), "(path_to_save_test_set, 'test_file_list.pickle')\n", (4898, 4946), False, 'import os\n'), ((2691, 2738), 'os.path.join', 'os.path.join', (['path_to_save_train_set', 'file_name'], {}), '(path_to_save_train_set, file_name)\n', (2703, 2738), False, 'import os\n'), ((3732, 3777), 'os.path.join', 'os.path.join', (['path_to_save_val_set', 'file_name'], {}), '(path_to_save_val_set, file_name)\n', (3744, 3777), False, 'import os\n'), ((4756, 4802), 'os.path.join', 'os.path.join', (['path_to_save_test_set', 'file_name'], {}), '(path_to_save_test_set, file_name)\n', (4768, 4802), False, 'import os\n')] |
# Feature extraction example: beat-synchronous MFCC-delta + chroma features.
import numpy as np
import librosa
# Suppress numpy divide-by-zero / NaN warnings raised during feature math.
np.seterr(divide='ignore', invalid='ignore')
# Load the example clip
audio_path = './generated-audio/test.mp3'
y, sr = librosa.load(audio_path)
# Separate harmonics and percussives into two waveforms
y_harmonic, y_percussive = librosa.effects.hpss(y)
# Beat track on the percussive signal
tempo, beat_frames = librosa.beat.beat_track(y=y_percussive,
                                             sr=sr)
# Compute MFCC features from the raw signal
mfcc = librosa.feature.mfcc(y=y, sr=sr, hop_length=512, n_mfcc=13)
# And the first-order differences (delta features)
mfcc_delta = librosa.feature.delta(mfcc)
# Stack and synchronize between beat events
# This time, we'll use the mean value (default) instead of median
# NOTE(review): librosa.feature.sync was later moved to librosa.util.sync —
# confirm this matches the pinned librosa version.
beat_mfcc_delta = librosa.feature.sync(np.vstack([mfcc, mfcc_delta]),
                                    beat_frames)
# Compute chroma features from the harmonic signal
chromagram = librosa.feature.chroma_cqt(y=y_harmonic, sr=sr)
# Aggregate chroma features between beat events
# We'll use the median value of each feature between beat frames
beat_chroma = librosa.feature.sync(chromagram,
                                    beat_frames,
                                    aggregate=np.median)
# Finally, stack all beat-synchronous features together
beat_features = np.vstack([beat_chroma, beat_mfcc_delta])
| [
"librosa.feature.sync",
"librosa.feature.chroma_cqt",
"librosa.feature.delta",
"librosa.beat.beat_track",
"librosa.feature.mfcc",
"numpy.vstack",
"numpy.seterr",
"librosa.effects.hpss",
"librosa.load"
] | [((64, 108), 'numpy.seterr', 'np.seterr', ([], {'divide': '"""ignore"""', 'invalid': '"""ignore"""'}), "(divide='ignore', invalid='ignore')\n", (73, 108), True, 'import numpy as np\n'), ((184, 208), 'librosa.load', 'librosa.load', (['audio_path'], {}), '(audio_path)\n', (196, 208), False, 'import librosa\n'), ((293, 316), 'librosa.effects.hpss', 'librosa.effects.hpss', (['y'], {}), '(y)\n', (313, 316), False, 'import librosa\n'), ((377, 423), 'librosa.beat.beat_track', 'librosa.beat.beat_track', ([], {'y': 'y_percussive', 'sr': 'sr'}), '(y=y_percussive, sr=sr)\n', (400, 423), False, 'import librosa\n'), ((521, 580), 'librosa.feature.mfcc', 'librosa.feature.mfcc', ([], {'y': 'y', 'sr': 'sr', 'hop_length': '(512)', 'n_mfcc': '(13)'}), '(y=y, sr=sr, hop_length=512, n_mfcc=13)\n', (541, 580), False, 'import librosa\n'), ((646, 673), 'librosa.feature.delta', 'librosa.feature.delta', (['mfcc'], {}), '(mfcc)\n', (667, 673), False, 'import librosa\n'), ((972, 1019), 'librosa.feature.chroma_cqt', 'librosa.feature.chroma_cqt', ([], {'y': 'y_harmonic', 'sr': 'sr'}), '(y=y_harmonic, sr=sr)\n', (998, 1019), False, 'import librosa\n'), ((1148, 1214), 'librosa.feature.sync', 'librosa.feature.sync', (['chromagram', 'beat_frames'], {'aggregate': 'np.median'}), '(chromagram, beat_frames, aggregate=np.median)\n', (1168, 1214), False, 'import librosa\n'), ((1358, 1399), 'numpy.vstack', 'np.vstack', (['[beat_chroma, beat_mfcc_delta]'], {}), '([beat_chroma, beat_mfcc_delta])\n', (1367, 1399), True, 'import numpy as np\n'), ((824, 853), 'numpy.vstack', 'np.vstack', (['[mfcc, mfcc_delta]'], {}), '([mfcc, mfcc_delta])\n', (833, 853), True, 'import numpy as np\n')] |
import torch
import numpy as np
import pandas as pd
import os
class MNIST:
    """Loader for the MNIST CSV dataset with simple min-max normalization.

    Pixel features are scaled into [0, 1] using statistics fitted on the
    training split; all arrays are returned as torch tensors.
    """

    def __init__(self, DATASET_DIR='./dataset/MNIST/'):
        self.DATASET_DIR = DATASET_DIR

    def fit_normalizer(self, x):
        """Record the global minimum and maximum of ``x`` for later scaling."""
        self.min = np.min(x)
        self.max = np.max(x)

    def transform_normalizer(self, x):
        """Scale ``x`` into [0, 1] using the fitted min/max."""
        span = self.max - self.min
        return (x - self.min) / span

    def inv_transform_normalizer(self, x):
        """Map normalized values back to the original range."""
        span = self.max - self.min
        return x * span + self.min

    def load_dataset(self):
        """Read train/test CSVs, normalize features, return torch tensors.

        Returns:
            (train_x, train_y, test_x, test_y): float32 features scaled by
            the training-set min/max, plus integer label vectors.
        """
        test = pd.read_csv(self.DATASET_DIR + 'test.csv').values
        train = pd.read_csv(self.DATASET_DIR + 'train.csv').values
        # First column holds the label; the rest are pixel values.
        test_x, test_y = test.T[1:].T, test.T[0]
        train_x, train_y = train.T[1:].T, train.T[0]
        train_x = train_x.astype(np.float32)
        test_x = test_x.astype(np.float32)
        # Fit the normalizer on training data only, then apply to both splits.
        self.fit_normalizer(train_x)
        train_x = self.transform_normalizer(train_x)
        test_x = self.transform_normalizer(test_x)
        return (torch.from_numpy(train_x), torch.from_numpy(train_y),
                torch.from_numpy(test_x), torch.from_numpy(test_y))
"numpy.max",
"torch.from_numpy",
"pandas.read_csv",
"numpy.min"
] | [((206, 215), 'numpy.min', 'np.min', (['x'], {}), '(x)\n', (212, 215), True, 'import numpy as np\n'), ((229, 238), 'numpy.max', 'np.max', (['x'], {}), '(x)\n', (235, 238), True, 'import numpy as np\n'), ((446, 488), 'pandas.read_csv', 'pd.read_csv', (["(self.DATASET_DIR + 'test.csv')"], {}), "(self.DATASET_DIR + 'test.csv')\n", (457, 488), True, 'import pandas as pd\n'), ((518, 561), 'pandas.read_csv', 'pd.read_csv', (["(self.DATASET_DIR + 'train.csv')"], {}), "(self.DATASET_DIR + 'train.csv')\n", (529, 561), True, 'import pandas as pd\n'), ((913, 938), 'torch.from_numpy', 'torch.from_numpy', (['train_x'], {}), '(train_x)\n', (929, 938), False, 'import torch\n'), ((940, 965), 'torch.from_numpy', 'torch.from_numpy', (['train_y'], {}), '(train_y)\n', (956, 965), False, 'import torch\n'), ((967, 991), 'torch.from_numpy', 'torch.from_numpy', (['test_x'], {}), '(test_x)\n', (983, 991), False, 'import torch\n'), ((993, 1017), 'torch.from_numpy', 'torch.from_numpy', (['test_y'], {}), '(test_y)\n', (1009, 1017), False, 'import torch\n')] |
from __future__ import division
import numpy as np
import climlab
import pytest
from climlab.radiation.rrtm import _climlab_to_rrtm, _rrtm_to_climlab
from climlab.tests.xarray_test import to_xarray
num_lev = 30
@pytest.mark.compiled
@pytest.mark.fast
def test_rrtmg_lw_creation():
    """Build a single-column RRTMG_LW model and verify that the
    climlab <-> RRTM array transformations round-trip both state fields."""
    column = climlab.column_state(num_lev=num_lev, water_depth=5.)
    model = climlab.radiation.RRTMG_LW(state=column)
    # Round-tripping through the RRTM layout must be lossless.
    for field in (model.Ts, model.Tatm):
        assert np.all(_rrtm_to_climlab(_climlab_to_rrtm(field)) == field)
@pytest.mark.compiled
@pytest.mark.fast
def test_rrtm_creation():
    """Create a coupled RRTMG model, take one step, and check diagnostics."""
    # initial state (temperatures)
    state = climlab.column_state(num_lev=num_lev, num_lat=1, water_depth=5.)
    # Create a RRTM radiation model
    rad = climlab.radiation.RRTMG(state=state)
    rad.step_forward()
    # The coupled model should be composed of the two band-specific subprocesses.
    assert type(rad.subprocess['LW']) is climlab.radiation.RRTMG_LW
    assert type(rad.subprocess['SW']) is climlab.radiation.RRTMG_SW
    # All-sky and clear-sky TOA flux diagnostics should all be present.
    assert hasattr(rad, 'OLR')
    assert hasattr(rad, 'OLRclr')
    assert hasattr(rad, 'ASR')
    assert hasattr(rad, 'ASRclr')
    # Test the xarray interface
    to_xarray(rad)
@pytest.mark.compiled
@pytest.mark.fast
def test_swap_component():
    """Remove the RRTMG longwave subprocess mid-run and swap in CAM3_LW."""
    # initial state (temperatures)
    state = climlab.column_state(num_lev=num_lev, num_lat=1, water_depth=5.)
    # Create a RRTM radiation model
    rad = climlab.radiation.RRTMG(state=state)
    rad.step_forward()
    # Swap out the longwave model for CAM3
    rad.remove_subprocess('LW')
    # The model should still step forward with no LW subprocess attached.
    rad.step_forward()
    rad.add_subprocess('LW', climlab.radiation.CAM3_LW(state=state))
    rad.step_forward()
    # The OLR diagnostic should be provided by the CAM3 longwave scheme.
    assert hasattr(rad, 'OLR')
@pytest.mark.compiled
@pytest.mark.fast
def test_multidim():
    """RRTMG_LW on a (lat, lev) grid: transforms round-trip and one step runs."""
    column = climlab.column_state(num_lev=40, num_lat=3, water_depth=5.)
    model = climlab.radiation.RRTMG_LW(state=column)
    # The climlab <-> RRTM reshaping must be reversible on multi-dim grids too.
    for field in (model.Ts, model.Tatm):
        assert np.all(_rrtm_to_climlab(_climlab_to_rrtm(field)) == field)
    # Can we integrate?
    model.step_forward()
    assert model.OLR.shape == model.Ts.shape
@pytest.mark.compiled
@pytest.mark.fast
def test_cloud():
    '''Put a high cloud layer in a radiative model.
    The all-sky ASR should be lower than clear-sky ASR.
    The all-sky OLR should be lower than clear-sky OLR.'''
    # State variables (Air and surface temperature)
    state = climlab.column_state(num_lev=50, water_depth=1.)
    lev = state.Tatm.domain.axes['lev'].points
    # Define some local cloud characteristics
    cldfrac = 0.5  # layer cloud fraction
    r_liq = 14.  # Cloud water drop effective radius (microns)
    clwp = 60.  # in-cloud liquid water path (g/m2)
    # The cloud fraction is a Gaussian bump centered at level i
    i = 25
    mycloud = {'cldfrac': cldfrac*np.exp(-(lev-lev[i])**2/(2*25.)**2),
               'clwp': np.zeros_like(state.Tatm) + clwp,
               'r_liq': np.zeros_like(state.Tatm) + r_liq,}
    # Test both RRTMG and CAM3:
    #for module in [climlab.radiation.RRTMG, climlab.radiation.CAM3]:
    # Apparently clouds in CAM3 are not working. Save this for later
    for module in [climlab.radiation.RRTMG]:
        rad = module(state=state, **mycloud)
        rad.compute_diagnostics()
        # Clouds reflect shortwave (lower ASR) and trap longwave (lower OLR).
        assert(rad.ASR - rad.ASRclr < 0.)
        assert(rad.OLR - rad.OLRclr < 0.)
@pytest.mark.compiled
@pytest.mark.slow
def test_radiative_forcing():
    '''Run a single-column radiative-convective model with RRTMG radiation
    out to equilibrium. Clone the model, double CO2 and measure the instantaneous
    change in TOA flux. It should be positive net downward flux.'''
    # State variables (Air and surface temperature)
    state = climlab.column_state(num_lev=30, water_depth=1.)
    # Fixed relative humidity
    h2o = climlab.radiation.ManabeWaterVapor(name='WaterVapor', state=state)
    # Couple water vapor to radiation
    # Set icld=0 for clear-sky only (no need to call cloud overlap routine)
    rad = climlab.radiation.RRTMG(name='Radiation',
                state=state,
                specific_humidity=h2o.q,
                icld=0)
    # Convective adjustment
    conv = climlab.convection.ConvectiveAdjustment(name='Convection',
                               state=state,
                               adj_lapse_rate=6.5)
    # Couple everything together
    rcm = climlab.couple([rad,h2o,conv], name='Radiative-Convective Model')
    rcm.integrate_years(5.)
    assert np.abs(rcm.ASR - rcm.OLR) < 0.1 # close to energy balance
    # Clone the equilibrated model and instantaneously double CO2.
    rcm2 = climlab.process_like(rcm)
    rcm2.subprocess['Radiation'].absorber_vmr['CO2'] *= 2.
    rcm2.compute_diagnostics()
    assert (rcm2.ASR - rcm2.OLR) > 1. # positive radiative forcing
    # Test the xarray interface
    to_xarray(rcm2)
@pytest.mark.compiled
@pytest.mark.slow
def test_latitude():
    '''
    Run a radiative equilibrium model with RRTMG radiation out to equilibrium
    with an annual mean insolation profile as a function of latitude.
    '''
    num_lat = 8
    # State variables (Air and surface temperature)
    state = climlab.column_state(num_lev=30, num_lat=num_lat, water_depth=1.)
    # insolation
    #sol = climlab.radiation.AnnualMeanInsolation(domains=model.Ts.domain)
    sol = climlab.radiation.AnnualMeanInsolation(name='Insolation',
                                           domains=state.Ts.domain)
    # radiation module with insolation as input
    # Set icld=0 for clear-sky only (no need to call cloud overlap routine)
    rad = climlab.radiation.RRTMG(name='Radiation', state=state, icld=0,
                                S0=sol.S0,
                                insolation=sol.insolation,
                                coszen=sol.coszen)
    # Couple everything together
    model = rad + sol
    # Run out to equilibrium
    model.integrate_years(2.)
    # Test for energy balance
    assert np.all(np.abs(model.ASR - model.OLR) < 0.1)
    # Test for reasonable surface temperature gradient
    # reversal of gradient at equator
    grad = np.diff(model.Ts, axis=0)
    # Temperatures should rise toward the equator and fall poleward of it.
    assert np.all(grad[0:(int(num_lat/2)-1)] > 0.)
    assert np.all(grad[int(num_lat/2):] < 0.)
@pytest.mark.compiled
@pytest.mark.fast
def test_no_ozone():
    '''When user gives None as the ozone_file, the model is initialized
    with zero ozone. This should work on arbitrary grids.'''
    ps = 1060.  # surface pressure (presumably hPa — matches climlab defaults)
    num_lev=4000
    state = climlab.column_state(num_lev=num_lev, num_lat=1, water_depth=5.)
    lev = state.Tatm.domain.lev
    # Replace the default vertical grid with a custom uniform one.
    lev.bounds = np.linspace(0., ps, num_lev+1)
    lev.points = lev.bounds[:-1] + np.diff(lev.bounds)/2.
    lev.delta = np.abs(np.diff(lev.bounds))
    # Create a RRTM radiation model
    rad = climlab.radiation.RRTMG(state=state, ozone_file=None)
    assert np.all(rad.absorber_vmr['O3']==0.)
@pytest.mark.compiled
@pytest.mark.fast
def test_fixed_insolation():
    """The model steps forward with a specified time-invariant insolation array."""
    nlat, nlev = 4, 20  # grid size
    doy = 80.  # days since Jan 1
    lats = np.linspace(-80., 80., nlat)
    column = climlab.column_state(num_lev=nlev, lat=lats)
    # Evaluate daily insolation once and pass it in as a plain array.
    fixed_ins = climlab.solar.insolation.daily_insolation(lat=lats, day=doy).values
    model = climlab.radiation.RRTMG(name='Radiation', state=column,
                                    insolation=fixed_ins)
    model.step_forward()
@pytest.mark.compiled
@pytest.mark.fast
def test_large_grid():
    """Both CAM3 and RRTMG take one step on a 90-latitude, 50-level grid."""
    column = climlab.column_state(num_lev=50, num_lat=90, water_depth=10.)
    # Same state object drives both schemes, CAM3 first, just as before.
    for scheme in (climlab.radiation.CAM3, climlab.radiation.RRTMG):
        model = scheme(state=column)
        model.step_forward()
| [
"climlab.radiation.RRTMG",
"climlab.tests.xarray_test.to_xarray",
"climlab.radiation.CAM3",
"climlab.radiation.AnnualMeanInsolation",
"climlab.radiation.CAM3_LW",
"numpy.diff",
"numpy.exp",
"climlab.process_like",
"numpy.linspace",
"climlab.couple",
"climlab.convection.ConvectiveAdjustment",
"... | [((295, 349), 'climlab.column_state', 'climlab.column_state', ([], {'num_lev': 'num_lev', 'water_depth': '(5.0)'}), '(num_lev=num_lev, water_depth=5.0)\n', (315, 349), False, 'import climlab\n'), ((359, 398), 'climlab.radiation.RRTMG_LW', 'climlab.radiation.RRTMG_LW', ([], {'state': 'state'}), '(state=state)\n', (385, 398), False, 'import climlab\n'), ((704, 769), 'climlab.column_state', 'climlab.column_state', ([], {'num_lev': 'num_lev', 'num_lat': '(1)', 'water_depth': '(5.0)'}), '(num_lev=num_lev, num_lat=1, water_depth=5.0)\n', (724, 769), False, 'import climlab\n'), ((816, 852), 'climlab.radiation.RRTMG', 'climlab.radiation.RRTMG', ([], {'state': 'state'}), '(state=state)\n', (839, 852), False, 'import climlab\n'), ((1178, 1192), 'climlab.tests.xarray_test.to_xarray', 'to_xarray', (['rad'], {}), '(rad)\n', (1187, 1192), False, 'from climlab.tests.xarray_test import to_xarray\n'), ((1308, 1373), 'climlab.column_state', 'climlab.column_state', ([], {'num_lev': 'num_lev', 'num_lat': '(1)', 'water_depth': '(5.0)'}), '(num_lev=num_lev, num_lat=1, water_depth=5.0)\n', (1328, 1373), False, 'import climlab\n'), ((1420, 1456), 'climlab.radiation.RRTMG', 'climlab.radiation.RRTMG', ([], {'state': 'state'}), '(state=state)\n', (1443, 1456), False, 'import climlab\n'), ((1776, 1836), 'climlab.column_state', 'climlab.column_state', ([], {'num_lev': '(40)', 'num_lat': '(3)', 'water_depth': '(5.0)'}), '(num_lev=40, num_lat=3, water_depth=5.0)\n', (1796, 1836), False, 'import climlab\n'), ((1846, 1885), 'climlab.radiation.RRTMG_LW', 'climlab.radiation.RRTMG_LW', ([], {'state': 'state'}), '(state=state)\n', (1872, 1885), False, 'import climlab\n'), ((2456, 2505), 'climlab.column_state', 'climlab.column_state', ([], {'num_lev': '(50)', 'water_depth': '(1.0)'}), '(num_lev=50, water_depth=1.0)\n', (2476, 2505), False, 'import climlab\n'), ((3762, 3811), 'climlab.column_state', 'climlab.column_state', ([], {'num_lev': '(30)', 'water_depth': '(1.0)'}), '(num_lev=30, 
water_depth=1.0)\n', (3782, 3811), False, 'import climlab\n'), ((3852, 3918), 'climlab.radiation.ManabeWaterVapor', 'climlab.radiation.ManabeWaterVapor', ([], {'name': '"""WaterVapor"""', 'state': 'state'}), "(name='WaterVapor', state=state)\n", (3886, 3918), False, 'import climlab\n'), ((4046, 4138), 'climlab.radiation.RRTMG', 'climlab.radiation.RRTMG', ([], {'name': '"""Radiation"""', 'state': 'state', 'specific_humidity': 'h2o.q', 'icld': '(0)'}), "(name='Radiation', state=state, specific_humidity=\n h2o.q, icld=0)\n", (4069, 4138), False, 'import climlab\n'), ((4276, 4371), 'climlab.convection.ConvectiveAdjustment', 'climlab.convection.ConvectiveAdjustment', ([], {'name': '"""Convection"""', 'state': 'state', 'adj_lapse_rate': '(6.5)'}), "(name='Convection', state=state,\n adj_lapse_rate=6.5)\n", (4315, 4371), False, 'import climlab\n'), ((4514, 4581), 'climlab.couple', 'climlab.couple', (['[rad, h2o, conv]'], {'name': '"""Radiative-Convective Model"""'}), "([rad, h2o, conv], name='Radiative-Convective Model')\n", (4528, 4581), False, 'import climlab\n'), ((4690, 4715), 'climlab.process_like', 'climlab.process_like', (['rcm'], {}), '(rcm)\n', (4710, 4715), False, 'import climlab\n'), ((4911, 4926), 'climlab.tests.xarray_test.to_xarray', 'to_xarray', (['rcm2'], {}), '(rcm2)\n', (4920, 4926), False, 'from climlab.tests.xarray_test import to_xarray\n'), ((5233, 5299), 'climlab.column_state', 'climlab.column_state', ([], {'num_lev': '(30)', 'num_lat': 'num_lat', 'water_depth': '(1.0)'}), '(num_lev=30, num_lat=num_lat, water_depth=1.0)\n', (5253, 5299), False, 'import climlab\n'), ((5402, 5489), 'climlab.radiation.AnnualMeanInsolation', 'climlab.radiation.AnnualMeanInsolation', ([], {'name': '"""Insolation"""', 'domains': 'state.Ts.domain'}), "(name='Insolation', domains=state.Ts.\n domain)\n", (5440, 5489), False, 'import climlab\n'), ((5671, 5794), 'climlab.radiation.RRTMG', 'climlab.radiation.RRTMG', ([], {'name': '"""Radiation"""', 'state': 'state', 'icld': 
'(0)', 'S0': 'sol.S0', 'insolation': 'sol.insolation', 'coszen': 'sol.coszen'}), "(name='Radiation', state=state, icld=0, S0=sol.S0,\n insolation=sol.insolation, coszen=sol.coszen)\n", (5694, 5794), False, 'import climlab\n'), ((6201, 6226), 'numpy.diff', 'np.diff', (['model.Ts'], {'axis': '(0)'}), '(model.Ts, axis=0)\n', (6208, 6226), True, 'import numpy as np\n'), ((6563, 6628), 'climlab.column_state', 'climlab.column_state', ([], {'num_lev': 'num_lev', 'num_lat': '(1)', 'water_depth': '(5.0)'}), '(num_lev=num_lev, num_lat=1, water_depth=5.0)\n', (6583, 6628), False, 'import climlab\n'), ((6677, 6710), 'numpy.linspace', 'np.linspace', (['(0.0)', 'ps', '(num_lev + 1)'], {}), '(0.0, ps, num_lev + 1)\n', (6688, 6710), True, 'import numpy as np\n'), ((6857, 6910), 'climlab.radiation.RRTMG', 'climlab.radiation.RRTMG', ([], {'state': 'state', 'ozone_file': 'None'}), '(state=state, ozone_file=None)\n', (6880, 6910), False, 'import climlab\n'), ((6922, 6959), 'numpy.all', 'np.all', (["(rad.absorber_vmr['O3'] == 0.0)"], {}), "(rad.absorber_vmr['O3'] == 0.0)\n", (6928, 6959), True, 'import numpy as np\n'), ((7216, 7249), 'numpy.linspace', 'np.linspace', (['(-80.0)', '(80.0)', 'num_lat'], {}), '(-80.0, 80.0, num_lat)\n', (7227, 7249), True, 'import numpy as np\n'), ((7260, 7306), 'climlab.column_state', 'climlab.column_state', ([], {'num_lev': 'num_lev', 'lat': 'lat'}), '(num_lev=num_lev, lat=lat)\n', (7280, 7306), False, 'import climlab\n'), ((7324, 7391), 'climlab.solar.insolation.daily_insolation', 'climlab.solar.insolation.daily_insolation', ([], {'lat': 'lat', 'day': 'day_of_year'}), '(lat=lat, day=day_of_year)\n', (7365, 7391), False, 'import climlab\n'), ((7436, 7512), 'climlab.radiation.RRTMG', 'climlab.radiation.RRTMG', ([], {'name': '"""Radiation"""', 'state': 'state', 'insolation': 'ins_array'}), "(name='Radiation', state=state, insolation=ins_array)\n", (7459, 7512), False, 'import climlab\n'), ((7641, 7713), 'climlab.column_state', 'climlab.column_state', ([], 
{'num_lev': 'num_lev', 'num_lat': 'num_lat', 'water_depth': '(10.0)'}), '(num_lev=num_lev, num_lat=num_lat, water_depth=10.0)\n', (7661, 7713), False, 'import climlab\n'), ((7724, 7759), 'climlab.radiation.CAM3', 'climlab.radiation.CAM3', ([], {'state': 'state'}), '(state=state)\n', (7746, 7759), False, 'import climlab\n'), ((7795, 7831), 'climlab.radiation.RRTMG', 'climlab.radiation.RRTMG', ([], {'state': 'state'}), '(state=state)\n', (7818, 7831), False, 'import climlab\n'), ((1608, 1646), 'climlab.radiation.CAM3_LW', 'climlab.radiation.CAM3_LW', ([], {'state': 'state'}), '(state=state)\n', (1633, 1646), False, 'import climlab\n'), ((4620, 4645), 'numpy.abs', 'np.abs', (['(rcm.ASR - rcm.OLR)'], {}), '(rcm.ASR - rcm.OLR)\n', (4626, 4645), True, 'import numpy as np\n'), ((6789, 6808), 'numpy.diff', 'np.diff', (['lev.bounds'], {}), '(lev.bounds)\n', (6796, 6808), True, 'import numpy as np\n'), ((2866, 2912), 'numpy.exp', 'np.exp', (['(-(lev - lev[i]) ** 2 / (2 * 25.0) ** 2)'], {}), '(-(lev - lev[i]) ** 2 / (2 * 25.0) ** 2)\n', (2872, 2912), True, 'import numpy as np\n'), ((2926, 2951), 'numpy.zeros_like', 'np.zeros_like', (['state.Tatm'], {}), '(state.Tatm)\n', (2939, 2951), True, 'import numpy as np\n'), ((2984, 3009), 'numpy.zeros_like', 'np.zeros_like', (['state.Tatm'], {}), '(state.Tatm)\n', (2997, 3009), True, 'import numpy as np\n'), ((6058, 6087), 'numpy.abs', 'np.abs', (['(model.ASR - model.OLR)'], {}), '(model.ASR - model.OLR)\n', (6064, 6087), True, 'import numpy as np\n'), ((6743, 6762), 'numpy.diff', 'np.diff', (['lev.bounds'], {}), '(lev.bounds)\n', (6750, 6762), True, 'import numpy as np\n'), ((477, 501), 'climlab.radiation.rrtm._climlab_to_rrtm', '_climlab_to_rrtm', (['rad.Ts'], {}), '(rad.Ts)\n', (493, 501), False, 'from climlab.radiation.rrtm import _climlab_to_rrtm, _rrtm_to_climlab\n'), ((549, 575), 'climlab.radiation.rrtm._climlab_to_rrtm', '_climlab_to_rrtm', (['rad.Tatm'], {}), '(rad.Tatm)\n', (565, 575), False, 'from climlab.radiation.rrtm 
import _climlab_to_rrtm, _rrtm_to_climlab\n'), ((1964, 1988), 'climlab.radiation.rrtm._climlab_to_rrtm', '_climlab_to_rrtm', (['rad.Ts'], {}), '(rad.Ts)\n', (1980, 1988), False, 'from climlab.radiation.rrtm import _climlab_to_rrtm, _rrtm_to_climlab\n'), ((2036, 2062), 'climlab.radiation.rrtm._climlab_to_rrtm', '_climlab_to_rrtm', (['rad.Tatm'], {}), '(rad.Tatm)\n', (2052, 2062), False, 'from climlab.radiation.rrtm import _climlab_to_rrtm, _rrtm_to_climlab\n')] |
"""
Low level interface to the SBIG Universal Driver/Library.
Reproduces in Python (using ctypes) the C interface provided by SBIG's shared
library, i.e. 1 function that does 72 different things selected by passing an
integer as the first argument. This is basically a direct translation of the
enums and structs defined in the library C-header to Python dicts and
ctypes.Structures, plus a class (SBIGDriver) to load the library
and call the single command function (SBIGDriver._send_command()).
"""
import platform
import ctypes
from ctypes.util import find_library
import _ctypes
import os
import time
from threading import Timer, Lock
import numpy as np
from numpy.ctypeslib import as_ctypes
from astropy import units as u
from astropy.io import fits
from astropy.time import Time
from .. import PanBase
################################################################################
# Main SBIGDriver class
################################################################################
class SBIGDriver(PanBase):
    def __init__(self, library_path=False, *args, **kwargs):
        """
        Main class representing the SBIG Universal Driver/Library interface.
        On construction loads SBIG's shared library which must have already
        been installed (see http://archive.sbig.com/sbwhtmls/devsw.htm). The
        name and location of the shared library can be manually specified with
        the library_path argument, otherwise the ctypes.util.find_library function
        will be used to locate it.
        Args:
            library_path (string, optional): shared library path,
                e.g. '/usr/local/lib/libsbigudrv.so'
        Returns:
            `~pocs.camera.sbigudrv.SBIGDriver`
        Raises:
            RuntimeError: if the shared library cannot be located.
            OSError: if the located library cannot be loaded by ctypes.CDLL.
        """
        super().__init__(*args, **kwargs)
        # Open library
        self.logger.debug('Opening SBIGUDrv library')
        if not library_path:
            library_path = find_library('sbigudrv')
            if library_path is None:
                self.logger.error('Could not find SBIG Universal Driver/Library!')
                raise RuntimeError('Could not find SBIG Universal Driver/Library!')
        # This CDLL loader will raise OSError if the library could not be loaded
        self._CDLL = ctypes.CDLL(library_path)
        # Open driver
        self.logger.debug('Opening SBIGUDrv driver')
        self._send_command('CC_OPEN_DRIVER')
        # Query USB bus for connected cameras, store basic camera info.
        self.logger.debug('Searching for connected SBIG cameras')
        self._camera_info = QueryUSBResults2()
        self._send_command('CC_QUERY_USB2', results=self._camera_info)
        self._send_command('CC_CLOSE_DRIVER')
        # Connect to each camera in turn, obtain its 'handle' and store it.
        self._handles = []
        for i in range(self._camera_info.camerasFound):
            self._send_command('CC_OPEN_DRIVER')
            odp = OpenDeviceParams(device_type_codes['DEV_USB{}'.format(i + 1)],
                                   0, 0)
            self._send_command('CC_OPEN_DEVICE', params=odp)
            elp = EstablishLinkParams()
            elr = EstablishLinkResults()
            self._send_command('CC_ESTABLISH_LINK', params=elp, results=elr)
            ghr = GetDriverHandleResults()
            self._send_command('CC_GET_DRIVER_HANDLE', results=ghr)
            self._handles.append(ghr.handle)
            # Setting the handle to INVALID_HANDLE_VALUE seems to have the side
            # effect of closing both device and driver, ready for the next loop
            # iteration.
            shp = SetDriverHandleParams(INVALID_HANDLE_VALUE)
            self._send_command('CC_SET_DRIVER_HANDLE', params=shp)
        # Prepare to keep track of which handles have been assigned to Camera objects
        self._handle_assigned = [False] * len(self._handles)
        self._ccd_info = {}
        # Create a Lock that will be used to prevent simultaneous commands from multiple
        # cameras. Main reason for this is preventing overlapping readouts.
        self._command_lock = Lock()
        # Reopen driver ready for next command
        self._send_command('CC_OPEN_DRIVER')
        self.logger.info('\t\t\t SBIGDriver initialised: found {} cameras'.format(self._camera_info.camerasFound))
    def __del__(self):
        """Close device & driver and attempt to unload the shared library on garbage collection."""
        self.logger.debug('Closing SBIGUDrv driver')
        # Using Set Handle to do this should ensure that both device and driver are closed
        # regardless of current state
        shp = SetDriverHandleParams(INVALID_HANDLE_VALUE)
        self._send_command('CC_SET_DRIVER_HANDLE', params=shp)
        # Vain attempt to unload the shared library (CPython gives no supported
        # way to do this; dlclose on the internal handle is best-effort only).
        self.logger.debug('Closing SBIGUDrv library')
        _ctypes.dlclose(self._CDLL._handle)
        del self._CDLL
    def assign_handle(self, serial=None):
        """
        Returns the next unassigned camera handle, along with basic info on the corresponding camera.
        If passed a serial number will attempt to assign the handle corresponding to a camera
        with that serial number, raising an error if one is not available.
        Args:
            serial (str, optional): serial number of the camera to claim. If None,
                the first unassigned handle is used.
        Returns:
            tuple: (handle, ccd_info dict) on success, or (INVALID_HANDLE_VALUE, None)
                if no matching, unassigned camera is connected.
        """
        if serial:
            # Look for a connected, unassigned camera with matching serial number
            # List of serial numbers
            serials = [str(self._camera_info.usbInfo[i].serialNumber, encoding='ascii')
                       for i in range(self._camera_info.camerasFound)]
            if serial not in serials:
                # Camera we're looking for is not connected!
                self.logger.error('SBIG camera serial number {} not connected!'.format(serial))
                return (INVALID_HANDLE_VALUE, None)
            index = serials.index(serial)
            if self._handle_assigned[index]:
                # Camera we're looking for has already been assigned!
                self.logger.error('SBIG camera serial number {} already assigned!'.format(serial))
                return (INVALID_HANDLE_VALUE, None)
        else:
            # No serial number specified, just take the first unassigned handle
            try:
                index = self._handle_assigned.index(False)
            except ValueError:
                # All handles already assigned, must be trying to initialise more cameras than are connected.
                self.logger.error('No connected SBIG cameras available!')
                return (INVALID_HANDLE_VALUE, None)
        handle = self._handles[index]
        self.logger.debug('Assigning handle {} to SBIG camera'.format(handle))
        self._handle_assigned[index] = True
        # Get all the information from the camera
        self.logger.debug('Obtaining SBIG camera info from {}'.format(handle))
        ccd_info = self._get_ccd_info(handle)
        # Serial number, name and type should match with those from Query USB Info obtained earlier
        camera_serial = str(self._camera_info.usbInfo[index].serialNumber, encoding='ascii')
        assert camera_serial == ccd_info['serial_number'], self.logger.error('Serial number mismatch!')
        # Keep camera info.
        self._ccd_info[handle] = ccd_info
        # Stop camera from skipping lowering of Vdd for exposures of 3 seconds or less
        self._disable_vdd_optimized(handle)
        # Return both a handle and the dictionary of camera info
        return (handle, ccd_info)
def query_temp_status(self, handle):
query_temp_params = QueryTemperatureStatusParams(temp_status_request_codes['TEMP_STATUS_ADVANCED2'])
query_temp_results = QueryTemperatureStatusResults2()
with self._command_lock:
self._set_handle(handle)
self._send_command('CC_QUERY_TEMPERATURE_STATUS', query_temp_params, query_temp_results)
return query_temp_results
def set_temp_regulation(self, handle, set_point):
if set_point is not None:
# Passed a value as set_point, turn on cooling.
enable_code = temperature_regulation_codes['REGULATION_ON']
if isinstance(set_point, u.Quantity):
set_point = set_point.to(u.Celsius).value
else:
# Passed None as set_point, turn off cooling and reset
# set point to +25 C
enable_code = temperature_regulation_codes['REGULATION_OFF']
set_point = 25.0
set_temp_params = SetTemperatureRegulationParams2(enable_code, set_point)
# Use temperature regulation autofreeze, if available (might marginally reduce read noise).
autofreeze_code = temperature_regulation_codes['REGULATION_ENABLE_AUTOFREEZE']
set_freeze_params = SetTemperatureRegulationParams2(autofreeze_code, set_point)
with self._command_lock:
self._set_handle(handle)
self._send_command('CC_SET_TEMPERATURE_REGULATION2', params=set_temp_params)
self._send_command('CC_SET_TEMPERATURE_REGULATION2', params=set_freeze_params)
    def take_exposure(self, handle, seconds, filename, exposure_event=None, dark=False):
        """
        Starts an exposure and spawns thread that will perform readout and write
        to file when the exposure is complete.
        Args:
            handle (int): camera handle obtained from assign_handle().
            seconds: exposure time, either a plain number of seconds or an
                astropy Quantity convertible to seconds.
            filename (string): path of the FITS file the image will be written to.
            exposure_event (threading.Event, optional): set by the readout thread
                once the image has been written to disk.
            dark (bool, optional): if True the shutter is kept closed throughout
                (dark frame). Default False.
        Returns:
            threading.Timer: the (already started) scheduled readout thread.
        """
        ccd_info = self._ccd_info[handle]
        # SBIG driver expects exposure time in 100ths of a second.
        if isinstance(seconds, u.Quantity):
            seconds = seconds.to(u.second).value
        centiseconds = int(seconds * 100)
        # This setting is ignored by most cameras (even if they do have ABG), only exceptions are the TC211 versions
        # of the Tracking CCD on the ST-7/8/etc. and the Imaging CCD of the PixCel255
        if ccd_info['imaging_ABG']:
            # Camera supports anti-blooming, use it on medium setting?
            abg_command_code = abg_state_codes['ABG_CLK_MED7']
        else:
            # Camera doesn't support anti-blooming, don't try to use it.
            abg_command_code = abg_state_codes['ABG_LOW7']
        if not dark:
            # Normal exposure, will open (and close) shutter
            shutter_command_code = shutter_command_codes['SC_OPEN_SHUTTER']
        else:
            # Dark frame, will keep shutter closed throughout
            shutter_command_code = shutter_command_codes['SC_CLOSE_SHUTTER']
        # TODO: implement control of binning readout modes.
        # For now use standard unbinned mode.
        readout_mode = 'RM_1X1'
        readout_mode_code = readout_mode_codes[readout_mode]
        # TODO: implement windowed readout.
        # For now use full image size for unbinned mode.
        top = 0
        left = 0
        height = int(ccd_info['readout_modes'][readout_mode]['height'].value)
        width = int(ccd_info['readout_modes'][readout_mode]['width'].value)
        start_exposure_params = StartExposureParams2(ccd_codes['CCD_IMAGING'],
                                                     centiseconds,
                                                     abg_command_code,
                                                     shutter_command_code,
                                                     readout_mode_code,
                                                     top, left,
                                                     height, width)
        # Make sure there isn't already an exposure in progress on this camera.
        # If there is then we need to wait otherwise we'll cause a hang.
        # Could do this with Locks but it's more robust to directly query the hardware.
        query_status_params = QueryCommandStatusParams(command_codes['CC_START_EXPOSURE2'])
        query_status_results = QueryCommandStatusResults()
        with self._command_lock:
            self._set_handle(handle)
            self._send_command('CC_QUERY_COMMAND_STATUS', params=query_status_params, results=query_status_results)
        if query_status_results.status != status_codes['CS_IDLE']:
            self.logger.warning('Attempt to start exposure on {} while camera busy!'.format(handle))
            # Wait until camera is idle, polling once per second.
            while query_status_results.status != status_codes['CS_IDLE']:
                self.logger.warning('Waiting for exposure on {} to complete'.format(handle))
                time.sleep(1)
                with self._command_lock:
                    self._set_handle(handle)
                    self._send_command('CC_QUERY_COMMAND_STATUS',
                                       params=query_status_params,
                                       results=query_status_results)
        # Assemble basic FITS header
        temp_status = self.query_temp_status(handle)
        if temp_status.coolingEnabled:
            # More than 0.5 C from set point, or cooler at full power, suggests
            # the CCD temperature hasn't stabilised yet.
            if abs(temp_status.imagingCCDTemperature - temp_status.ccdSetpoint) > 0.5 or \
                    temp_status.imagingCCDPower == 100.0:
                self.logger.warning('Unstable CCD temperature in {}'.format(handle))
        time_now = Time.now()
        header = fits.Header()
        header.set('INSTRUME', self._ccd_info[handle]['serial_number'])
        header.set('DATE-OBS', time_now.fits)
        header.set('EXPTIME', seconds)
        header.set('CCD-TEMP', temp_status.imagingCCDTemperature)
        header.set('SET-TEMP', temp_status.ccdSetpoint)
        header.set('EGAIN', self._ccd_info[handle]['readout_modes'][readout_mode]['gain'].value)
        header.set('XPIXSZ', self._ccd_info[handle]['readout_modes'][readout_mode]['pixel_width'].value)
        header.set('YPIXSZ', self._ccd_info[handle]['readout_modes'][readout_mode]['pixel_height'].value)
        if dark:
            header.set('IMAGETYP', 'Dark Frame')
        else:
            header.set('IMAGETYP', 'Light Frame')
        # Start exposure
        self.logger.debug('Starting {} second exposure on {}'.format(seconds, handle))
        with self._command_lock:
            self._set_handle(handle)
            self._send_command('CC_START_EXPOSURE2', params=start_exposure_params)
        # Use a Timer to schedule the exposure readout and return a reference to the Timer.
        # Fire slightly before the nominal end of exposure; _readout polls for completion.
        wait = seconds - 0.1 if seconds > 0.1 else 0.0
        readout_args = (handle, centiseconds, filename, readout_mode_code,
                        top, left, height, width,
                        header, exposure_event)
        readout_thread = Timer(interval=wait,
                               function=self._readout,
                               args=readout_args)
        readout_thread.start()
        return readout_thread
# Private methods
    def _readout(self, handle, centiseconds, filename, readout_mode_code,
                 top, left, height, width,
                 header, exposure_event=None):
        """
        Waits for an exposure to complete then reads the image out line by line
        and writes it to a FITS file.
        Runs in a separate thread, scheduled by take_exposure(). The driver
        command lock is held for the entire readout so that readouts from
        different cameras cannot overlap.
        Args:
            handle (int): camera handle the exposure was started on.
            centiseconds (int): exposure time in 100ths of a second (recorded
                for reference; completion is detected by polling the driver).
            filename (string): path of the FITS file to write.
            readout_mode_code (int): readout (binning) mode used for the exposure.
            top, left, height, width (int): readout window in unbinned pixels.
            header (astropy.io.fits.Header): FITS header to write with the image.
            exposure_event (threading.Event, optional): set once the image has
                been written to disk.
        """
        # Set up all the parameter and result Structures that will be needed.
        end_exposure_params = EndExposureParams(ccd_codes['CCD_IMAGING'])
        start_readout_params = StartReadoutParams(ccd_codes['CCD_IMAGING'],
                                                  readout_mode_code,
                                                  top, left,
                                                  height, width)
        query_status_params = QueryCommandStatusParams(command_codes['CC_START_EXPOSURE2'])
        query_status_results = QueryCommandStatusResults()
        readout_line_params = ReadoutLineParams(ccd_codes['CCD_IMAGING'],
                                                readout_mode_code,
                                                left, width)
        end_readout_params = EndReadoutParams(ccd_codes['CCD_IMAGING'])
        # Array to hold the image data
        image_data = np.zeros((height, width), dtype=np.uint16)
        # Check for the end of the exposure.
        with self._command_lock:
            self._set_handle(handle)
            self._send_command('CC_QUERY_COMMAND_STATUS', params=query_status_params, results=query_status_results)
        # Poll if needed.
        while query_status_results.status != status_codes['CS_INTEGRATION_COMPLETE']:
            self.logger.debug('Waiting for exposure on {} to complete'.format(handle))
            time.sleep(0.1)
            with self._command_lock:
                self._set_handle(handle)
                self._send_command('CC_QUERY_COMMAND_STATUS', params=query_status_params, results=query_status_results)
        self.logger.debug('Exposure on {} complete'.format(handle))
        # Readout data. Each CC_READOUT_LINE call fills one row of image_data
        # in place via the ctypes view returned by as_ctypes().
        with self._command_lock:
            self._set_handle(handle)
            self._send_command('CC_END_EXPOSURE', params=end_exposure_params)
            self._send_command('CC_START_READOUT', params=start_readout_params)
            for i in range(height):
                self._send_command('CC_READOUT_LINE', params=readout_line_params, results=as_ctypes(image_data[i]))
            self._send_command('CC_END_READOUT', params=end_readout_params)
            self.logger.debug('Readout on {} complete'.format(handle))
        # Write to FITS file. Includes basic headers directly related to the camera only.
        hdu = fits.PrimaryHDU(image_data, header=header)
        # Create the images directory if it doesn't already exist
        if os.path.dirname(filename):
            os.makedirs(os.path.dirname(filename), mode=0o775, exist_ok=True)
        hdu.writeto(filename)
        self.logger.debug('Image written to {}'.format(filename))
        # Use Event to notify that exposure has completed.
        if exposure_event:
            exposure_event.set()
    def _get_ccd_info(self, handle):
        """
        Use Get CCD Info to gather all relevant info about CCD capabilities. Already
        have camera type, 'name' and serial number, this gets the rest.
        Args:
            handle (int): camera handle obtained from assign_handle().
        Returns:
            dict: CCD capabilities (firmware version, camera type/name, bad
                columns, ABG, serial number, feature flags and per-readout-mode
                geometry/gain info under the 'readout_modes' key).
        """
        # 'CCD_INFO_IMAGING' will get firmware version, and a list of readout modes (binning)
        # with corresponding image widths, heights, gains and also physical pixel width, height.
        ccd_info_params0 = GetCCDInfoParams(ccd_info_request_codes['CCD_INFO_IMAGING'])
        ccd_info_results0 = GetCCDInfoResults0()
        # 'CCD_INFO_EXTENDED' will get bad column info, and whether the CCD has ABG or not.
        ccd_info_params2 = GetCCDInfoParams(ccd_info_request_codes['CCD_INFO_EXTENDED'])
        ccd_info_results2 = GetCCDInfoResults2()
        # 'CCD_INFO_EXTENDED2_IMAGING' will info like full frame/frame transfer, interline or not,
        # presence of internal frame buffer, etc.
        ccd_info_params4 = GetCCDInfoParams(ccd_info_request_codes['CCD_INFO_EXTENDED2_IMAGING'])
        ccd_info_results4 = GetCCDInfoResults4()
        # 'CCD_INFO_EXTENDED3' will get info like mechanical shutter or not, mono/colour, Bayer/Truesense.
        ccd_info_params6 = GetCCDInfoParams(ccd_info_request_codes['CCD_INFO_EXTENDED3'])
        ccd_info_results6 = GetCCDInfoResults6()
        with self._command_lock:
            self._set_handle(handle)
            self._send_command('CC_GET_CCD_INFO', params=ccd_info_params0, results=ccd_info_results0)
            self._send_command('CC_GET_CCD_INFO', params=ccd_info_params2, results=ccd_info_results2)
            self._send_command('CC_GET_CCD_INFO', params=ccd_info_params4, results=ccd_info_results4)
            self._send_command('CC_GET_CCD_INFO', params=ccd_info_params6, results=ccd_info_results6)
        # Now to convert all this ctypes stuff into Pythonic data structures.
        ccd_info = {'firmware_version': self._bcd_to_string(ccd_info_results0.firmwareVersion),
                    'camera_type': camera_types[ccd_info_results0.cameraType],
                    'camera_name': str(ccd_info_results0.name, encoding='ascii'),
                    'bad_columns': ccd_info_results2.columns[0:ccd_info_results2.badColumns],
                    'imaging_ABG': bool(ccd_info_results2.imagingABG),
                    'serial_number': str(ccd_info_results2.serialNumber, encoding='ascii'),
                    'frame_transfer': bool(ccd_info_results4.capabilities_b0),
                    'electronic_shutter': bool(ccd_info_results4.capabilities_b1),
                    'remote_guide_head_support': bool(ccd_info_results4.capabilities_b2),
                    'Biorad_TDI_support': bool(ccd_info_results4.capabilities_b3),
                    'AO8': bool(ccd_info_results4.capabilities_b4),
                    'frame_buffer': bool(ccd_info_results4.capabilities_b5),
                    'dump_extra': ccd_info_results4.dumpExtra,
                    'STXL': bool(ccd_info_results6.camera_b0),
                    'mechanical_shutter': not bool(ccd_info_results6.camera_b1),
                    'colour': bool(ccd_info_results6.ccd_b0),
                    'Truesense': bool(ccd_info_results6.ccd_b1)}
        readout_mode_info = self._parse_readout_info(ccd_info_results0.readoutInfo[0:ccd_info_results0.readoutModes])
        ccd_info['readout_modes'] = readout_mode_info
        return ccd_info
def _bcd_to_int(self, bcd, int_type='ushort'):
"""
Function to decode the Binary Coded Decimals returned by the Get CCD Info command.
These will be integers of C types ushort or ulong, encoding decimal numbers of the form
XX.XX or XXXXXX.XX, i.e. when converting to a numerical value they will need dividing by
100.
"""
# BCD has been automatically converted by ctypes to a Python int. Need to convert to
# bytes sequence of correct length and byte order. SBIG library seems to use
# big endian byte order for the BCDs regardless of platform.
if int_type == 'ushort':
bcd = bcd.to_bytes(ctypes.sizeof(ctypes.c_ushort), byteorder='big')
elif int_type == 'ulong':
bcd = bcd.to_bytes(ctypes.sizeof(ctypes.c_ulong), byteorder='big')
else:
self.logger.error('Unknown integer type {}!'.format(int_type))
return
# Convert bytes sequence to hexadecimal string representation, which will also be the
# string representation of the decoded binary coded decimal, apart from possible
# leading zeros. Convert back to an int to strip the leading zeros.
return int(bcd.hex())
def _bcd_to_float(self, bcd, int_type='ushort'):
# Includes conversion to intended numerical value, i.e. division by 100
return self._bcd_to_int(bcd, int_type) / 100.0
def _bcd_to_string(self, bcd, int_type='ushort'):
# Includes conversion to intended numerical value, i.e. division by 100
s = str(self._bcd_to_int(bcd, int_type))
return "{}.{}".format(s[:-2], s[-2:])
def _parse_readout_info(self, infos):
readout_mode_info = {}
for info in infos:
mode = readout_modes[info.mode]
gain = self._bcd_to_float(info.gain)
pixel_width = self._bcd_to_float(info.pixelWidth, int_type='ulong')
pixel_height = self._bcd_to_float(info.pixelHeight, int_type='ulong')
readout_mode_info[mode] = {'width': info.width * u.pixel,
'height': info.height * u.pixel,
'gain': gain * u.electron / u.adu,
'pixel_width': pixel_width * u.um,
'pixel_height': pixel_height * u.um}
return readout_mode_info
def _disable_vdd_optimized(self, handle):
"""
There are many driver control parameters, almost all of which we would not want to change from their default
values. The one exception is DCP_VDD_OPTIMIZED. From the SBIG manual:
The DCP_VDD_OPTIMIZED parameter defaults to TRUE which lowers the CCD’s Vdd (which reduces amplifier glow)
only for images 3 seconds and longer. This was done to increase the image throughput for short exposures as
raising and lowering Vdd takes 100s of milliseconds. The lowering and subsequent raising of Vdd delays the
image readout slightly which causes short exposures to have a different bias structure than long exposures.
Setting this parameter to FALSE stops the short exposure optimization from occurring.
The default behaviour will improve image throughput for exposure times of 3 seconds or less but at the penalty
of altering the bias structure between short and long exposures. This could cause systematic errors in bias
frames, dark current measurements, etc. It's probably not worth it.
"""
set_driver_control_params = SetDriverControlParams(driver_control_codes['DCP_VDD_OPTIMIZED'], 0)
self.logger.debug('Disabling DCP_VDD_OPTIMIZE on {}'.format(handle))
with self._command_lock:
self._set_handle(handle)
self._send_command('CC_SET_DRIVER_CONTROL', params=set_driver_control_params)
def _set_handle(self, handle):
set_handle_params = SetDriverHandleParams(handle)
self._send_command('CC_SET_DRIVER_HANDLE', params=set_handle_params)
    def _send_command(self, command, params=None, results=None):
        """
        Function for sending a command to the SBIG Universal Driver/Library.
        Args:
            command (string): Name of command to send
            params (ctypes.Structure, optional): Subclass of Structure
                containing command parameters
            results (ctypes.Structure, optional): Subclass of Structure to
                store command results
        Returns:
            string: error name corresponding to the driver's return code; always
                'CE_NO_ERROR' when this function returns normally (any other
                recognised code raises RuntimeError instead).
        Raises:
            KeyError: Raised if command not in SBIG command list
            RuntimeError: Raised if return code indicates a fatal error, or is
                not recognised
        """
        # Look up integer command code for the given command string, raises
        # KeyError if no matches found.
        try:
            command_code = command_codes[command]
        except KeyError:
            raise KeyError("Invalid SBIG command '{}'!".format(command))
        # Send the command to the driver. Need to pass pointers to params,
        # results structs or None (which gets converted to a null pointer).
        return_code = self._CDLL.SBIGUnivDrvCommand(command_code,
                                                    (ctypes.byref(params) if params else None),
                                                    (ctypes.byref(results) if results else None))
        # Look up the error message for the return code, raises RuntimeError if
        # no match is found.
        try:
            error = errors[return_code]
        except KeyError:
            raise RuntimeError("SBIG Driver returned unknown error code '{}'".format(return_code))
        # Raise a RuntimeError exception if return code is not 0 (no error).
        # This is probably excessively cautious and will need to be relaxed,
        # there are likely to be situations where other return codes don't
        # necessarily indicate a fatal error.
        if error != 'CE_NO_ERROR':
            raise RuntimeError("SBIG Driver returned error '{}'!".format(error))
        return error
#################################################################################
# Commands and error messages
#################################################################################
# Camera command codes. Doesn't include the 'SBIG only" commands.
# The command codes are simply sequential integers starting from zero, so build
# the name -> code mapping by enumerating the names in code order.
command_codes = {name: code for code, name in enumerate((
    'CC_NULL',
    'CC_START_EXPOSURE',
    'CC_END_EXPOSURE',
    'CC_READOUT_LINE',
    'CC_DUMP_LINES',
    'CC_SET_TEMPERATURE_REGULATION',
    'CC_QUERY_TEMPERATURE_STATUS',
    'CC_ACTIVATE_RELAY',
    'CC_PULSE_OUT',
    'CC_ESTABLISH_LINK',
    'CC_GET_DRIVER_INFO',
    'CC_GET_CCD_INFO',
    'CC_QUERY_COMMAND_STATUS',
    'CC_MISCELLANEOUS_CONTROL',
    'CC_READ_SUBTRACT_LINE',
    'CC_UPDATE_CLOCK',
    'CC_READ_OFFSET',
    'CC_OPEN_DRIVER',
    'CC_CLOSE_DRIVER',
    'CC_TX_SERIAL_BYTES',
    'CC_GET_SERIAL_STATUS',
    'CC_AO_TIP_TILT',
    'CC_AO_SET_FOCUS',
    'CC_AO_DELAY',
    'CC_GET_TURBO_STATUS',
    'CC_END_READOUT',
    'CC_GET_US_TIMER',
    'CC_OPEN_DEVICE',
    'CC_CLOSE_DEVICE',
    'CC_SET_IRQL',
    'CC_GET_IRQL',
    'CC_GET_LINE',
    'CC_GET_LINK_STATUS',
    'CC_GET_DRIVER_HANDLE',
    'CC_SET_DRIVER_HANDLE',
    'CC_START_READOUT',
    'CC_GET_ERROR_STRING',
    'CC_SET_DRIVER_CONTROL',
    'CC_GET_DRIVER_CONTROL',
    'CC_USB_AD_CONTROL',
    'CC_QUERY_USB',
    'CC_GET_PENTIUM_CYCLE_COUNT',
    'CC_RW_USB_I2C',
    'CC_CFW',
    'CC_BIT_IO',
    'CC_USER_EEPROM',
    'CC_AO_CENTER',
    'CC_BTDI_SETUP',
    'CC_MOTOR_FOCUS',
    'CC_QUERY_ETHERNET',
    'CC_START_EXPOSURE2',
    'CC_SET_TEMPERATURE_REGULATION2',
    'CC_READ_OFFSET2',
    'CC_DIFF_GUIDER',
    'CC_COLUMN_EEPROM',
    'CC_CUSTOMER_OPTIONS',
    'CC_DEBUG_LOG',
    'CC_QUERY_USB2',
    'CC_QUERY_ETHERNET2'))}
# Reversed dictionary, just in case you ever need to look up a command given a
# command code.
commands = {code: command for command, code in command_codes.items()}
# Camera error messages
# Error codes are sequential integers starting from zero, so build the
# code -> name mapping by enumerating the names in code order.
errors = dict(enumerate((
    'CE_NO_ERROR',
    'CE_CAMERA_NOT_FOUND',
    'CE_EXPOSURE_IN_PROGRESS',
    'CE_NO_EXPOSURE_IN_PROGRESS',
    'CE_UNKNOWN_COMMAND',
    'CE_BAD_CAMERA_COMMAND',
    'CE_BAD_PARAMETER',
    'CE_TX_TIMEOUT',
    'CE_RX_TIMEOUT',
    'CE_NAK_RECEIVED',
    'CE_CAN_RECEIVED',
    'CE_UNKNOWN_RESPONSE',
    'CE_BAD_LENGTH',
    'CE_AD_TIMEOUT',
    'CE_KBD_ESC',
    'CE_CHECKSUM_ERROR',
    'CE_EEPROM_ERROR',
    'CE_SHUTTER_ERROR',
    'CE_UNKNOWN_CAMERA',
    'CE_DRIVER_NOT_FOUND',
    'CE_DRIVER_NOT_OPEN',
    'CE_DRIVER_NOT_CLOSED',
    'CE_SHARE_ERROR',
    'CE_TCE_NOT_FOUND',
    'CE_AO_ERROR',
    'CE_ECP_ERROR',
    'CE_MEMORY_ERROR',
    'CE_DEVICE_NOT_FOUND',
    'CE_DEVICE_NOT_OPEN',
    'CE_DEVICE_NOT_CLOSED',
    'CE_DEVICE_NOT_IMPLEMENTED',
    'CE_DEVICE_DISABLED',
    'CE_OS_ERROR',
    'CE_SOCK_ERROR',
    'CE_SERVER_NOT_FOUND',
    'CE_CFW_ERROR',
    'CE_MF_ERROR',
    'CE_FIRMWARE_ERROR',
    'CE_DIFF_GUIDER_ERROR',
    'CE_RIPPLE_CORRECTION_ERROR',
    'CE_EZUSB_RESET',
    'CE_NEXT_ERROR')))
# Reverse dictionary, just in case you ever need to look up an error code given
# an error name
error_codes = {error: error_code for error_code, error in errors.items()}
#################################################################################
# Query USB Info related.
#################################################################################
class QueryUSBInfo(ctypes.Structure):
    """
    ctypes (Sub-)Structure used to hold details of individual cameras returned
    by 'CC_QUERY_USB' command
    """
    # Rather than use C99 _Bool type SBIG library uses 0 = False, 1 = True
    _fields_ = [('cameraFound', ctypes.c_ushort),   # 0/1 flag, see note above
                ('cameraType', ctypes.c_ushort),    # index into camera_types
                ('name', ctypes.c_char * 64),       # camera name, ASCII bytes
                ('serialNumber', ctypes.c_char * 10)]  # serial number, ASCII bytes
class QueryUSBResults(ctypes.Structure):
    """
    ctypes Structure used to hold the results from 'CC_QUERY_USB' command
    """
    # camerasFound gives the number of valid entries in the usbInfo array
    # (which has room for up to 4 cameras).
    _fields_ = [('camerasFound', ctypes.c_ushort),
                ('usbInfo', QueryUSBInfo * 4)]
class QueryUSBResults2(ctypes.Structure):
"""
ctypes Structure used to hold the results from 'CC_QUERY_USB2' command
"""
_fields_ = [('camerasFound', ctypes.c_ushort),
('usbInfo', QueryUSBInfo * 8)]
# Camera type codes, returned by Query USB Info, Establish Link, Get CCD Info, etc.
camera_types = {4: "ST7_CAMERA",
                5: "ST8_CAMERA",
                6: "ST5C_CAMERA",
                7: "TCE_CONTROLLER",
                8: "ST237_CAMERA",
                9: "STK_CAMERA",
                10: "ST9_CAMERA",
                11: "STV_CAMERA",
                12: "ST10_CAMERA",
                13: "ST1K_CAMERA",
                14: "ST2K_CAMERA",
                15: "STL_CAMERA",
                16: "ST402_CAMERA",
                17: "STX_CAMERA",
                18: "ST4K_CAMERA",
                19: "STT_CAMERA",
                20: "STI_CAMERA",
                21: "STF_CAMERA",
                22: "NEXT_CAMERA",
                0xFFFF: "NO_CAMERA"}  # sentinel: no camera present
# Reverse dictionary
camera_type_codes = {camera: code for code, camera in camera_types.items()}
#################################################################################
# Open Device, Establish Link, Get Link status related
#################################################################################
# Device types by code. Used with Open Device, etc.
# Codes >= 0x7F00 are USB/Ethernet devices; low codes are legacy parallel ports.
device_types = {0: "DEV_NONE",
                1: "DEV_LPT1",
                2: "DEV_LPT2",
                3: "DEV_LPT3",
                0x7F00: "DEV_USB",
                0x7F01: "DEV_ETH",
                0x7F02: "DEV_USB1",
                0x7F03: "DEV_USB2",
                0x7F04: "DEV_USB3",
                0x7F05: "DEV_USB4",
                0x7F06: "DEV_USB5",
                0x7F07: "DEV_USB6",
                0x7F08: "DEV_USB7",
                0x7F09: "DEV_USB8"}
# Reverse dictionary
device_type_codes = {device: code for code, device in device_types.items()}
class OpenDeviceParams(ctypes.Structure):
    """
    ctypes Structure to hold the parameters for the Open Device command.
    """
    _fields_ = [('deviceType', ctypes.c_ushort),  # key into device_types
                ('lptBaseAddress', ctypes.c_ushort),  # only relevant for DEV_LPT* devices
                ('ipAddress', ctypes.c_ulong)]  # only relevant for DEV_ETH
class EstablishLinkParams(ctypes.Structure):
    """
    ctypes Structure to hold the parameters for the Establish Link command.
    """
    _fields_ = [('sbigUseOnly', ctypes.c_ushort)]
class EstablishLinkResults(ctypes.Structure):
    """
    ctypes Structure to hold the results from the Establish Link command.
    """
    # cameraType is a key into camera_types.
    _fields_ = [('cameraType', ctypes.c_ushort)]
class GetLinkStatusResults(ctypes.Structure):
    """
    ctypes Structure to hold the results from the Get Link Status command.
    """
    _fields_ = [('linkEstablished', ctypes.c_ushort),
                ('baseAddress', ctypes.c_ushort),
                ('cameraType', ctypes.c_ushort),  # key into camera_types
                ('comTotal', ctypes.c_ulong),
                ('comFailed', ctypes.c_ulong)]
#################################################################################
# Get Driver Handle, Set Driver Handle related
#################################################################################
class GetDriverHandleResults(ctypes.Structure):
    """
    ctypes Structure to hold the results from the Get Driver Handle command.
    The handle is the camera ID used when switching control between connected
    cameras with the Set Driver Handle command.
    """
    _fields_ = [('handle', ctypes.c_short)]
# Used to disconnect from a camera in order to get the handle for another
# Had to google to find this value, it is NOT in sbigudrv.h or the
# SBIG Universal Driver docs.
INVALID_HANDLE_VALUE = -1
class SetDriverHandleParams(ctypes.Structure):
    """
    ctypes Structure to hold the parameter for the Set Driver Handle command.
    """
    # Pass a handle from GetDriverHandleResults, or INVALID_HANDLE_VALUE to detach.
    _fields_ = [('handle', ctypes.c_short)]
#################################################################################
# Temperature and cooling control related
#################################################################################
class QueryTemperatureStatusParams(ctypes.Structure):
    """
    ctypes Structure used to hold the parameters for the
    Query Temperature Status command.
    """
    # request is a key into temp_status_requests below.
    _fields_ = [('request', ctypes.c_ushort)]
# Request codes selecting which flavour of temperature status to return.
temp_status_requests = {0: 'TEMP_STATUS_STANDARD',
                        1: 'TEMP_STATUS_ADVANCED',
                        2: 'TEMP_STATUS_ADVANCED2'}
# Reverse dictionary
temp_status_request_codes = {request: code for code, request in temp_status_requests.items()}
class QueryTemperatureStatusResults(ctypes.Structure):
    """
    ctypes Structure used to hold the results from the Query Temperature Status
    command (standard version).
    """
    _fields_ = [('enabled', ctypes.c_ushort),
                ('ccdSetpoint', ctypes.c_ushort),
                ('power', ctypes.c_ushort),
                ('ccdThermistor', ctypes.c_ushort),
                ('ambientThermistor', ctypes.c_ushort)]
class QueryTemperatureStatusResults2(ctypes.Structure):
    """
    ctypes Structure used to hold the results from the Query Temperature Status
    command (extended version).
    """
    _fields_ = [('coolingEnabled', ctypes.c_ushort),
                ('fanEnabled', ctypes.c_ushort),
                ('ccdSetpoint', ctypes.c_double),
                ('imagingCCDTemperature', ctypes.c_double),
                ('trackingCCDTemperature', ctypes.c_double),
                ('externalTrackingCCDTemperature', ctypes.c_double),
                ('ambientTemperature', ctypes.c_double),
                ('imagingCCDPower', ctypes.c_double),
                ('trackingCCDPower', ctypes.c_double),
                ('externalTrackingCCDPower', ctypes.c_double),
                ('heatsinkTemperature', ctypes.c_double),
                ('fanPower', ctypes.c_double),
                ('fanSpeed', ctypes.c_double),
                ('trackingCCDSetpoint', ctypes.c_double)]
# Regulation modes accepted by the Set Temperature Regulation commands.
temperature_regulations = {0: "REGULATION_OFF",
                           1: "REGULATION_ON",
                           2: "REGULATION_OVERRIDE",
                           3: "REGULATION_FREEZE",
                           4: "REGULATION_UNFREEZE",
                           5: "REGULATION_ENABLE_AUTOFREEZE",
                           6: "REGULATION_DISABLE_AUTOFREEZE"}
# Reverse dictionary
temperature_regulation_codes = {regulation: code for code, regulation in temperature_regulations.items()}
class SetTemperatureRegulationParams(ctypes.Structure):
    """
    ctypes Structure used to hold the parameters for the
    Set Temperature Regulation command.
    """
    # Setpoint is in raw A/D units here; the 2 variant below takes degrees.
    _fields_ = [('regulation', ctypes.c_ushort),
                ('ccdSetpoint', ctypes.c_ushort)]
class SetTemperatureRegulationParams2(ctypes.Structure):
    """
    ctypes Structure used to hold the parameters for the
    Set Temperature Regulation 2 command.
    """
    # NOTE(review): ccdSetpoint is a double here, presumably in degrees
    # Celsius — confirm against the SBIG Universal Driver docs.
    _fields_ = [('regulation', ctypes.c_ushort),
                ('ccdSetpoint', ctypes.c_double)]
################################################################################
# Get CCD Info related
################################################################################
class GetCCDInfoParams(ctypes.Structure):
    """
    ctypes Structure to hold the parameters for the Get CCD Info command,
    used obtain the details & capabilities of the connected camera.
    """
    # request is a key into ccd_info_requests below and selects which of the
    # GetCCDInfoResults* structures the driver fills in.
    _fields_ = [('request', ctypes.c_ushort)]
ccd_info_requests = {0: 'CCD_INFO_IMAGING',
                     1: 'CCD_INFO_TRACKING',
                     2: 'CCD_INFO_EXTENDED',
                     3: 'CCD_INFO_EXTENDED_5C',
                     4: 'CCD_INFO_EXTENDED2_IMAGING',
                     5: 'CCD_INFO_EXTENDED2_TRACKING',
                     6: 'CCD_INFO_EXTENDED3'}
# Reverse dictionary
ccd_info_request_codes = {request: code for code, request in ccd_info_requests.items()}
class ReadoutInfo(ctypes.Structure):
    """
    ctypes Structure to store details of an individual readout mode. An array of up
    to 20 of these will be returned as part of the GetCCDInfoResults0 struct when the
    Get CCD Info command is used with request 'CCD_INFO_IMAGING'.
    The gain field is a 4 digit Binary Coded Decimal (yes, really) of the form XX.XX,
    in units of electrons/ADU.
    The pixel_width and pixel_height fields are 6 digit Binary Coded Decimals for the
    form XXXXXX.XX in units of microns, helpfully supporting pixels up to 1 metre across.
    """
    _fields_ = [('mode', ctypes.c_ushort),
                ('width', ctypes.c_ushort),
                ('height', ctypes.c_ushort),
                ('gain', ctypes.c_ushort),
                ('pixelWidth', ctypes.c_ulong),
                ('pixelHeight', ctypes.c_ulong)]
class GetCCDInfoResults0(ctypes.Structure):
    """
    ctypes Structure to hold the results from the Get CCD Info command when used with
    requests 'CCD_INFO_IMAGING' or 'CCD_INFO_TRACKING'.
    The firmwareVersion field is 4 digit binary coded decimal of the form XX.XX.
    """
    _fields_ = [('firmwareVersion', ctypes.c_ushort),
                ('cameraType', ctypes.c_ushort),  # key into camera_types
                ('name', ctypes.c_char * 64),
                ('readoutModes', ctypes.c_ushort),  # how many readoutInfo entries are valid
                ('readoutInfo', ReadoutInfo * 20)]
class GetCCDInfoResults2(ctypes.Structure):
    """
    ctypes Structure to hold the results from the Get CCD Info command when used with
    request 'CCD_INFO_EXTENDED'.
    """
    _fields_ = [('badColumns', ctypes.c_ushort),  # how many entries of 'columns' are valid
                ('columns', ctypes.c_ushort * 4),
                ('imagingABG', ctypes.c_ushort),
                ('serialNumber', ctypes.c_char * 10)]
class GetCCDInfoResults4(ctypes.Structure):
    """
    ctypes Structure to hold the results from the Get CCD Info command when used with
    requests 'CCD_INFO_EXTENDED2_IMAGING' or 'CCD_INFO_EXTENDED2_TRACKING'.
    The capabilitiesBits is a bitmask, yay.
    """
    # The bitmask is unpacked here into individual 1-bit ctypes bitfields; the
    # remaining (ushort-width - 6) bits are unused padding.
    _fields_ = [('capabilities_b0', ctypes.c_int, 1),
                ('capabilities_b1', ctypes.c_int, 1),
                ('capabilities_b2', ctypes.c_int, 1),
                ('capabilities_b3', ctypes.c_int, 1),
                ('capabilities_b4', ctypes.c_int, 1),
                ('capabilities_b5', ctypes.c_int, 1),
                ('capabilities_unusued', ctypes.c_int, ctypes.sizeof(ctypes.c_ushort) * 8 - 6),
                ('dumpExtra', ctypes.c_ushort)]
class GetCCDInfoResults6(ctypes.Structure):
    """
    ctypes Structure to hold the results from the Get CCD Info command when used with
    the request 'CCD_INFO_EXTENDED3'.
    The sbigudrv.h C header says there should be three bitmask fields, each of type
    ulong, which would be 64 bits on this platform (OS X), BUT trial and error has
    determined they're actually 32 bits long.
    """
    _fields_ = [('camera_b0', ctypes.c_int, 1),
                ('camera_b1', ctypes.c_int, 1),
                ('camera_unused', ctypes.c_int, 30),
                ('ccd_b0', ctypes.c_int, 1),
                ('ccd_b1', ctypes.c_int, 1),
                ('ccd_unused', ctypes.c_int, 30),
                ('extraBits', ctypes.c_int, 32)]
#################################################################################
# Get Driver Control, Set Driver Control related
#################################################################################
# Driver control parameter names, indexed by their (sequential) numeric codes.
driver_control_params = {i: param for i, param in enumerate(('DCP_USB_FIFO_ENABLE',
                                                             'DCP_CALL_JOURNAL_ENABLE',
                                                             'DCP_IVTOH_RATIO',
                                                             'DCP_USB_FIFO_SIZE',
                                                             'DCP_USB_DRIVER',
                                                             'DCP_KAI_RELGAIN',
                                                             'DCP_USB_PIXEL_DL_ENABLE',
                                                             'DCP_HIGH_THROUGHPUT',
                                                             'DCP_VDD_OPTIMIZED',
                                                             'DCP_AUTO_AD_GAIN',
                                                             'DCP_NO_HCLKS_FOR_INTEGRATION',
                                                             'DCP_TDI_MODE_ENABLE',
                                                             'DCP_VERT_FLUSH_CONTROL_ENABLE',
                                                             'DCP_ETHERNET_PIPELINE_ENABLE',
                                                             'DCP_FAST_LINK',
                                                             'DCP_OVERSCAN_ROWSCOLS',
                                                             'DCP_PIXEL_PIPELINE_ENABLE',
                                                             'DCP_COLUMN_REPAIR_ENABLE',
                                                             'DCP_WARM_PIXEL_REPAIR_ENABLE',
                                                             'DCP_WARM_PIXEL_REPAIR_COUNT',
                                                             'DCP_LAST'))}
# Reverse dictionary
driver_control_codes = {param: code for code, param in driver_control_params.items()}
class GetDriverControlParams(ctypes.Structure):
    """
    ctypes Structure to hold the parameters for the Get Driver Control command,
    used to query the value of a specific driver control parameter.
    """
    # controlParameter is a key into driver_control_params.
    _fields_ = [('controlParameter', ctypes.c_ushort), ]
class GetDriverControlResults(ctypes.Structure):
    """
    ctypes Structure to hold the result from the Get Driver Control command,
    used to query the value of a specific driver control parameter
    """
    _fields_ = [('controlValue', ctypes.c_ulong), ]
class SetDriverControlParams(ctypes.Structure):
    """
    ctypes Structure to hold the parameters for the Set Driver Control command,
    used to set the value of a specific driver control parameter
    """
    _fields_ = [('controlParameter', ctypes.c_ushort),
                ('controlValue', ctypes.c_ulong)]
#################################################################################
# Start Exposure, Query Command Status, End Exposure related
#################################################################################
class StartExposureParams2(ctypes.Structure):
    """
    ctypes Structure to hold the parameters for the Start Exposure 2 command.
    (The Start Exposure command is deprecated.)
    """
    _fields_ = [('ccd', ctypes.c_ushort),  # key into ccds
                ('exposureTime', ctypes.c_ulong),
                ('abgState', ctypes.c_ushort),  # key into abg_states
                ('openShutter', ctypes.c_ushort),  # key into shutter_commands
                ('readoutMode', ctypes.c_ushort),  # key into readout_modes
                ('top', ctypes.c_ushort),
                ('left', ctypes.c_ushort),
                ('height', ctypes.c_ushort),
                ('width', ctypes.c_ushort)]
# CCD selection for cameras with built in or connected tracking CCDs
ccds = {0: 'CCD_IMAGING',
        1: 'CCD_TRACKING',
        2: 'CCD_EXT_TRACKING'}
# Reverse dictionary
ccd_codes = {ccd: code for code, ccd in ccds.items()}
# Anti-Blooming Gate states
abg_states = {0: 'ABG_LOW7',
              1: 'ABG_CLK_LOW7',
              2: 'ABG_CLK_MED7',
              3: 'ABG_CLK_HI7'}
# Reverse dictionary
abg_state_codes = {abg: code for code, abg in abg_states.items()}
# Shutter mode commands
shutter_commands = {0: 'SC_LEAVE_SHUTTER',
                    1: 'SC_OPEN_SHUTTER',
                    2: 'SC_CLOSE_SHUTTER',
                    3: 'SC_INITIALIZE_SHUTTER',
                    4: 'SC_OPEN_EXP_SHUTTER',
                    5: 'SC_CLOSE_EXT_SHUTTER'}
# Reverse dictionary
shutter_command_codes = {command: code for code, command in shutter_commands.items()}
# Readout binning modes
readout_modes = {0: 'RM_1X1',
                 1: 'RM_2X2',
                 2: 'RM_3X3',
                 3: 'RM_NX1',
                 4: 'RM_NX2',
                 5: 'RM_NX3',
                 6: 'RM_1X1_VOFFCHIP',
                 7: 'RM_2X2_VOFFCHIP',
                 8: 'RM_3X3_VOFFCHIP',
                 9: 'RM_9X9',
                 10: 'RM_NXN'}
# Reverse dictionary
readout_mode_codes = {mode: code for code, mode in readout_modes.items()}
# Command status codes and corresponding messages as returned by
# Query Command Status
statuses = {0: "CS_IDLE",
            1: "CS_IN_PROGRESS",
            2: "CS_INTEGRATING",
            3: "CS_INTEGRATION_COMPLETE"}
# Reverse dictionary
status_codes = {status: code for code, status in statuses.items()}
class QueryCommandStatusParams(ctypes.Structure):
    """
    ctypes Structure to hold the parameters for the Query Command Status
    command.
    """
    # command is a numeric command code (see command_codes / commands).
    _fields_ = [('command', ctypes.c_ushort)]
class QueryCommandStatusResults(ctypes.Structure):
    """
    ctypes Structure to hold the results from the Query Command Status command.
    """
    # status is a key into the statuses dict above.
    _fields_ = [('status', ctypes.c_ushort)]
class EndExposureParams(ctypes.Structure):
    """
    ctypes Structure to hold the parameters for the End Exposure command.
    """
    # ccd is a key into the ccds dict above.
    _fields_ = [('ccd', ctypes.c_ushort)]
#################################################################################
# Start Readout, Readout Line, End Readout related
#################################################################################
class StartReadoutParams(ctypes.Structure):
    """
    ctypes Structure to hold the parameters for the Start Readout command.
    """
    _fields_ = [('ccd', ctypes.c_ushort),  # key into ccds
                ('readoutMode', ctypes.c_ushort),  # key into readout_modes
                ('top', ctypes.c_ushort),
                ('left', ctypes.c_ushort),
                ('height', ctypes.c_ushort),
                ('width', ctypes.c_ushort)]
class ReadoutLineParams(ctypes.Structure):
    """
    ctypes Structure to hold the parameters for the Readout Line command.
    """
    _fields_ = [('ccd', ctypes.c_ushort),
                ('readoutMode', ctypes.c_ushort),
                ('pixelStart', ctypes.c_ushort),
                ('pixelLength', ctypes.c_ushort)]
class EndReadoutParams(ctypes.Structure):
    """
    ctypes Structure to hold the parameters for the End Readout Params.
    """
    _fields_ = [('ccd', ctypes.c_ushort)]
#################################################################################
# Get Driver Info related
#################################################################################
# Requests relevant to Get Driver Info command
driver_requests = {0: "DRIVER_STD",
                   1: "DRIVER_EXTENDED",
                   2: "DRIVER_USB_LOADER"}
# Reverse dictionary
driver_request_codes = {request: code for code, request in driver_requests.items()}
class GetDriverInfoParams(ctypes.Structure):
    """
    ctypes Structure used to hold the parameters for the Get Driver Info command
    """
    # request is a key into driver_requests above.
    _fields_ = [('request', ctypes.c_ushort)]
class GetDriverInfoResults0(ctypes.Structure):
    """
    ctypes Structure used to hold the results from the Get Driver Info command
    """
    _fields_ = [('version', ctypes.c_ushort),
                ('name', ctypes.c_char * 64),  # fixed-width 64 byte string
                ('maxRequest', ctypes.c_ushort)]
| [
"ctypes.util.find_library",
"ctypes.byref",
"astropy.time.Time.now",
"astropy.io.fits.PrimaryHDU",
"threading.Lock",
"threading.Timer",
"time.sleep",
"os.path.dirname",
"numpy.zeros",
"ctypes.CDLL",
"astropy.io.fits.Header",
"numpy.ctypeslib.as_ctypes",
"_ctypes.dlclose",
"ctypes.sizeof"
] | [((2242, 2267), 'ctypes.CDLL', 'ctypes.CDLL', (['library_path'], {}), '(library_path)\n', (2253, 2267), False, 'import ctypes\n'), ((3999, 4005), 'threading.Lock', 'Lock', ([], {}), '()\n', (4003, 4005), False, 'from threading import Timer, Lock\n'), ((4656, 4691), '_ctypes.dlclose', '_ctypes.dlclose', (['self._CDLL._handle'], {}), '(self._CDLL._handle)\n', (4671, 4691), False, 'import _ctypes\n'), ((12820, 12830), 'astropy.time.Time.now', 'Time.now', ([], {}), '()\n', (12828, 12830), False, 'from astropy.time import Time\n'), ((12848, 12861), 'astropy.io.fits.Header', 'fits.Header', ([], {}), '()\n', (12859, 12861), False, 'from astropy.io import fits\n'), ((14191, 14254), 'threading.Timer', 'Timer', ([], {'interval': 'wait', 'function': 'self._readout', 'args': 'readout_args'}), '(interval=wait, function=self._readout, args=readout_args)\n', (14196, 14254), False, 'from threading import Timer, Lock\n'), ((15502, 15544), 'numpy.zeros', 'np.zeros', (['(height, width)'], {'dtype': 'np.uint16'}), '((height, width), dtype=np.uint16)\n', (15510, 15544), True, 'import numpy as np\n'), ((16925, 16967), 'astropy.io.fits.PrimaryHDU', 'fits.PrimaryHDU', (['image_data'], {'header': 'header'}), '(image_data, header=header)\n', (16940, 16967), False, 'from astropy.io import fits\n'), ((17045, 17070), 'os.path.dirname', 'os.path.dirname', (['filename'], {}), '(filename)\n', (17060, 17070), False, 'import os\n'), ((1911, 1935), 'ctypes.util.find_library', 'find_library', (['"""sbigudrv"""'], {}), "('sbigudrv')\n", (1923, 1935), False, 'from ctypes.util import find_library\n'), ((15989, 16004), 'time.sleep', 'time.sleep', (['(0.1)'], {}), '(0.1)\n', (15999, 16004), False, 'import time\n'), ((12140, 12153), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (12150, 12153), False, 'import time\n'), ((17096, 17121), 'os.path.dirname', 'os.path.dirname', (['filename'], {}), '(filename)\n', (17111, 17121), False, 'import os\n'), ((21456, 21486), 'ctypes.sizeof', 'ctypes.sizeof', 
(['ctypes.c_ushort'], {}), '(ctypes.c_ushort)\n', (21469, 21486), False, 'import ctypes\n'), ((26203, 26223), 'ctypes.byref', 'ctypes.byref', (['params'], {}), '(params)\n', (26215, 26223), False, 'import ctypes\n'), ((26299, 26320), 'ctypes.byref', 'ctypes.byref', (['results'], {}), '(results)\n', (26311, 26320), False, 'import ctypes\n'), ((21570, 21599), 'ctypes.sizeof', 'ctypes.sizeof', (['ctypes.c_ulong'], {}), '(ctypes.c_ulong)\n', (21583, 21599), False, 'import ctypes\n'), ((42693, 42723), 'ctypes.sizeof', 'ctypes.sizeof', (['ctypes.c_ushort'], {}), '(ctypes.c_ushort)\n', (42706, 42723), False, 'import ctypes\n'), ((16650, 16674), 'numpy.ctypeslib.as_ctypes', 'as_ctypes', (['image_data[i]'], {}), '(image_data[i])\n', (16659, 16674), False, 'from numpy.ctypeslib import as_ctypes\n')] |
# Adapt from https://stanford.edu/~shervine/blog/keras-how-to-generate-data-on-the-fly
import numpy as np
import keras
import os
from sklearn import preprocessing
class DataGenerator(keras.utils.Sequence):
    """Generates batches of ``.npy`` samples for Keras model training.

    Adapted from
    https://stanford.edu/~shervine/blog/keras-how-to-generate-data-on-the-fly

    Each entry of ``list_IDs`` is a path to a ``.npy`` file. The class label
    ('seiz' or 'bckg') is encoded as the second-to-last underscore-separated
    token of the path.
    """

    def __init__(self, list_IDs, batch_size=32, dim=(32, 32, 32), n_channels=1,
                 n_classes=2, shuffle=True):
        """Store generator settings and build the initial epoch ordering.

        :param list_IDs: list of .npy file paths, one per sample
        :param batch_size: samples per batch
        :param dim: per-sample shape of X after the channel axis move
        :param n_channels: number of channels (kept for API compatibility)
        :param n_classes: number of output classes for one-hot encoding
        :param shuffle: if True, reshuffle sample order every epoch
        """
        self.dim = dim
        self.batch_size = batch_size
        self.list_IDs = list_IDs
        self.n_channels = n_channels
        self.n_classes = n_classes
        self.shuffle = shuffle
        self.on_epoch_end()

    def __len__(self):
        """Number of full batches per epoch (any incomplete tail batch is dropped)."""
        return int(np.floor(len(self.list_IDs) / self.batch_size))

    def __getitem__(self, index):
        """Generate one batch: (X, one-hot y)."""
        # Indexes of this batch within the (possibly shuffled) epoch order.
        indexes = self.indexes[index * self.batch_size:(index + 1) * self.batch_size]
        list_IDs_temp = [self.list_IDs[k] for k in indexes]
        return self.__data_generation(list_IDs_temp)

    def on_epoch_end(self):
        """Rebuild (and optionally reshuffle) the sample ordering after each epoch."""
        self.indexes = np.arange(len(self.list_IDs))
        if self.shuffle:  # idiomatic truthiness instead of '== True'
            np.random.shuffle(self.indexes)

    def __data_generation(self, list_IDs_temp):
        """Load one batch of samples from disk.

        :param list_IDs_temp: the batch's file paths (length == batch_size)
        :returns: (X, y) where X has shape (batch_size, *dim) and y has shape
            (batch_size, n_classes), one-hot encoded.
        """
        X = np.empty((self.batch_size, *self.dim))
        # Use np.zeros rather than np.empty so that a sample whose label token
        # is neither 'seiz' nor 'bckg' gets a defined label (0) instead of
        # uninitialized memory (the original code left such entries as garbage).
        y = np.zeros(self.batch_size, dtype=int)

        for i, ID in enumerate(list_IDs_temp):
            x_ = np.load(ID)
            # Move the leading axis to the end (channel-first -> channel-last);
            # assumes the stored arrays are channel-first — TODO confirm.
            X[i,] = np.moveaxis(x_, 0, -1)

            # Label is the second-to-last '_'-separated token of the path.
            label = ID.strip().split('_')[-2]
            if label == 'seiz':
                y[i] = 1
            # 'bckg' (and any other token) keeps the default 0.

        return X, keras.utils.to_categorical(y, num_classes=self.n_classes)
# return X, keras.utils.to_categorical(y, num_classes=self.n_classes) | [
"keras.utils.to_categorical",
"numpy.empty",
"numpy.moveaxis",
"numpy.load",
"numpy.random.shuffle"
] | [((1511, 1549), 'numpy.empty', 'np.empty', (['(self.batch_size, *self.dim)'], {}), '((self.batch_size, *self.dim))\n', (1519, 1549), True, 'import numpy as np\n'), ((1562, 1598), 'numpy.empty', 'np.empty', (['self.batch_size'], {'dtype': 'int'}), '(self.batch_size, dtype=int)\n', (1570, 1598), True, 'import numpy as np\n'), ((1300, 1331), 'numpy.random.shuffle', 'np.random.shuffle', (['self.indexes'], {}), '(self.indexes)\n', (1317, 1331), True, 'import numpy as np\n'), ((1718, 1729), 'numpy.load', 'np.load', (['ID'], {}), '(ID)\n', (1725, 1729), True, 'import numpy as np\n'), ((2178, 2200), 'numpy.moveaxis', 'np.moveaxis', (['x_', '(0)', '(-1)'], {}), '(x_, 0, -1)\n', (2189, 2200), True, 'import numpy as np\n'), ((3202, 3259), 'keras.utils.to_categorical', 'keras.utils.to_categorical', (['y'], {'num_classes': 'self.n_classes'}), '(y, num_classes=self.n_classes)\n', (3228, 3259), False, 'import keras\n')] |
#!/usr/bin/env python
# Copyright (c) 2019, IRIS-HEP
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import struct
import unittest
import numpy
from awkward import *
class Test(unittest.TestCase):
    # Tests for awkward-array's ObjectArray: a lazy array that materialises an
    # arbitrary Python object from each underlying row on element access.
    def runTest(self):
        # Required entry point when the TestCase is run without a named test.
        pass
    def test_object_nbytes(self):
        # Local helper type: builds a Point from a 3-element row.
        class Point(object):
            def __init__(self, array):
                self.x, self.y, self.z = array
            def __repr__(self):
                return "<Point {0} {1} {2}>".format(self.x, self.y, self.z)
            def __eq__(self, other):
                return isinstance(other, Point) and self.x == other.x and self.y == other.y and self.z == other.z
        # nbytes must report an integer byte count for the wrapped data.
        assert isinstance(ObjectArray([[1.1, 2.2, 3.3], [4.4, 5.5, 6.6], [7.7, 8.8, 9.9]], Point).nbytes, int)
    def test_object_floats(self):
        class Point(object):
            def __init__(self, array):
                self.x, self.y, self.z = array
            def __repr__(self):
                return "<Point {0} {1} {2}>".format(self.x, self.y, self.z)
            def __eq__(self, other):
                return isinstance(other, Point) and self.x == other.x and self.y == other.y and self.z == other.z
        a = ObjectArray([[1.1, 2.2, 3.3], [4.4, 5.5, 6.6], [7.7, 8.8, 9.9]], Point)
        # Single-element access materialises one Point per row.
        assert a[0] == Point([1.1, 2.2, 3.3])
        assert a[1] == Point([4.4, 5.5, 6.6])
        assert a[2] == Point([7.7, 8.8, 9.9])
        # Slicing, strided slicing, boolean masking and fancy indexing must all
        # preserve the object mapping.
        assert a[:].tolist() == [Point([1.1, 2.2, 3.3]), Point([4.4, 5.5, 6.6]), Point([7.7, 8.8, 9.9])]
        assert a[::2].tolist() == [Point([1.1, 2.2, 3.3]), Point([7.7, 8.8, 9.9])]
        assert a[[True, False, True]].tolist() == [Point([1.1, 2.2, 3.3]), Point([7.7, 8.8, 9.9])]
        assert a[[2, 0]].tolist() == [Point([7.7, 8.8, 9.9]), Point([1.1, 2.2, 3.3])]
    def test_object_bytes(self):
        # Same as above, but the rows are raw 24-byte buffers decoded with
        # struct.unpack ("ddd" = three little-endian C doubles).
        class Point(object):
            def __init__(self, bytes):
                self.x, self.y, self.z = struct.unpack("ddd", bytes)
            def __repr__(self):
                return "<Point {0} {1} {2}>".format(self.x, self.y, self.z)
            def __eq__(self, other):
                return isinstance(other, Point) and self.x == other.x and self.y == other.y and self.z == other.z
        # Reinterpret the float64 array as bytes, 24 bytes (one Point) per row.
        a = ObjectArray(numpy.array([1.1, 2.2, 3.3, 4.4, 5.5, 6.6, 7.7, 8.8, 9.9]).view("u1").reshape(-1, 24), Point)
        assert a[0] == Point(numpy.array([1.1, 2.2, 3.3]).tobytes())
        assert a[1] == Point(numpy.array([4.4, 5.5, 6.6]).tobytes())
        assert a[2] == Point(numpy.array([7.7, 8.8, 9.9]).tobytes())
        assert a[:].tolist() == [Point(numpy.array([1.1, 2.2, 3.3]).tobytes()), Point(numpy.array([4.4, 5.5, 6.6]).tobytes()), Point(numpy.array([7.7, 8.8, 9.9]).tobytes())]
        assert a[::2].tolist() == [Point(numpy.array([1.1, 2.2, 3.3]).tobytes()), Point(numpy.array([7.7, 8.8, 9.9]).tobytes())]
        assert a[[True, False, True]].tolist() == [Point(numpy.array([1.1, 2.2, 3.3]).tobytes()), Point(numpy.array([7.7, 8.8, 9.9]).tobytes())]
        assert a[[2, 0]].tolist() == [Point(numpy.array([7.7, 8.8, 9.9]).tobytes()), Point(numpy.array([1.1, 2.2, 3.3]).tobytes())]
| [
"numpy.array",
"struct.unpack"
] | [((3368, 3395), 'struct.unpack', 'struct.unpack', (['"""ddd"""', 'bytes'], {}), "('ddd', bytes)\n", (3381, 3395), False, 'import struct\n'), ((3803, 3831), 'numpy.array', 'numpy.array', (['[1.1, 2.2, 3.3]'], {}), '([1.1, 2.2, 3.3])\n', (3814, 3831), False, 'import numpy\n'), ((3872, 3900), 'numpy.array', 'numpy.array', (['[4.4, 5.5, 6.6]'], {}), '([4.4, 5.5, 6.6])\n', (3883, 3900), False, 'import numpy\n'), ((3941, 3969), 'numpy.array', 'numpy.array', (['[7.7, 8.8, 9.9]'], {}), '([7.7, 8.8, 9.9])\n', (3952, 3969), False, 'import numpy\n'), ((3680, 3738), 'numpy.array', 'numpy.array', (['[1.1, 2.2, 3.3, 4.4, 5.5, 6.6, 7.7, 8.8, 9.9]'], {}), '([1.1, 2.2, 3.3, 4.4, 5.5, 6.6, 7.7, 8.8, 9.9])\n', (3691, 3738), False, 'import numpy\n'), ((4020, 4048), 'numpy.array', 'numpy.array', (['[1.1, 2.2, 3.3]'], {}), '([1.1, 2.2, 3.3])\n', (4031, 4048), False, 'import numpy\n'), ((4067, 4095), 'numpy.array', 'numpy.array', (['[4.4, 5.5, 6.6]'], {}), '([4.4, 5.5, 6.6])\n', (4078, 4095), False, 'import numpy\n'), ((4114, 4142), 'numpy.array', 'numpy.array', (['[7.7, 8.8, 9.9]'], {}), '([7.7, 8.8, 9.9])\n', (4125, 4142), False, 'import numpy\n'), ((4196, 4224), 'numpy.array', 'numpy.array', (['[1.1, 2.2, 3.3]'], {}), '([1.1, 2.2, 3.3])\n', (4207, 4224), False, 'import numpy\n'), ((4243, 4271), 'numpy.array', 'numpy.array', (['[7.7, 8.8, 9.9]'], {}), '([7.7, 8.8, 9.9])\n', (4254, 4271), False, 'import numpy\n'), ((4341, 4369), 'numpy.array', 'numpy.array', (['[1.1, 2.2, 3.3]'], {}), '([1.1, 2.2, 3.3])\n', (4352, 4369), False, 'import numpy\n'), ((4388, 4416), 'numpy.array', 'numpy.array', (['[7.7, 8.8, 9.9]'], {}), '([7.7, 8.8, 9.9])\n', (4399, 4416), False, 'import numpy\n'), ((4473, 4501), 'numpy.array', 'numpy.array', (['[7.7, 8.8, 9.9]'], {}), '([7.7, 8.8, 9.9])\n', (4484, 4501), False, 'import numpy\n'), ((4520, 4548), 'numpy.array', 'numpy.array', (['[1.1, 2.2, 3.3]'], {}), '([1.1, 2.2, 3.3])\n', (4531, 4548), False, 'import numpy\n')] |
# white_signals.py
"""Contains class factories for white noise signals. White noise signals are
defined as the class of signals that only modifies the white noise matrix `N`.
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import numpy as np
import scipy.sparse
from enterprise.signals import parameter, selections, signal_base, utils
from enterprise.signals.parameter import function
from enterprise.signals.selections import Selection
def WhiteNoise(varianceFunction, selection=Selection(selections.no_selection), name=""):
    """ Class factory for generic white noise signals.

    :param varianceFunction: an enterprise ``function`` that, given a pulsar,
        returns the per-TOA variance contribution for one selection segment.
    :param selection: ``Selection`` object specifying masks for backends,
        time segments, etc.
    :param name: optional name used to build signal and parameter names.
    :return: a ``WhiteNoise`` signal class (signals only modify the white
        noise matrix N).
    """
    class WhiteNoise(signal_base.Signal):
        signal_type = "white noise"
        signal_name = name
        signal_id = name
        def __init__(self, psr):
            super(WhiteNoise, self).__init__(psr)
            self.name = self.psrname + "_" + self.signal_id
            self._do_selection(psr, varianceFunction, selection)
        def _do_selection(self, psr, vfn, selection):
            # Instantiate one variance function per selection segment, and
            # collect all of their parameters under this signal.
            sel = selection(psr)
            self._keys = list(sorted(sel.masks.keys()))
            self._masks = [sel.masks[key] for key in self._keys]
            self._ndiag, self._params = {}, {}
            for key, mask in zip(self._keys, self._masks):
                # Parameter names are '<psr>_<name>_<key>' with empty parts dropped.
                pnames = [psr.name, name, key]
                pname = "_".join([n for n in pnames if n])
                self._ndiag[key] = vfn(pname, psr=psr)
                for param in list(self._ndiag[key]._params.values()):
                    self._params[param.name] = param
        @property
        def ndiag_params(self):
            """Get any varying ndiag parameters."""
            return [pp.name for pp in self.params]
        @signal_base.cache_call("ndiag_params")
        def get_ndiag(self, params):
            # Sum each segment's variance, restricted to its mask, into one
            # diagonal N vector. Result cached on the parameter values.
            ret = 0
            for key, mask in zip(self._keys, self._masks):
                ret += self._ndiag[key](params=params) * mask
            return signal_base.ndarray_alt(ret)
    return WhiteNoise
@function
def efac_ndiag(toaerrs, efac=1.0):
    # Per-TOA white-noise variance with a multiplicative EFAC scaling of the
    # TOA uncertainties: efac**2 * sigma**2.
    return efac ** 2 * toaerrs ** 2
def MeasurementNoise(efac=parameter.Uniform(0.5, 1.5), selection=Selection(selections.no_selection), name=""):
    """Class factory for EFAC type measurement noise.

    Builds a ``WhiteNoise`` signal class whose per-TOA variance is
    ``efac**2 * toaerrs**2`` (see :func:`efac_ndiag`).
    """
    base = WhiteNoise(efac_ndiag(efac=efac), selection=selection, name=name)

    class MeasurementNoise(base):
        signal_name = "efac"
        signal_id = "efac_" + name if name else "efac"

    return MeasurementNoise
@function
def equad_ndiag(toas, log10_equad=-8):
    # Constant per-TOA variance of 10**(2*log10_equad); `toas` only sets the
    # length (and dtype promotion) of the returned array.
    return np.ones_like(toas) * 10 ** (2 * log10_equad)
def EquadNoise(log10_equad=parameter.Uniform(-10, -5), selection=Selection(selections.no_selection), name=""):
    """Class factory for EQUAD type measurement noise.

    Builds a ``WhiteNoise`` signal class whose per-TOA variance is the
    constant ``10**(2 * log10_equad)`` (see :func:`equad_ndiag`).
    """
    base = WhiteNoise(equad_ndiag(log10_equad=log10_equad), selection=selection, name=name)

    class EquadNoise(base):
        signal_name = "equad"
        signal_id = "equad_" + name if name else "equad"

    return EquadNoise
def EcorrKernelNoise(
log10_ecorr=parameter.Uniform(-10, -5),
selection=Selection(selections.no_selection),
method="sherman-morrison",
name="",
):
r"""Class factory for ECORR type noise.
:param log10_ecorr: ``Parameter`` type for log10 or ecorr parameter.
:param selection:
``Selection`` object specifying masks for backends, time segments, etc.
:param method: Method for computing noise covariance matrix.
Options include `sherman-morrison`, `sparse`, and `block`
:return: ``EcorrKernelNoise`` class.
ECORR is a noise signal that is used for data with multi-channel TOAs
that are nearly simultaneous in time. It is a white noise signal that
is uncorrelated epoch to epoch but completely correlated for TOAs in a
given observing epoch.
For this implementation we use this covariance matrix as part of the
white noise covariance matrix :math:`N`. It can be seen from above that
this covariance is block diagonal, thus allowing us to exploit special
methods to make matrix manipulations easier.
In this signal implementation we offer three methods of performing these
matrix operations:
sherman-morrison
Uses the `Sherman-Morrison`_ forumla to compute the matrix
inverse and other matrix operations. **Note:** This method can only
be used for covariances that can be constructed by the outer product
of two vectors, :math:`uv^T`.
sparse
Uses `Scipy Sparse`_ matrices to construct the block diagonal
covariance matrix and perform matrix operations.
block
Uses a custom scheme that uses the individual blocks from the block
diagonal matrix to perform fast matrix inverse and other solve
operations.
.. note:: The sherman-morrison method is the fastest, followed by the block
and then sparse methods, however; the block and sparse methods are more
general and should be used if sub-classing this signal for more
complicated blocks.
.. _Sherman-Morrison: https://en.wikipedia.org/wiki/Sherman-Morrison_formula
.. _Scipy Sparse: https://docs.scipy.org/doc/scipy-0.18.1/reference/sparse.html
.. # noqa E501
"""
if method not in ["sherman-morrison", "block", "sparse"]:
msg = "EcorrKernelNoise does not support method: {}".format(method)
raise TypeError(msg)
class EcorrKernelNoise(signal_base.Signal):
signal_type = "white noise"
signal_name = "ecorr_" + method
signal_id = "_".join(["ecorr", name, method]) if name else "_".join(["ecorr", method])
def __init__(self, psr):
super(EcorrKernelNoise, self).__init__(psr)
self.name = self.psrname + "_" + self.signal_id
sel = selection(psr)
self._params, self._masks = sel("log10_ecorr", log10_ecorr)
keys = list(sorted(self._masks.keys()))
masks = [self._masks[key] for key in keys]
Umats = []
for key, mask in zip(keys, masks):
Umats.append(utils.create_quantization_matrix(psr.toas[mask], nmin=2)[0])
nepoch = sum(U.shape[1] for U in Umats)
U = np.zeros((len(psr.toas), nepoch))
self._slices = {}
netot = 0
for ct, (key, mask) in enumerate(zip(keys, masks)):
nn = Umats[ct].shape[1]
U[mask, netot : nn + netot] = Umats[ct]
self._slices.update({key: utils.quant2ind(U[:, netot : nn + netot])})
netot += nn
# initialize sparse matrix
self._setup(psr)
@property
def ndiag_params(self):
"""Get any varying ndiag parameters."""
return [pp.name for pp in self.params]
@signal_base.cache_call("ndiag_params")
def get_ndiag(self, params):
if method == "sherman-morrison":
return self._get_ndiag_sherman_morrison(params)
elif method == "sparse":
return self._get_ndiag_sparse(params)
elif method == "block":
return self._get_ndiag_block(params)
def _setup(self, psr):
if method == "sparse":
self._setup_sparse(psr)
def _setup_sparse(self, psr):
Ns = scipy.sparse.csc_matrix((len(psr.toas), len(psr.toas)))
for key, slices in self._slices.items():
for slc in slices:
if slc.stop - slc.start > 1:
Ns[slc, slc] = 1.0
self._Ns = signal_base.csc_matrix_alt(Ns)
def _get_ndiag_sparse(self, params):
for p in self._params:
for slc in self._slices[p]:
if slc.stop - slc.start > 1:
self._Ns[slc, slc] = 10 ** (2 * self.get(p, params))
return self._Ns
def _get_ndiag_sherman_morrison(self, params):
slices, jvec = self._get_jvecs(params)
return signal_base.ShermanMorrison(jvec, slices)
def _get_ndiag_block(self, params):
slices, jvec = self._get_jvecs(params)
blocks = []
for jv, slc in zip(jvec, slices):
nb = slc.stop - slc.start
blocks.append(np.ones((nb, nb)) * jv)
return signal_base.BlockMatrix(blocks, slices)
def _get_jvecs(self, params):
slices = sum([self._slices[key] for key in sorted(self._slices.keys())], [])
jvec = np.concatenate(
[
np.ones(len(self._slices[key])) * 10 ** (2 * self.get(key, params))
for key in sorted(self._slices.keys())
]
)
return (slices, jvec)
return EcorrKernelNoise
| [
"enterprise.signals.selections.Selection",
"numpy.ones_like",
"enterprise.signals.signal_base.BlockMatrix",
"numpy.ones",
"enterprise.signals.utils.create_quantization_matrix",
"enterprise.signals.signal_base.csc_matrix_alt",
"enterprise.signals.signal_base.ShermanMorrison",
"enterprise.signals.signal... | [((524, 558), 'enterprise.signals.selections.Selection', 'Selection', (['selections.no_selection'], {}), '(selections.no_selection)\n', (533, 558), False, 'from enterprise.signals.selections import Selection\n'), ((2130, 2157), 'enterprise.signals.parameter.Uniform', 'parameter.Uniform', (['(0.5)', '(1.5)'], {}), '(0.5, 1.5)\n', (2147, 2157), False, 'from enterprise.signals import parameter, selections, signal_base, utils\n'), ((2169, 2203), 'enterprise.signals.selections.Selection', 'Selection', (['selections.no_selection'], {}), '(selections.no_selection)\n', (2178, 2203), False, 'from enterprise.signals.selections import Selection\n'), ((2684, 2710), 'enterprise.signals.parameter.Uniform', 'parameter.Uniform', (['(-10)', '(-5)'], {}), '(-10, -5)\n', (2701, 2710), False, 'from enterprise.signals import parameter, selections, signal_base, utils\n'), ((2722, 2756), 'enterprise.signals.selections.Selection', 'Selection', (['selections.no_selection'], {}), '(selections.no_selection)\n', (2731, 2756), False, 'from enterprise.signals.selections import Selection\n'), ((3148, 3174), 'enterprise.signals.parameter.Uniform', 'parameter.Uniform', (['(-10)', '(-5)'], {}), '(-10, -5)\n', (3165, 3174), False, 'from enterprise.signals import parameter, selections, signal_base, utils\n'), ((3190, 3224), 'enterprise.signals.selections.Selection', 'Selection', (['selections.no_selection'], {}), '(selections.no_selection)\n', (3199, 3224), False, 'from enterprise.signals.selections import Selection\n'), ((1731, 1769), 'enterprise.signals.signal_base.cache_call', 'signal_base.cache_call', (['"""ndiag_params"""'], {}), "('ndiag_params')\n", (1753, 1769), False, 'from enterprise.signals import parameter, selections, signal_base, utils\n'), ((2610, 2628), 'numpy.ones_like', 'np.ones_like', (['toas'], {}), '(toas)\n', (2622, 2628), True, 'import numpy as np\n'), ((6918, 6956), 'enterprise.signals.signal_base.cache_call', 'signal_base.cache_call', 
(['"""ndiag_params"""'], {}), "('ndiag_params')\n", (6940, 6956), False, 'from enterprise.signals import parameter, selections, signal_base, utils\n'), ((1967, 1995), 'enterprise.signals.signal_base.ndarray_alt', 'signal_base.ndarray_alt', (['ret'], {}), '(ret)\n', (1990, 1995), False, 'from enterprise.signals import parameter, selections, signal_base, utils\n'), ((7705, 7735), 'enterprise.signals.signal_base.csc_matrix_alt', 'signal_base.csc_matrix_alt', (['Ns'], {}), '(Ns)\n', (7731, 7735), False, 'from enterprise.signals import parameter, selections, signal_base, utils\n'), ((8141, 8182), 'enterprise.signals.signal_base.ShermanMorrison', 'signal_base.ShermanMorrison', (['jvec', 'slices'], {}), '(jvec, slices)\n', (8168, 8182), False, 'from enterprise.signals import parameter, selections, signal_base, utils\n'), ((8464, 8503), 'enterprise.signals.signal_base.BlockMatrix', 'signal_base.BlockMatrix', (['blocks', 'slices'], {}), '(blocks, slices)\n', (8487, 8503), False, 'from enterprise.signals import parameter, selections, signal_base, utils\n'), ((6195, 6251), 'enterprise.signals.utils.create_quantization_matrix', 'utils.create_quantization_matrix', (['psr.toas[mask]'], {'nmin': '(2)'}), '(psr.toas[mask], nmin=2)\n', (6227, 6251), False, 'from enterprise.signals import parameter, selections, signal_base, utils\n'), ((6613, 6652), 'enterprise.signals.utils.quant2ind', 'utils.quant2ind', (['U[:, netot:nn + netot]'], {}), '(U[:, netot:nn + netot])\n', (6628, 6652), False, 'from enterprise.signals import parameter, selections, signal_base, utils\n'), ((8421, 8438), 'numpy.ones', 'np.ones', (['(nb, nb)'], {}), '((nb, nb))\n', (8428, 8438), True, 'import numpy as np\n')] |
import networkx as nx
import numpy as np
import pandas as pd
from tqdm import tqdm
from feature_engineering.tools import lit_eval_nan_proof
# this script computes some features by considering the bidirectional graph of citations: jaccard, adar,
# preferential_attachment, resource_allocation_index and common_neighbors
# approx 10 minutes to run it
# progress bar for pandas
tqdm.pandas(tqdm())
# path
path_to_data = "data/"
# loading data
converter_dict = {'authors': lit_eval_nan_proof, 'journal': lit_eval_nan_proof,
'title': lit_eval_nan_proof, 'abstract': lit_eval_nan_proof}
nodes = pd.read_csv(path_to_data + "nodes_preprocessed.csv", converters=converter_dict)
nodes.set_index("id", inplace=True)
training = pd.read_csv(path_to_data + "training_features.txt")
training.set_index("my_index", inplace=True)
testing = pd.read_csv(path_to_data + "testing_features.txt")
testing.set_index("my_index", inplace=True)
G = nx.Graph()
G.add_nodes_from(nodes.index.values)
G.add_edges_from(zip(training[training["target"] == 1]["id1"], training[training["target"] == 1]["id2"]))
# IDs for training set
id1 = training['id1'].values
id2 = training['id2'].values
# placeholder for feature
n = len(id1)
katz = np.zeros(n)
katz_2 = np.zeros(n)
beta = 0.98
beta_2 = 0.90
breaking_point = 10
# computing features for training set
for i in tqdm(range(len(id1))):
if training.at[str(id1[i]) + "|" + str(id2[i]), "target"] == 1:
G.remove_edge(id1[i], id2[i])
katz_acc = 0.0
katz_2_acc = 0.0
counter = 0
try:
iterator = nx.all_shortest_paths(G, source=id1[i], target=id2[i])
for p in iterator:
len_p = len(p)
katz_acc += len_p * (beta ** len_p)
katz_2_acc += len_p * (beta_2 ** len_p)
counter += 1
if counter >= breaking_point:
break
katz[i] = katz_acc
katz[i] = katz_2_acc
except:
katz[i] = -1
katz_2[i] = -1
if training.at[str(id1[i]) + "|" + str(id2[i]), "target"] == 1:
G.add_edge(id1[i], id2[i])
# add feature to data-frame
training["katz"] = katz
training["katz_2"] = katz_2
# IDs for training set
id1 = testing['id1'].values
id2 = testing['id2'].values
# placeholder for feature
n = len(id1)
katz = np.zeros(n)
katz_2 = np.zeros(n)
# computing features for training set
for i in tqdm(range(len(id1))):
katz_acc = 0.0
katz_2_acc = 0.0
counter = 0
try:
iterator = nx.all_shortest_paths(G, source=id1[i], target=id2[i])
for p in iterator:
len_p = len(p)
katz_acc += len_p * (beta ** len_p)
katz_2_acc += len_p * (beta_2 ** len_p)
counter += 1
if counter >= breaking_point:
break
katz[i] = katz_acc
katz[i] = katz_2_acc
except:
katz[i] = -1
katz_2[i] = -1
# add feature to data-frame
testing["katz"] = katz
testing["katz_2"] = katz_2
# save data-frame
training.to_csv(path_to_data + "training_features.txt")
testing.to_csv(path_to_data + "testing_features.txt")
| [
"pandas.read_csv",
"tqdm.tqdm",
"networkx.Graph",
"networkx.all_shortest_paths",
"numpy.zeros"
] | [((613, 692), 'pandas.read_csv', 'pd.read_csv', (["(path_to_data + 'nodes_preprocessed.csv')"], {'converters': 'converter_dict'}), "(path_to_data + 'nodes_preprocessed.csv', converters=converter_dict)\n", (624, 692), True, 'import pandas as pd\n'), ((740, 791), 'pandas.read_csv', 'pd.read_csv', (["(path_to_data + 'training_features.txt')"], {}), "(path_to_data + 'training_features.txt')\n", (751, 791), True, 'import pandas as pd\n'), ((847, 897), 'pandas.read_csv', 'pd.read_csv', (["(path_to_data + 'testing_features.txt')"], {}), "(path_to_data + 'testing_features.txt')\n", (858, 897), True, 'import pandas as pd\n'), ((947, 957), 'networkx.Graph', 'nx.Graph', ([], {}), '()\n', (955, 957), True, 'import networkx as nx\n'), ((1230, 1241), 'numpy.zeros', 'np.zeros', (['n'], {}), '(n)\n', (1238, 1241), True, 'import numpy as np\n'), ((1251, 1262), 'numpy.zeros', 'np.zeros', (['n'], {}), '(n)\n', (1259, 1262), True, 'import numpy as np\n'), ((2293, 2304), 'numpy.zeros', 'np.zeros', (['n'], {}), '(n)\n', (2301, 2304), True, 'import numpy as np\n'), ((2314, 2325), 'numpy.zeros', 'np.zeros', (['n'], {}), '(n)\n', (2322, 2325), True, 'import numpy as np\n'), ((391, 397), 'tqdm.tqdm', 'tqdm', ([], {}), '()\n', (395, 397), False, 'from tqdm import tqdm\n'), ((1571, 1625), 'networkx.all_shortest_paths', 'nx.all_shortest_paths', (['G'], {'source': 'id1[i]', 'target': 'id2[i]'}), '(G, source=id1[i], target=id2[i])\n', (1592, 1625), True, 'import networkx as nx\n'), ((2481, 2535), 'networkx.all_shortest_paths', 'nx.all_shortest_paths', (['G'], {'source': 'id1[i]', 'target': 'id2[i]'}), '(G, source=id1[i], target=id2[i])\n', (2502, 2535), True, 'import networkx as nx\n')] |
"""
First run tsne_encoder.py until the visualizations look good, and then set tsne_cache path to that experiment dir.
"""
import time
import numpy as np
import os
import matplotlib
import mlflow
# matplotlib.use('Agg')
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
import argparse
import os
import _pickle
from process_results_multiple import weighted_std
import matplotlib.cm as cm
import matplotlib.gridspec as gridspec
import skimage.io as skio
import skimage.transform as skts
from matplotlib.colors import ListedColormap
def retry(f):
while True:
try:
return f()
except:
time.sleep(1)
print("Retrying mlflow.")
def setup_mlflow():
os.environ["MLFLOW_TRACKING_USERNAME"] = "exp-01.mlflow-yang.ericst"
os.environ["MLFLOW_TRACKING_PASSWORD"] = "<PASSWORD>"
remote_server_uri = "https://exp-01.mlflow-yang.inf.ethz.ch"
retry(lambda: mlflow.set_tracking_uri(remote_server_uri))
def plot_tsne(exp_path, uc, ax, color_space=None, n_color_max=20, cat2color=None):
X_embedded = None
ALL_Y = None
Cat2Y = None
for (dirpath, dirnames, filenames) in os.walk(exp_path):
for filename in filenames:
if ("embedded_UC%i" % uc) in filename:
X_embedded = np.load(os.path.join(exp_path, filename))
elif ("selectedY_UC%i" % uc) in filename:
ALL_Y = np.load(os.path.join(exp_path, filename))
elif ("cat2y_UC%i" % uc) in filename:
with open(os.path.join(exp_path, filename), "rb") as fp:
Cat2Y = _pickle.load(fp)
if any([X_embedded is None, ALL_Y is None, Cat2Y is None]):
raise FileNotFoundError("Missing some files from exp dir: %s" % exp_path)
else:
Y2Cat = dict([(v, k) for k, v in Cat2Y.items()])
cat_labels = [Y2Cat[y] for y in ALL_Y]
if color_space is None:
assert cat2color is None
color_space = sns.color_palette("hls", n_color_max)
cat2color = {}
i = 0
for y, cat in Y2Cat.items():
cat2color[cat] = color_space[i]
i += 1
this_color_list = [c for cat, c in cat2color.items()]
this_cmap = ListedColormap(this_color_list)
else:
assert cat2color is not None
n_used_colors = len(list(cat2color.values()))
this_color_list = []
for y, cat in Y2Cat.items():
if cat in cat2color.keys():
this_color_list.append(cat2color[cat])
else:
# new category
this_color_list.append(color_space[n_used_colors])
cat2color[cat] = color_space[n_used_colors]
n_used_colors += 1
this_cmap = ListedColormap(this_color_list)
df = pd.DataFrame(
{"x": X_embedded[:, 0], "y": X_embedded[:, 1], "Dataset": cat_labels}
)
sns.scatterplot(
"x",
"y",
hue="Dataset",
s=1.0,
data=df,
ax=ax,
linewidth=0,
palette=cat2color,
)
ax.set_axis_off()
ax.legend_.remove()
# ax.legend(loc='upper right')
return ax, color_space, cat2color
def plot_image(file_path, ax, uc):
img = skio.imread(file_path)
long_edge = max(img.shape[0], img.shape[1])
img = skts.resize(img, (long_edge, long_edge))
if len(img.shape) > 2:
ax.imshow(img)
else:
ax.imshow(img, cmap="gray")
ax.set_axis_off()
ax.annotate(
"Usecase %d" % uc,
xy=(0.0, 0.5),
xycoords="axes fraction",
fontsize=5,
xytext=(-5, 12),
textcoords="offset points",
ha="center",
va="baseline",
rotation=90,
)
# ax.set_ylabel("Usecase %d" % uc)
return ax
def load_reto(d1, uc, pretrained: bool = False):
if d1 == "NIHCC":
D_id = "nih_id"
D_ood = {
1: "uc1_and_mura",
2: "pc_for_nih",
3: "nih_ood",
}[uc]
elif d1 == "PAD":
D_id = "pc_id"
D_ood = {
1: "uc1_and_mura",
2: "pc_uc2",
3: "pc_uc3",
}[uc]
elif d1 == "DRD":
D_id = "drd"
D_ood = {
1: "uc1_rgb",
2: "drimdb",
3: "riga",
}[uc]
else:
assert False, f"Unknown dataset: {d1}"
exp_name = f"{D_id}_vs_{D_ood}"
exp = retry(lambda: mlflow.get_experiment_by_name(exp_name))
runs = retry(lambda: mlflow.list_run_infos(exp.experiment_id))
stats = {
"auroc": [],
"aupr": [],
}
for run_info in runs:
run = retry(lambda: mlflow.get_run(run_info.run_id))
if run.data.params.get("ensemble_type") != "assign_one_label":
continue
if run.data.params.get("use_pretrained_model") != str(pretrained):
continue
if run.data.params.get("model_arch") != "densenet":
continue
if run.data.tags.get("goal") != "final":
continue
metrics = run.data.metrics
for k, v in {
"auroc": metrics["heur_auroc_avg_diff"],
"aupr": metrics["heur_aupr_avg_diff"],
}.items():
stats[k].append(v)
for k, v in stats.items():
assert len(v) > 0, f"Could not find any stats for {k}."
return {k: np.mean(v) for k, v in stats.items()}
def plot_result(
exp_path,
d1,
uc,
ax,
order=None,
c_order=None,
with_x_tick=False,
keep_only_handles=None,
alias=None,
color_scheme="rainbow",
legend=False,
sort_separate=True,
):
csv_data = np.load(os.path.join(exp_path, "data_UC%d_%s.npy" % (uc, d1)))
assert os.path.isfile(os.path.join(exp_path, "headers_UC%d_%s.pkl" % (uc, d1)))
with open(os.path.join(exp_path, "headers_UC%d_%s.pkl" % (uc, d1)), "rb") as fp:
csv_headers = _pickle.load(fp)
csv_headers[0].append("RETO")
csv_headers[0].append("RETO(pretrained)")
method_handles = csv_headers[0]
weights = csv_data[0]
uc_roc = csv_data[2] * 100
uc_prc = csv_data[3] * 100
if len(uc_roc.shape) == 3:
rocm = np.average(uc_roc, axis=(1, 2), weights=weights)
prcm = np.average(uc_prc, axis=(1, 2), weights=weights)
rocv = weighted_std(uc_roc, weights, axis=(1, 2))
prcv = weighted_std(uc_prc, weights, axis=(1, 2))
else:
new_weights = np.zeros_like(uc_roc)
for i in range((uc_roc.shape[0])):
for j in range((uc_roc.shape[1])):
for k in range((uc_roc.shape[2])):
n = int(weights[i, j, k, 0])
for l in range(n):
new_weights[i, j, k, l] = 1
rocm = np.average(uc_roc, axis=(1, 2, 3), weights=new_weights)
prcm = np.average(uc_prc, axis=(1, 2, 3), weights=new_weights)
rocv = (
weighted_std(uc_roc, new_weights, axis=(1, 2, 3))
* 1.96
/ np.sqrt(new_weights.sum((1, 2, 3)))
)
prcv = (
weighted_std(uc_prc, new_weights, axis=(1, 2, 3))
* 1.96
/ np.sqrt(new_weights.sum((1, 2, 3)))
)
reto_data = load_reto(d1, uc, pretrained=False)
rocm = np.append(rocm, reto_data["auroc"] * 100)
rocv = np.append(rocv, 0.0)
prcm = np.append(prcm, reto_data["aupr"] * 100)
prcv = np.append(prcv, 0.0)
pretrained_reto_data = load_reto(d1, uc, pretrained=True)
rocm = np.append(rocm, pretrained_reto_data["auroc"] * 100)
rocv = np.append(rocv, 0.0)
prcm = np.append(prcm, pretrained_reto_data["aupr"] * 100)
prcv = np.append(prcv, 0.0)
if order is None:
if sort_separate:
group1_handles_inds = []
group2_handles_inds = []
for i, handle in enumerate(method_handles):
if ("ae" in handle.lower()) or ("ali" in handle.lower()):
group2_handles_inds.append(i)
else:
group1_handles_inds.append(i)
group1_sum = rocm[group1_handles_inds] + prcm[group1_handles_inds]
group2_sum = rocm[group2_handles_inds] + prcm[group2_handles_inds]
sorted_inds_g1 = [group1_handles_inds[i] for i in np.argsort(group1_sum)]
sorted_inds_g2 = [group2_handles_inds[i] for i in np.argsort(group2_sum)]
assert type(sorted_inds_g1) is list
full_inds = np.array(sorted_inds_g1 + sorted_inds_g2)
else:
group_sums = rocm # + prcm
sorted_inds_g1 = np.argsort(group_sums)
# assert type(sorted_inds_g1) is list
full_inds = np.array(sorted_inds_g1)
else:
if type(order) is dict:
order = [order[m] for m in method_handles]
full_inds = np.zeros(len(order), dtype=int)
for i, j in enumerate(order):
full_inds[j] = i
else:
full_inds = np.array(order)
sorted_rocm = rocm[full_inds]
sorted_rocv = rocv[full_inds]
sorted_prcm = prcm[full_inds]
sorted_prcv = prcv[full_inds]
sorted_method_handles = [method_handles[i] for i in full_inds]
def proc_var(m, v):
upper = []
lower = []
for n in range(m.shape[0]):
if m[n] - v[n] < 30.0:
lower.append(m[n])
else:
lower.append(v[n])
if m[n] + v[n] > 100.0:
upper.append(100.0 - m[n])
else:
upper.append(v[n])
return np.array([lower, upper])
pp_rocv = proc_var(sorted_rocm, sorted_rocv)
pp_prcv = proc_var(sorted_prcm, sorted_prcv)
if keep_only_handles is not None:
keep_inds = []
for i, method in enumerate(sorted_method_handles):
if method in keep_only_handles:
keep_inds.append(i)
else:
print("leaving out %s" % method)
keep_inds = np.array(keep_inds)
sorted_rocm = sorted_rocm[keep_inds]
sorted_prcm = sorted_prcm[keep_inds]
pp_rocv = pp_rocv[:, keep_inds]
pp_prcv = pp_prcv[:, keep_inds]
sorted_method_handles = [sorted_method_handles[i] for i in keep_inds]
full_inds = full_inds[keep_inds]
ind = np.arange(len(sorted_rocm)) # the x locations for the groups
width = 0.35 # the width of the bars
group_inds0 = []
group_inds1 = []
group_inds2 = []
if color_scheme == "rgb":
sorted_colors = [(0.9, 0.0, 0.0, 1.0) for i in sorted_method_handles]
sorted_colors_0 = [(0.1, 0.9, 0.1, 1.0) for i in sorted_method_handles]
# sorted_colors_1 = [(0.0, 0.2, 0.8, 1.0) for i in sorted_method_handles]
elif color_scheme == "bluered":
sorted_colors = []
for i, handle in enumerate(sorted_method_handles):
if ("ae" in handle.lower()) or ("ali" in handle.lower()):
sorted_colors.append((0.9, 0.1, 0.0, 1.0))
group_inds1.append(i)
elif handle == "knn/1" or handle == "knn/8":
sorted_colors.append((0.0, 0.8, 0.2, 1.0))
group_inds2.append(i)
else:
sorted_colors.append((0.1, 0.2, 0.9, 1.0))
group_inds0.append(i)
sorted_colors_0 = [
(
color[0],
color[1],
color[2],
0.3,
)
for color in sorted_colors
]
# sorted_colors_1 = [(color[0], color[1], color[2], 0.7,) for color in sorted_colors]
else:
rb = cm.get_cmap("rainbow")
grad = np.linspace(0, 1, len(sorted_rocm))
colors = [rb(g) for g in grad]
if c_order is None:
this_order = np.arange(0, len(full_inds))
c_order = {}
for c, i in zip(this_order, full_inds):
c_order[i] = c
else:
this_order = [c_order[i] for i in full_inds]
sorted_colors = [colors[i] for i in this_order]
sorted_colors_0 = [
(
color[0],
color[1],
color[2],
0.3,
)
for color in sorted_colors
]
# sorted_colors_1 = [(color[0], color[1], color[2], 0.7,) for color in sorted_colors]
sorted_colors = np.array(sorted_colors)
sorted_colors_0 = np.array(sorted_colors_0)
group_inds0 = np.array(group_inds0)
group_inds1 = np.array(group_inds1)
group_inds2 = np.array(group_inds2)
rects10 = ax.bar(
ind[group_inds0] - width * 0.5,
sorted_rocm[group_inds0],
width,
# yerr=pp_rocv[:, group_inds0],
label="AUROC",
color=sorted_colors_0[group_inds0],
error_kw={"elinewidth": 0.8},
)
rects11 = ax.bar(
ind[group_inds1] - width * 0.5,
sorted_rocm[group_inds1],
width,
# yerr=pp_rocv[:, group_inds1],
label="AUROC",
color=sorted_colors_0[group_inds1],
error_kw={"elinewidth": 0.8},
)
rects12 = ax.bar(
ind[group_inds2] - width * 0.5,
sorted_rocm[group_inds2],
width,
# yerr=pp_rocv[:, group_inds2],
label="AUROC",
color=sorted_colors_0[group_inds2],
error_kw={"elinewidth": 0.8},
)
# rects2 = ax.bar(ind, sorted_rocm, width, yerr=pp_rocv,
# label='AUROC', color=sorted_colors_0, error_kw={'elinewidth':0.8})
rects30 = ax.bar(
ind[group_inds0] + width * 0.5,
sorted_prcm[group_inds0],
width,
# yerr=pp_prcv[:, group_inds0],
label="AUPRC",
color=sorted_colors[group_inds0],
error_kw={"elinewidth": 0.8},
)
rects31 = ax.bar(
ind[group_inds1] + width * 0.5,
sorted_prcm[group_inds1],
width,
# yerr=pp_prcv[:, group_inds1],
label="AUPRC",
color=sorted_colors[group_inds1],
error_kw={"elinewidth": 0.8},
)
rects32 = ax.bar(
ind[group_inds2] + width * 0.5,
sorted_prcm[group_inds2],
width,
# yerr=pp_prcv[:, group_inds2],
label="AUPRC",
color=sorted_colors[group_inds2],
error_kw={"elinewidth": 0.8},
)
if with_x_tick:
ax.set_xticks(ind)
if alias is not None:
this_method_handles = []
for handle in sorted_method_handles:
if handle in alias:
this_method_handles.append(alias[handle])
else:
this_method_handles.append(handle)
else:
this_method_handles = sorted_method_handles
ax.set_xticklabels(this_method_handles, rotation=-45, ha="left")
else:
ax.set_xticks([])
ax.set_xticklabels([])
ax.set_yticks(np.linspace(50, 100, 3))
ax.set_ylim(30, 110)
if legend:
ax.legend(
[rects12, rects32, rects10, rects30, rects11, rects31],
[
"AUROC, data-only",
"AUPRC, data-only",
"AUROC, classifier only",
"AUPRC, classifier only",
"AUROC, with auxilary NN",
"AUPRC, with auxilary NN",
],
loc="upper left",
ncol=3,
markerscale=0.6,
labelspacing=0.3, # default 0.5
columnspacing=1.0, # default 2.0
borderaxespad=0.3, # default 0.5
)
ax.axhline(y=50, linewidth=0.5, color=(0.3, 0.3, 0.35, 0.8), ls="--")
ax.axhline(y=75, linewidth=0.5, color=(0.3, 0.3, 0.35, 0.8), ls="--")
ax.axhline(y=100, linewidth=0.5, color=(0.3, 0.3, 0.35, 0.8), ls="--")
# ax.legend()
return ax, full_inds, c_order
if __name__ == "__main__":
setup_mlflow()
PREVIEW_DOUBLE = True
parser = argparse.ArgumentParser()
parser.add_argument("output_name", type=str, help="save images")
parser.add_argument(
"--dataset", type=str, default="NIHCC", help="PAD or NIHCC or DRD or PCAM"
)
parser.add_argument(
"--global_order",
action="store_true",
help="If true, it sorts the baselines based on how "
"well they perform on average on all datasets, not just on the current one.",
)
args = parser.parse_args()
SAVE_NAME = args.output_name # "DRD_MAINFIG"#"PAD_MAINFIG" #"NIHCC_MAINFIG"
if args.dataset == "NIHCC":
TSNE_PATH = (
"umap_nih_AEBCE" # "umap_drd_AEBCE"#"umap_padchest_AEBCE" #"ALI_NIHCC"
)
BAR_PATH = "nih_proced_res_mode1" # "drd_proced_res_2"#"pad_new_res" #"nih_res"
D1 = "NIHCC" # "DRD"#"PAD"#"NIHCC"
IMG1 = "sample_images/mura.png" # "sample_images/mnist.png"#"sample_images/tinyimagenet.JPEG"#"sample_images/mura.png"
IMG2 = "sample_images/padchest_lateral.png" # "sample_images/drimdb.png"#"sample_images/padchest_AP.png"#"sample_images/padchest_lateral.png"
IMG3 = "sample_images/pneumothorax.png" # "sample_images/riga.jpg"#"sample_images/pneumothorax.png"
ORDER = {
"score_svm/0": 0,
"prob_threshold/0": 1,
"odin/0": 2,
"12Layer-AE-BCE": 3,
"svknn": 4,
"mseaeknn/1": 5,
"bceaeknn/1": 6,
"vaebceaeknn/1": 7,
"bceaeknn/8": 8,
"mseaeknn/8": 9,
"12Layer-VAE-MSE": 10,
"vaemseaeknn/1": 11,
"12Layer-AE-MSE": 12,
"vaebceaeknn/8": 13,
"knn/1": 14,
"ALI_reconst/0": 15,
"vaemseaeknn/8": 16,
"Maha1layer": 17,
"binclass/0": 18,
"12Layer-VAE-BCE": 19,
"knn/8": 20,
"Maha": 21,
"RETO": 22,
"RETO(pretrained)": 23,
}
N_MAX = 20
elif args.dataset == "PAD":
TSNE_PATH = "umap_padchest_AEBCE"
BAR_PATH = "pad_proced_res_mode1"
D1 = "PAD"
IMG1 = "sample_images/tinyimagenet.JPEG"
IMG2 = "sample_images/padchest_AP.png"
IMG3 = "sample_images/pad_cardiomegaly.png"
ORDER = {
"bceaeknn/1": 0,
"prob_threshold/0": 1,
"odin/0": 2,
"mseaeknn/1": 3,
"bceaeknn/8": 4,
"vaebceaeknn/1": 5,
"score_svm/0": 6,
"12Layer-AE-BCE": 7,
"mseaeknn/8": 8,
"12Layer-VAE-BCE": 9,
"12Layer-AE-MSE": 10,
"vaemseaeknn/1": 11,
"knn/1": 12,
"12Layer-VAE-MSE": 13,
"vaebceaeknn/8": 14,
"vaemseaeknn/8": 15,
"svknn": 16,
"Maha": 17,
"knn/8": 18,
"binclass/0": 19,
"Maha1layer": 20,
"RETO": 21,
"RETO(pretrained)": 22,
}
N_MAX = 20
elif args.dataset == "DRD":
TSNE_PATH = "umap_drd_AEBCE"
BAR_PATH = "drd_proced_res_mode1"
D1 = "DRD"
IMG1 = "sample_images/mnist.png"
IMG2 = "sample_images/drimdb.png"
IMG3 = "sample_images/riga_sq.jpg"
ORDER = {
"score_svm/0": 0,
"prob_threshold/0": 1,
"odin/0": 2,
"svknn": 3,
"vaemseaeknn/1": 4,
"vaemseaeknn/8": 5,
"vaebceaeknn/1": 6,
"12Layer-AE-BCE": 7,
"vaebceaeknn/8": 8,
"Maha": 9,
"Maha1layer": 10,
"mseaeknn/1": 11,
"binclass/0": 12,
"12Layer-AE-MSE": 13,
"12Layer-VAE-BCE": 14,
"12Layer-VAE-MSE": 15,
"knn/1": 16,
"knn/8": 17,
"bceaeknn/1": 18,
"mseaeknn/8": 19,
"bceaeknn/8": 20,
"RETO": 21,
"RETO(pretrained)": 22,
}
N_MAX = 10
elif args.dataset == "PCAM":
TSNE_PATH = "umap_pcam_AEBCE"
BAR_PATH = "pcam_proced_res_mode1"
D1 = "PCAM"
IMG1 = "sample_images/malaria.png"
IMG2 = "sample_images/IDC.png"
ORDER = {
"prob_threshold/0": 0,
"svknn": 1,
"odin/0": 2,
"12Layer-AE-BCE": 3,
"12Layer-AE-MSE": 4,
"score_svm/0": 5,
"mseaeknn/1": 6,
"vaemseaeknn/1": 7,
"vaemseaeknn/8": 8,
"vaebceaeknn/8": 9,
"vaebceaeknn/1": 10,
"12Layer-VAE-BCE": 11,
"mseaeknn/8": 12,
"bceaeknn/1": 13,
"bceaeknn/8": 14,
"12Layer-VAE-MSE": 15,
"knn/1": 16,
"knn/8": 17,
"binclass/0": 18,
"Maha1layer": 19,
"Maha": 20,
"RETO": 21,
"RETO(pretrained)": 22,
}
N_MAX = 15
matplotlib.rc("axes", edgecolor=(0.3, 0.3, 0.3, 0.8))
if args.global_order:
method_order = [
"prob_threshold/0",
"score_svm/0",
"odin/0",
"bceaeknn/1",
"svknn",
"mseaeknn/1",
"12Layer-AE-BCE",
"vaebceaeknn/1",
"vaemseaeknn/1",
"knn/1",
"bceaeknn/8",
"Maha1layer",
"mseaeknn/8",
"vaemseaeknn/8",
"Maha",
"ALI_reconst/0",
"vaebceaeknn/8",
"knn/8",
"12Layer-VAE-MSE",
"12Layer-VAE-BCE",
"binclass/0",
"12Layer-AE-MSE",
"RETO(pretrained)",
"RETO",
]
if args.dataset != "NIHCC":
method_order = list(filter(lambda x: x != "ALI_reconst/0", method_order))
ORDER = {m: i for i, m in enumerate(method_order)}
plt.rc("font", size=5) # controls default text sizes
plt.rc("axes", titlesize=5) # fontsize of the axes title
plt.rc("axes", labelsize=5) # fontsize of the x and y labels
plt.rc("xtick", labelsize=5) # fontsize of the tick labels
plt.rc("ytick", labelsize=5) # fontsize of the tick labels
plt.rc("legend", fontsize=5) # legend fontsize
plt.rc("figure", titlesize=10) # fontsize of the figure title
kiohandles = [
"prob_threshold/0",
"score_svm/0",
"binclass/0",
"odin/0",
"Maha",
"Maha1layer",
"svknn",
"12Layer-AE-BCE",
"12Layer-AE-MSE",
"12Layer-VAE-BCE",
"12Layer-VAE-MSE",
"ALI_reconst/0",
"knn/1",
"knn/8",
"bceaeknn/8",
"vaebceaeknn/8",
"mseaeknn/8",
"vaemseaeknn/8",
"bceaeknn/1",
"vaebceaeknn/1",
"mseaeknn/1",
"vaemseaeknn/1",
"RETO",
"RETO(pretrained)",
]
alias = {
"prob_threshold/0": "Prob. threshold",
"score_svm/0": "Score SVM",
"binclass/0": "Binary classifier",
"odin/0": "ODIN",
"Maha": "Mahalanobis",
"Maha1layer": "Single layer Maha.",
"svknn": "Feature knn",
"12Layer-AE-BCE": "Reconst. AEBCE",
"12Layer-AE-MSE": "Reconst. AEMSE",
"12Layer-VAE-MSE": "Reconst. VAEMSE",
"12Layer-VAE-BCE": "Reconst. VAEBCE",
"ALI_reconst/0": "Reconst. ALI",
"knn/1": "KNN-1",
"knn/8": "KNN-8",
"bceaeknn/8": "AEBCE-KNN-8",
"vaebceaeknn/8": "VAEBCE-KNN-8",
"mseaeknn/8": "AEMSE-KNN-8",
"vaemseaeknn/8": "VAEMSE-KNN-8",
"bceaeknn/1": "AEBCE-KNN-1",
"vaebceaeknn/1": "VAEBCE-KNN-1",
"mseaeknn/1": "AEMSE-KNN-1",
"vaemseaeknn/1": "VAEMSE-KNN-1",
"RETO": "ERD++",
"RETO(pretrained)": "ERD",
}
catalias = {
"UniformNoise": "Noise",
"FashionMNIST": "Fashion",
"PADChestAP": "PC Ant. Pos.",
"PADChestL": "PC Lateral",
"PADChestAPHorizontal": "PC AP Horizontal",
"PADChestPED": "PC Pediatric",
"RIGA": "Glaucoma",
}
if args.dataset == "PCAM":
if PREVIEW_DOUBLE:
fig = plt.figure(1, figsize=(6, 2.5), dpi=109 * 2)
else:
fig = plt.figure(
1, figsize=(6, 2.5), dpi=109
) # dpi set to match preview to print size on a 14 inch tall, 1440p monitor.
# set up subplot grid
gridspec.GridSpec(2, 6) # nrow by n col # effectively 1 inch squres
ax_L0 = plt.subplot2grid((2, 6), (0, 2), colspan=4, rowspan=1)
# ax_L0.annotate(
# "C",
# xy=(0.0, 1),
# xycoords="axes fraction",
# fontsize=7,
# xytext=(0, 3),
# textcoords="offset points",
# ha="center",
# va="baseline",
# )
ax_L0.yaxis.tick_right()
_, inds, corder = plot_result(
BAR_PATH,
D1,
1,
ax_L0,
order=ORDER,
keep_only_handles=kiohandles,
color_scheme="bluered",
sort_separate=False,
)
ax_L1 = plt.subplot2grid((2, 6), (1, 2), colspan=4, rowspan=1)
ax_L1.yaxis.tick_right()
plot_result(
BAR_PATH,
D1,
2,
ax_L1,
inds,
c_order=corder,
keep_only_handles=kiohandles,
color_scheme="bluered",
with_x_tick=True,
legend=True,
alias=alias,
)
ax_s11 = plt.subplot2grid((2, 6), (0, 1), colspan=1, rowspan=1)
ax_s11.annotate(
"B",
xy=(0.0, 1),
xycoords="axes fraction",
fontsize=7,
xytext=(0, 3),
textcoords="offset points",
ha="center",
va="baseline",
)
_, color_space, cat2color = plot_tsne(TSNE_PATH, 1, ax_s11, n_color_max=N_MAX)
ax_s21 = plt.subplot2grid((2, 6), (1, 1), colspan=1, rowspan=1)
plot_tsne(
TSNE_PATH,
2,
ax_s21,
color_space=color_space,
cat2color=cat2color,
n_color_max=N_MAX,
)
cats = list(cat2color.keys())
for i, cat in enumerate(cats):
if cat in catalias:
cats[i] = catalias[cat]
colors = list(cat2color.values())
id = cats.index("In-Data")
cats = (
[
"In-data",
]
+ cats[:id]
+ cats[id + 1 :]
)
colors = (
[
colors[id],
]
+ colors[:id]
+ colors[id + 1 :]
)
markers = [
plt.Line2D([0, 0], [0, 0], color=color, marker="o", linestyle="")
for color in colors
]
fig.legend(
markers,
cats,
title="Visualization of AEBCE latent space",
numpoints=1,
loc="upper left",
bbox_to_anchor=(0.0, 0.265),
ncol=3,
markerscale=0.6,
labelspacing=0.2, # default 0.5
columnspacing=0.8, # default 2.0
borderaxespad=0.4, # default 0.5
)
ax_s10 = plt.subplot2grid((2, 6), (0, 0), colspan=1, rowspan=1)
ax_s10.annotate(
"A",
xy=(0.0, 1),
xycoords="axes fraction",
fontsize=7,
xytext=(0, 3),
textcoords="offset points",
ha="center",
va="baseline",
)
plot_image(IMG1, ax_s10, uc=1)
ax_s20 = plt.subplot2grid((2, 6), (1, 0), colspan=1, rowspan=1)
plot_image(IMG2, ax_s20, uc=2)
plt.subplots_adjust(
wspace=0.1, hspace=0.1, bottom=0.265, top=0.95, right=0.95, left=0.018
) # bottom=0.205
# plt.savefig(SAVE_NAME + ".svg")
plt.savefig(SAVE_NAME + ".png", bbox_inches="tight")
fig.show()
else:
if PREVIEW_DOUBLE:
fig = plt.figure(1, figsize=(6, 3.5), dpi=109 * 2)
else:
fig = plt.figure(
1, figsize=(6, 3.5), dpi=109
) # dpi set to match preview to print size on a 14 inch tall, 1440p monitor.
# set up subplot grid
gridspec.GridSpec(3, 6) # nrow by n col # effectively 1 inch squres
ax_L0 = plt.subplot2grid((3, 6), (0, 0), colspan=6, rowspan=1)
# ax_L0.annotate(
# "C",
# xy=(0.0, 1),
# xycoords="axes fraction",
# fontsize=7,
# xytext=(0, 3),
# textcoords="offset points",
# ha="center",
# va="baseline",
# )
ax_L0.yaxis.tick_right()
_, inds, corder = plot_result(
BAR_PATH,
D1,
1,
ax_L0,
order=ORDER,
keep_only_handles=kiohandles,
color_scheme="bluered",
sort_separate=False,
)
ax_L1 = plt.subplot2grid((3, 6), (1, 0), colspan=6, rowspan=1)
ax_L1.yaxis.tick_right()
plot_result(
BAR_PATH,
D1,
2,
ax_L1,
inds,
c_order=corder,
keep_only_handles=kiohandles,
color_scheme="bluered",
alias=alias,
)
ax_L2 = plt.subplot2grid((3, 6), (2, 0), colspan=6, rowspan=1)
ax_L2.yaxis.tick_right()
plot_result(
BAR_PATH,
D1,
3,
ax_L2,
inds,
c_order=corder,
with_x_tick=True,
keep_only_handles=kiohandles,
alias=alias,
color_scheme="bluered",
legend=True,
)
plt.subplots_adjust(
wspace=0.1, hspace=0.1, bottom=0.205, top=0.95, right=0.95, left=0.018
) # bottom=0.205
# plt.savefig(SAVE_NAME + ".svg")
plt.savefig(SAVE_NAME + ".png", bbox_inches="tight")
fig.show()
print("done")
| [
"time.sleep",
"numpy.argsort",
"numpy.array",
"matplotlib.rc",
"seaborn.scatterplot",
"matplotlib.pyplot.Line2D",
"os.walk",
"numpy.mean",
"argparse.ArgumentParser",
"seaborn.color_palette",
"mlflow.set_tracking_uri",
"mlflow.list_run_infos",
"_pickle.load",
"matplotlib.colors.ListedColorm... | [((1171, 1188), 'os.walk', 'os.walk', (['exp_path'], {}), '(exp_path)\n', (1178, 1188), False, 'import os\n'), ((3394, 3416), 'skimage.io.imread', 'skio.imread', (['file_path'], {}), '(file_path)\n', (3405, 3416), True, 'import skimage.io as skio\n'), ((3475, 3515), 'skimage.transform.resize', 'skts.resize', (['img', '(long_edge, long_edge)'], {}), '(img, (long_edge, long_edge))\n', (3486, 3515), True, 'import skimage.transform as skts\n'), ((7401, 7442), 'numpy.append', 'np.append', (['rocm', "(reto_data['auroc'] * 100)"], {}), "(rocm, reto_data['auroc'] * 100)\n", (7410, 7442), True, 'import numpy as np\n'), ((7454, 7474), 'numpy.append', 'np.append', (['rocv', '(0.0)'], {}), '(rocv, 0.0)\n', (7463, 7474), True, 'import numpy as np\n'), ((7487, 7527), 'numpy.append', 'np.append', (['prcm', "(reto_data['aupr'] * 100)"], {}), "(prcm, reto_data['aupr'] * 100)\n", (7496, 7527), True, 'import numpy as np\n'), ((7539, 7559), 'numpy.append', 'np.append', (['prcv', '(0.0)'], {}), '(prcv, 0.0)\n', (7548, 7559), True, 'import numpy as np\n'), ((7634, 7686), 'numpy.append', 'np.append', (['rocm', "(pretrained_reto_data['auroc'] * 100)"], {}), "(rocm, pretrained_reto_data['auroc'] * 100)\n", (7643, 7686), True, 'import numpy as np\n'), ((7698, 7718), 'numpy.append', 'np.append', (['rocv', '(0.0)'], {}), '(rocv, 0.0)\n', (7707, 7718), True, 'import numpy as np\n'), ((7731, 7782), 'numpy.append', 'np.append', (['prcm', "(pretrained_reto_data['aupr'] * 100)"], {}), "(prcm, pretrained_reto_data['aupr'] * 100)\n", (7740, 7782), True, 'import numpy as np\n'), ((7794, 7814), 'numpy.append', 'np.append', (['prcv', '(0.0)'], {}), '(prcv, 0.0)\n', (7803, 7814), True, 'import numpy as np\n'), ((12494, 12517), 'numpy.array', 'np.array', (['sorted_colors'], {}), '(sorted_colors)\n', (12502, 12517), True, 'import numpy as np\n'), ((12540, 12565), 'numpy.array', 'np.array', (['sorted_colors_0'], {}), '(sorted_colors_0)\n', (12548, 12565), True, 'import 
numpy as np\n'), ((12584, 12605), 'numpy.array', 'np.array', (['group_inds0'], {}), '(group_inds0)\n', (12592, 12605), True, 'import numpy as np\n'), ((12624, 12645), 'numpy.array', 'np.array', (['group_inds1'], {}), '(group_inds1)\n', (12632, 12645), True, 'import numpy as np\n'), ((12664, 12685), 'numpy.array', 'np.array', (['group_inds2'], {}), '(group_inds2)\n', (12672, 12685), True, 'import numpy as np\n'), ((15979, 16004), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (16002, 16004), False, 'import argparse\n'), ((20953, 21006), 'matplotlib.rc', 'matplotlib.rc', (['"""axes"""'], {'edgecolor': '(0.3, 0.3, 0.3, 0.8)'}), "('axes', edgecolor=(0.3, 0.3, 0.3, 0.8))\n", (20966, 21006), False, 'import matplotlib\n'), ((21894, 21916), 'matplotlib.pyplot.rc', 'plt.rc', (['"""font"""'], {'size': '(5)'}), "('font', size=5)\n", (21900, 21916), True, 'import matplotlib.pyplot as plt\n'), ((21952, 21979), 'matplotlib.pyplot.rc', 'plt.rc', (['"""axes"""'], {'titlesize': '(5)'}), "('axes', titlesize=5)\n", (21958, 21979), True, 'import matplotlib.pyplot as plt\n'), ((22014, 22041), 'matplotlib.pyplot.rc', 'plt.rc', (['"""axes"""'], {'labelsize': '(5)'}), "('axes', labelsize=5)\n", (22020, 22041), True, 'import matplotlib.pyplot as plt\n'), ((22080, 22108), 'matplotlib.pyplot.rc', 'plt.rc', (['"""xtick"""'], {'labelsize': '(5)'}), "('xtick', labelsize=5)\n", (22086, 22108), True, 'import matplotlib.pyplot as plt\n'), ((22144, 22172), 'matplotlib.pyplot.rc', 'plt.rc', (['"""ytick"""'], {'labelsize': '(5)'}), "('ytick', labelsize=5)\n", (22150, 22172), True, 'import matplotlib.pyplot as plt\n'), ((22208, 22236), 'matplotlib.pyplot.rc', 'plt.rc', (['"""legend"""'], {'fontsize': '(5)'}), "('legend', fontsize=5)\n", (22214, 22236), True, 'import matplotlib.pyplot as plt\n'), ((22260, 22290), 'matplotlib.pyplot.rc', 'plt.rc', (['"""figure"""'], {'titlesize': '(10)'}), "('figure', titlesize=10)\n", (22266, 22290), True, 'import matplotlib.pyplot as plt\n'), 
((2899, 2986), 'pandas.DataFrame', 'pd.DataFrame', (["{'x': X_embedded[:, 0], 'y': X_embedded[:, 1], 'Dataset': cat_labels}"], {}), "({'x': X_embedded[:, 0], 'y': X_embedded[:, 1], 'Dataset':\n cat_labels})\n", (2911, 2986), True, 'import pandas as pd\n'), ((3013, 3112), 'seaborn.scatterplot', 'sns.scatterplot', (['"""x"""', '"""y"""'], {'hue': '"""Dataset"""', 's': '(1.0)', 'data': 'df', 'ax': 'ax', 'linewidth': '(0)', 'palette': 'cat2color'}), "('x', 'y', hue='Dataset', s=1.0, data=df, ax=ax, linewidth=0,\n palette=cat2color)\n", (3028, 3112), True, 'import seaborn as sns\n'), ((5506, 5516), 'numpy.mean', 'np.mean', (['v'], {}), '(v)\n', (5513, 5516), True, 'import numpy as np\n'), ((5798, 5851), 'os.path.join', 'os.path.join', (['exp_path', "('data_UC%d_%s.npy' % (uc, d1))"], {}), "(exp_path, 'data_UC%d_%s.npy' % (uc, d1))\n", (5810, 5851), False, 'import os\n'), ((5879, 5935), 'os.path.join', 'os.path.join', (['exp_path', "('headers_UC%d_%s.pkl' % (uc, d1))"], {}), "(exp_path, 'headers_UC%d_%s.pkl' % (uc, d1))\n", (5891, 5935), False, 'import os\n'), ((6044, 6060), '_pickle.load', '_pickle.load', (['fp'], {}), '(fp)\n', (6056, 6060), False, 'import _pickle\n'), ((6314, 6362), 'numpy.average', 'np.average', (['uc_roc'], {'axis': '(1, 2)', 'weights': 'weights'}), '(uc_roc, axis=(1, 2), weights=weights)\n', (6324, 6362), True, 'import numpy as np\n'), ((6378, 6426), 'numpy.average', 'np.average', (['uc_prc'], {'axis': '(1, 2)', 'weights': 'weights'}), '(uc_prc, axis=(1, 2), weights=weights)\n', (6388, 6426), True, 'import numpy as np\n'), ((6442, 6484), 'process_results_multiple.weighted_std', 'weighted_std', (['uc_roc', 'weights'], {'axis': '(1, 2)'}), '(uc_roc, weights, axis=(1, 2))\n', (6454, 6484), False, 'from process_results_multiple import weighted_std\n'), ((6500, 6542), 'process_results_multiple.weighted_std', 'weighted_std', (['uc_prc', 'weights'], {'axis': '(1, 2)'}), '(uc_prc, weights, axis=(1, 2))\n', (6512, 6542), False, 'from 
process_results_multiple import weighted_std\n'), ((6575, 6596), 'numpy.zeros_like', 'np.zeros_like', (['uc_roc'], {}), '(uc_roc)\n', (6588, 6596), True, 'import numpy as np\n'), ((6893, 6948), 'numpy.average', 'np.average', (['uc_roc'], {'axis': '(1, 2, 3)', 'weights': 'new_weights'}), '(uc_roc, axis=(1, 2, 3), weights=new_weights)\n', (6903, 6948), True, 'import numpy as np\n'), ((6964, 7019), 'numpy.average', 'np.average', (['uc_prc'], {'axis': '(1, 2, 3)', 'weights': 'new_weights'}), '(uc_prc, axis=(1, 2, 3), weights=new_weights)\n', (6974, 7019), True, 'import numpy as np\n'), ((9696, 9720), 'numpy.array', 'np.array', (['[lower, upper]'], {}), '([lower, upper])\n', (9704, 9720), True, 'import numpy as np\n'), ((10108, 10127), 'numpy.array', 'np.array', (['keep_inds'], {}), '(keep_inds)\n', (10116, 10127), True, 'import numpy as np\n'), ((14968, 14991), 'numpy.linspace', 'np.linspace', (['(50)', '(100)', '(3)'], {}), '(50, 100, 3)\n', (14979, 14991), True, 'import numpy as np\n'), ((24444, 24467), 'matplotlib.gridspec.GridSpec', 'gridspec.GridSpec', (['(2)', '(6)'], {}), '(2, 6)\n', (24461, 24467), True, 'import matplotlib.gridspec as gridspec\n'), ((24538, 24592), 'matplotlib.pyplot.subplot2grid', 'plt.subplot2grid', (['(2, 6)', '(0, 2)'], {'colspan': '(4)', 'rowspan': '(1)'}), '((2, 6), (0, 2), colspan=4, rowspan=1)\n', (24554, 24592), True, 'import matplotlib.pyplot as plt\n'), ((25176, 25230), 'matplotlib.pyplot.subplot2grid', 'plt.subplot2grid', (['(2, 6)', '(1, 2)'], {'colspan': '(4)', 'rowspan': '(1)'}), '((2, 6), (1, 2), colspan=4, rowspan=1)\n', (25192, 25230), True, 'import matplotlib.pyplot as plt\n'), ((25589, 25643), 'matplotlib.pyplot.subplot2grid', 'plt.subplot2grid', (['(2, 6)', '(0, 1)'], {'colspan': '(1)', 'rowspan': '(1)'}), '((2, 6), (0, 1), colspan=1, rowspan=1)\n', (25605, 25643), True, 'import matplotlib.pyplot as plt\n'), ((26006, 26060), 'matplotlib.pyplot.subplot2grid', 'plt.subplot2grid', (['(2, 6)', '(1, 1)'], {'colspan': '(1)', 
'rowspan': '(1)'}), '((2, 6), (1, 1), colspan=1, rowspan=1)\n', (26022, 26060), True, 'import matplotlib.pyplot as plt\n'), ((27319, 27373), 'matplotlib.pyplot.subplot2grid', 'plt.subplot2grid', (['(2, 6)', '(0, 0)'], {'colspan': '(1)', 'rowspan': '(1)'}), '((2, 6), (0, 0), colspan=1, rowspan=1)\n', (27335, 27373), True, 'import matplotlib.pyplot as plt\n'), ((27688, 27742), 'matplotlib.pyplot.subplot2grid', 'plt.subplot2grid', (['(2, 6)', '(1, 0)'], {'colspan': '(1)', 'rowspan': '(1)'}), '((2, 6), (1, 0), colspan=1, rowspan=1)\n', (27704, 27742), True, 'import matplotlib.pyplot as plt\n'), ((27791, 27887), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'wspace': '(0.1)', 'hspace': '(0.1)', 'bottom': '(0.265)', 'top': '(0.95)', 'right': '(0.95)', 'left': '(0.018)'}), '(wspace=0.1, hspace=0.1, bottom=0.265, top=0.95, right=\n 0.95, left=0.018)\n', (27810, 27887), True, 'import matplotlib.pyplot as plt\n'), ((27972, 28024), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(SAVE_NAME + '.png')"], {'bbox_inches': '"""tight"""'}), "(SAVE_NAME + '.png', bbox_inches='tight')\n", (27983, 28024), True, 'import matplotlib.pyplot as plt\n'), ((28365, 28388), 'matplotlib.gridspec.GridSpec', 'gridspec.GridSpec', (['(3)', '(6)'], {}), '(3, 6)\n', (28382, 28388), True, 'import matplotlib.gridspec as gridspec\n'), ((28458, 28512), 'matplotlib.pyplot.subplot2grid', 'plt.subplot2grid', (['(3, 6)', '(0, 0)'], {'colspan': '(6)', 'rowspan': '(1)'}), '((3, 6), (0, 0), colspan=6, rowspan=1)\n', (28474, 28512), True, 'import matplotlib.pyplot as plt\n'), ((29096, 29150), 'matplotlib.pyplot.subplot2grid', 'plt.subplot2grid', (['(3, 6)', '(1, 0)'], {'colspan': '(6)', 'rowspan': '(1)'}), '((3, 6), (1, 0), colspan=6, rowspan=1)\n', (29112, 29150), True, 'import matplotlib.pyplot as plt\n'), ((29453, 29507), 'matplotlib.pyplot.subplot2grid', 'plt.subplot2grid', (['(3, 6)', '(2, 0)'], {'colspan': '(6)', 'rowspan': '(1)'}), '((3, 6), (2, 0), colspan=6, rowspan=1)\n', (29469, 
29507), True, 'import matplotlib.pyplot as plt\n'), ((29857, 29953), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'wspace': '(0.1)', 'hspace': '(0.1)', 'bottom': '(0.205)', 'top': '(0.95)', 'right': '(0.95)', 'left': '(0.018)'}), '(wspace=0.1, hspace=0.1, bottom=0.205, top=0.95, right=\n 0.95, left=0.018)\n', (29876, 29953), True, 'import matplotlib.pyplot as plt\n'), ((30038, 30090), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(SAVE_NAME + '.png')"], {'bbox_inches': '"""tight"""'}), "(SAVE_NAME + '.png', bbox_inches='tight')\n", (30049, 30090), True, 'import matplotlib.pyplot as plt\n'), ((944, 986), 'mlflow.set_tracking_uri', 'mlflow.set_tracking_uri', (['remote_server_uri'], {}), '(remote_server_uri)\n', (967, 986), False, 'import mlflow\n'), ((1991, 2028), 'seaborn.color_palette', 'sns.color_palette', (['"""hls"""', 'n_color_max'], {}), "('hls', n_color_max)\n", (2008, 2028), True, 'import seaborn as sns\n'), ((2276, 2307), 'matplotlib.colors.ListedColormap', 'ListedColormap', (['this_color_list'], {}), '(this_color_list)\n', (2290, 2307), False, 'from matplotlib.colors import ListedColormap\n'), ((2853, 2884), 'matplotlib.colors.ListedColormap', 'ListedColormap', (['this_color_list'], {}), '(this_color_list)\n', (2867, 2884), False, 'from matplotlib.colors import ListedColormap\n'), ((4581, 4620), 'mlflow.get_experiment_by_name', 'mlflow.get_experiment_by_name', (['exp_name'], {}), '(exp_name)\n', (4610, 4620), False, 'import mlflow\n'), ((4647, 4687), 'mlflow.list_run_infos', 'mlflow.list_run_infos', (['exp.experiment_id'], {}), '(exp.experiment_id)\n', (4668, 4687), False, 'import mlflow\n'), ((5951, 6007), 'os.path.join', 'os.path.join', (['exp_path', "('headers_UC%d_%s.pkl' % (uc, d1))"], {}), "(exp_path, 'headers_UC%d_%s.pkl' % (uc, d1))\n", (5963, 6007), False, 'import os\n'), ((8592, 8633), 'numpy.array', 'np.array', (['(sorted_inds_g1 + sorted_inds_g2)'], {}), '(sorted_inds_g1 + sorted_inds_g2)\n', (8600, 8633), True, 'import 
numpy as np\n'), ((8717, 8739), 'numpy.argsort', 'np.argsort', (['group_sums'], {}), '(group_sums)\n', (8727, 8739), True, 'import numpy as np\n'), ((8814, 8838), 'numpy.array', 'np.array', (['sorted_inds_g1'], {}), '(sorted_inds_g1)\n', (8822, 8838), True, 'import numpy as np\n'), ((9105, 9120), 'numpy.array', 'np.array', (['order'], {}), '(order)\n', (9113, 9120), True, 'import numpy as np\n'), ((11745, 11767), 'matplotlib.cm.get_cmap', 'cm.get_cmap', (['"""rainbow"""'], {}), "('rainbow')\n", (11756, 11767), True, 'import matplotlib.cm as cm\n'), ((24178, 24222), 'matplotlib.pyplot.figure', 'plt.figure', (['(1)'], {'figsize': '(6, 2.5)', 'dpi': '(109 * 2)'}), '(1, figsize=(6, 2.5), dpi=109 * 2)\n', (24188, 24222), True, 'import matplotlib.pyplot as plt\n'), ((24255, 24295), 'matplotlib.pyplot.figure', 'plt.figure', (['(1)'], {'figsize': '(6, 2.5)', 'dpi': '(109)'}), '(1, figsize=(6, 2.5), dpi=109)\n', (24265, 24295), True, 'import matplotlib.pyplot as plt\n'), ((26785, 26850), 'matplotlib.pyplot.Line2D', 'plt.Line2D', (['[0, 0]', '[0, 0]'], {'color': 'color', 'marker': '"""o"""', 'linestyle': '""""""'}), "([0, 0], [0, 0], color=color, marker='o', linestyle='')\n", (26795, 26850), True, 'import matplotlib.pyplot as plt\n'), ((28099, 28143), 'matplotlib.pyplot.figure', 'plt.figure', (['(1)'], {'figsize': '(6, 3.5)', 'dpi': '(109 * 2)'}), '(1, figsize=(6, 3.5), dpi=109 * 2)\n', (28109, 28143), True, 'import matplotlib.pyplot as plt\n'), ((28176, 28216), 'matplotlib.pyplot.figure', 'plt.figure', (['(1)'], {'figsize': '(6, 3.5)', 'dpi': '(109)'}), '(1, figsize=(6, 3.5), dpi=109)\n', (28186, 28216), True, 'import matplotlib.pyplot as plt\n'), ((656, 669), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (666, 669), False, 'import time\n'), ((4806, 4837), 'mlflow.get_run', 'mlflow.get_run', (['run_info.run_id'], {}), '(run_info.run_id)\n', (4820, 4837), False, 'import mlflow\n'), ((7049, 7098), 'process_results_multiple.weighted_std', 'weighted_std', (['uc_roc', 
'new_weights'], {'axis': '(1, 2, 3)'}), '(uc_roc, new_weights, axis=(1, 2, 3))\n', (7061, 7098), False, 'from process_results_multiple import weighted_std\n'), ((7207, 7256), 'process_results_multiple.weighted_std', 'weighted_std', (['uc_prc', 'new_weights'], {'axis': '(1, 2, 3)'}), '(uc_prc, new_weights, axis=(1, 2, 3))\n', (7219, 7256), False, 'from process_results_multiple import weighted_std\n'), ((1313, 1345), 'os.path.join', 'os.path.join', (['exp_path', 'filename'], {}), '(exp_path, filename)\n', (1325, 1345), False, 'import os\n'), ((8410, 8432), 'numpy.argsort', 'np.argsort', (['group1_sum'], {}), '(group1_sum)\n', (8420, 8432), True, 'import numpy as np\n'), ((8496, 8518), 'numpy.argsort', 'np.argsort', (['group2_sum'], {}), '(group2_sum)\n', (8506, 8518), True, 'import numpy as np\n'), ((1433, 1465), 'os.path.join', 'os.path.join', (['exp_path', 'filename'], {}), '(exp_path, filename)\n', (1445, 1465), False, 'import os\n'), ((1618, 1634), '_pickle.load', '_pickle.load', (['fp'], {}), '(fp)\n', (1630, 1634), False, 'import _pickle\n'), ((1543, 1575), 'os.path.join', 'os.path.join', (['exp_path', 'filename'], {}), '(exp_path, filename)\n', (1555, 1575), False, 'import os\n')] |
from six import assertRaisesRegex
from unittest import TestCase
from tempfile import mkstemp
from os import close, unlink, write
from contextlib import contextmanager
from io import StringIO
from json import dumps, loads
import numpy as np
from pysam import CHARD_CLIP, CMATCH
from dark.reads import Read, ReadFilter
from dark.sam import (
PaddedSAM, SAMFilter, UnequalReferenceLengthError, UnknownReference,
InvalidSAM, samReferencesToStr, _hardClip, DistanceMatrix)
# Note: getReferenceInfo in dark/sam.py is tested by the calls to
# consensusFromBAM in test/test_consensus.py
# Some tests below actually use the filesystem to read files. That's due to
# the API to pysam and the fact that it calls a C function to open files,
# so we can't mock Python's 'open' method. Hence the following context
# manager.
@contextmanager
def dataFile(data):
    """
    Yield the name of a temporary file holding C{data} (UTF-8 encoded),
    removing the file again when the context exits normally.
    """
    # mkstemp gives us both an open OS-level handle and the path; write
    # the payload through the handle, then close it so readers see the
    # full contents.
    handle, path = mkstemp()
    encoded = data.encode('utf-8')
    write(handle, encoded)
    close(handle)
    yield path
    # Note: cleanup is intentionally not in a try/finally, matching the
    # original behavior (the file survives if the body raises).
    unlink(path)
class TestSAMFilter(TestCase):
    """
    Test the SAMFilter class.
    """
    # The fixture strings below are written with single spaces between SAM
    # fields and converted to the tab-separated format SAM requires via
    # .replace(' ', '\t').

    def testUnknownReferences(self):
        """
        Passing an unknown reference id to the referenceLengths method must
        result in an UnknownReference exception.
        """
        data = '\n'.join([
            '@SQ SN:id1 LN:90',
            '@SQ SN:id2 LN:90',
        ]).replace(' ', '\t')

        with dataFile(data) as filename:
            sam = SAMFilter(filename, referenceIds={'unknown'})
            # The error regex must match the message raised by
            # referenceLengths (dots escaped for re matching).
            error = ("^Reference 'unknown' is not present in the "
                     "SAM/BAM file\\.$")
            assertRaisesRegex(self, UnknownReference, error,
                              sam.referenceLengths)

    def testNoFilteringOptions(self):
        """
        If no filtering options are given, the noFiltering attribute
        on the SAM filter must be True.
        """
        sf = SAMFilter(None)
        self.assertTrue(sf.noFiltering)

    def testNoFilteringAllAlignmentsReturned(self):
        """
        When no filtering options are given, all alignments must be returned.
        """
        data = '\n'.join([
            '@SQ SN:ref1 LN:10',
            'query1 0 ref1 2 60 2=2X2M * 0 0 TCTAGG ZZZZZZ AS:i:10',
            'query2 0 ref1 2 60 2= * 0 0 TC ZZ',
            'query3 0 ref1 2 60 2=2X2M * 0 0 TCTAGG ZZZZZZ AS:i:3',
        ]).replace(' ', '\t')

        with dataFile(data) as filename:
            sf = SAMFilter(filename)
            (alignment1, alignment2, alignment3) = list(sf.alignments())
            self.assertEqual('query1', alignment1.query_name)
            self.assertEqual('query2', alignment2.query_name)
            self.assertEqual('query3', alignment3.query_name)

    def testAFilteringOptionSetsNoFiltering(self):
        """
        If a filtering options is given, the noFiltering attribute
        on the SAM filter must be False.
        """
        sf = SAMFilter(None, storeQueryIds=True)
        self.assertFalse(sf.noFiltering)

    def testStoreQueryIds(self):
        """
        If we request that query ids are saved, they must be.
        """
        # query2 appears twice, so queryIds (a set) must contain it once.
        data = '\n'.join([
            '@SQ SN:ref1 LN:10',
            'query1 0 ref1 2 60 2=2X2M * 0 0 TCTAGG 123456',
            'query2 0 ref1 2 60 2= * 0 0 TC XY',
            'query2 0 ref1 2 60 2= * 0 0 TC XY',
        ]).replace(' ', '\t')

        with dataFile(data) as filename:
            sf = SAMFilter(filename, storeQueryIds=True)
            list(sf.alignments())
            self.assertEqual({'query1', 'query2'}, sf.queryIds)

    def testAlignmentCount(self):
        """
        When all queries have been yielded, the alignment count must be
        as expected.
        """
        data = '\n'.join([
            '@SQ SN:ref1 LN:10',
            'query1 0 ref1 2 60 2=2X2M * 0 0 TCTAGG 123456',
            'query2 0 ref1 2 60 2= * 0 0 TC XY',
        ]).replace(' ', '\t')

        with dataFile(data) as filename:
            sf = SAMFilter(filename)
            # The count is only complete once the generator is exhausted.
            list(sf.alignments())
            self.assertEqual(2, sf.alignmentCount)

    def testMinLength(self):
        """
        A request for reads that are only longer than a certain value should
        result in the expected result.
        """
        # query1 is 6 nt, query2 only 2 nt, so only query1 passes the
        # minLength=6 read filter.
        data = '\n'.join([
            '@SQ SN:ref1 LN:10',
            'query1 0 ref1 2 60 2=2X2M * 0 0 TCTAGG ZZZZZZ',
            'query2 0 ref1 2 60 2= * 0 0 TC ZZ',
        ]).replace(' ', '\t')

        with dataFile(data) as filename:
            filterRead = ReadFilter(minLength=6).filter
            sf = SAMFilter(filename, filterRead=filterRead)
            (alignment,) = list(sf.alignments())
            self.assertEqual('query1', alignment.query_name)

    def testDropSecondary(self):
        """
        Dropping matches flagged as secondary must give the expected result.
        """
        # SAM flag 256 (0x100) marks query2 as a secondary alignment.
        data = '\n'.join([
            '@SQ SN:ref1 LN:10',
            'query1 0 ref1 2 60 2=2X2M * 0 0 TCTAGG ZZZZZZ',
            'query2 256 ref1 2 60 2= * 0 0 TC ZZ',
        ]).replace(' ', '\t')

        with dataFile(data) as filename:
            sf = SAMFilter(filename, dropSecondary=True)
            (alignment,) = list(sf.alignments())
            self.assertEqual('query1', alignment.query_name)

    def testDropSupplementary(self):
        """
        Dropping matches flagged as supplementary must give the expected
        result.
        """
        # SAM flag 2048 (0x800) marks query2 as a supplementary alignment.
        data = '\n'.join([
            '@SQ SN:ref1 LN:10',
            'query1 0 ref1 2 60 2=2X2M * 0 0 TCTAGG ZZZZZZ',
            'query2 2048 ref1 2 60 2= * 0 0 TC ZZ',
        ]).replace(' ', '\t')

        with dataFile(data) as filename:
            sf = SAMFilter(filename, dropSupplementary=True)
            (alignment,) = list(sf.alignments())
            self.assertEqual('query1', alignment.query_name)

    def testDropDuplicates(self):
        """
        Dropping matches flagged as optical or PCR duplicates must give the
        expected result.
        """
        # SAM flag 1024 (0x400) marks query2 as a PCR/optical duplicate.
        data = '\n'.join([
            '@SQ SN:ref1 LN:10',
            'query1 0 ref1 2 60 2=2X2M * 0 0 TCTAGG ZZZZZZ',
            'query2 1024 ref1 2 60 2= * 0 0 TC ZZ',
        ]).replace(' ', '\t')

        with dataFile(data) as filename:
            sf = SAMFilter(filename, dropDuplicates=True)
            (alignment,) = list(sf.alignments())
            self.assertEqual('query1', alignment.query_name)

    def testKeepQualityControlFailures(self):
        """
        Keeping matches flagged as quality control failures must give the
        expected result.
        """
        # SAM flag 512 (0x200) marks query2 as a QC failure; with
        # keepQCFailures=True both alignments must be yielded.
        data = '\n'.join([
            '@SQ SN:ref1 LN:10',
            'query1 0 ref1 2 60 2=2X2M * 0 0 TCTAGG ZZZZZZ',
            'query2 512 ref1 4 60 2= * 0 0 TC ZZ',
        ]).replace(' ', '\t')

        with dataFile(data) as filename:
            sf = SAMFilter(filename, keepQCFailures=True)
            (alignment1, alignment2) = list(sf.alignments())
            self.assertEqual('query1', alignment1.query_name)
            self.assertEqual('query2', alignment2.query_name)

    def testMinScoreNoScores(self):
        """
        A request for reads with alignment scores no lower than a given value
        must produce an empty result when no alignments have scores.
        """
        # Neither line carries an AS:i (alignment score) tag.
        data = '\n'.join([
            '@SQ SN:ref1 LN:10',
            'query1 0 ref1 2 60 2=2X2M * 0 0 TCTAGG ZZZZZZ',
            'query2 0 ref1 2 60 2= * 0 0 TC ZZ',
        ]).replace(' ', '\t')

        with dataFile(data) as filename:
            sf = SAMFilter(filename, minScore=6)
            self.assertEqual([], list(sf.alignments()))

    def testMinScore(self):
        """
        A request for reads with alignment scores no lower than a given value
        must produce the expected result when some alignments have scores.
        """
        # Only query1 (AS:i:10) meets minScore=6; query2 has no score and
        # query3's score (3) is too low.
        data = '\n'.join([
            '@SQ SN:ref1 LN:10',
            'query1 0 ref1 2 60 2=2X2M * 0 0 TCTAGG ZZZZZZ AS:i:10',
            'query2 0 ref1 2 60 2= * 0 0 TC ZZ',
            'query3 0 ref1 2 60 2=2X2M * 0 0 TCTAGG ZZZZZZ AS:i:3',
        ]).replace(' ', '\t')

        with dataFile(data) as filename:
            sf = SAMFilter(filename, minScore=6)
            (alignment,) = list(sf.alignments())
            self.assertEqual('query1', alignment.query_name)

    def testMaxScoreNoScores(self):
        """
        A request for reads with alignment scores no higher than a given value
        must produce an empty result when no alignments have scores.
        """
        data = '\n'.join([
            '@SQ SN:ref1 LN:10',
            'query1 0 ref1 2 60 2=2X2M * 0 0 TCTAGG ZZZZZZ',
            'query2 0 ref1 2 60 2= * 0 0 TC ZZ',
            'query3 0 ref1 2 60 2=2X2M * 0 0 TCTAGG ZZZZZZ',
        ]).replace(' ', '\t')

        with dataFile(data) as filename:
            sf = SAMFilter(filename, maxScore=6)
            self.assertEqual([], list(sf.alignments()))

    def testMaxScore(self):
        """
        A request for reads with alignment scores no higher than a given value
        must produce the expected result when some alignments have scores.
        """
        # Only query3 (AS:i:3) is at or below maxScore=6; query2 has no
        # score and query1's score (10) is too high.
        data = '\n'.join([
            '@SQ SN:ref1 LN:10',
            'query1 0 ref1 2 60 2=2X2M * 0 0 TCTAGG ZZZZZZ AS:i:10',
            'query2 0 ref1 2 60 2= * 0 0 TC ZZ',
            'query3 0 ref1 2 60 2=2X2M * 0 0 TCTAGG ZZZZZZ AS:i:3',
        ]).replace(' ', '\t')

        with dataFile(data) as filename:
            sf = SAMFilter(filename, maxScore=6)
            (alignment,) = list(sf.alignments())
            self.assertEqual('query3', alignment.query_name)

    def testMinAndMaxScore(self):
        """
        A request for reads with alignment scores no lower or higher than
        given values must produce the expected result.
        """
        # Scores: query1=10, query2=12, query3=none, query4=3, query5=2.
        # With minScore=3 and maxScore=10, only query1 and query4 survive.
        data = '\n'.join([
            '@SQ SN:ref1 LN:10',
            'query1 0 ref1 2 60 2=2X2M * 0 0 TCTAGG ZZZZZZ AS:i:10',
            'query2 0 ref1 2 60 2=2X2M * 0 0 TCTAGG ZZZZZZ AS:i:12',
            'query3 0 ref1 2 60 2= * 0 0 TC ZZ',
            'query4 0 ref1 2 60 2=2X2M * 0 0 TCTAGG ZZZZZZ AS:i:3',
            'query5 0 ref1 2 60 2=2X2M * 0 0 TCTAGG ZZZZZZ AS:i:2',
        ]).replace(' ', '\t')

        with dataFile(data) as filename:
            sf = SAMFilter(filename, minScore=3, maxScore=10)
            (alignment1, alignment2) = list(sf.alignments())
            self.assertEqual('query1', alignment1.query_name)
            self.assertEqual('query4', alignment2.query_name)

    def testCloseButNoCIGAR(self):
        """
        An unmapped query with no CIGAR string must be passed through
        unchanged if dropUnmapped is not specified.
        """
        # SAM flag 4 (0x4) = unmapped; '*' means no CIGAR string.
        data = '\n'.join([
            '@SQ SN:ref LN:10',
            'query1 4 * 0 0 * * 0 0 TCTAGG ZZZZZZ',
        ]).replace(' ', '\t')

        with dataFile(data) as filename:
            sf = SAMFilter(filename)
            (alignment,) = list(sf.alignments())
            self.assertEqual('query1', alignment.query_name)
            self.assertEqual('TCTAGG', alignment.query_sequence)
            # pysam stores qualities as integers; add 33 to recover the
            # printable Phred+33 characters for comparison.
            self.assertEqual('ZZZZZZ', ''.join(
                map(lambda x: chr(x + 33), alignment.query_qualities)))

    def testNoQuality(self):
        """
        If an alignment has * for the quality string, the filter must
        return an alignment with a C{None} quality value.
        """
        data = '\n'.join([
            '@SQ SN:ref LN:10',
            'query1 4 * 0 0 6M * 0 0 TCTAGG *',
        ]).replace(' ', '\t')

        with dataFile(data) as filename:
            sf = SAMFilter(filename)
            (alignment,) = list(sf.alignments())
            self.assertEqual('query1', alignment.query_name)
            self.assertEqual('TCTAGG', alignment.query_sequence)
            self.assertIsNone(alignment.query_qualities)
class TestPaddedSAM(TestCase):
"""
Test the PaddedSAM class.
"""
    # In reading the tests below, it is important to remember that the start
    # position (in the reference) of the match in SAM format is 1-based. This
    # is the 4th field in the non-header SAM lines (i.e., those that don't
    # start with @). If you look at the code in ../dark/sam.py, pysam provides
    # a 'reference_start' attribute that is 0-based.

    def testUnequalReferenceLengths(self):
        """
        Passing no reference ids when the references have different lengths
        must result in an UnequalReferenceLengthError exception.
        """
        # Two references of lengths 90 and 91, so padding is ambiguous.
        data = '\n'.join([
            '@SQ SN:id1 LN:90',
            '@SQ SN:id2 LN:91',
        ]).replace(' ', '\t')

        with dataFile(data) as filename:
            error = ('^Your 2 SAM/BAM file reference sequence lengths '
                     '\\(id1=90, id2=91\\) are not all identical\\.$')
            assertRaisesRegex(self, UnequalReferenceLengthError, error,
                              PaddedSAM, SAMFilter(filename))

    def testQueryTooLong(self):
        """
        If the query sequence is longer than the total of the lengths in the
        CIGAR operations, a ValueError must be raised.
        """
        # This test just returns. It used to be possible to reach the
        # "Query ... not fully consumed when parsing CIGAR string."
        # ValueError in sam.py, prior to the fix of
        # https://github.com/acorg/dark-matter/issues/630 but it is not
        # possible to get a CIGAR string that has a different total length
        # from the sequence length through to our code in sam.py because
        # pysam catches the error. I'm leaving this test here because it
        # documents that the error checked for in sam.py cannot currently
        # be reached and the test may become useful. For now it just returns.
        return

        # NOTE: the code below is intentionally unreachable (see above).
        data = '\n'.join([
            '@SQ SN:ref1 LN:90',
            'query1 0 ref1 1 60 4M * 0 0 TCTAGG ZZZZZZ',
        ]).replace(' ', '\t')

        with dataFile(data) as filename:
            ps = PaddedSAM(SAMFilter(filename))
            error = ('^Query TCTAGG not fully consumed when parsing CIGAR '
                     'string\\.')
            assertRaisesRegex(self, ValueError, error, list, ps.queries())
    def testAllMMatch(self):
        """
        A simple all-'M' match must result in the expected padded sequence.
        """
        # CIGAR 6M at 1-based position 2: the read occupies reference
        # offsets 1..6, so it is padded with one '-' before and three after.
        data = '\n'.join([
            '@SQ SN:ref1 LN:10',
            'query1 0 ref1 2 60 6M * 0 0 TCTAGG ZZZZZZ',
        ]).replace(' ', '\t')

        with dataFile(data) as filename:
            ps = PaddedSAM(SAMFilter(filename))
            (read,) = list(ps.queries())
            self.assertEqual(Read('query1', '-TCTAGG---', '!ZZZZZZ!!!'), read)

    def testMixedMatch(self):
        """
        A match that is a mix of M, =, and X must result in the expected
        padded sequence.
        """
        data = '\n'.join([
            '@SQ SN:ref1 LN:10',
            'query1 0 ref1 2 60 2=2X2M * 0 0 TCTAGG ZZZZZZ',
        ]).replace(' ', '\t')

        with dataFile(data) as filename:
            ps = PaddedSAM(SAMFilter(filename))
            (read,) = list(ps.queries())
            self.assertEqual(Read('query1', '-TCTAGG---', '!ZZZZZZ!!!'), read)

    def testHardClipLeft(self):
        """
        A simple all-'M' match with a hard clip left must result in the
        expected padded sequence.
        """
        # The 10H hard clip consumes no query bases, so the result is
        # identical to the plain 6M case.
        data = '\n'.join([
            '@SQ SN:ref1 LN:10',
            'query1 0 ref1 2 60 10H6M * 0 0 TCTAGG ZZZZZZ',
        ]).replace(' ', '\t')

        with dataFile(data) as filename:
            ps = PaddedSAM(SAMFilter(filename))
            (read,) = list(ps.queries())
            self.assertEqual(Read('query1', '-TCTAGG---', '!ZZZZZZ!!!'), read)

    def testHardClipRight(self):
        """
        A simple all-'M' match with a hard clip right must result in the
        expected padded sequence.
        """
        data = '\n'.join([
            '@SQ SN:ref1 LN:10',
            'query1 0 ref1 2 60 6M10H * 0 0 TCTAGG ZZZZZZ',
        ]).replace(' ', '\t')

        with dataFile(data) as filename:
            ps = PaddedSAM(SAMFilter(filename))
            (read,) = list(ps.queries())
            self.assertEqual(Read('query1', '-TCTAGG---', '!ZZZZZZ!!!'), read)
    def testRcNeeded(self):
        """
        A reverse-complemented match (flag = 16) when rcNeeded=True is passed
        must result in the expected (reverse complemented) padded sequence
        and reversed quality string.
        """
        # SAM flag 16 (0x10) marks the query as mapped to the reverse strand.
        data = '\n'.join([
            '@SQ SN:ref1 LN:10',
            'query1 16 ref1 2 60 6M * 0 0 TCTAGG 123456',
        ]).replace(' ', '\t')

        with dataFile(data) as filename:
            ps = PaddedSAM(SAMFilter(filename))
            (read,) = list(ps.queries(rcNeeded=True))
            # TCTAGG reverse-complemented is CCTAGA; quality is reversed.
            self.assertEqual(Read('query1', '-CCTAGA---', '!654321!!!'), read)

    def testRcSuffix(self):
        """
        A reverse-complemented sequence should have the rcSuffix string added
        to its id when an rcSuffix value is passed.
        """
        data = '\n'.join([
            '@SQ SN:ref1 LN:10',
            'query1 16 ref1 2 60 6M * 0 0 TCTAGG 123456',
        ]).replace(' ', '\t')

        with dataFile(data) as filename:
            ps = PaddedSAM(SAMFilter(filename))
            (read,) = list(ps.queries(rcSuffix='-rc', rcNeeded=True))
            self.assertEqual(Read('query1-rc', '-CCTAGA---', '!654321!!!'),
                             read)

    def testQuerySoftClipLeft(self):
        """
        A match with a soft-clipped region that does not extend to the left of
        the reference must result in the expected padded sequence.
        """
        # 2S4M at position 4: the 2 soft-clipped bases fit within the
        # reference, so the whole query is kept.
        data = '\n'.join([
            '@SQ SN:ref1 LN:10',
            'query1 0 ref1 4 60 2S4M * 0 0 TCTAGG ZZZZZZ',
        ]).replace(' ', '\t')

        with dataFile(data) as filename:
            ps = PaddedSAM(SAMFilter(filename))
            (read,) = list(ps.queries())
            self.assertEqual(Read('query1', '-TCTAGG---', '!ZZZZZZ!!!'), read)

    def testQuerySoftClipReachesLeftEdge(self):
        """
        A match with a soft-clipped region that reaches to the left edge of the
        reference must result in the expected padded sequence.
        """
        data = '\n'.join([
            '@SQ SN:ref1 LN:10',
            'query1 0 ref1 5 60 4S2M * 0 0 TCTAGG ZZZZZZ',
        ]).replace(' ', '\t')

        with dataFile(data) as filename:
            ps = PaddedSAM(SAMFilter(filename))
            (read,) = list(ps.queries())
            self.assertEqual(Read('query1', 'TCTAGG----', 'ZZZZZZ!!!!'), read)

    def testQuerySoftClipProtrudesLeft(self):
        """
        A match with a soft-clipped region that extends to the left of the
        reference must result in the expected padded sequence.
        """
        # The 4S clip at position 2 would start 3 bases before the
        # reference, so the protruding prefix (TCT) is discarded.
        data = '\n'.join([
            '@SQ SN:ref1 LN:10',
            'query1 0 ref1 2 60 4S2M * 0 0 TCTAGG ZZZZZZ',
        ]).replace(' ', '\t')

        with dataFile(data) as filename:
            ps = PaddedSAM(SAMFilter(filename))
            (read,) = list(ps.queries())
            self.assertEqual(Read('query1', 'AGG-------', 'ZZZ!!!!!!!'), read)
def testKF414679SoftClipLeft(self):
"""
Test for a case that wasn't working.
"""
seq = ('GCCATGCAGTGGAACTCCACAGCATTCCACCAAGCTCTGC'
'AGAATCCCAAAGTCAGGGGTTTGTATCTTCTTGCTGGTGGC')
quality = ('ZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZ'
'ZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZ')
data = '\n'.join([
'@SQ SN:ref1 LN:10',
'query1 0 ref1 5 60 18S63M * 0 0 %s %s' % (seq, quality),
]).replace(' ', '\t')
with dataFile(data) as filename:
ps = PaddedSAM(SAMFilter(filename))
(read,) = list(ps.queries())
self.assertEqual(Read('query1', seq[14:], quality[14:]), read)
    def testQuerySoftClipRight(self):
        """
        A match with a soft-clipped region that does not extend to the right of
        the reference must result in the expected padded sequence.
        """
        # CIGAR 4M2S: the 2 trailing soft-clipped bases fit inside the
        # reference and are kept.
        data = '\n'.join([
            '@SQ SN:ref1 LN:10',
            'query1 0 ref1 4 60 4M2S * 0 0 TCTAGG ZZZZZZ',
        ]).replace(' ', '\t')
        with dataFile(data) as filename:
            ps = PaddedSAM(SAMFilter(filename))
            (read,) = list(ps.queries())
            self.assertEqual(Read('query1', '---TCTAGG-', '!!!ZZZZZZ!'), read)
    def testQuerySoftClipReachesRightEdge(self):
        """
        A match with a soft-clipped region that reaches to the right edge of
        the reference must result in the expected padded sequence.
        """
        # The 4 trailing soft-clipped bases end exactly at the reference's
        # right edge, so nothing is truncated.
        data = '\n'.join([
            '@SQ SN:ref1 LN:10',
            'query1 0 ref1 5 60 2M4S * 0 0 TCTAGG ZZZZZZ',
        ]).replace(' ', '\t')
        with dataFile(data) as filename:
            ps = PaddedSAM(SAMFilter(filename))
            (read,) = list(ps.queries())
            self.assertEqual(Read('query1', '----TCTAGG', '!!!!ZZZZZZ'), read)
    def testQuerySoftClipProtrudesRight(self):
        """
        A match with a soft-clipped region that extends to the right of
        the reference must result in the expected padded sequence.
        """
        # One trailing soft-clipped base protrudes past the reference end
        # and must be dropped (only TCTAG of TCTAGG survives).
        data = '\n'.join([
            '@SQ SN:ref1 LN:10',
            'query1 0 ref1 6 60 2M4S * 0 0 TCTAGG ZZZZZZ',
        ]).replace(' ', '\t')
        with dataFile(data) as filename:
            ps = PaddedSAM(SAMFilter(filename))
            (read,) = list(ps.queries())
            self.assertEqual(Read('query1', '-----TCTAG', '!!!!!ZZZZZ'), read)
    def testQuerySoftClipProtrudesBothSides(self):
        """
        A match with a soft-clipped region that extends to both the left and
        right of the reference must result in the expected padded sequence.
        """
        # 5S5M5S at position 4: both clips protrude, so the padded read is
        # truncated on both sides to the reference length.
        data = '\n'.join([
            '@SQ SN:ref1 LN:10',
            'query1 0 ref1 4 60 5S5M5S * 0 0 TCTAGGCTGACTAAG ZZZZZZZZZZZZZZZ',
        ]).replace(' ', '\t')
        with dataFile(data) as filename:
            ps = PaddedSAM(SAMFilter(filename))
            (read,) = list(ps.queries())
            self.assertEqual(Read('query1', 'TAGGCTGACT', 'ZZZZZZZZZZ'), read)
    def testQueryHardClipAndSoftClipProtrudesBothSides(self):
        """
        A match with a soft-clipped region that extends to both the left and
        right of the reference must result in the expected padded sequence
        when hard clipping is also indicated by the CIGAR string.
        """
        # 3H5S5M4S5H: the hard-clip operations (H) refer to bases absent
        # from the SEQ field and must not shift the padding.
        data = '\n'.join([
            '@SQ SN:ref1 LN:10',
            'query1 0 ref1 4 0 3H5S5M4S5H * 0 0 TCTAGGCTGACTAA ZZZZZZZZZZZZZZ',
        ]).replace(' ', '\t')
        with dataFile(data) as filename:
            ps = PaddedSAM(SAMFilter(filename))
            (read,) = list(ps.queries())
            self.assertEqual(Read('query1', 'TAGGCTGACT', 'ZZZZZZZZZZ'), read)
    def testReferenceInsertion(self):
        """
        An insertion into the reference must result in the expected padded
        sequence and the expected value in the referenceInsertions dictionary.
        """
        # 2M2I2M: the two inserted bases (TA) are removed from the padded
        # read and recorded, keyed by query id, as (offset, bases).
        data = '\n'.join([
            '@SQ SN:ref1 LN:10',
            'query1 0 ref1 2 60 2M2I2M * 0 0 TCTAGG ZZZZZZ',
        ]).replace(' ', '\t')
        with dataFile(data) as filename:
            ps = PaddedSAM(SAMFilter(filename))
            (read,) = list(ps.queries())
            self.assertEqual(Read('query1', '-TCGG-----', '!ZZZZ!!!!!'), read)
            self.assertEqual(
                {
                    'query1': [(3, 'TA')],
                },
                ps.referenceInsertions)
    def testPrimaryAndSecondaryReferenceInsertion(self):
        """
        A primary and secondary insertion into the reference (of the same
        query) must result in the expected padded sequences and the expected
        value in the referenceInsertions dictionary.
        """
        # The secondary alignment (flag 256) has '*' for SEQ/QUAL and so
        # reuses the primary's sequence; its id is disambiguated to query1/1.
        data = '\n'.join([
            '@SQ SN:ref1 LN:10',
            'query1 0 ref1 2 60 2M2I2M * 0 0 TCTAGG ZZZZZZ',
            'query1 256 ref1 4 60 2M3I1M * 0 0 * *',
        ]).replace(' ', '\t')
        with dataFile(data) as filename:
            ps = PaddedSAM(SAMFilter(filename))
            (read1, read2) = list(ps.queries())
            self.assertEqual(Read('query1', '-TCGG-----', '!ZZZZ!!!!!'), read1)
            self.assertEqual(Read('query1/1', '---TCG----', '!!!ZZZ!!!!'),
                             read2)
            self.assertEqual(
                {
                    'query1': [(3, 'TA')],
                    'query1/1': [(5, 'TAG')],
                },
                ps.referenceInsertions)
    def testReferenceDeletion(self):
        """
        A deletion of reference bases must result in the expected padded
        sequence (with Ns inserted for the deleted reference bases).
        """
        # 2M2D4M: the two deleted reference positions become 'N' in the
        # query with the default unknown ('!') quality.
        data = '\n'.join([
            '@SQ SN:ref1 LN:10',
            'query1 0 ref1 2 60 2M2D4M * 0 0 TCTAGG ZZZZZZ',
        ]).replace(' ', '\t')
        with dataFile(data) as filename:
            ps = PaddedSAM(SAMFilter(filename))
            (read,) = list(ps.queries())
            self.assertEqual(Read('query1', '-TCNNTAGG-', '!ZZ!!ZZZZ!'), read)
    def testReferenceDeletionAlternateChars(self):
        """
        A deletion of reference bases must result in the expected padded
        sequence (with the passed query insertion character and unknown
        quality character) when queryInsertionChar and unknownQualityChar
        arguments are passed.
        """
        data = '\n'.join([
            '@SQ SN:ref1 LN:10',
            'query1 0 ref1 2 60 2M2D4M * 0 0 TCTAGG ZZZZZZ',
        ]).replace(' ', '\t')
        with dataFile(data) as filename:
            ps = PaddedSAM(SAMFilter(filename))
            # '?' marks inserted query positions, '+' unknown qualities.
            (read,) = list(ps.queries(queryInsertionChar='?',
                                      unknownQualityChar='+'))
            self.assertEqual(Read('query1', '-TC??TAGG-', '+ZZ++ZZZZ+'), read)
    def testReferenceSkip(self):
        """
        A skip of reference bases must result in the expected padded
        sequence with the passed unknown quality character when the
        unknownQualityChar argument is passed.
        """
        # 2M2N4M: the N (skip) operation behaves like a deletion here,
        # yielding 'N' bases with the given unknown quality character.
        data = '\n'.join([
            '@SQ SN:ref1 LN:10',
            'query1 0 ref1 2 60 2M2N4M * 0 0 TCTAGG ZZZZZZ',
        ]).replace(' ', '\t')
        with dataFile(data) as filename:
            ps = PaddedSAM(SAMFilter(filename))
            (read,) = list(ps.queries(unknownQualityChar='.'))
            self.assertEqual(Read('query1', '-TCNNTAGG-', '.ZZ..ZZZZ.'), read)
    def testReferenceSkipAlternateChars(self):
        """
        A skip of reference bases must result in the expected padded
        sequence (with the passed query insertion character and unknown
        quality character) when queryInsertionChar and unknownQualityChar
        arguments are passed.
        """
        data = '\n'.join([
            '@SQ SN:ref1 LN:10',
            'query1 0 ref1 2 60 2M2N4M * 0 0 TCTAGG ZZZZZZ',
        ]).replace(' ', '\t')
        with dataFile(data) as filename:
            ps = PaddedSAM(SAMFilter(filename))
            # 'X' marks skipped positions in the query, '+' their quality.
            (read,) = list(ps.queries(queryInsertionChar='X',
                                      unknownQualityChar='+'))
            self.assertEqual(Read('query1', '-TCXXTAGG-', '+ZZ++ZZZZ+'), read)
    def testMixedMatchSpecificReferenceButNoMatches(self):
        """
        A request for reads aligned against a reference that exists but that
        has no matches must result in an empty list.
        """
        # The only alignment is against ref1, but we filter on ref2.
        data = '\n'.join([
            '@SQ SN:ref1 LN:15',
            '@SQ SN:ref2 LN:15',
            'query1 0 ref1 2 60 2=2X2M * 0 0 TCTAGG ZZZZZZ',
        ]).replace(' ', '\t')
        with dataFile(data) as filename:
            ps = PaddedSAM(SAMFilter(filename, referenceIds={'ref2'}))
            self.assertEqual([], list(ps.queries()))
    def testMixedMatchSpecificReference(self):
        """
        A match that is a mix of M, =, and X must result in the expected
        padded sequence when a reference sequence is specified.
        """
        # CIGAR 2=2X2M: =, X and M all consume query and reference equally.
        data = '\n'.join([
            '@SQ SN:ref1 LN:10',
            '@SQ SN:ref2 LN:10',
            'query1 0 ref1 2 60 2=2X2M * 0 0 TCTAGG ZZZZZZ',
        ]).replace(' ', '\t')
        with dataFile(data) as filename:
            ps = PaddedSAM(SAMFilter(filename, referenceIds={'ref1'}))
            (read,) = list(ps.queries())
            self.assertEqual(Read('query1', '-TCTAGG---', '!ZZZZZZ!!!'), read)
    def testMinLength(self):
        """
        A request for reads that are only longer than a certain value should
        result in the expected result.
        """
        # query2 (length 2) is filtered out by minLength=6; only query1
        # survives.
        data = '\n'.join([
            '@SQ SN:ref1 LN:10',
            'query1 0 ref1 2 60 2=2X2M * 0 0 TCTAGG ZZZZZZ',
            'query2 0 ref1 2 60 2= * 0 0 TC ZZ',
        ]).replace(' ', '\t')
        with dataFile(data) as filename:
            filterRead = ReadFilter(minLength=6).filter
            ps = PaddedSAM(SAMFilter(filename, filterRead=filterRead))
            (read,) = list(ps.queries())
            self.assertEqual(Read('query1', '-TCTAGG---', '!ZZZZZZ!!!'), read)
    def testDropSecondary(self):
        """
        Dropping matches flagged as secondary must give the expected result.
        """
        # query2 has SAM flag 256 (secondary) and must be excluded.
        data = '\n'.join([
            '@SQ SN:ref1 LN:10',
            'query1 0 ref1 2 60 2=2X2M * 0 0 TCTAGG ZZZZZZ',
            'query2 256 ref1 2 60 2= * 0 0 TC ZZ',
        ]).replace(' ', '\t')
        with dataFile(data) as filename:
            ps = PaddedSAM(SAMFilter(filename, dropSecondary=True))
            (read,) = list(ps.queries())
            self.assertEqual(Read('query1', '-TCTAGG---', '!ZZZZZZ!!!'), read)
    def testDropSupplementary(self):
        """
        Dropping matches flagged as supplementary must give the expected
        result.
        """
        # query2 has SAM flag 2048 (supplementary) and must be excluded.
        data = '\n'.join([
            '@SQ SN:ref1 LN:10',
            'query1 0 ref1 2 60 2=2X2M * 0 0 TCTAGG ZZZZZZ',
            'query2 2048 ref1 2 60 2= * 0 0 TC ZZ',
        ]).replace(' ', '\t')
        with dataFile(data) as filename:
            ps = PaddedSAM(SAMFilter(filename, dropSupplementary=True))
            (read,) = list(ps.queries())
            self.assertEqual(Read('query1', '-TCTAGG---', '!ZZZZZZ!!!'), read)
    def testDropDuplicates(self):
        """
        Dropping matches flagged as optical or PCR duplicates must give the
        expected result.
        """
        # query2 has SAM flag 1024 (duplicate) and must be excluded.
        data = '\n'.join([
            '@SQ SN:ref1 LN:10',
            'query1 0 ref1 2 60 2=2X2M * 0 0 TCTAGG ZZZZZZ',
            'query2 1024 ref1 2 60 2= * 0 0 TC ZZ',
        ]).replace(' ', '\t')
        with dataFile(data) as filename:
            ps = PaddedSAM(SAMFilter(filename, dropDuplicates=True))
            (read,) = list(ps.queries())
            self.assertEqual(Read('query1', '-TCTAGG---', '!ZZZZZZ!!!'), read)
    def testAllowDuplicateIds(self):
        """
        It must be possible to allow duplicate ids (in this case due to a
        secondary match).
        """
        # With allowDuplicateIds=True both reads keep the plain 'query1'
        # id instead of the second being renamed 'query1/1'.
        data = '\n'.join([
            '@SQ SN:ref1 LN:10',
            'query1 0 ref1 2 60 2=2X2M * 0 0 TCTAGG ZZZZZZ',
            'query1 0 ref1 3 60 2= * 0 0 TC ZZ',
        ]).replace(' ', '\t')
        with dataFile(data) as filename:
            ps = PaddedSAM(SAMFilter(filename))
            (read1, read2) = list(ps.queries(allowDuplicateIds=True))
            self.assertEqual(Read('query1', '-TCTAGG---', '!ZZZZZZ!!!'), read1)
            self.assertEqual(Read('query1', '--TC------', '!!ZZ!!!!!!'),
                             read2)
    def testDuplicateIdDisambiguation(self):
        """
        Duplicate ids must be disambiguated if allowDuplicateIds is not given.
        """
        # Repeated ids get a '/N' suffix with a per-query counter.
        data = '\n'.join([
            '@SQ SN:ref1 LN:10',
            'query1 0 ref1 2 60 2=2X2M * 0 0 TCTAGG ZZZZZZ',
            'query1 0 ref1 3 60 2= * 0 0 TC ZZ',
            'query1 0 ref1 5 60 2S2= * 0 0 TCGA ZZZZ',
        ]).replace(' ', '\t')
        with dataFile(data) as filename:
            ps = PaddedSAM(SAMFilter(filename))
            (read1, read2, read3) = list(ps.queries())
            self.assertEqual(Read('query1', '-TCTAGG---', '!ZZZZZZ!!!'), read1)
            self.assertEqual(Read('query1/1', '--TC------', '!!ZZ!!!!!!'),
                             read2)
            self.assertEqual(Read('query1/2', '--TCGA----', '!!ZZZZ!!!!'),
                             read3)
    def testKeepQualityControlFailures(self):
        """
        Keeping matches flagged as quality control failures must give the
        expected result.
        """
        # query2 has SAM flag 512 (QC fail) but is retained because
        # keepQCFailures=True.
        data = '\n'.join([
            '@SQ SN:ref1 LN:10',
            'query1 0 ref1 2 60 2=2X2M * 0 0 TCTAGG ZZZZZZ',
            'query2 512 ref1 4 60 2= * 0 0 TC ZZ',
        ]).replace(' ', '\t')
        with dataFile(data) as filename:
            ps = PaddedSAM(SAMFilter(filename, keepQCFailures=True))
            (read1, read2) = list(ps.queries())
            self.assertEqual(Read('query1', '-TCTAGG---', '!ZZZZZZ!!!'), read1)
            self.assertEqual(Read('query2', '---TC-----', '!!!ZZ!!!!!'), read2)
    def testSecondaryWithNoPreviousSequence(self):
        """
        A secondary match with a '*' seq that is not preceded by a query with
        a sequence must result in a ValueError being raised.
        """
        # A '*' SEQ on a secondary alignment normally reuses the previous
        # alignment's sequence; with no previous one this is an error.
        data = '\n'.join([
            '@SQ SN:ref1 LN:10',
            'query 256 ref1 3 60 4M * 0 0 * *',
        ]).replace(' ', '\t')
        with dataFile(data) as filename:
            ps = PaddedSAM(SAMFilter(filename))
            error = ('^pysam produced an alignment \\(number 1\\) with no '
                     'query sequence without previously giving an alignment '
                     'with a sequence\\.$')
            queries = ps.queries()
            assertRaisesRegex(self, InvalidSAM, error, list, queries)
    def testSecondaryWithNoSequence(self):
        """
        A secondary match with a '*' seq must result in the sequence from the
        previous query being used.
        """
        # The secondary alignment of query2 reuses query2's sequence (the
        # most recent one with a SEQ), not query1's.
        data = '\n'.join([
            '@SQ SN:ref1 LN:10',
            'query1 0 ref1 2 60 3M * 0 0 TCT ZZZ',
            'query2 0 ref1 2 60 4M * 0 0 TCTA ZZZZ',
            'query2 256 ref1 6 60 4M * 0 0 * *',
        ]).replace(' ', '\t')
        with dataFile(data) as filename:
            ps = PaddedSAM(SAMFilter(filename))
            (read1, read2, read3) = list(ps.queries())
            self.assertEqual(Read('query1', '-TCT------', '!ZZZ!!!!!!'), read1)
            self.assertEqual(Read('query2', '-TCTA-----', '!ZZZZ!!!!!'), read2)
            self.assertEqual(Read('query2/1', '-----TCTA-', '!!!!!ZZZZ!'),
                             read3)
    def testSupplementaryWithNoPreviousSequence(self):
        """
        A supplementary match with a '*' seq that is not preceded by a query
        with a sequence must result in a ValueError being raised.
        """
        # Same as the secondary-alignment case, but with flag 2048.
        data = '\n'.join([
            '@SQ SN:ref1 LN:10',
            'query 2048 ref1 3 60 4M * 0 0 * *',
        ]).replace(' ', '\t')
        with dataFile(data) as filename:
            ps = PaddedSAM(SAMFilter(filename))
            error = ('^pysam produced an alignment \\(number 1\\) with no '
                     'query sequence without previously giving an alignment '
                     'with a sequence\\.$')
            queries = ps.queries()
            assertRaisesRegex(self, InvalidSAM, error, list, queries)
    def testSupplementaryWithNoSequence(self):
        """
        A supplementary match with a '*' seq must result in the sequence from
        the previous query being used.
        """
        # The supplementary alignment of query2 reuses the most recent
        # sequence seen (query2's), mirroring the secondary-match test.
        data = '\n'.join([
            '@SQ SN:ref1 LN:10',
            'query1 0 ref1 2 60 3M * 0 0 TCT ZZZ',
            'query2 0 ref1 2 60 4M * 0 0 TCTA ZZZZ',
            'query2 2048 ref1 6 60 4M * 0 0 * *',
        ]).replace(' ', '\t')
        with dataFile(data) as filename:
            ps = PaddedSAM(SAMFilter(filename))
            (read1, read2, read3) = list(ps.queries())
            self.assertEqual(Read('query1', '-TCT------', '!ZZZ!!!!!!'), read1)
            self.assertEqual(Read('query2', '-TCTA-----', '!ZZZZ!!!!!'), read2)
            self.assertEqual(Read('query2/1', '-----TCTA-', '!!!!!ZZZZ!'),
                             read3)
    def testNotSecondaryAndNotSupplementaryWithNoSequence(self):
        """
        An alignment with a '*' seq that is not secondary or supplementary
        must result in a ValueError being raised.
        """
        # A primary alignment (flag 0) may not omit its sequence.
        data = '\n'.join([
            '@SQ SN:ref1 LN:10',
            'query 0 ref1 3 60 4M * 0 0 * *',
        ]).replace(' ', '\t')
        with dataFile(data) as filename:
            ps = PaddedSAM(SAMFilter(filename))
            error = ('^pysam produced an alignment \\(number 1\\) with no '
                     'query sequence without previously giving an alignment '
                     'with a sequence\\.$')
            queries = ps.queries()
            assertRaisesRegex(self, InvalidSAM, error, list, queries)
    def testAlsoYieldAlignments(self):
        """
        A request for queries with their pysam alignments should have the
        expected result.
        """
        data = '\n'.join([
            '@SQ SN:ref1 LN:10',
            'query1 0 ref1 2 60 2=2X2M * 0 0 TCTAGG 123456',
            'query2 0 ref1 2 60 2= * 0 0 TC 78',
        ]).replace(' ', '\t')
        with dataFile(data) as filename:
            ps = PaddedSAM(SAMFilter(filename))
            (read1, read2) = list(ps.queries(addAlignment=True))
            self.assertEqual(Read('query1', '-TCTAGG---', '!123456!!!'), read1)
            self.assertEqual('TCTAGG', read1.alignment.query_sequence)
            # pysam stores qualities as integers; add 33 to recover the
            # original Phred+33 ASCII characters.
            self.assertEqual('123456', ''.join(
                map(lambda x: chr(x + 33), read1.alignment.query_qualities)))
            self.assertEqual(Read('query2', '-TC-------', '!78!!!!!!!'), read2)
            self.assertEqual('TC', read2.alignment.query_sequence)
            self.assertEqual('78', ''.join(
                map(lambda x: chr(x + 33), read2.alignment.query_qualities)))
    def testHardClippingInCIGARButQueryNotHardClipped(self):
        """
        As documented in https://github.com/acorg/dark-matter/issues/630 we
        must deal correctly with a case in which the CIGAR string says a
        query is hard-clipped but the query sequence in the SAM file
        actually isn't. This can be due to a prior alignment with a soft clip,
        in which case the full query sequence has to be given before the
        secondary alignment with the hard clip.
        """
        # The secondary alignments carry '*' sequences; the hard clip in
        # their CIGAR must be applied to the remembered full sequence.
        data = '\n'.join([
            '@SQ SN:Chimp-D00220 LN:8',
            '@SQ SN:D-AM494716 LN:8',
            '@SQ SN:D-XXX LN:8',
            '@SQ SN:Chimp-YYY LN:8',
            'query1 0 Chimp-D00220 1 0 3S5M * 0 0 TTTTGGTT 12345678',
            'query1 256 D-AM494716 1 0 3H5M * 0 0 * *',
            'query1 256 D-XXX 1 0 5H3M * 0 0 * *',
            'query1 0 Chimp-YYY 1 0 8M * 0 0 * *',
        ]).replace(' ', '\t')
        with dataFile(data) as filename:
            ps = PaddedSAM(SAMFilter(filename))
            (read1, read2, read3, read4) = list(ps.queries(addAlignment=True))
            self.assertEqual(Read('query1', 'TGGTT---', '45678!!!'), read1)
            self.assertEqual('TTTTGGTT', read1.alignment.query_sequence)
            self.assertEqual(Read('query1/1', 'TGGTT---', '45678!!!'), read2)
            self.assertEqual('TGGTT', read2.alignment.query_sequence)
            self.assertEqual(Read('query1/2', 'GTT-----', '678!!!!!'), read3)
            self.assertEqual('GTT', read3.alignment.query_sequence)
            self.assertEqual(Read('query1/3', 'TTTTGGTT', '12345678'), read4)
            self.assertEqual('TTTTGGTT', read4.alignment.query_sequence)
    def testSecondaryAlignmentHasQuery(self):
        """
        If the first alignment of a query is against a reference that is not
        wanted, a subsequent secondary alignment (SAM flag = 256) must have
        the original query and quality strings (even though these are only
        present in the SAM as * characters and the query is None when it comes
        back from pysam).
        """
        data = '\n'.join([
            '@SQ SN:ref1 LN:10',
            '@SQ SN:ref2 LN:10',
            'query1 0 ref1 2 60 2=2X2M * 0 0 TCTAGG ZZZZZZ',
            'query1 256 ref2 2 60 2=2X2M * 0 0 * *',
        ]).replace(' ', '\t')
        with dataFile(data) as filename:
            ps = PaddedSAM(SAMFilter(filename))
            (read1, read2) = list(ps.queries(addAlignment=True))
            self.assertEqual(Read('query1', '-TCTAGG---', '!ZZZZZZ!!!'), read1)
            self.assertEqual('ref1', read1.alignment.reference_name)
            self.assertEqual(Read('query1/1', '-TCTAGG---', '!ZZZZZZ!!!'),
                             read2)
            self.assertEqual('ref2', read2.alignment.reference_name)
    def testSupplementaryAlignmentHasQuery(self):
        """
        If the first alignment of a query is against a reference that is not
        wanted, a subsequent supplementary alignment (SAM flag = 2048) must
        have the original query and quality strings (even though these are only
        present in the SAM as * characters and the query is None when it comes
        back from pysam).
        """
        # Same scenario as testSecondaryAlignmentHasQuery but with the
        # supplementary (2048) flag instead of secondary (256).
        data = '\n'.join([
            '@SQ SN:ref1 LN:10',
            '@SQ SN:ref2 LN:10',
            'query1 0 ref1 2 60 2=2X2M * 0 0 TCTAGG ZZZZZZ',
            'query1 2048 ref2 2 60 2=2X2M * 0 0 * *',
        ]).replace(' ', '\t')
        with dataFile(data) as filename:
            ps = PaddedSAM(SAMFilter(filename))
            (read1, read2) = list(ps.queries(addAlignment=True))
            self.assertEqual(Read('query1', '-TCTAGG---', '!ZZZZZZ!!!'), read1)
            self.assertEqual('ref1', read1.alignment.reference_name)
            self.assertEqual(Read('query1/1', '-TCTAGG---', '!ZZZZZZ!!!'),
                             read2)
            self.assertEqual('ref2', read2.alignment.reference_name)
class TestSamReferencesToStr(TestCase):
    """
    Test the samReferencesToStr function.
    """
    def testSimple(self):
        """
        The referencesToStr method must return the expected string.
        """
        # Two reference declarations, tab-separated as in a real SAM header.
        header = '@SQ\tSN:id1\tLN:90\n@SQ\tSN:id2\tLN:91'
        with dataFile(header) as filename:
            expected = 'id1 (length 90)\nid2 (length 91)'
            self.assertEqual(expected, samReferencesToStr(filename))
    def testIndent(self):
        """
        The referencesToStr method must return the expected string when
        passed an indent.
        """
        header = '@SQ\tSN:id1\tLN:90\n@SQ\tSN:id2\tLN:91'
        with dataFile(header) as filename:
            # Each output line must be prefixed by indent (2) spaces.
            expected = '  id1 (length 90)\n  id2 (length 91)'
            self.assertEqual(expected,
                             samReferencesToStr(filename, indent=2))
class TestHardClip(TestCase):
    """
    Test the _hardClip function.

    _hardClip takes a sequence, a quality string, and a tuple of pysam
    CIGAR (operation, length) pairs, and returns a (sequence, quality,
    clippedHere) 3-tuple, where clippedHere indicates whether clipping was
    performed by this call (False when the input was already clipped).
    """
    def testHardClipInMiddle(self):
        """
        If hard clipping is given as an operation not at the beginning or end
        of the sequence, a ValueError must be raised.
        """
        error = (
            '^Invalid CIGAR tuples .* contains hard-clipping operation '
            'that is neither at the start nor the end of the sequence\\.$')
        assertRaisesRegex(
            self, ValueError, error, _hardClip, 'CGT', '123',
            ((CMATCH, 1), (CHARD_CLIP, 1), (CMATCH, 1),))
    def testThreeHardClips(self):
        """
        If hard clipping is specified more than twice, a ValueError must be
        raised.
        """
        error = ('^Invalid CIGAR tuples .* specifies hard-clipping 3 times '
                 '\\(2 is the maximum\\).$')
        assertRaisesRegex(
            self, ValueError, error, _hardClip, 'CGT', '123',
            ((CHARD_CLIP, 1), (CHARD_CLIP, 1), (CHARD_CLIP, 1),))
    def testNoClip(self):
        """
        If no hard clipping is indicated, the function must return the
        original sequence.
        """
        self.assertEqual(('CGT', '123', False),
                         _hardClip('CGT', '123', ((CMATCH, 3),)))
    def testClipLeft(self):
        """
        If hard clipping on the left is indicated, and has not been done,
        the function must return the expected sequence.
        """
        # Sequence length (6) exceeds CIGAR-consumed length (3), so the
        # clip has not yet been applied: 3 bases are removed on the left.
        self.assertEqual(
            ('CGT', '456', True),
            _hardClip('CAACGT', '123456', ((CHARD_CLIP, 3), (CMATCH, 3),)))
    def testClipRight(self):
        """
        If hard clipping on the right is indicated, and has not been done,
        the function must return the expected sequence.
        """
        self.assertEqual(
            ('CA', '12', True),
            _hardClip('CAACGT', '123456', ((CMATCH, 2), (CHARD_CLIP, 4),)))
    def testClipBoth(self):
        """
        If hard clipping on the left and right is indicated, and has not been
        done, the function must return the expected sequence.
        """
        self.assertEqual(
            ('AA', '23', True),
            _hardClip('CAACGT', '123456',
                      ((CHARD_CLIP, 1), (CMATCH, 2), (CHARD_CLIP, 3),)))
    def testClipLeftAlreadyDone(self):
        """
        If hard clipping on the left is indicated, and has already been done,
        the function must return the expected sequence.
        """
        # Sequence length (3) already equals the matched length, so the
        # function must detect the clip was applied and change nothing.
        self.assertEqual(
            ('CGT', '123', False),
            _hardClip('CGT', '123', ((CHARD_CLIP, 3), (CMATCH, 3),)))
    def testClipRightAlreadyDone(self):
        """
        If hard clipping on the right is indicated, and has already been done,
        the function must return the expected sequence.
        """
        self.assertEqual(
            ('CA', '12', False),
            _hardClip('CA', '12', ((CMATCH, 2), (CHARD_CLIP, 4),)))
    def testClipBothAlreadyDone(self):
        """
        If hard clipping on the left and right is indicated, and has already
        been done, the function must return the expected sequence.
        """
        self.assertEqual(
            ('AA', '12', False),
            _hardClip('AA', '12',
                      ((CHARD_CLIP, 1), (CMATCH, 2), (CHARD_CLIP, 3),)))
class TestDistanceMatrix(TestCase):
"""
Test the DistanceMatrix class.
"""
def testEmpty(self):
"""
The similarity (scores) matrix must be empty after intitialization.
"""
dm = DistanceMatrix()
self.assertEqual({}, dm.scores)
    def testNoMatchesHasNoMatchedReferenceIds(self):
        """
        If a SAM file with no query records is added, there should be no
        matched reference ids (even though there is a reference in the SAM
        header, it has no matches).
        """
        dm = DistanceMatrix()
        # Header-only SAM file: one @SQ line, no alignments.
        data = '\n'.join([
            '@SQ SN:id1 LN:90',
        ]).replace(' ', '\t')
        with dataFile(data) as filename:
            dm.addFile(filename)
            self.assertEqual({}, dm.scores)
    def testAddEmptySAMNoScoreTag(self):
        """
        If a SAM file with no query records is added and no score tag is
        passed, the scores matrix must be empty and the distance between
        two (non-existent) references must be 1.0.
        """
        dm = DistanceMatrix()
        data = '\n'.join([
            '@SQ SN:id1 LN:90',
        ]).replace(' ', '\t')
        with dataFile(data) as filename:
            dm.addFile(filename)
            self.assertEqual({}, dm.scores)
        # Unknown references share no queries, so both distances are maximal.
        self.assertEqual(1.0, dm.jaccardDistance('ref1', 'ref2'))
        self.assertEqual(1.0, dm.soergelDistance('ref1', 'ref2'))
    def testAddEmptySAMWithScoreTag(self):
        """
        If a SAM file with no query records is added and a score tag is passed,
        the similarity (scores) matrix must be empty.
        """
        data = '\n'.join([
            '@SQ SN:id1 LN:90',
        ]).replace(' ', '\t')
        dm = DistanceMatrix()
        with dataFile(data) as filename:
            dm.addFile(filename, scoreTag='AS')
            self.assertEqual({}, dm.scores)
        # Unknown references share no queries, so both distances are maximal.
        self.assertEqual(1.0, dm.jaccardDistance('ref1', 'ref2'))
        self.assertEqual(1.0, dm.soergelDistance('ref1', 'ref2'))
    def testOneQueryMappedNoScoreTag(self):
        """
        If one query is mapped to one reference, the scores matrix must have
        a 1.0 score if no score tag is passed.
        """
        # Without a score tag, every match contributes a fixed score of 1.0.
        data = '\n'.join([
            '@SQ SN:ref1 LN:10',
            'query1 0 ref1 2 60 2=2X2M * 0 0 TCTAGG 123456',
        ]).replace(' ', '\t')
        dm = DistanceMatrix()
        with dataFile(data) as filename:
            dm.addFile(filename)
        self.assertEqual(
            {
                'ref1': {
                    'query1': 1.0,
                },
            },
            dm.scores)
    def testOneQueryMappedWithScoreTag(self):
        """
        If one query is mapped to one reference, the scores matrix must have
        the correct score if a score tag is passed.
        """
        # The AS:i:77 optional field supplies the (integer) alignment score.
        data = '\n'.join([
            '@SQ SN:ref1 LN:10',
            'query1 0 ref1 2 60 2=2X2M * 0 0 TCTAGG 123456 AS:i:77',
        ]).replace(' ', '\t')
        dm = DistanceMatrix()
        with dataFile(data) as filename:
            dm.addFile(filename, scoreTag='AS')
        self.assertEqual(
            {
                'ref1': {
                    'query1': 77.0,
                },
            },
            dm.scores)
        self.assertEqual(77.0, dm.score('ref1', 'query1'))
    def testOneQueryMappedWithScoreTagFloat(self):
        """
        If one query is mapped to one reference, the scores matrix must have
        the correct score if a score tag is passed and the score is of type
        float (the AS:f:77.5 in the SAM record).
        """
        data = '\n'.join([
            '@SQ SN:ref1 LN:10',
            'query1 0 ref1 2 60 2=2X2M * 0 0 TCTAGG 123456 AS:f:77.5',
        ]).replace(' ', '\t')
        dm = DistanceMatrix()
        with dataFile(data) as filename:
            dm.addFile(filename, scoreTag='AS')
        self.assertEqual(
            {
                'ref1': {
                    'query1': 77.5,
                },
            },
            dm.scores)
        self.assertEqual(77.5, dm.score('ref1', 'query1'))
    def testNonExistentQueryNotMapped(self):
        """
        If a query (not even existing in this case) is not mapped to the
        reference, the score between the two must be zero.
        """
        # NOTE(review): this test is currently identical to
        # testNonExistentQuery below — consider mapping query1 to another
        # reference here to make the two cases distinct.
        data = '\n'.join([
            '@SQ SN:ref1 LN:10',
        ]).replace(' ', '\t')
        dm = DistanceMatrix()
        with dataFile(data) as filename:
            dm.addFile(filename, scoreTag='AS')
        self.assertEqual(0.0, dm.score('ref1', 'query1'))
    def testNonExistentQuery(self):
        """
        The score for a non-existent query must be zero.
        """
        data = '\n'.join([
            '@SQ SN:ref1 LN:10',
        ]).replace(' ', '\t')
        dm = DistanceMatrix()
        with dataFile(data) as filename:
            dm.addFile(filename, scoreTag='AS')
        self.assertEqual(0.0, dm.score('ref1', 'query1'))
    def testQueryNotMapped(self):
        """
        If a query did not map to a reference, the score between the two must
        be zero.
        """
        # query1 maps only to ref2, so its score against ref1 is zero.
        data = '\n'.join([
            '@SQ SN:ref1 LN:10',
            '@SQ SN:ref2 LN:10',
            'query1 0 ref2 2 60 2=2X2M * 0 0 TCTAGG 123456 AS:i:77',
        ]).replace(' ', '\t')
        dm = DistanceMatrix()
        with dataFile(data) as filename:
            dm.addFile(filename, scoreTag='AS')
        self.assertEqual(
            {
                'ref2': {
                    'query1': 77,
                },
            },
            dm.scores)
        self.assertEqual(0.0, dm.score('ref1', 'query1'))
    def testJaccardDistanceToSelf(self):
        """
        The Jaccard distance between a reference and itself must be zero.
        """
        data = '\n'.join([
            '@SQ SN:ref1 LN:10',
            'query1 0 ref1 2 60 2=2X2M * 0 0 TCTAGG 123456',
        ]).replace(' ', '\t')
        dm = DistanceMatrix()
        with dataFile(data) as filename:
            dm.addFile(filename)
        self.assertEqual(0.0, dm.jaccardDistance('ref1', 'ref1'))
def testJaccardDistanceToIdentical(self):
"""
The Jaccard distance between a reference and another with the same set
of matching queries must be zero.
"""
data = '\n'.join([
'@SQ SN:ref1 LN:10',
'@SQ SN:ref2 LN:10',
'query1 0 ref1 2 60 2=2X2M * 0 0 TCTAGG 123456',
'query1 0 ref2 2 60 2=2X2M * 0 0 TCTAGG 123456',
'query2 0 ref1 2 60 2=2X2M * 0 0 TCTAGG 123456',
'query2 0 ref2 2 60 2=2X2M * 0 0 TCTAGG 123456',
]).replace(' ', '\t')
dm = DistanceMatrix()
with dataFile(data) as filename:
dm.addFile(filename)
self.assertEqual(0.0, dm.jaccardDistance('ref1', 'ref1'))
    def testJaccardDistanceWithNoQueriesInCommon(self):
        """
        The Jaccard distance between two references that have no matching
        queries in common must be 1.0.
        """
        # query1 matches only ref1, query2 only ref2: empty intersection.
        data = '\n'.join([
            '@SQ SN:ref1 LN:10',
            '@SQ SN:ref2 LN:10',
            'query1 0 ref1 2 60 2=2X2M * 0 0 TCTAGG 123456',
            'query2 0 ref2 2 60 2=2X2M * 0 0 TCTAGG 123456',
        ]).replace(' ', '\t')
        dm = DistanceMatrix()
        with dataFile(data) as filename:
            dm.addFile(filename)
        self.assertEqual(1.0, dm.jaccardDistance('ref1', 'ref2'))
    def testJaccardDistanceWithOneQueryInCommon(self):
        """
        The Jaccard similarity between two references with one query in common
        is one over the number of queries that match them in total (four),
        i.e., 1/4 and the Jaccard distance is 1.0 minus this, or 3/4.
        """
        # Intersection {query1}; union {query1..query4}: distance 1 - 1/4.
        data = '\n'.join([
            '@SQ SN:ref1 LN:10',
            '@SQ SN:ref2 LN:10',
            'query1 0 ref1 2 60 2=2X2M * 0 0 TCTAGG 123456',
            'query1 0 ref2 2 60 2=2X2M * 0 0 TCTAGG 123456',
            'query2 0 ref2 2 60 2=2X2M * 0 0 TCTAGG 123456',
            'query3 0 ref2 2 60 2=2X2M * 0 0 TCTAGG 123456',
            'query4 0 ref2 2 60 2=2X2M * 0 0 TCTAGG 123456',
        ]).replace(' ', '\t')
        dm = DistanceMatrix()
        with dataFile(data) as filename:
            dm.addFile(filename)
        self.assertEqual(0.75, dm.jaccardDistance('ref1', 'ref2'))
    def testJaccardDistanceWithTwoQueriesInCommon(self):
        """
        The Jaccard similarity between two references with two queries in
        common is two over the number of queries that match them in total
        (five), i.e., 2/5 and the Jaccard distance is 1.0 minus this, or 3/5.
        """
        # Intersection {query1, query5}; union of five queries in total.
        data = '\n'.join([
            '@SQ SN:ref1 LN:10',
            '@SQ SN:ref2 LN:10',
            'query1 0 ref1 2 60 2=2X2M * 0 0 TCTAGG 123456',
            'query1 0 ref2 2 60 2=2X2M * 0 0 TCTAGG 123456',
            'query2 0 ref2 2 60 2=2X2M * 0 0 TCTAGG 123456',
            'query3 0 ref2 2 60 2=2X2M * 0 0 TCTAGG 123456',
            'query4 0 ref2 2 60 2=2X2M * 0 0 TCTAGG 123456',
            'query5 0 ref1 2 60 2=2X2M * 0 0 TCTAGG 123456',
            'query5 0 ref2 2 60 2=2X2M * 0 0 TCTAGG 123456',
        ]).replace(' ', '\t')
        dm = DistanceMatrix()
        with dataFile(data) as filename:
            dm.addFile(filename)
        self.assertEqual(0.6, dm.jaccardDistance('ref1', 'ref2'))
    def testSoergelDistanceWithNegativeScore(self):
        """
        Soergel distance cannot be computed if a negative score is present.
        A ValueError must be raised in such cases.
        """
        data = '\n'.join([
            '@SQ SN:ref1 LN:10',
            'query1 0 ref1 2 60 2=2X2M * 0 0 TCTAGG 123456 AS:i:-50',
        ]).replace(' ', '\t')
        dm = DistanceMatrix()
        with dataFile(data) as filename:
            # The error must be raised by addFile itself, at parse time.
            error = (fr"^Alignment 1 in {filename!r} has tag 'AS' with "
                     fr"negative value \(-50\)\.$")
            self.assertRaisesRegex(ValueError, error, dm.addFile, filename,
                                   scoreTag='AS')
    def testSoergelDistanceWithOneQueryInCommonNoScoreTag(self):
        """
        The Soergel similarity between two references with one query in common
        if no score tag was given is one over the number of queries that match
        them in total (four), i.e., 1/4 and the distance is 1.0 minus this, or
        3/4.
        """
        # With no score tag all scores are 1.0, so Soergel reduces to Jaccard.
        data = '\n'.join([
            '@SQ SN:ref1 LN:10',
            '@SQ SN:ref2 LN:10',
            'query1 0 ref1 2 60 2=2X2M * 0 0 TCTAGG 123456',
            'query1 0 ref2 2 60 2=2X2M * 0 0 TCTAGG 123456',
            'query2 0 ref2 2 60 2=2X2M * 0 0 TCTAGG 123456',
            'query3 0 ref2 2 60 2=2X2M * 0 0 TCTAGG 123456',
            'query4 0 ref2 2 60 2=2X2M * 0 0 TCTAGG 123456',
        ]).replace(' ', '\t')
        dm = DistanceMatrix()
        with dataFile(data) as filename:
            dm.addFile(filename)
        self.assertEqual(0.75, dm.soergelDistance('ref1', 'ref2'))
    def testSoergelDistanceWithNoQueryInCommon(self):
        """
        The Soergel similarity between two references with no queries in common
        when using a score tag given is the sum of the minimum scores (all are
        zero) over the sum of the maximum scores (50 + 10 + 60 + 30 = 150),
        i.e., zero, and the distance is 1.0 minus this, or 1.0.
        """
        data = '\n'.join([
            '@SQ SN:ref1 LN:10',
            '@SQ SN:ref2 LN:10',
            'query1 0 ref2 2 60 2=2X2M * 0 0 TCTAGG 123456 AS:i:50',
            'query2 0 ref2 2 60 2=2X2M * 0 0 TCTAGG 123456 AS:i:10',
            'query3 0 ref2 2 60 2=2X2M * 0 0 TCTAGG 123456 AS:i:60',
            'query4 0 ref1 2 60 2=2X2M * 0 0 TCTAGG 123456 AS:i:30',
        ]).replace(' ', '\t')
        dm = DistanceMatrix()
        with dataFile(data) as filename:
            dm.addFile(filename, scoreTag='AS')
        self.assertEqual(1.0, dm.soergelDistance('ref1', 'ref2'))
    def testSoergelDistanceToIdentical(self):
        """
        The Soergel similarity between two references with two queries in
        common with the same scores must be zero.
        """
        # Identical query sets and identical scores: min == max per query,
        # similarity 1.0, distance 0.0.
        data = '\n'.join([
            '@SQ SN:ref1 LN:10',
            '@SQ SN:ref2 LN:10',
            'query1 0 ref1 2 60 2=2X2M * 0 0 TCTAGG 123456 AS:i:10',
            'query1 0 ref2 2 60 2=2X2M * 0 0 TCTAGG 123456 AS:i:10',
            'query2 0 ref1 2 60 2=2X2M * 0 0 TCTAGG 123456 AS:i:20',
            'query2 0 ref2 2 60 2=2X2M * 0 0 TCTAGG 123456 AS:i:20',
        ]).replace(' ', '\t')
        dm = DistanceMatrix()
        with dataFile(data) as filename:
            dm.addFile(filename, scoreTag='AS')
        self.assertEqual(0.0, dm.soergelDistance('ref1', 'ref2'))
    def testSoergelDistanceSameQueriesDifferentScores(self):
        """
        The Soergel similarity between two references with two queries in
        common but with different scores is the sum of the minimum scores
        (10 + 15 = 25) over the sum of the maximum scores (30 + 70 = 100),
        or 1/4, and the distance is 1.0 minus this, or 3/4. The unrelated
        query3 and ref3 are ignored.
        """
        data = '\n'.join([
            '@SQ SN:ref1 LN:10',
            '@SQ SN:ref2 LN:10',
            '@SQ SN:ref3 LN:10',
            'query1 0 ref1 2 60 2=2X2M * 0 0 TCTAGG 123456 AS:i:10',
            'query1 0 ref2 2 60 2=2X2M * 0 0 TCTAGG 123456 AS:i:30',
            'query2 0 ref1 2 60 2=2X2M * 0 0 TCTAGG 123456 AS:i:15',
            'query2 0 ref2 2 60 2=2X2M * 0 0 TCTAGG 123456 AS:i:70',
            'query3 0 ref3 2 60 2=2X2M * 0 0 TCTAGG 123456 AS:i:70',
        ]).replace(' ', '\t')
        dm = DistanceMatrix()
        with dataFile(data) as filename:
            dm.addFile(filename, scoreTag='AS')
        self.assertEqual(0.75, dm.soergelDistance('ref1', 'ref2'))
    def testSoergelDistanceWithOneQueryInCommon(self):
        """
        The Soergel similarity between two references with one query in common
        when using a score tag given is the sum of the minimum scores (30) over
        the sum of the maximum scores (50 + 10 + 60 = 120), or 1/4, and the
        distance is 1.0 minus this, or 3/4.
        """
        data = '\n'.join([
            '@SQ SN:ref1 LN:10',
            '@SQ SN:ref2 LN:10',
            'query1 0 ref1 2 60 2=2X2M * 0 0 TCTAGG 123456 AS:i:30',
            'query1 0 ref2 2 60 2=2X2M * 0 0 TCTAGG 123456 AS:i:50',
            'query2 0 ref2 2 60 2=2X2M * 0 0 TCTAGG 123456 AS:i:10',
            'query3 0 ref2 2 60 2=2X2M * 0 0 TCTAGG 123456 AS:i:60',
        ]).replace(' ', '\t')
        dm = DistanceMatrix()
        with dataFile(data) as filename:
            dm.addFile(filename, scoreTag='AS')
        self.assertEqual(0.75, dm.soergelDistance('ref1', 'ref2'))
def testSoergelDistanceWithTwoQueriesInCommon(self):
"""
The Soergel similarity between two references with two queries in
common when using a score tag given is the sum of the minimum scores
(10 + 20) over the sum of the maximum scores (50 + 10 + 60 = 120),
or 1/4, and the distance is 1.0 minus this, or 3/4.
"""
data = '\n'.join([
'@SQ SN:ref1 LN:10',
'@SQ SN:ref2 LN:10',
'query1 0 ref1 2 60 2=2X2M * 0 0 TCTAGG 123456 AS:i:10',
'query1 0 ref2 2 60 2=2X2M * 0 0 TCTAGG 123456 AS:i:50',
'query2 0 ref2 2 60 2=2X2M * 0 0 TCTAGG 123456 AS:i:10',
'query3 0 ref1 2 60 2=2X2M * 0 0 TCTAGG 123456 AS:i:20',
'query3 0 ref2 2 60 2=2X2M * 0 0 TCTAGG 123456 AS:i:60',
]).replace(' ', '\t')
dm = DistanceMatrix()
with dataFile(data) as filename:
dm.addFile(filename, scoreTag='AS')
self.assertEqual(0.75, dm.soergelDistance('ref1', 'ref2'))
def testSave(self):
"""
The save method must write out the correct JSON.
"""
data = '\n'.join([
'@SQ SN:ref1 LN:10',
'@SQ SN:ref2 LN:10',
'query1 0 ref1 2 60 2=2X2M * 0 0 TCTAGG 123456 AS:f:10.0',
'query1 0 ref2 2 60 2=2X2M * 0 0 TCTAGG 123456 AS:f:11.0',
'query2 0 ref2 2 60 2=2X2M * 0 0 TCTAGG 123456 AS:f:12.0',
]).replace(' ', '\t')
dm = DistanceMatrix()
with dataFile(data) as filename:
dm.addFile(filename, scoreTag='AS')
fp = StringIO()
dm.save(fp)
self.assertEqual(
{
'ref1': {
'query1': 10.0,
},
'ref2': {
'query1': 11.0,
'query2': 12.0,
},
},
loads(fp.getvalue()))
def testLoad(self):
"""
The load method must read the JSON and store it correctly.
"""
data = {
'ref1': {
'query1': 10.0,
},
'ref2': {
'query1': 11.0,
'query2': 12.0,
},
}
dm = DistanceMatrix()
fp = StringIO(dumps(data))
dm.load(fp)
self.assertEqual(data, dm.scores)
def testJaccardMatrixWithOneQueryInCommon(self):
"""
The Jaccard similarity between two references is the number of reads in
common (1) over the number of reads in the union (4) or 0.25. Check
that the distance and similarity matrice have the right values.
"""
data = '\n'.join([
'@SQ SN:ref1 LN:10',
'@SQ SN:ref2 LN:10',
'query1 0 ref1 2 60 2=2X2M * 0 0 TCTAGG 123456 AS:i:30',
'query1 0 ref2 2 60 2=2X2M * 0 0 TCTAGG 123456 AS:i:50',
'query2 0 ref2 2 60 2=2X2M * 0 0 TCTAGG 123456 AS:i:10',
'query3 0 ref2 2 60 2=2X2M * 0 0 TCTAGG 123456 AS:i:60',
'query4 0 ref2 2 60 2=2X2M * 0 0 TCTAGG 123456 AS:i:60',
]).replace(' ', '\t')
dm = DistanceMatrix()
with dataFile(data) as filename:
dm.addFile(filename, scoreTag='AS')
# Test distance.
matrix = dm.matrix(metric='jaccard')
self.assertTrue(np.array_equal(
[
[0.00, 0.75],
[0.75, 0.00],
],
matrix))
# Test similarity.
matrix = dm.matrix(metric='jaccard', similarity=True)
self.assertTrue(np.array_equal(
[
[1.00, 0.25],
[0.25, 1.00],
],
matrix))
def testJaccardMatrixWithOneQueryInCommonReturnDict(self):
"""
The Jaccard similarity between two references is the number of reads in
common (1) over the number of reads in the union (4) or 0.25. Check
that the distance and similarity matrice have the right values, asking
for a dictionary to be returned.
"""
data = '\n'.join([
'@SQ SN:ref1 LN:10',
'@SQ SN:ref2 LN:10',
'query1 0 ref1 2 60 2=2X2M * 0 0 TCTAGG 123456 AS:i:30',
'query1 0 ref2 2 60 2=2X2M * 0 0 TCTAGG 123456 AS:i:50',
'query2 0 ref2 2 60 2=2X2M * 0 0 TCTAGG 123456 AS:i:10',
'query3 0 ref2 2 60 2=2X2M * 0 0 TCTAGG 123456 AS:i:60',
'query4 0 ref2 2 60 2=2X2M * 0 0 TCTAGG 123456 AS:i:60',
]).replace(' ', '\t')
dm = DistanceMatrix()
with dataFile(data) as filename:
dm.addFile(filename, scoreTag='AS')
# Test distance.
matrix = dm.matrix(metric='jaccard', returnDict=True)
self.assertEqual(
{
'ref1': {
'ref1': 0.00,
'ref2': 0.75,
},
'ref2': {
'ref1': 0.75,
'ref2': 0.00,
},
},
matrix)
# Test similarity.
matrix = dm.matrix(metric='jaccard', similarity=True, returnDict=True)
self.assertEqual(
{
'ref1': {
'ref1': 1.00,
'ref2': 0.25,
},
'ref2': {
'ref1': 0.25,
'ref2': 1.00,
},
},
matrix)
def testSoergelMatrixWithOneQueryInCommon(self):
"""
The Soergel similarity between two references with one query in common
when using a score tag given is the sum of the minimum scores (30) over
the sum of the maximum scores (50 + 10 + 60 = 120), or 1/4, and the
distance is 1.0 minus this, or 3/4. Check that the distance and
similarity matrice have the right values.
"""
data = '\n'.join([
'@SQ SN:ref1 LN:10',
'@SQ SN:ref2 LN:10',
'query1 0 ref1 2 60 2=2X2M * 0 0 TCTAGG 123456 AS:i:30',
'query1 0 ref2 2 60 2=2X2M * 0 0 TCTAGG 123456 AS:i:50',
'query2 0 ref2 2 60 2=2X2M * 0 0 TCTAGG 123456 AS:i:10',
'query3 0 ref2 2 60 2=2X2M * 0 0 TCTAGG 123456 AS:i:60',
]).replace(' ', '\t')
dm = DistanceMatrix()
with dataFile(data) as filename:
dm.addFile(filename, scoreTag='AS')
# Test distance.
matrix = dm.matrix()
self.assertTrue(np.array_equal(
[
[0.00, 0.75],
[0.75, 0.00],
],
matrix))
# Test similarity.
matrix = dm.matrix(similarity=True)
self.assertTrue(np.array_equal(
[
[1.00, 0.25],
[0.25, 1.00],
],
matrix))
def testSoergelMatrixWithOneQueryInCommonExplicitReferenceIds(self):
"""
The Soergel similarity between two references with one query in common
when using a score tag given is the sum of the minimum scores (30) over
the sum of the maximum scores (50 + 10 + 60 = 120), or 1/4, and the
distance is 1.0 minus this, or 3/4. Check that the distance and
similarity matrice have the right values when an explicit list of
reference ids is passed (other references must be ignored).
"""
data = '\n'.join([
'@SQ SN:ref1 LN:10',
'@SQ SN:ref2 LN:10',
'@SQ SN:ref3 LN:10',
'query1 0 ref1 2 60 2=2X2M * 0 0 TCTAGG 123456 AS:i:30',
'query1 0 ref2 2 60 2=2X2M * 0 0 TCTAGG 123456 AS:i:50',
'query2 0 ref2 2 60 2=2X2M * 0 0 TCTAGG 123456 AS:i:10',
'query3 0 ref2 2 60 2=2X2M * 0 0 TCTAGG 123456 AS:i:60',
'query3 0 ref3 2 60 2=2X2M * 0 0 TCTAGG 123456 AS:i:60',
]).replace(' ', '\t')
dm = DistanceMatrix()
with dataFile(data) as filename:
dm.addFile(filename, scoreTag='AS')
# Test distance.
matrix = dm.matrix(referenceIds=('ref1', 'ref2'))
self.assertTrue(np.array_equal(
[
[0.00, 0.75],
[0.75, 0.00],
],
matrix))
# Test similarity.
matrix = dm.matrix(referenceIds=('ref1', 'ref2'), similarity=True)
self.assertTrue(np.array_equal(
[
[1.00, 0.25],
[0.25, 1.00],
],
matrix))
| [
"dark.reads.Read",
"os.close",
"json.dumps",
"dark.sam._hardClip",
"dark.sam.SAMFilter",
"numpy.array_equal",
"os.unlink",
"six.assertRaisesRegex",
"dark.sam.DistanceMatrix",
"dark.reads.ReadFilter",
"dark.sam.samReferencesToStr",
"io.StringIO",
"tempfile.mkstemp"
] | [((983, 992), 'tempfile.mkstemp', 'mkstemp', ([], {}), '()\n', (990, 992), False, 'from tempfile import mkstemp\n'), ((1033, 1042), 'os.close', 'close', (['fd'], {}), '(fd)\n', (1038, 1042), False, 'from os import close, unlink, write\n'), ((1066, 1082), 'os.unlink', 'unlink', (['filename'], {}), '(filename)\n', (1072, 1082), False, 'from os import close, unlink, write\n'), ((1981, 1996), 'dark.sam.SAMFilter', 'SAMFilter', (['None'], {}), '(None)\n', (1990, 1996), False, 'from dark.sam import PaddedSAM, SAMFilter, UnequalReferenceLengthError, UnknownReference, InvalidSAM, samReferencesToStr, _hardClip, DistanceMatrix\n'), ((3003, 3038), 'dark.sam.SAMFilter', 'SAMFilter', (['None'], {'storeQueryIds': '(True)'}), '(None, storeQueryIds=True)\n', (3012, 3038), False, 'from dark.sam import PaddedSAM, SAMFilter, UnequalReferenceLengthError, UnknownReference, InvalidSAM, samReferencesToStr, _hardClip, DistanceMatrix\n'), ((43646, 43763), 'six.assertRaisesRegex', 'assertRaisesRegex', (['self', 'ValueError', 'error', '_hardClip', '"""CGT"""', '"""123"""', '((CMATCH, 1), (CHARD_CLIP, 1), (CMATCH, 1))'], {}), "(self, ValueError, error, _hardClip, 'CGT', '123', ((\n CMATCH, 1), (CHARD_CLIP, 1), (CMATCH, 1)))\n", (43663, 43763), False, 'from six import assertRaisesRegex\n'), ((44066, 44191), 'six.assertRaisesRegex', 'assertRaisesRegex', (['self', 'ValueError', 'error', '_hardClip', '"""CGT"""', '"""123"""', '((CHARD_CLIP, 1), (CHARD_CLIP, 1), (CHARD_CLIP, 1))'], {}), "(self, ValueError, error, _hardClip, 'CGT', '123', ((\n CHARD_CLIP, 1), (CHARD_CLIP, 1), (CHARD_CLIP, 1)))\n", (44083, 44191), False, 'from six import assertRaisesRegex\n'), ((46737, 46753), 'dark.sam.DistanceMatrix', 'DistanceMatrix', ([], {}), '()\n', (46751, 46753), False, 'from dark.sam import PaddedSAM, SAMFilter, UnequalReferenceLengthError, UnknownReference, InvalidSAM, samReferencesToStr, _hardClip, DistanceMatrix\n'), ((47069, 47085), 'dark.sam.DistanceMatrix', 'DistanceMatrix', ([], {}), '()\n', 
(47083, 47085), False, 'from dark.sam import PaddedSAM, SAMFilter, UnequalReferenceLengthError, UnknownReference, InvalidSAM, samReferencesToStr, _hardClip, DistanceMatrix\n'), ((47567, 47583), 'dark.sam.DistanceMatrix', 'DistanceMatrix', ([], {}), '()\n', (47581, 47583), False, 'from dark.sam import PaddedSAM, SAMFilter, UnequalReferenceLengthError, UnknownReference, InvalidSAM, samReferencesToStr, _hardClip, DistanceMatrix\n'), ((48226, 48242), 'dark.sam.DistanceMatrix', 'DistanceMatrix', ([], {}), '()\n', (48240, 48242), False, 'from dark.sam import PaddedSAM, SAMFilter, UnequalReferenceLengthError, UnknownReference, InvalidSAM, samReferencesToStr, _hardClip, DistanceMatrix\n'), ((48863, 48879), 'dark.sam.DistanceMatrix', 'DistanceMatrix', ([], {}), '()\n', (48877, 48879), False, 'from dark.sam import PaddedSAM, SAMFilter, UnequalReferenceLengthError, UnknownReference, InvalidSAM, samReferencesToStr, _hardClip, DistanceMatrix\n'), ((49486, 49502), 'dark.sam.DistanceMatrix', 'DistanceMatrix', ([], {}), '()\n', (49500, 49502), False, 'from dark.sam import PaddedSAM, SAMFilter, UnequalReferenceLengthError, UnknownReference, InvalidSAM, samReferencesToStr, _hardClip, DistanceMatrix\n'), ((50265, 50281), 'dark.sam.DistanceMatrix', 'DistanceMatrix', ([], {}), '()\n', (50279, 50281), False, 'from dark.sam import PaddedSAM, SAMFilter, UnequalReferenceLengthError, UnknownReference, InvalidSAM, samReferencesToStr, _hardClip, DistanceMatrix\n'), ((50897, 50913), 'dark.sam.DistanceMatrix', 'DistanceMatrix', ([], {}), '()\n', (50911, 50913), False, 'from dark.sam import PaddedSAM, SAMFilter, UnequalReferenceLengthError, UnknownReference, InvalidSAM, samReferencesToStr, _hardClip, DistanceMatrix\n'), ((51284, 51300), 'dark.sam.DistanceMatrix', 'DistanceMatrix', ([], {}), '()\n', (51298, 51300), False, 'from dark.sam import PaddedSAM, SAMFilter, UnequalReferenceLengthError, UnknownReference, InvalidSAM, samReferencesToStr, _hardClip, DistanceMatrix\n'), ((51809, 51825), 
'dark.sam.DistanceMatrix', 'DistanceMatrix', ([], {}), '()\n', (51823, 51825), False, 'from dark.sam import PaddedSAM, SAMFilter, UnequalReferenceLengthError, UnknownReference, InvalidSAM, samReferencesToStr, _hardClip, DistanceMatrix\n'), ((52437, 52453), 'dark.sam.DistanceMatrix', 'DistanceMatrix', ([], {}), '()\n', (52451, 52453), False, 'from dark.sam import PaddedSAM, SAMFilter, UnequalReferenceLengthError, UnknownReference, InvalidSAM, samReferencesToStr, _hardClip, DistanceMatrix\n'), ((53168, 53184), 'dark.sam.DistanceMatrix', 'DistanceMatrix', ([], {}), '()\n', (53182, 53184), False, 'from dark.sam import PaddedSAM, SAMFilter, UnequalReferenceLengthError, UnknownReference, InvalidSAM, samReferencesToStr, _hardClip, DistanceMatrix\n'), ((53778, 53794), 'dark.sam.DistanceMatrix', 'DistanceMatrix', ([], {}), '()\n', (53792, 53794), False, 'from dark.sam import PaddedSAM, SAMFilter, UnequalReferenceLengthError, UnknownReference, InvalidSAM, samReferencesToStr, _hardClip, DistanceMatrix\n'), ((54682, 54698), 'dark.sam.DistanceMatrix', 'DistanceMatrix', ([], {}), '()\n', (54696, 54698), False, 'from dark.sam import PaddedSAM, SAMFilter, UnequalReferenceLengthError, UnknownReference, InvalidSAM, samReferencesToStr, _hardClip, DistanceMatrix\n'), ((55713, 55729), 'dark.sam.DistanceMatrix', 'DistanceMatrix', ([], {}), '()\n', (55727, 55729), False, 'from dark.sam import PaddedSAM, SAMFilter, UnequalReferenceLengthError, UnknownReference, InvalidSAM, samReferencesToStr, _hardClip, DistanceMatrix\n'), ((56249, 56265), 'dark.sam.DistanceMatrix', 'DistanceMatrix', ([], {}), '()\n', (56263, 56265), False, 'from dark.sam import PaddedSAM, SAMFilter, UnequalReferenceLengthError, UnknownReference, InvalidSAM, samReferencesToStr, _hardClip, DistanceMatrix\n'), ((57340, 57356), 'dark.sam.DistanceMatrix', 'DistanceMatrix', ([], {}), '()\n', (57354, 57356), False, 'from dark.sam import PaddedSAM, SAMFilter, UnequalReferenceLengthError, UnknownReference, InvalidSAM, 
samReferencesToStr, _hardClip, DistanceMatrix\n'), ((58290, 58306), 'dark.sam.DistanceMatrix', 'DistanceMatrix', ([], {}), '()\n', (58304, 58306), False, 'from dark.sam import PaddedSAM, SAMFilter, UnequalReferenceLengthError, UnknownReference, InvalidSAM, samReferencesToStr, _hardClip, DistanceMatrix\n'), ((59071, 59087), 'dark.sam.DistanceMatrix', 'DistanceMatrix', ([], {}), '()\n', (59085, 59087), False, 'from dark.sam import PaddedSAM, SAMFilter, UnequalReferenceLengthError, UnknownReference, InvalidSAM, samReferencesToStr, _hardClip, DistanceMatrix\n'), ((60179, 60195), 'dark.sam.DistanceMatrix', 'DistanceMatrix', ([], {}), '()\n', (60193, 60195), False, 'from dark.sam import PaddedSAM, SAMFilter, UnequalReferenceLengthError, UnknownReference, InvalidSAM, samReferencesToStr, _hardClip, DistanceMatrix\n'), ((61125, 61141), 'dark.sam.DistanceMatrix', 'DistanceMatrix', ([], {}), '()\n', (61139, 61141), False, 'from dark.sam import PaddedSAM, SAMFilter, UnequalReferenceLengthError, UnknownReference, InvalidSAM, samReferencesToStr, _hardClip, DistanceMatrix\n'), ((62149, 62165), 'dark.sam.DistanceMatrix', 'DistanceMatrix', ([], {}), '()\n', (62163, 62165), False, 'from dark.sam import PaddedSAM, SAMFilter, UnequalReferenceLengthError, UnknownReference, InvalidSAM, samReferencesToStr, _hardClip, DistanceMatrix\n'), ((62779, 62795), 'dark.sam.DistanceMatrix', 'DistanceMatrix', ([], {}), '()\n', (62793, 62795), False, 'from dark.sam import PaddedSAM, SAMFilter, UnequalReferenceLengthError, UnknownReference, InvalidSAM, samReferencesToStr, _hardClip, DistanceMatrix\n'), ((62899, 62909), 'io.StringIO', 'StringIO', ([], {}), '()\n', (62907, 62909), False, 'from io import StringIO\n'), ((63544, 63560), 'dark.sam.DistanceMatrix', 'DistanceMatrix', ([], {}), '()\n', (63558, 63560), False, 'from dark.sam import PaddedSAM, SAMFilter, UnequalReferenceLengthError, UnknownReference, InvalidSAM, samReferencesToStr, _hardClip, DistanceMatrix\n'), ((64446, 64462), 
'dark.sam.DistanceMatrix', 'DistanceMatrix', ([], {}), '()\n', (64460, 64462), False, 'from dark.sam import PaddedSAM, SAMFilter, UnequalReferenceLengthError, UnknownReference, InvalidSAM, samReferencesToStr, _hardClip, DistanceMatrix\n'), ((65861, 65877), 'dark.sam.DistanceMatrix', 'DistanceMatrix', ([], {}), '()\n', (65875, 65877), False, 'from dark.sam import PaddedSAM, SAMFilter, UnequalReferenceLengthError, UnknownReference, InvalidSAM, samReferencesToStr, _hardClip, DistanceMatrix\n'), ((67614, 67630), 'dark.sam.DistanceMatrix', 'DistanceMatrix', ([], {}), '()\n', (67628, 67630), False, 'from dark.sam import PaddedSAM, SAMFilter, UnequalReferenceLengthError, UnknownReference, InvalidSAM, samReferencesToStr, _hardClip, DistanceMatrix\n'), ((69211, 69227), 'dark.sam.DistanceMatrix', 'DistanceMatrix', ([], {}), '()\n', (69225, 69227), False, 'from dark.sam import PaddedSAM, SAMFilter, UnequalReferenceLengthError, UnknownReference, InvalidSAM, samReferencesToStr, _hardClip, DistanceMatrix\n'), ((1529, 1574), 'dark.sam.SAMFilter', 'SAMFilter', (['filename'], {'referenceIds': "{'unknown'}"}), "(filename, referenceIds={'unknown'})\n", (1538, 1574), False, 'from dark.sam import PaddedSAM, SAMFilter, UnequalReferenceLengthError, UnknownReference, InvalidSAM, samReferencesToStr, _hardClip, DistanceMatrix\n'), ((1695, 1765), 'six.assertRaisesRegex', 'assertRaisesRegex', (['self', 'UnknownReference', 'error', 'sam.referenceLengths'], {}), '(self, UnknownReference, error, sam.referenceLengths)\n', (1712, 1765), False, 'from six import assertRaisesRegex\n'), ((2527, 2546), 'dark.sam.SAMFilter', 'SAMFilter', (['filename'], {}), '(filename)\n', (2536, 2546), False, 'from dark.sam import PaddedSAM, SAMFilter, UnequalReferenceLengthError, UnknownReference, InvalidSAM, samReferencesToStr, _hardClip, DistanceMatrix\n'), ((3508, 3547), 'dark.sam.SAMFilter', 'SAMFilter', (['filename'], {'storeQueryIds': '(True)'}), '(filename, storeQueryIds=True)\n', (3517, 3547), False, 'from 
dark.sam import PaddedSAM, SAMFilter, UnequalReferenceLengthError, UnknownReference, InvalidSAM, samReferencesToStr, _hardClip, DistanceMatrix\n'), ((4057, 4076), 'dark.sam.SAMFilter', 'SAMFilter', (['filename'], {}), '(filename)\n', (4066, 4076), False, 'from dark.sam import PaddedSAM, SAMFilter, UnequalReferenceLengthError, UnknownReference, InvalidSAM, samReferencesToStr, _hardClip, DistanceMatrix\n'), ((4647, 4689), 'dark.sam.SAMFilter', 'SAMFilter', (['filename'], {'filterRead': 'filterRead'}), '(filename, filterRead=filterRead)\n', (4656, 4689), False, 'from dark.sam import PaddedSAM, SAMFilter, UnequalReferenceLengthError, UnknownReference, InvalidSAM, samReferencesToStr, _hardClip, DistanceMatrix\n'), ((5196, 5235), 'dark.sam.SAMFilter', 'SAMFilter', (['filename'], {'dropSecondary': '(True)'}), '(filename, dropSecondary=True)\n', (5205, 5235), False, 'from dark.sam import PaddedSAM, SAMFilter, UnequalReferenceLengthError, UnknownReference, InvalidSAM, samReferencesToStr, _hardClip, DistanceMatrix\n'), ((5759, 5802), 'dark.sam.SAMFilter', 'SAMFilter', (['filename'], {'dropSupplementary': '(True)'}), '(filename, dropSupplementary=True)\n', (5768, 5802), False, 'from dark.sam import PaddedSAM, SAMFilter, UnequalReferenceLengthError, UnknownReference, InvalidSAM, samReferencesToStr, _hardClip, DistanceMatrix\n'), ((6335, 6375), 'dark.sam.SAMFilter', 'SAMFilter', (['filename'], {'dropDuplicates': '(True)'}), '(filename, dropDuplicates=True)\n', (6344, 6375), False, 'from dark.sam import PaddedSAM, SAMFilter, UnequalReferenceLengthError, UnknownReference, InvalidSAM, samReferencesToStr, _hardClip, DistanceMatrix\n'), ((6917, 6957), 'dark.sam.SAMFilter', 'SAMFilter', (['filename'], {'keepQCFailures': '(True)'}), '(filename, keepQCFailures=True)\n', (6926, 6957), False, 'from dark.sam import PaddedSAM, SAMFilter, UnequalReferenceLengthError, UnknownReference, InvalidSAM, samReferencesToStr, _hardClip, DistanceMatrix\n'), ((7610, 7641), 'dark.sam.SAMFilter', 
'SAMFilter', (['filename'], {'minScore': '(6)'}), '(filename, minScore=6)\n', (7619, 7641), False, 'from dark.sam import PaddedSAM, SAMFilter, UnequalReferenceLengthError, UnknownReference, InvalidSAM, samReferencesToStr, _hardClip, DistanceMatrix\n'), ((8239, 8270), 'dark.sam.SAMFilter', 'SAMFilter', (['filename'], {'minScore': '(6)'}), '(filename, minScore=6)\n', (8248, 8270), False, 'from dark.sam import PaddedSAM, SAMFilter, UnequalReferenceLengthError, UnknownReference, InvalidSAM, samReferencesToStr, _hardClip, DistanceMatrix\n'), ((8910, 8941), 'dark.sam.SAMFilter', 'SAMFilter', (['filename'], {'maxScore': '(6)'}), '(filename, maxScore=6)\n', (8919, 8941), False, 'from dark.sam import PaddedSAM, SAMFilter, UnequalReferenceLengthError, UnknownReference, InvalidSAM, samReferencesToStr, _hardClip, DistanceMatrix\n'), ((9540, 9571), 'dark.sam.SAMFilter', 'SAMFilter', (['filename'], {'maxScore': '(6)'}), '(filename, maxScore=6)\n', (9549, 9571), False, 'from dark.sam import PaddedSAM, SAMFilter, UnequalReferenceLengthError, UnknownReference, InvalidSAM, samReferencesToStr, _hardClip, DistanceMatrix\n'), ((10342, 10386), 'dark.sam.SAMFilter', 'SAMFilter', (['filename'], {'minScore': '(3)', 'maxScore': '(10)'}), '(filename, minScore=3, maxScore=10)\n', (10351, 10386), False, 'from dark.sam import PaddedSAM, SAMFilter, UnequalReferenceLengthError, UnknownReference, InvalidSAM, samReferencesToStr, _hardClip, DistanceMatrix\n'), ((10954, 10973), 'dark.sam.SAMFilter', 'SAMFilter', (['filename'], {}), '(filename)\n', (10963, 10973), False, 'from dark.sam import PaddedSAM, SAMFilter, UnequalReferenceLengthError, UnknownReference, InvalidSAM, samReferencesToStr, _hardClip, DistanceMatrix\n'), ((11647, 11666), 'dark.sam.SAMFilter', 'SAMFilter', (['filename'], {}), '(filename)\n', (11656, 11666), False, 'from dark.sam import PaddedSAM, SAMFilter, UnequalReferenceLengthError, UnknownReference, InvalidSAM, samReferencesToStr, _hardClip, DistanceMatrix\n'), ((33966, 34023), 
'six.assertRaisesRegex', 'assertRaisesRegex', (['self', 'InvalidSAM', 'error', 'list', 'queries'], {}), '(self, InvalidSAM, error, list, queries)\n', (33983, 34023), False, 'from six import assertRaisesRegex\n'), ((35562, 35619), 'six.assertRaisesRegex', 'assertRaisesRegex', (['self', 'InvalidSAM', 'error', 'list', 'queries'], {}), '(self, InvalidSAM, error, list, queries)\n', (35579, 35619), False, 'from six import assertRaisesRegex\n'), ((37155, 37212), 'six.assertRaisesRegex', 'assertRaisesRegex', (['self', 'InvalidSAM', 'error', 'list', 'queries'], {}), '(self, InvalidSAM, error, list, queries)\n', (37172, 37212), False, 'from six import assertRaisesRegex\n'), ((44435, 44474), 'dark.sam._hardClip', '_hardClip', (['"""CGT"""', '"""123"""', '((CMATCH, 3),)'], {}), "('CGT', '123', ((CMATCH, 3),))\n", (44444, 44474), False, 'from dark.sam import PaddedSAM, SAMFilter, UnequalReferenceLengthError, UnknownReference, InvalidSAM, samReferencesToStr, _hardClip, DistanceMatrix\n'), ((44731, 44792), 'dark.sam._hardClip', '_hardClip', (['"""CAACGT"""', '"""123456"""', '((CHARD_CLIP, 3), (CMATCH, 3))'], {}), "('CAACGT', '123456', ((CHARD_CLIP, 3), (CMATCH, 3)))\n", (44740, 44792), False, 'from dark.sam import PaddedSAM, SAMFilter, UnequalReferenceLengthError, UnknownReference, InvalidSAM, samReferencesToStr, _hardClip, DistanceMatrix\n'), ((45050, 45111), 'dark.sam._hardClip', '_hardClip', (['"""CAACGT"""', '"""123456"""', '((CMATCH, 2), (CHARD_CLIP, 4))'], {}), "('CAACGT', '123456', ((CMATCH, 2), (CHARD_CLIP, 4)))\n", (45059, 45111), False, 'from dark.sam import PaddedSAM, SAMFilter, UnequalReferenceLengthError, UnknownReference, InvalidSAM, samReferencesToStr, _hardClip, DistanceMatrix\n'), ((45377, 45455), 'dark.sam._hardClip', '_hardClip', (['"""CAACGT"""', '"""123456"""', '((CHARD_CLIP, 1), (CMATCH, 2), (CHARD_CLIP, 3))'], {}), "('CAACGT', '123456', ((CHARD_CLIP, 1), (CMATCH, 2), (CHARD_CLIP, 3)))\n", (45386, 45455), False, 'from dark.sam import PaddedSAM, SAMFilter, 
UnequalReferenceLengthError, UnknownReference, InvalidSAM, samReferencesToStr, _hardClip, DistanceMatrix\n'), ((45751, 45806), 'dark.sam._hardClip', '_hardClip', (['"""CGT"""', '"""123"""', '((CHARD_CLIP, 3), (CMATCH, 3))'], {}), "('CGT', '123', ((CHARD_CLIP, 3), (CMATCH, 3)))\n", (45760, 45806), False, 'from dark.sam import PaddedSAM, SAMFilter, UnequalReferenceLengthError, UnknownReference, InvalidSAM, samReferencesToStr, _hardClip, DistanceMatrix\n'), ((46080, 46133), 'dark.sam._hardClip', '_hardClip', (['"""CA"""', '"""12"""', '((CMATCH, 2), (CHARD_CLIP, 4))'], {}), "('CA', '12', ((CMATCH, 2), (CHARD_CLIP, 4)))\n", (46089, 46133), False, 'from dark.sam import PaddedSAM, SAMFilter, UnequalReferenceLengthError, UnknownReference, InvalidSAM, samReferencesToStr, _hardClip, DistanceMatrix\n'), ((46415, 46485), 'dark.sam._hardClip', '_hardClip', (['"""AA"""', '"""12"""', '((CHARD_CLIP, 1), (CMATCH, 2), (CHARD_CLIP, 3))'], {}), "('AA', '12', ((CHARD_CLIP, 1), (CMATCH, 2), (CHARD_CLIP, 3)))\n", (46424, 46485), False, 'from dark.sam import PaddedSAM, SAMFilter, UnequalReferenceLengthError, UnknownReference, InvalidSAM, samReferencesToStr, _hardClip, DistanceMatrix\n'), ((63583, 63594), 'json.dumps', 'dumps', (['data'], {}), '(data)\n', (63588, 63594), False, 'from json import dumps, loads\n'), ((64648, 64698), 'numpy.array_equal', 'np.array_equal', (['[[0.0, 0.75], [0.75, 0.0]]', 'matrix'], {}), '([[0.0, 0.75], [0.75, 0.0]], matrix)\n', (64662, 64698), True, 'import numpy as np\n'), ((64889, 64939), 'numpy.array_equal', 'np.array_equal', (['[[1.0, 0.25], [0.25, 1.0]]', 'matrix'], {}), '([[1.0, 0.25], [0.25, 1.0]], matrix)\n', (64903, 64939), True, 'import numpy as np\n'), ((67800, 67850), 'numpy.array_equal', 'np.array_equal', (['[[0.0, 0.75], [0.75, 0.0]]', 'matrix'], {}), '([[0.0, 0.75], [0.75, 0.0]], matrix)\n', (67814, 67850), True, 'import numpy as np\n'), ((68023, 68073), 'numpy.array_equal', 'np.array_equal', (['[[1.0, 0.25], [0.25, 1.0]]', 'matrix'], {}), 
'([[1.0, 0.25], [0.25, 1.0]], matrix)\n', (68037, 68073), True, 'import numpy as np\n'), ((69426, 69476), 'numpy.array_equal', 'np.array_equal', (['[[0.0, 0.75], [0.75, 0.0]]', 'matrix'], {}), '([[0.0, 0.75], [0.75, 0.0]], matrix)\n', (69440, 69476), True, 'import numpy as np\n'), ((69680, 69730), 'numpy.array_equal', 'np.array_equal', (['[[1.0, 0.25], [0.25, 1.0]]', 'matrix'], {}), '([[1.0, 0.25], [0.25, 1.0]], matrix)\n', (69694, 69730), True, 'import numpy as np\n'), ((4599, 4622), 'dark.reads.ReadFilter', 'ReadFilter', ([], {'minLength': '(6)'}), '(minLength=6)\n', (4609, 4622), False, 'from dark.reads import Read, ReadFilter\n'), ((12970, 12989), 'dark.sam.SAMFilter', 'SAMFilter', (['filename'], {}), '(filename)\n', (12979, 12989), False, 'from dark.sam import PaddedSAM, SAMFilter, UnequalReferenceLengthError, UnknownReference, InvalidSAM, samReferencesToStr, _hardClip, DistanceMatrix\n'), ((14047, 14066), 'dark.sam.SAMFilter', 'SAMFilter', (['filename'], {}), '(filename)\n', (14056, 14066), False, 'from dark.sam import PaddedSAM, SAMFilter, UnequalReferenceLengthError, UnknownReference, InvalidSAM, samReferencesToStr, _hardClip, DistanceMatrix\n'), ((14599, 14618), 'dark.sam.SAMFilter', 'SAMFilter', (['filename'], {}), '(filename)\n', (14608, 14618), False, 'from dark.sam import PaddedSAM, SAMFilter, UnequalReferenceLengthError, UnknownReference, InvalidSAM, samReferencesToStr, _hardClip, DistanceMatrix\n'), ((14690, 14732), 'dark.reads.Read', 'Read', (['"""query1"""', '"""-TCTAGG---"""', '"""!ZZZZZZ!!!"""'], {}), "('query1', '-TCTAGG---', '!ZZZZZZ!!!')\n", (14694, 14732), False, 'from dark.reads import Read, ReadFilter\n'), ((15113, 15132), 'dark.sam.SAMFilter', 'SAMFilter', (['filename'], {}), '(filename)\n', (15122, 15132), False, 'from dark.sam import PaddedSAM, SAMFilter, UnequalReferenceLengthError, UnknownReference, InvalidSAM, samReferencesToStr, _hardClip, DistanceMatrix\n'), ((15204, 15246), 'dark.reads.Read', 'Read', (['"""query1"""', 
'"""-TCTAGG---"""', '"""!ZZZZZZ!!!"""'], {}), "('query1', '-TCTAGG---', '!ZZZZZZ!!!')\n", (15208, 15246), False, 'from dark.reads import Read, ReadFilter\n'), ((15636, 15655), 'dark.sam.SAMFilter', 'SAMFilter', (['filename'], {}), '(filename)\n', (15645, 15655), False, 'from dark.sam import PaddedSAM, SAMFilter, UnequalReferenceLengthError, UnknownReference, InvalidSAM, samReferencesToStr, _hardClip, DistanceMatrix\n'), ((15727, 15769), 'dark.reads.Read', 'Read', (['"""query1"""', '"""-TCTAGG---"""', '"""!ZZZZZZ!!!"""'], {}), "('query1', '-TCTAGG---', '!ZZZZZZ!!!')\n", (15731, 15769), False, 'from dark.reads import Read, ReadFilter\n'), ((16161, 16180), 'dark.sam.SAMFilter', 'SAMFilter', (['filename'], {}), '(filename)\n', (16170, 16180), False, 'from dark.sam import PaddedSAM, SAMFilter, UnequalReferenceLengthError, UnknownReference, InvalidSAM, samReferencesToStr, _hardClip, DistanceMatrix\n'), ((16252, 16294), 'dark.reads.Read', 'Read', (['"""query1"""', '"""-TCTAGG---"""', '"""!ZZZZZZ!!!"""'], {}), "('query1', '-TCTAGG---', '!ZZZZZZ!!!')\n", (16256, 16294), False, 'from dark.reads import Read, ReadFilter\n'), ((16762, 16781), 'dark.sam.SAMFilter', 'SAMFilter', (['filename'], {}), '(filename)\n', (16771, 16781), False, 'from dark.sam import PaddedSAM, SAMFilter, UnequalReferenceLengthError, UnknownReference, InvalidSAM, samReferencesToStr, _hardClip, DistanceMatrix\n'), ((16866, 16908), 'dark.reads.Read', 'Read', (['"""query1"""', '"""-CCTAGA---"""', '"""!654321!!!"""'], {}), "('query1', '-CCTAGA---', '!654321!!!')\n", (16870, 16908), False, 'from dark.reads import Read, ReadFilter\n'), ((17316, 17335), 'dark.sam.SAMFilter', 'SAMFilter', (['filename'], {}), '(filename)\n', (17325, 17335), False, 'from dark.sam import PaddedSAM, SAMFilter, UnequalReferenceLengthError, UnknownReference, InvalidSAM, samReferencesToStr, _hardClip, DistanceMatrix\n'), ((17436, 17481), 'dark.reads.Read', 'Read', (['"""query1-rc"""', '"""-CCTAGA---"""', '"""!654321!!!"""'], {}), 
"('query1-rc', '-CCTAGA---', '!654321!!!')\n", (17440, 17481), False, 'from dark.reads import Read, ReadFilter\n'), ((17944, 17963), 'dark.sam.SAMFilter', 'SAMFilter', (['filename'], {}), '(filename)\n', (17953, 17963), False, 'from dark.sam import PaddedSAM, SAMFilter, UnequalReferenceLengthError, UnknownReference, InvalidSAM, samReferencesToStr, _hardClip, DistanceMatrix\n'), ((18035, 18077), 'dark.reads.Read', 'Read', (['"""query1"""', '"""-TCTAGG---"""', '"""!ZZZZZZ!!!"""'], {}), "('query1', '-TCTAGG---', '!ZZZZZZ!!!')\n", (18039, 18077), False, 'from dark.reads import Read, ReadFilter\n'), ((18519, 18538), 'dark.sam.SAMFilter', 'SAMFilter', (['filename'], {}), '(filename)\n', (18528, 18538), False, 'from dark.sam import PaddedSAM, SAMFilter, UnequalReferenceLengthError, UnknownReference, InvalidSAM, samReferencesToStr, _hardClip, DistanceMatrix\n'), ((18610, 18652), 'dark.reads.Read', 'Read', (['"""query1"""', '"""TCTAGG----"""', '"""ZZZZZZ!!!!"""'], {}), "('query1', 'TCTAGG----', 'ZZZZZZ!!!!')\n", (18614, 18652), False, 'from dark.reads import Read, ReadFilter\n'), ((19087, 19106), 'dark.sam.SAMFilter', 'SAMFilter', (['filename'], {}), '(filename)\n', (19096, 19106), False, 'from dark.sam import PaddedSAM, SAMFilter, UnequalReferenceLengthError, UnknownReference, InvalidSAM, samReferencesToStr, _hardClip, DistanceMatrix\n'), ((19178, 19220), 'dark.reads.Read', 'Read', (['"""query1"""', '"""AGG-------"""', '"""ZZZ!!!!!!!"""'], {}), "('query1', 'AGG-------', 'ZZZ!!!!!!!')\n", (19182, 19220), False, 'from dark.reads import Read, ReadFilter\n'), ((19811, 19830), 'dark.sam.SAMFilter', 'SAMFilter', (['filename'], {}), '(filename)\n', (19820, 19830), False, 'from dark.sam import PaddedSAM, SAMFilter, UnequalReferenceLengthError, UnknownReference, InvalidSAM, samReferencesToStr, _hardClip, DistanceMatrix\n'), ((19902, 19940), 'dark.reads.Read', 'Read', (['"""query1"""', 'seq[14:]', 'quality[14:]'], {}), "('query1', seq[14:], quality[14:])\n", (19906, 19940), False, 
'from dark.reads import Read, ReadFilter\n'), ((20376, 20395), 'dark.sam.SAMFilter', 'SAMFilter', (['filename'], {}), '(filename)\n', (20385, 20395), False, 'from dark.sam import PaddedSAM, SAMFilter, UnequalReferenceLengthError, UnknownReference, InvalidSAM, samReferencesToStr, _hardClip, DistanceMatrix\n'), ((20467, 20509), 'dark.reads.Read', 'Read', (['"""query1"""', '"""---TCTAGG-"""', '"""!!!ZZZZZZ!"""'], {}), "('query1', '---TCTAGG-', '!!!ZZZZZZ!')\n", (20471, 20509), False, 'from dark.reads import Read, ReadFilter\n'), ((20953, 20972), 'dark.sam.SAMFilter', 'SAMFilter', (['filename'], {}), '(filename)\n', (20962, 20972), False, 'from dark.sam import PaddedSAM, SAMFilter, UnequalReferenceLengthError, UnknownReference, InvalidSAM, samReferencesToStr, _hardClip, DistanceMatrix\n'), ((21044, 21086), 'dark.reads.Read', 'Read', (['"""query1"""', '"""----TCTAGG"""', '"""!!!!ZZZZZZ"""'], {}), "('query1', '----TCTAGG', '!!!!ZZZZZZ')\n", (21048, 21086), False, 'from dark.reads import Read, ReadFilter\n'), ((21523, 21542), 'dark.sam.SAMFilter', 'SAMFilter', (['filename'], {}), '(filename)\n', (21532, 21542), False, 'from dark.sam import PaddedSAM, SAMFilter, UnequalReferenceLengthError, UnknownReference, InvalidSAM, samReferencesToStr, _hardClip, DistanceMatrix\n'), ((21614, 21656), 'dark.reads.Read', 'Read', (['"""query1"""', '"""-----TCTAG"""', '"""!!!!!ZZZZZ"""'], {}), "('query1', '-----TCTAG', '!!!!!ZZZZZ')\n", (21618, 21656), False, 'from dark.reads import Read, ReadFilter\n'), ((22131, 22150), 'dark.sam.SAMFilter', 'SAMFilter', (['filename'], {}), '(filename)\n', (22140, 22150), False, 'from dark.sam import PaddedSAM, SAMFilter, UnequalReferenceLengthError, UnknownReference, InvalidSAM, samReferencesToStr, _hardClip, DistanceMatrix\n'), ((22222, 22264), 'dark.reads.Read', 'Read', (['"""query1"""', '"""TAGGCTGACT"""', '"""ZZZZZZZZZZ"""'], {}), "('query1', 'TAGGCTGACT', 'ZZZZZZZZZZ')\n", (22226, 22264), False, 'from dark.reads import Read, ReadFilter\n'), ((22816, 
22835), 'dark.sam.SAMFilter', 'SAMFilter', (['filename'], {}), '(filename)\n', (22825, 22835), False, 'from dark.sam import PaddedSAM, SAMFilter, UnequalReferenceLengthError, UnknownReference, InvalidSAM, samReferencesToStr, _hardClip, DistanceMatrix\n'), ((22907, 22949), 'dark.reads.Read', 'Read', (['"""query1"""', '"""TAGGCTGACT"""', '"""ZZZZZZZZZZ"""'], {}), "('query1', 'TAGGCTGACT', 'ZZZZZZZZZZ')\n", (22911, 22949), False, 'from dark.reads import Read, ReadFilter\n'), ((23394, 23413), 'dark.sam.SAMFilter', 'SAMFilter', (['filename'], {}), '(filename)\n', (23403, 23413), False, 'from dark.sam import PaddedSAM, SAMFilter, UnequalReferenceLengthError, UnknownReference, InvalidSAM, samReferencesToStr, _hardClip, DistanceMatrix\n'), ((23485, 23527), 'dark.reads.Read', 'Read', (['"""query1"""', '"""-TCGG-----"""', '"""!ZZZZ!!!!!"""'], {}), "('query1', '-TCGG-----', '!ZZZZ!!!!!')\n", (23489, 23527), False, 'from dark.reads import Read, ReadFilter\n'), ((24244, 24263), 'dark.sam.SAMFilter', 'SAMFilter', (['filename'], {}), '(filename)\n', (24253, 24263), False, 'from dark.sam import PaddedSAM, SAMFilter, UnequalReferenceLengthError, UnknownReference, InvalidSAM, samReferencesToStr, _hardClip, DistanceMatrix\n'), ((24342, 24384), 'dark.reads.Read', 'Read', (['"""query1"""', '"""-TCGG-----"""', '"""!ZZZZ!!!!!"""'], {}), "('query1', '-TCGG-----', '!ZZZZ!!!!!')\n", (24346, 24384), False, 'from dark.reads import Read, ReadFilter\n'), ((24422, 24466), 'dark.reads.Read', 'Read', (['"""query1/1"""', '"""---TCG----"""', '"""!!!ZZZ!!!!"""'], {}), "('query1/1', '---TCG----', '!!!ZZZ!!!!')\n", (24426, 24466), False, 'from dark.reads import Read, ReadFilter\n'), ((25125, 25144), 'dark.sam.SAMFilter', 'SAMFilter', (['filename'], {}), '(filename)\n', (25134, 25144), False, 'from dark.sam import PaddedSAM, SAMFilter, UnequalReferenceLengthError, UnknownReference, InvalidSAM, samReferencesToStr, _hardClip, DistanceMatrix\n'), ((25216, 25258), 'dark.reads.Read', 'Read', 
(['"""query1"""', '"""-TCNNTAGG-"""', '"""!ZZ!!ZZZZ!"""'], {}), "('query1', '-TCNNTAGG-', '!ZZ!!ZZZZ!')\n", (25220, 25258), False, 'from dark.reads import Read, ReadFilter\n'), ((25812, 25831), 'dark.sam.SAMFilter', 'SAMFilter', (['filename'], {}), '(filename)\n', (25821, 25831), False, 'from dark.sam import PaddedSAM, SAMFilter, UnequalReferenceLengthError, UnknownReference, InvalidSAM, samReferencesToStr, _hardClip, DistanceMatrix\n'), ((25987, 26029), 'dark.reads.Read', 'Read', (['"""query1"""', '"""-TC??TAGG-"""', '"""+ZZ++ZZZZ+"""'], {}), "('query1', '-TC??TAGG-', '+ZZ++ZZZZ+')\n", (25991, 26029), False, 'from dark.reads import Read, ReadFilter\n'), ((26500, 26519), 'dark.sam.SAMFilter', 'SAMFilter', (['filename'], {}), '(filename)\n', (26509, 26519), False, 'from dark.sam import PaddedSAM, SAMFilter, UnequalReferenceLengthError, UnknownReference, InvalidSAM, samReferencesToStr, _hardClip, DistanceMatrix\n'), ((26613, 26655), 'dark.reads.Read', 'Read', (['"""query1"""', '"""-TCNNTAGG-"""', '""".ZZ..ZZZZ."""'], {}), "('query1', '-TCNNTAGG-', '.ZZ..ZZZZ.')\n", (26617, 26655), False, 'from dark.reads import Read, ReadFilter\n'), ((27201, 27220), 'dark.sam.SAMFilter', 'SAMFilter', (['filename'], {}), '(filename)\n', (27210, 27220), False, 'from dark.sam import PaddedSAM, SAMFilter, UnequalReferenceLengthError, UnknownReference, InvalidSAM, samReferencesToStr, _hardClip, DistanceMatrix\n'), ((27376, 27418), 'dark.reads.Read', 'Read', (['"""query1"""', '"""-TCXXTAGG-"""', '"""+ZZ++ZZZZ+"""'], {}), "('query1', '-TCXXTAGG-', '+ZZ++ZZZZ+')\n", (27380, 27418), False, 'from dark.reads import Read, ReadFilter\n'), ((27893, 27935), 'dark.sam.SAMFilter', 'SAMFilter', (['filename'], {'referenceIds': "{'ref2'}"}), "(filename, referenceIds={'ref2'})\n", (27902, 27935), False, 'from dark.sam import PaddedSAM, SAMFilter, UnequalReferenceLengthError, UnknownReference, InvalidSAM, samReferencesToStr, _hardClip, DistanceMatrix\n'), ((28452, 28494), 'dark.sam.SAMFilter', 
'SAMFilter', (['filename'], {'referenceIds': "{'ref1'}"}), "(filename, referenceIds={'ref1'})\n", (28461, 28494), False, 'from dark.sam import PaddedSAM, SAMFilter, UnequalReferenceLengthError, UnknownReference, InvalidSAM, samReferencesToStr, _hardClip, DistanceMatrix\n'), ((28566, 28608), 'dark.reads.Read', 'Read', (['"""query1"""', '"""-TCTAGG---"""', '"""!ZZZZZZ!!!"""'], {}), "('query1', '-TCTAGG---', '!ZZZZZZ!!!')\n", (28570, 28608), False, 'from dark.reads import Read, ReadFilter\n'), ((29053, 29076), 'dark.reads.ReadFilter', 'ReadFilter', ([], {'minLength': '(6)'}), '(minLength=6)\n', (29063, 29076), False, 'from dark.reads import Read, ReadFilter\n'), ((29111, 29153), 'dark.sam.SAMFilter', 'SAMFilter', (['filename'], {'filterRead': 'filterRead'}), '(filename, filterRead=filterRead)\n', (29120, 29153), False, 'from dark.sam import PaddedSAM, SAMFilter, UnequalReferenceLengthError, UnknownReference, InvalidSAM, samReferencesToStr, _hardClip, DistanceMatrix\n'), ((29225, 29267), 'dark.reads.Read', 'Read', (['"""query1"""', '"""-TCTAGG---"""', '"""!ZZZZZZ!!!"""'], {}), "('query1', '-TCTAGG---', '!ZZZZZZ!!!')\n", (29229, 29267), False, 'from dark.reads import Read, ReadFilter\n'), ((29681, 29720), 'dark.sam.SAMFilter', 'SAMFilter', (['filename'], {'dropSecondary': '(True)'}), '(filename, dropSecondary=True)\n', (29690, 29720), False, 'from dark.sam import PaddedSAM, SAMFilter, UnequalReferenceLengthError, UnknownReference, InvalidSAM, samReferencesToStr, _hardClip, DistanceMatrix\n'), ((29792, 29834), 'dark.reads.Read', 'Read', (['"""query1"""', '"""-TCTAGG---"""', '"""!ZZZZZZ!!!"""'], {}), "('query1', '-TCTAGG---', '!ZZZZZZ!!!')\n", (29796, 29834), False, 'from dark.reads import Read, ReadFilter\n'), ((30265, 30308), 'dark.sam.SAMFilter', 'SAMFilter', (['filename'], {'dropSupplementary': '(True)'}), '(filename, dropSupplementary=True)\n', (30274, 30308), False, 'from dark.sam import PaddedSAM, SAMFilter, UnequalReferenceLengthError, UnknownReference, 
InvalidSAM, samReferencesToStr, _hardClip, DistanceMatrix\n'), ((30380, 30422), 'dark.reads.Read', 'Read', (['"""query1"""', '"""-TCTAGG---"""', '"""!ZZZZZZ!!!"""'], {}), "('query1', '-TCTAGG---', '!ZZZZZZ!!!')\n", (30384, 30422), False, 'from dark.reads import Read, ReadFilter\n'), ((30862, 30902), 'dark.sam.SAMFilter', 'SAMFilter', (['filename'], {'dropDuplicates': '(True)'}), '(filename, dropDuplicates=True)\n', (30871, 30902), False, 'from dark.sam import PaddedSAM, SAMFilter, UnequalReferenceLengthError, UnknownReference, InvalidSAM, samReferencesToStr, _hardClip, DistanceMatrix\n'), ((30974, 31016), 'dark.reads.Read', 'Read', (['"""query1"""', '"""-TCTAGG---"""', '"""!ZZZZZZ!!!"""'], {}), "('query1', '-TCTAGG---', '!ZZZZZZ!!!')\n", (30978, 31016), False, 'from dark.reads import Read, ReadFilter\n'), ((31455, 31474), 'dark.sam.SAMFilter', 'SAMFilter', (['filename'], {}), '(filename)\n', (31464, 31474), False, 'from dark.sam import PaddedSAM, SAMFilter, UnequalReferenceLengthError, UnknownReference, InvalidSAM, samReferencesToStr, _hardClip, DistanceMatrix\n'), ((31575, 31617), 'dark.reads.Read', 'Read', (['"""query1"""', '"""-TCTAGG---"""', '"""!ZZZZZZ!!!"""'], {}), "('query1', '-TCTAGG---', '!ZZZZZZ!!!')\n", (31579, 31617), False, 'from dark.reads import Read, ReadFilter\n'), ((31655, 31697), 'dark.reads.Read', 'Read', (['"""query1"""', '"""--TC------"""', '"""!!ZZ!!!!!!"""'], {}), "('query1', '--TC------', '!!ZZ!!!!!!')\n", (31659, 31697), False, 'from dark.reads import Read, ReadFilter\n'), ((32208, 32227), 'dark.sam.SAMFilter', 'SAMFilter', (['filename'], {}), '(filename)\n', (32217, 32227), False, 'from dark.sam import PaddedSAM, SAMFilter, UnequalReferenceLengthError, UnknownReference, InvalidSAM, samReferencesToStr, _hardClip, DistanceMatrix\n'), ((32313, 32355), 'dark.reads.Read', 'Read', (['"""query1"""', '"""-TCTAGG---"""', '"""!ZZZZZZ!!!"""'], {}), "('query1', '-TCTAGG---', '!ZZZZZZ!!!')\n", (32317, 32355), False, 'from dark.reads import Read, 
ReadFilter\n'), ((32393, 32437), 'dark.reads.Read', 'Read', (['"""query1/1"""', '"""--TC------"""', '"""!!ZZ!!!!!!"""'], {}), "('query1/1', '--TC------', '!!ZZ!!!!!!')\n", (32397, 32437), False, 'from dark.reads import Read, ReadFilter\n'), ((32504, 32548), 'dark.reads.Read', 'Read', (['"""query1/2"""', '"""--TCGA----"""', '"""!!ZZZZ!!!!"""'], {}), "('query1/2', '--TCGA----', '!!ZZZZ!!!!')\n", (32508, 32548), False, 'from dark.reads import Read, ReadFilter\n'), ((33027, 33067), 'dark.sam.SAMFilter', 'SAMFilter', (['filename'], {'keepQCFailures': '(True)'}), '(filename, keepQCFailures=True)\n', (33036, 33067), False, 'from dark.sam import PaddedSAM, SAMFilter, UnequalReferenceLengthError, UnknownReference, InvalidSAM, samReferencesToStr, _hardClip, DistanceMatrix\n'), ((33146, 33188), 'dark.reads.Read', 'Read', (['"""query1"""', '"""-TCTAGG---"""', '"""!ZZZZZZ!!!"""'], {}), "('query1', '-TCTAGG---', '!ZZZZZZ!!!')\n", (33150, 33188), False, 'from dark.reads import Read, ReadFilter\n'), ((33226, 33268), 'dark.reads.Read', 'Read', (['"""query2"""', '"""---TC-----"""', '"""!!!ZZ!!!!!"""'], {}), "('query2', '---TC-----', '!!!ZZ!!!!!')\n", (33230, 33268), False, 'from dark.reads import Read, ReadFilter\n'), ((33700, 33719), 'dark.sam.SAMFilter', 'SAMFilter', (['filename'], {}), '(filename)\n', (33709, 33719), False, 'from dark.sam import PaddedSAM, SAMFilter, UnequalReferenceLengthError, UnknownReference, InvalidSAM, samReferencesToStr, _hardClip, DistanceMatrix\n'), ((34517, 34536), 'dark.sam.SAMFilter', 'SAMFilter', (['filename'], {}), '(filename)\n', (34526, 34536), False, 'from dark.sam import PaddedSAM, SAMFilter, UnequalReferenceLengthError, UnknownReference, InvalidSAM, samReferencesToStr, _hardClip, DistanceMatrix\n'), ((34622, 34664), 'dark.reads.Read', 'Read', (['"""query1"""', '"""-TCT------"""', '"""!ZZZ!!!!!!"""'], {}), "('query1', '-TCT------', '!ZZZ!!!!!!')\n", (34626, 34664), False, 'from dark.reads import Read, ReadFilter\n'), ((34702, 34744), 
'dark.reads.Read', 'Read', (['"""query2"""', '"""-TCTA-----"""', '"""!ZZZZ!!!!!"""'], {}), "('query2', '-TCTA-----', '!ZZZZ!!!!!')\n", (34706, 34744), False, 'from dark.reads import Read, ReadFilter\n'), ((34782, 34826), 'dark.reads.Read', 'Read', (['"""query2/1"""', '"""-----TCTA-"""', '"""!!!!!ZZZZ!"""'], {}), "('query2/1', '-----TCTA-', '!!!!!ZZZZ!')\n", (34786, 34826), False, 'from dark.reads import Read, ReadFilter\n'), ((35296, 35315), 'dark.sam.SAMFilter', 'SAMFilter', (['filename'], {}), '(filename)\n', (35305, 35315), False, 'from dark.sam import PaddedSAM, SAMFilter, UnequalReferenceLengthError, UnknownReference, InvalidSAM, samReferencesToStr, _hardClip, DistanceMatrix\n'), ((36122, 36141), 'dark.sam.SAMFilter', 'SAMFilter', (['filename'], {}), '(filename)\n', (36131, 36141), False, 'from dark.sam import PaddedSAM, SAMFilter, UnequalReferenceLengthError, UnknownReference, InvalidSAM, samReferencesToStr, _hardClip, DistanceMatrix\n'), ((36227, 36269), 'dark.reads.Read', 'Read', (['"""query1"""', '"""-TCT------"""', '"""!ZZZ!!!!!!"""'], {}), "('query1', '-TCT------', '!ZZZ!!!!!!')\n", (36231, 36269), False, 'from dark.reads import Read, ReadFilter\n'), ((36307, 36349), 'dark.reads.Read', 'Read', (['"""query2"""', '"""-TCTA-----"""', '"""!ZZZZ!!!!!"""'], {}), "('query2', '-TCTA-----', '!ZZZZ!!!!!')\n", (36311, 36349), False, 'from dark.reads import Read, ReadFilter\n'), ((36387, 36431), 'dark.reads.Read', 'Read', (['"""query2/1"""', '"""-----TCTA-"""', '"""!!!!!ZZZZ!"""'], {}), "('query2/1', '-----TCTA-', '!!!!!ZZZZ!')\n", (36391, 36431), False, 'from dark.reads import Read, ReadFilter\n'), ((36889, 36908), 'dark.sam.SAMFilter', 'SAMFilter', (['filename'], {}), '(filename)\n', (36898, 36908), False, 'from dark.sam import PaddedSAM, SAMFilter, UnequalReferenceLengthError, UnknownReference, InvalidSAM, samReferencesToStr, _hardClip, DistanceMatrix\n'), ((37645, 37664), 'dark.sam.SAMFilter', 'SAMFilter', (['filename'], {}), '(filename)\n', (37654, 37664), 
False, 'from dark.sam import PaddedSAM, SAMFilter, UnequalReferenceLengthError, UnknownReference, InvalidSAM, samReferencesToStr, _hardClip, DistanceMatrix\n'), ((37761, 37803), 'dark.reads.Read', 'Read', (['"""query1"""', '"""-TCTAGG---"""', '"""!123456!!!"""'], {}), "('query1', '-TCTAGG---', '!123456!!!')\n", (37765, 37803), False, 'from dark.reads import Read, ReadFilter\n'), ((38039, 38081), 'dark.reads.Read', 'Read', (['"""query2"""', '"""-TC-------"""', '"""!78!!!!!!!"""'], {}), "('query2', '-TC-------', '!78!!!!!!!')\n", (38043, 38081), False, 'from dark.reads import Read, ReadFilter\n'), ((39285, 39304), 'dark.sam.SAMFilter', 'SAMFilter', (['filename'], {}), '(filename)\n', (39294, 39304), False, 'from dark.sam import PaddedSAM, SAMFilter, UnequalReferenceLengthError, UnknownReference, InvalidSAM, samReferencesToStr, _hardClip, DistanceMatrix\n'), ((39415, 39453), 'dark.reads.Read', 'Read', (['"""query1"""', '"""TGGTT---"""', '"""45678!!!"""'], {}), "('query1', 'TGGTT---', '45678!!!')\n", (39419, 39453), False, 'from dark.reads import Read, ReadFilter\n'), ((39565, 39605), 'dark.reads.Read', 'Read', (['"""query1/1"""', '"""TGGTT---"""', '"""45678!!!"""'], {}), "('query1/1', 'TGGTT---', '45678!!!')\n", (39569, 39605), False, 'from dark.reads import Read, ReadFilter\n'), ((39714, 39754), 'dark.reads.Read', 'Read', (['"""query1/2"""', '"""GTT-----"""', '"""678!!!!!"""'], {}), "('query1/2', 'GTT-----', '678!!!!!')\n", (39718, 39754), False, 'from dark.reads import Read, ReadFilter\n'), ((39861, 39901), 'dark.reads.Read', 'Read', (['"""query1/3"""', '"""TTTTGGTT"""', '"""12345678"""'], {}), "('query1/3', 'TTTTGGTT', '12345678')\n", (39865, 39901), False, 'from dark.reads import Read, ReadFilter\n'), ((40693, 40712), 'dark.sam.SAMFilter', 'SAMFilter', (['filename'], {}), '(filename)\n', (40702, 40712), False, 'from dark.sam import PaddedSAM, SAMFilter, UnequalReferenceLengthError, UnknownReference, InvalidSAM, samReferencesToStr, _hardClip, DistanceMatrix\n'), 
((40808, 40850), 'dark.reads.Read', 'Read', (['"""query1"""', '"""-TCTAGG---"""', '"""!ZZZZZZ!!!"""'], {}), "('query1', '-TCTAGG---', '!ZZZZZZ!!!')\n", (40812, 40850), False, 'from dark.reads import Read, ReadFilter\n'), ((40957, 41001), 'dark.reads.Read', 'Read', (['"""query1/1"""', '"""-TCTAGG---"""', '"""!ZZZZZZ!!!"""'], {}), "('query1/1', '-TCTAGG---', '!ZZZZZZ!!!')\n", (40961, 41001), False, 'from dark.reads import Read, ReadFilter\n'), ((41828, 41847), 'dark.sam.SAMFilter', 'SAMFilter', (['filename'], {}), '(filename)\n', (41837, 41847), False, 'from dark.sam import PaddedSAM, SAMFilter, UnequalReferenceLengthError, UnknownReference, InvalidSAM, samReferencesToStr, _hardClip, DistanceMatrix\n'), ((41943, 41985), 'dark.reads.Read', 'Read', (['"""query1"""', '"""-TCTAGG---"""', '"""!ZZZZZZ!!!"""'], {}), "('query1', '-TCTAGG---', '!ZZZZZZ!!!')\n", (41947, 41985), False, 'from dark.reads import Read, ReadFilter\n'), ((42092, 42136), 'dark.reads.Read', 'Read', (['"""query1/1"""', '"""-TCTAGG---"""', '"""!ZZZZZZ!!!"""'], {}), "('query1/1', '-TCTAGG---', '!ZZZZZZ!!!')\n", (42096, 42136), False, 'from dark.reads import Read, ReadFilter\n'), ((42718, 42746), 'dark.sam.samReferencesToStr', 'samReferencesToStr', (['filename'], {}), '(filename)\n', (42736, 42746), False, 'from dark.sam import PaddedSAM, SAMFilter, UnequalReferenceLengthError, UnknownReference, InvalidSAM, samReferencesToStr, _hardClip, DistanceMatrix\n'), ((43158, 43196), 'dark.sam.samReferencesToStr', 'samReferencesToStr', (['filename'], {'indent': '(2)'}), '(filename, indent=2)\n', (43176, 43196), False, 'from dark.sam import PaddedSAM, SAMFilter, UnequalReferenceLengthError, UnknownReference, InvalidSAM, samReferencesToStr, _hardClip, DistanceMatrix\n')] |
import matplotlib.pyplot as plt
from matplotlib import colors, ticker, cm
import numpy as np
from scipy.interpolate import interp1d, CubicSpline
def calc_wa(sep, alpha, d_obs):
    """Working angle (radians) of a companion on the sky.

    Parameters
    ----------
    sep : float -- physical separation in AU.
    alpha : float -- phase angle in degrees.
    d_obs : float -- distance to the system in parsecs.

    Relies on the module-level unit constants ``au_to_m`` and ``pc_to_m``.
    """
    sep_m = sep * au_to_m
    dist_m = d_obs * pc_to_m
    alpha_rad = alpha * np.pi / 180.0
    return np.arcsin(np.sin(alpha_rad) * sep_m / dist_m)
def lambert_phase(alpha, degrees=False):
    """Lambert-sphere phase function Phi(alpha).

    Parameters
    ----------
    alpha : float or ndarray
        Phase angle, in radians unless ``degrees`` is True.
    degrees : bool, optional
        Interpret ``alpha`` as degrees and convert internally.
    """
    ang = alpha * np.pi / 180.0 if degrees else alpha
    numerator = np.sin(ang) + (np.pi - ang) * np.cos(ang)
    return numerator / np.pi
def quantum_eff(wl):
    """Linear quantum-efficiency model with fixed throughput factors.

    Parameters
    ----------
    wl : float or ndarray
        Wavelength(s) in microns.  Values below 0.7 are clamped to 0.7
        (flat response at the peak); above 0.7 the QE declines linearly
        as 1 - (wl - 0.7)/0.3, scaled by the constant 0.9*0.9*0.98*0.865.

    Returns
    -------
    float or ndarray matching the input shape.
    """
    # np.clip handles scalars and arrays alike and never mutates the input,
    # replacing the original hand-rolled copy + np.where clamping loop.
    wl_eff = np.clip(wl, 0.7, None)
    return 0.9*(1.0-(wl_eff-0.7)/0.3)*0.9*0.98*0.865
def old_quantum_efficiency(wavelength):
    """Legacy quantum-efficiency model.

    Parameters
    ----------
    wavelength : float or ndarray
        Wavelength(s) in microns.  Values below 0.7 are clamped to 0.7;
        above that the QE declines linearly as 0.87*(1 - (wl - 0.7)/0.32).
        (The extra throughput factors 0.9*0.98*0.865 are deliberately
        commented out here; they are applied elsewhere.)

    Returns
    -------
    float or ndarray matching the input shape.
    """
    # np.clip replaces the original copy + np.where clamping and leaves
    # the caller's array untouched.
    wl_eff = np.clip(wavelength, 0.7, None)
    return (0.87*(1.0-(wl_eff-0.7)/0.32))#*0.9*0.98*0.865 # factors
def quantum_efficiency(wavelength):
    """Detector QE interpolated from the measured curve on disk.

    Parameters
    ----------
    wavelength : float or ndarray
        Wavelength(s) in microns.

    Returns
    -------
    Interpolated QE fraction(s).

    Notes
    -----
    The original re-read 'AuxiliaryData/ev2_qe_curve.dat' and rebuilt the
    interpolator on every call; the interpolator is now built once on the
    first call and memoized on the function object.
    """
    qefunc = getattr(quantum_efficiency, '_qefunc', None)
    if qefunc is None:
        # File columns appear to be wavelength in nm and QE in percent,
        # given the /1000 and /100 conversions -- confirm against the file.
        wls, eff = np.loadtxt('AuxiliaryData/ev2_qe_curve.dat', unpack=True)
        qefunc = interp1d(wls/1000.0, eff/100.0)
        quantum_efficiency._qefunc = qefunc
    return qefunc(wavelength)
def create_wl_range(start, end, R):
    """Geometric wavelength grid from ``start`` toward ``end``.

    Successive samples are spaced by wl/R (constant fractional step at
    resolving power R).  The grid always contains ``start`` and stops
    strictly below ``end``.

    Returns
    -------
    ndarray of the sampled wavelengths.
    """
    samples = [start]
    current = start
    while current < end:
        current = current + current / R
        if current < end:
            samples.append(current)
    return np.array(samples)
def fixed_albedo(wavelength):
    """Wavelength-independent albedo of 0.25.

    Returns a scalar for scalar input, or an array of 0.25 with one entry
    per wavelength when given an ndarray.
    """
    if type(wavelength) == np.ndarray:
        return np.full(len(wavelength), 0.25)
    return 0.25
def VIO(x):
    """Empirical VIO-filter phase polynomial; x = phase angle / 180 deg."""
    return (1.0 - 1.815*x + 0.940*x**2.0 - 2.399*x**3.0
            + 4.990*x**4.0 - 2.715*x**5.0)
def BL1(x):
    """Empirical BL1-filter phase polynomial; x = phase angle / 180 deg."""
    return (1.0 - 1.311*x - 2.382*x**2.0 + 5.893*x**3.0
            - 4.046*x**4.0 + 0.846*x**5.0)
def GRN(x):
    """Empirical GRN-filter phase polynomial; x = phase angle / 180 deg."""
    return (1.0 - 1.507*x - 0.363*x**2.0 - 0.062*x**3.0
            + 2.809*x**4.0 - 1.876*x**5.0)
def RED(x):
    """Empirical RED-filter phase polynomial; x = phase angle / 180 deg."""
    return (1.0 - 0.882*x - 3.923*x**2.0 + 8.142*x**3.0
            - 5.776*x**4.0 + 1.439*x**5.0)
def CB2(x):
    """Empirical CB2-filter phase polynomial; x = phase angle / 180 deg."""
    return (1.0 - 1.121*x - 1.720*x**2.0 + 1.776*x**3.0
            + 1.757*x**4.0 - 1.691*x**5.0)
def CB3(x):
    """Empirical CB3-filter phase polynomial; x = phase angle / 180 deg."""
    return (1.0 - 0.413*x - 6.932*x**2.0 + 11.388*x**3.0
            - 3.261*x**4.0 - 1.783*x**5.0)
def avgMayorga(x):
    """Mean of all six empirical filter phase polynomials at x = alpha/180 deg."""
    filter_curves = (VIO, BL1, GRN, RED, CB2, CB3)
    return np.mean(np.array([curve(x) for curve in filter_curves]))
def IFSavgMayorga(x):
    """Mean of the IFS-band filter polynomials (GRN, RED, CB2, CB3) at x = alpha/180 deg."""
    return np.mean(np.array([curve(x) for curve in (GRN, RED, CB2, CB3)]))
def piecewise_empirical(wavelength, alpha, degrees=True):
    """Piecewise empirical phase function from the per-filter polynomials.

    Each wavelength is assigned the polynomial of the filter band that
    contains it, with a 50/50 GRN/RED blend between 0.575 and 0.650 micron.

    Parameters
    ----------
    wavelength : float, np.float64 or ndarray
        Wavelength(s) in microns.
    alpha : float
        Phase angle in degrees (the polynomials use x = alpha/180).
    degrees : bool, optional
        Unused; kept for interface compatibility (alpha is always degrees).

    Returns
    -------
    ndarray
        One phase value per wavelength (a length-1 array for scalar input).

    Notes
    -----
    The original used disjoint strict inequalities, so a wavelength exactly
    on a band edge (0.440, 0.500, 0.575, 0.650, 0.740 or 0.9) matched no
    branch and either reused the previous element's value or raised
    NameError.  The elif chain below assigns every wavelength to exactly
    one band; edges fall into the longer-wavelength band.
    """
    def VIO(x):
        return 1.0 + x*(-1.815) + x**2.0*(0.940) + x**3.0*(-2.399) + x**4.0*(4.990) + x**5.0*(-2.715)
    def BL1(x):
        return 1.0 + x*(-1.311) + x**2.0*(-2.382) + x**3.0*(5.893) + x**4.0*(-4.046) + x**5.0*(0.846)
    def GRN(x):
        return 1.0 + x*(-1.507) + x**2.0*(-0.363) + x**3.0*(-0.062) + x**4.0*(2.809) + x**5.0*(-1.876)
    def RED(x):
        return 1.0 + x*(-0.882) + x**2.0*(-3.923) + x**3.0*(8.142) + x**4.0*(-5.776) + x**5.0*(1.439)
    def CB2(x):
        return 1.0 + x*(-1.121) + x**2.0*(-1.720) + x**3.0*(1.776) + x**4.0*(1.757) + x**5.0*(-1.691)
    def CB3(x):
        return 1.0 + x*(-0.413) + x**2.0*(-6.932) + x**3.0*(11.388) + x**4.0*(-3.261) + x**5.0*(-1.783)
    x = alpha/180.0
    if isinstance(wavelength, (float, np.float64)):
        wavelength = np.array([wavelength])
    phases = []
    for wl in wavelength:
        if wl < 0.440:
            phi = VIO(x)
        elif wl < 0.500:
            phi = BL1(x)
        elif wl < 0.575:
            phi = GRN(x)
        elif wl < 0.650:
            phi = 0.5*GRN(x) + 0.5*RED(x)
        elif wl < 0.740:
            phi = RED(x)
        elif wl < 0.9:
            phi = CB2(x)
        else:
            phi = CB3(x)
        phases.append(phi)
    return np.array(phases)
def avg_empirical(alpha, degrees=False):
    """Average of the six empirical filter phase polynomials.

    Parameters
    ----------
    alpha : float
        Phase angle, in radians unless ``degrees`` is True.
    degrees : bool, optional
        Interpret ``alpha`` as degrees.

    Returns
    -------
    float
        Mean of VIO, BL1, GRN, RED, CB2 and CB3 at x = alpha/180 deg.

    Notes
    -----
    The original converted degrees -> radians -> degrees when
    ``degrees=True``, a lossy no-op round trip through np.pi; the
    conversion now runs only when the input really is in radians.
    """
    def VIO(x):
        return 1.0 + x*(-1.815) + x**2.0*(0.940) + x**3.0*(-2.399) + x**4.0*(4.990) + x**5.0*(-2.715)
    def BL1(x):
        return 1.0 + x*(-1.311) + x**2.0*(-2.382) + x**3.0*(5.893) + x**4.0*(-4.046) + x**5.0*(0.846)
    def GRN(x):
        return 1.0 + x*(-1.507) + x**2.0*(-0.363) + x**3.0*(-0.062) + x**4.0*(2.809) + x**5.0*(-1.876)
    def RED(x):
        return 1.0 + x*(-0.882) + x**2.0*(-3.923) + x**3.0*(8.142) + x**4.0*(-5.776) + x**5.0*(1.439)
    def CB2(x):
        return 1.0 + x*(-1.121) + x**2.0*(-1.720) + x**3.0*(1.776) + x**4.0*(1.757) + x**5.0*(-1.691)
    def CB3(x):
        return 1.0 + x*(-0.413) + x**2.0*(-6.932) + x**3.0*(11.388) + x**4.0*(-3.261) + x**5.0*(-1.783)
    if not degrees:
        alpha = alpha*180.0/np.pi  # radians -> degrees
    x = alpha/180.0
    phi = (VIO(x)+BL1(x)+GRN(x)+RED(x)+CB2(x)+CB3(x))/6.0
    return phi
def update_c_version(new_param_dict,old_param_dict,cperformance_table,tablesampling):
    """Merge one coronagraph version's parameters into ``old_param_dict``.

    Mutates ``old_param_dict`` in place (returns None): first folds in
    ``new_param_dict``, then derives working-angle interpolators and scalar
    quantities from the coronagraph performance table and folds those in.

    Parameters
    ----------
    new_param_dict : dict
        Version-specific telescope/detector/coronagraph parameters.
    old_param_dict : dict
        Accumulating parameter dict, updated in place.
    cperformance_table : sequence of 8 arrays
        Columns: rlamD, r_arcsec, Intensity, Contrast, coreThruput,
        PSFpeak, area_sqarcsec, occTrans.
    tablesampling : float
        Pixel sampling (in lambda/D) the table was computed with.

    Notes
    -----
    Relies on the module-level globals ``D`` (telescope diameter, m) and
    ``rad_to_arcsec``.  The original also built an unused tau_PSF
    interpolator (coreThruput/occTrans); it was never stored or used, and
    the division can warn where occTrans is zero, so it is dropped here.
    """
    old_param_dict.update(new_param_dict)
    rlamD,r_arcsec,Intensity,Contrast,coreThruput,PSFpeak,area_sqarcsec,occTrans = cperformance_table
    # Working-angle (lambda/D) interpolators; zero outside the tabulated range.
    tau_occ = interp1d(rlamD,occTrans,fill_value=0.0,bounds_error=False) # occulter transmission
    tau_core = interp1d(rlamD,coreThruput,fill_value=0.0,bounds_error=False) # PSF-core throughput
    contrast_func = interp1d(rlamD,Contrast,fill_value=0.0,bounds_error=False)
    Ipk_func = interp1d(rlamD,PSFpeak,fill_value=0.0,bounds_error=False)
    # Wavelength in microns for which the contrast table file was computed.
    wl_d = r_arcsec[0]*D/(rlamD[0]*rad_to_arcsec) *10.0**6.0
    # Mean number of detector pixels per PSF core at the table sampling -- a fixed value.
    mpixTable = np.mean(area_sqarcsec)*(np.pi/180.0/3600.0)**2.0/(tablesampling*wl_d*10.0**-6.0/D)**2
    coronagraph_dict = {'rlamD':rlamD,'r_arcsec':r_arcsec,'contrast_func':contrast_func, 'Ipk_func':Ipk_func,
            'area_sqarcsec':area_sqarcsec,'wl_d':wl_d,'mpixTable':mpixTable,'tau_occ':tau_occ,'tau_core':tau_core,}
    old_param_dict.update(coronagraph_dict)
# Dense wavelength grids (microns) for each imaging filter band, sampled at
# resolving power 50000 via create_wl_range.  Each band is given by its
# center wavelength and fractional width in percent, so the band edges are
# center*(1 -/+ width/200).  NOTE(review): band numbering (1, 2, 6, 7, 8, 9,
# IFS) presumably follows the instrument's filter naming -- confirm.
# The scratch names `bandcenter`/`width` are rebound for each band in turn.
bandcenter, width = 0.506, 10.3
wavelengths_in_band1 = create_wl_range(bandcenter*(1.0 - width/200.0), bandcenter*(1.0 + width/200.0), 50000)
bandcenter, width = 0.575, 10.1
wavelengths_in_band2 = create_wl_range(bandcenter*(1.0 - width/200.0), bandcenter*(1.0 + width/200.0), 50000)
bandcenter, width = 0.661, 10.0
wavelengths_in_band6 = create_wl_range(bandcenter*(1.0 - width/200.0), bandcenter*(1.0 + width/200.0), 50000)
bandcenter, width = 0.883, 5.2
wavelengths_in_band8 = create_wl_range(bandcenter*(1.0 - width/200.0), bandcenter*(1.0 + width/200.0), 50000)
bandcenter, width = 0.721, 5.0
wavelengths_in_band7 = create_wl_range(bandcenter*(1.0 - width/200.0), bandcenter*(1.0 + width/200.0), 50000)
bandcenter, width = 0.940, 6.4
wavelengths_in_band9 = create_wl_range(bandcenter*(1.0 - width/200.0), bandcenter*(1.0 + width/200.0), 50000)
bandcenter, width = 0.760, 18.0
wavelengths_in_ifsband = create_wl_range(bandcenter*(1.0 - width/200.0), bandcenter*(1.0 + width/200.0), 50000)
# Shared plotting configuration: module-wide colormaps and tick styling.
COLORMAP = cm.nipy_spectral
CMAP2 = cm.nipy_spectral_r  # reversed variant (previously cm.hot_r)
# One batched rcParams update instead of eighteen individual assignments;
# the settings themselves are identical.
plt.rcParams.update({
    'font.family': 'serif',
    'lines.linewidth': 2,
    'xtick.top': True,
    'ytick.right': True,
    'xtick.direction': 'in',
    'ytick.direction': 'in',
    'ytick.major.size': 9,
    'xtick.major.size': 9,
    'ytick.major.width': 1.0,
    'xtick.major.width': 1.0,
    'ytick.minor.size': 3.5,
    'xtick.minor.size': 3.5,
    'ytick.minor.visible': True,
    'xtick.minor.visible': True,
    'ytick.labelsize': 14,
    'xtick.labelsize': 14,
})
# astrophysical constants and standards
rad_to_arcsec = 206265.0 # convert radians to arcsec
pc_to_m = 30855152366503100.0 # convert parsecs to meters
au_to_m = 149590000000.0 # convert AU to meters 149590000000
rsun = 1392000000.0 # radius of the sun in meters
rjup = 69911000.0 # radius of jupiter in meters 69911000
h = 6.62607004 * 10.0**-34.0 # planck constant m^2 kg s^-1
kb = 1.38064852 *10.0**-23.0 # Boltzman constant m^2 kg s^-2 K^-1
sb = 5.67 *10.**-8.0 # steffan boltzman constant W m^-2 K^-4
c = 299792458.0 # speed of light m s^-1
F0V = 3.6*10.0**-8.0 # zero mag V band flux W/m^2/um
# Reference solar spectrum; loaded once at import time.
solar_spec = 'AuxiliaryData/kurucz_solar_spec_1AU.dat' # specific flux density W / m^2 / micron, at 1AU
ref_wl, ref_flambda = np.loadtxt(solar_spec,unpack=True,usecols=(0,1))
fsun_lambda = interp1d(ref_wl,ref_flambda) # units still specific flux density
MsunV = 4.83 # absolute V magnitude of the sun
MzV = 23.0 # surface brightness of zodiacal light at 1 AU
FsolV = 1.86*10.0**3.0 # solar V band flux W/m^2/um at 1AU
MezV = 22.0 # magnitudes arcsec^-2
Lsun = 3.828*10.0**26.0# W  (note: Lsun, kb and sb are defined but not packed into the dict below)
# things all versions of coronagraph have in common:
tau_fil = 0.9 # filter
tau_pol = 1.0 # polarizer
# Shared base dict copied as the starting point of every per-version
# parameter dict further below.
ast_constants_dict = { 'MsunV':MsunV,'MzV':MzV,'MezV':MezV, 'rjup':rjup,'rsun':rsun,
            'fsun_lambda':fsun_lambda,'F0V':F0V,'FsolV':FsolV,
            'h':h,'c':c,'au_to_m':au_to_m,'pc_to_m':pc_to_m,
            'rad_to_arcsec':rad_to_arcsec,'tau_fil':tau_fil,'tau_pol':tau_pol}
# relating to assumptions about calibration
# and image subtraction with other stars
delta_compstar = 3.0 # difference in apparent magnitudes between target and comparison star
fratio = 10.0**(delta_compstar/2.5) # flux ratio implied by the magnitude difference
t_comp = 0.2 # 20% of the time on the target star
vratio = 1.0/(fratio*t_comp) # variance ratio target/comparison
zodi_multiplier = 1 + vratio # inflate zodi variance by the comparison-star contribution
detector_multiplier = 1 + t_comp # detector noise will increase with the additional time
ast_constants_dict.update({'zodi_multiplier':zodi_multiplier,'detector_multiplier':detector_multiplier})
## All the different versions of the coronagraph parameters:
## org_hlc_pars, org_spc_pars, cbe_hlc_pars, cbe_spc_pars, req_spc_pars, req_hlc_pars.
## Each section below rebinds the same scratch names (D, eta, tfr, ...) and
## then snapshots them into a per-version dict, so statement order matters
## and the scratch names must not be relied on after this point.
#############################Original SPC
# TELESCOPE
D = 2.38
tau_obs = 0.835
Apm = np.pi*(D/2.0)**2.0*tau_obs  # collecting area (m^2) times telescope throughput
# DETECTOR
eta = old_quantum_efficiency  # wavelength-dependent QE (legacy model)
cte_cr_hot = 0.865*0.9*0.98
phc = 0.90
tfr = 30.0
qcic = 0.01
idark = 2.1*10.0**-4.0
sig_read = 0.00000001
# CORONAGRAPH
cperformance_table = np.loadtxt("AuxiliaryData/SPC_20170714_660_1masStr_1masJit_CBEFIT.csv",unpack=True,delimiter=',')
tablesampling = 0.2
tau_ref = 0.474
# NOTE(review): OWA (2.7) < IWA (9.0) -- either the names are swapped or the
# two are expressed in different units; confirm against the performance table.
OWA = 2.7
IWA = 9.0
# IFS
wl_c = 0.6
mpix = 20.0
R = 50.0
# POST-PROCESSING
fpp = 1.0/10.0
# CREATE DICT
original_spc_dict = {'Apm':Apm,'eta':eta,'tfr':tfr,'qcic':qcic,'idark':idark,'phc':phc,'cte_cr_hot':cte_cr_hot,'R':R,
        'sig_read':sig_read,'wl_c':wl_c,'fpp':fpp,'OWA':OWA,'IWA':IWA,'D':D,'mpix':mpix,'tau_obs':tau_obs,'tau_ref':tau_ref}
org_spc_pars = ast_constants_dict.copy()  # start from the shared constants
update_c_version(original_spc_dict, org_spc_pars,cperformance_table,tablesampling)
########################### Original HLC
# TELESCOPE
D = 2.38
tau_obs = 0.835
Apm = np.pi*(D/2.0)**2.0*tau_obs
# DETECTOR
eta = old_quantum_efficiency
cte_cr_hot = 0.865*0.9*0.98
phc = 1.0
tfr = 30.0
qcic = 0.01
idark = 2.1*10.0**-4.0
sig_read = 0.00000001
ENF = np.sqrt(2.0)  # presumably the detector excess noise factor -- confirm
# CORONAGRAPH
cperformance_table = np.loadtxt("AuxiliaryData/hlc_20161228_9_polall_0.4mas_jitter_results.txt",unpack=True)
tablesampling = 0.2
tau_ref = 0.474
OWA = 2.7
IWA = 9.0
# focal plane
wl_c = 0.6
mpix = 7.0
R = 50000.0
# POST-PROCESSING
fpp = 1.0/10.0
# CREATE DICT
original_hlc_dict = {'Apm':Apm,'eta':eta,'tfr':tfr,'qcic':qcic,'idark':idark,'phc':phc,'cte_cr_hot':cte_cr_hot,'R':R,
        'sig_read':sig_read,'wl_c':wl_c,'fpp':fpp,'OWA':OWA,'IWA':IWA,'D':D,'mpix':mpix,'tau_obs':tau_obs,'tau_ref':tau_ref,'ENF':ENF}
org_hlc_pars = ast_constants_dict.copy()
update_c_version(original_hlc_dict, org_hlc_pars,cperformance_table,tablesampling)
############################ SPC CBE
# Current-best-estimate SPC version: same structure as the original
# sections above, with updated detector and coronagraph values.
# TELESCOPE
D = 2.37
tau_obs = 0.835
Apm = np.pi*(D/2.0)**2.0*tau_obs
# DETECTOR
eta = quantum_efficiency  # measured QE curve instead of the legacy model
cte_cr_hot = 0.934*0.874*0.983 # (CBE goes with 33% lifetime, 21 months at L2)
phc = 0.90
tfr = 80.0
qcic = 0.016
idark = 4.6*10.0**-4.0
sig_read = 0.00000001
# CORONAGRAPH
cperformance_table = np.loadtxt("AuxiliaryData/SPC_20170714_660_1masStr_1masJit_CBEFIT.csv",unpack=True,delimiter=',')
tablesampling = 0.2
tau_ref = 0.383
OWA = 2.7
IWA = 9.0
# IFS
wl_c = 0.66
mpix = 45.0 #54.0 # 26.5
R = 50.0
# POST-PROCESSING
fpp = 1.0/12.0
# CREATE DICT
cbe_spc_dict = {'Apm':Apm,'eta':eta,'tfr':tfr,'qcic':qcic,'idark':idark,'phc':phc,'cte_cr_hot':cte_cr_hot,'R':R,
        'sig_read':sig_read,'wl_c':wl_c,'fpp':fpp,'OWA':OWA,'IWA':IWA,'D':D,'mpix':mpix,'tau_obs':tau_obs,'tau_ref':tau_ref}
cbe_spc_pars = ast_constants_dict.copy()
update_c_version(cbe_spc_dict, cbe_spc_pars,cperformance_table,tablesampling)
########################## HLC CBE
# TELESCOPE
D = 2.37
tau_obs = 0.835
Apm = np.pi*(D/2.0)**2.0*tau_obs
# DETECTOR
eta = quantum_efficiency # (CBE goes with 33% lifetime, 21 months at L2)
cte_cr_hot = 0.934*0.874*0.983
phc = 0.90
ENF=1.0
tfr = 10.0
qcic = 0.016
idark = 4.6*10.0**-4.0
sig_read = 0.00000001
# CORONAGRAPH
cperformance_table = np.loadtxt("AuxiliaryData/hlc_20161228_9_polall_0.4mas_jitter_results.txt",unpack=True)
tablesampling = 0.3
tau_ref = 0.573
OWA = 2.7
IWA = 9.0
# focal plane
wl_c = 0.508
mpix = 9.0 #4.0
R = 50000.0
# POST-PROCESSING
fpp = 1.0/12.0
# CREATE DICT
cbe_hlc_dict = {'Apm':Apm,'eta':eta,'tfr':tfr,'qcic':qcic,'idark':idark,'phc':phc,'cte_cr_hot':cte_cr_hot,'R':R,
        'sig_read':sig_read,'wl_c':wl_c,'fpp':fpp,'OWA':OWA,'IWA':IWA,'D':D,'mpix':mpix,'tau_obs':tau_obs,'tau_ref':tau_ref,'ENF':ENF}
cbe_hlc_pars = ast_constants_dict.copy()
update_c_version(cbe_hlc_dict, cbe_hlc_pars,cperformance_table,tablesampling)
############################ SPC REQ
# Requirements-level SPC version: more conservative detector degradation
# (longer time at L2) and slightly different coronagraph tables.
# TELESCOPE
D = 2.37
tau_obs = 0.835
Apm = np.pi*(D/2.0)**2.0*tau_obs
# DETECTOR
eta = quantum_efficiency
cte_cr_hot = 0.596*0.838*0.95 # (REQ goes with 63 months at L2)
phc = 0.90
tfr = 80.0
qcic = 0.0232
idark = 5.56*10.0**-4.0
sig_read = 0.00000001
# CORONAGRAPH
cperformance_table = np.loadtxt("AuxiliaryData/newSPC.dat",unpack=True)
tablesampling = 0.2
tau_ref = 0.337
OWA = 2.8
IWA = 8.6
# IFS
wl_c = 0.66 # where the det is nyquist sampled
mpix = 53.0
R = 50.0
# POST-PROCESSING
fpp = 1.0/12.0
req_spc_dict = {'Apm':Apm,'eta':eta,'tfr':tfr,'qcic':qcic,'idark':idark,'phc':phc,'cte_cr_hot':cte_cr_hot,'R':R,
        'sig_read':sig_read,'wl_c':wl_c,'fpp':fpp,'OWA':OWA,'IWA':IWA,'D':D,'mpix':mpix,'tau_obs':tau_obs,'tau_ref':tau_ref}
req_spc_pars = ast_constants_dict.copy()
update_c_version(req_spc_dict, req_spc_pars,cperformance_table,tablesampling)
############################ HLC REQ
# TELESCOPE
D = 2.37
tau_obs = 0.835
Apm = np.pi*(D/2.0)**2.0*tau_obs
# DETECTOR
eta = quantum_efficiency # (REQ goes with 63 months at L2)
cte_cr_hot = 0.798*0.95*0.988
phc = 0.90
ENF = 1.0
tfr = 6.0
qcic = 0.0232
idark = 5.56*10.0**-4.0
sig_read = 0.00000001
# CORONAGRAPH
cperformance_table = np.loadtxt("AuxiliaryData/newHLC.dat",unpack=True)
tablesampling = 0.3
tau_ref = 0.573
OWA = 2.8
IWA = 8.6
# focal plane
wl_c = 0.508 # where the det is nyquist sampled
mpix = 11.0
R = 50000.0
# POST-PROCESSING
fpp = 1.0/12.0
# CREATE DICT
req_hlc_dict = {'Apm':Apm,'eta':eta,'tfr':tfr,'qcic':qcic,'idark':idark,'phc':phc,'cte_cr_hot':cte_cr_hot, 'R':R,
        'sig_read':sig_read,'wl_c':wl_c,'fpp':fpp,'OWA':OWA,'IWA':IWA,'D':D,'mpix':mpix,'tau_obs':tau_obs,'tau_ref':tau_ref,'ENF':ENF}
req_hlc_pars = ast_constants_dict.copy()
update_c_version(req_hlc_dict, req_hlc_pars,cperformance_table,tablesampling)
# Convenience list of every assembled parameter-dict version.
allversions = [org_hlc_pars,org_spc_pars,cbe_hlc_pars, cbe_spc_pars, req_spc_pars, req_hlc_pars]
| [
"numpy.mean",
"numpy.sqrt",
"numpy.where",
"scipy.interpolate.interp1d",
"numpy.array",
"numpy.cos",
"numpy.sin",
"numpy.loadtxt"
] | [((9127, 9178), 'numpy.loadtxt', 'np.loadtxt', (['solar_spec'], {'unpack': '(True)', 'usecols': '(0, 1)'}), '(solar_spec, unpack=True, usecols=(0, 1))\n', (9137, 9178), True, 'import numpy as np\n'), ((9190, 9219), 'scipy.interpolate.interp1d', 'interp1d', (['ref_wl', 'ref_flambda'], {}), '(ref_wl, ref_flambda)\n', (9198, 9219), False, 'from scipy.interpolate import interp1d, CubicSpline\n'), ((11049, 11152), 'numpy.loadtxt', 'np.loadtxt', (['"""AuxiliaryData/SPC_20170714_660_1masStr_1masJit_CBEFIT.csv"""'], {'unpack': '(True)', 'delimiter': '""","""'}), "('AuxiliaryData/SPC_20170714_660_1masStr_1masJit_CBEFIT.csv',\n unpack=True, delimiter=',')\n", (11059, 11152), True, 'import numpy as np\n'), ((12054, 12066), 'numpy.sqrt', 'np.sqrt', (['(2.0)'], {}), '(2.0)\n', (12061, 12066), True, 'import numpy as np\n'), ((12110, 12202), 'numpy.loadtxt', 'np.loadtxt', (['"""AuxiliaryData/hlc_20161228_9_polall_0.4mas_jitter_results.txt"""'], {'unpack': '(True)'}), "('AuxiliaryData/hlc_20161228_9_polall_0.4mas_jitter_results.txt',\n unpack=True)\n", (12120, 12202), True, 'import numpy as np\n'), ((13205, 13308), 'numpy.loadtxt', 'np.loadtxt', (['"""AuxiliaryData/SPC_20170714_660_1masStr_1masJit_CBEFIT.csv"""'], {'unpack': '(True)', 'delimiter': '""","""'}), "('AuxiliaryData/SPC_20170714_660_1masStr_1masJit_CBEFIT.csv',\n unpack=True, delimiter=',')\n", (13215, 13308), True, 'import numpy as np\n'), ((14299, 14391), 'numpy.loadtxt', 'np.loadtxt', (['"""AuxiliaryData/hlc_20161228_9_polall_0.4mas_jitter_results.txt"""'], {'unpack': '(True)'}), "('AuxiliaryData/hlc_20161228_9_polall_0.4mas_jitter_results.txt',\n unpack=True)\n", (14309, 14391), True, 'import numpy as np\n'), ((15378, 15429), 'numpy.loadtxt', 'np.loadtxt', (['"""AuxiliaryData/newSPC.dat"""'], {'unpack': '(True)'}), "('AuxiliaryData/newSPC.dat', unpack=True)\n", (15388, 15429), True, 'import numpy as np\n'), ((16426, 16477), 'numpy.loadtxt', 'np.loadtxt', (['"""AuxiliaryData/newHLC.dat"""'], {'unpack': 
'(True)'}), "('AuxiliaryData/newHLC.dat', unpack=True)\n", (16436, 16477), True, 'import numpy as np\n'), ((1499, 1556), 'numpy.loadtxt', 'np.loadtxt', (['"""AuxiliaryData/ev2_qe_curve.dat"""'], {'unpack': '(True)'}), "('AuxiliaryData/ev2_qe_curve.dat', unpack=True)\n", (1509, 1556), True, 'import numpy as np\n'), ((1569, 1604), 'scipy.interpolate.interp1d', 'interp1d', (['(wls / 1000.0)', '(eff / 100.0)'], {}), '(wls / 1000.0, eff / 100.0)\n', (1577, 1604), False, 'from scipy.interpolate import interp1d, CubicSpline\n'), ((1841, 1858), 'numpy.array', 'np.array', (['wlrange'], {}), '(wlrange)\n', (1849, 1858), True, 'import numpy as np\n'), ((4528, 4544), 'numpy.array', 'np.array', (['phases'], {}), '(phases)\n', (4536, 4544), True, 'import numpy as np\n'), ((5717, 5778), 'scipy.interpolate.interp1d', 'interp1d', (['rlamD', 'occTrans'], {'fill_value': '(0.0)', 'bounds_error': '(False)'}), '(rlamD, occTrans, fill_value=0.0, bounds_error=False)\n', (5725, 5778), False, 'from scipy.interpolate import interp1d, CubicSpline\n'), ((5839, 5903), 'scipy.interpolate.interp1d', 'interp1d', (['rlamD', 'coreThruput'], {'fill_value': '(0.0)', 'bounds_error': '(False)'}), '(rlamD, coreThruput, fill_value=0.0, bounds_error=False)\n', (5847, 5903), False, 'from scipy.interpolate import interp1d, CubicSpline\n'), ((5943, 6018), 'scipy.interpolate.interp1d', 'interp1d', (['rlamD', '(coreThruput / occTrans)'], {'fill_value': '(0.0)', 'bounds_error': '(False)'}), '(rlamD, coreThruput / occTrans, fill_value=0.0, bounds_error=False)\n', (5951, 6018), False, 'from scipy.interpolate import interp1d, CubicSpline\n'), ((6034, 6095), 'scipy.interpolate.interp1d', 'interp1d', (['rlamD', 'Contrast'], {'fill_value': '(0.0)', 'bounds_error': '(False)'}), '(rlamD, Contrast, fill_value=0.0, bounds_error=False)\n', (6042, 6095), False, 'from scipy.interpolate import interp1d, CubicSpline\n'), ((6108, 6168), 'scipy.interpolate.interp1d', 'interp1d', (['rlamD', 'PSFpeak'], {'fill_value': '(0.0)', 
'bounds_error': '(False)'}), '(rlamD, PSFpeak, fill_value=0.0, bounds_error=False)\n', (6116, 6168), False, 'from scipy.interpolate import interp1d, CubicSpline\n'), ((3859, 3881), 'numpy.array', 'np.array', (['[wavelength]'], {}), '([wavelength])\n', (3867, 3881), True, 'import numpy as np\n'), ((542, 555), 'numpy.sin', 'np.sin', (['alpha'], {}), '(alpha)\n', (548, 555), True, 'import numpy as np\n'), ((767, 789), 'numpy.where', 'np.where', (['(wlcopy < 0.7)'], {}), '(wlcopy < 0.7)\n', (775, 789), True, 'import numpy as np\n'), ((1204, 1226), 'numpy.where', 'np.where', (['(wlcopy < 0.7)'], {}), '(wlcopy < 0.7)\n', (1212, 1226), True, 'import numpy as np\n'), ((6318, 6340), 'numpy.mean', 'np.mean', (['area_sqarcsec'], {}), '(area_sqarcsec)\n', (6325, 6340), True, 'import numpy as np\n'), ((395, 408), 'numpy.sin', 'np.sin', (['alpha'], {}), '(alpha)\n', (401, 408), True, 'import numpy as np\n'), ((574, 587), 'numpy.cos', 'np.cos', (['alpha'], {}), '(alpha)\n', (580, 587), True, 'import numpy as np\n'), ((714, 732), 'numpy.array', 'np.array', (['[wlcopy]'], {}), '([wlcopy])\n', (722, 732), True, 'import numpy as np\n'), ((1143, 1161), 'numpy.array', 'np.array', (['[wlcopy]'], {}), '([wlcopy])\n', (1151, 1161), True, 'import numpy as np\n')] |
import numpy as np
from itertools import combinations
import dask.array as dsa
from ..core import (
histogram,
_ensure_correctly_formatted_bins,
_ensure_correctly_formatted_range,
)
from .fixtures import empty_dask_array
import pytest
# Bin specifications reused across the parametrized tests below.
bins_int = 10  # integer bin count
bins_str = "auto"  # numpy-style bin-width estimator name
bins_arr = np.linspace(-4, 4, 10)  # fully specified bin edges
range_ = (0, 1)  # sample range used by the *_range formatting tests
@pytest.mark.parametrize("density", [False, True])
@pytest.mark.parametrize("block_size", [None, 1, 2])
@pytest.mark.parametrize("axis", [1, None])
@pytest.mark.parametrize("bins", [10, np.linspace(-4, 4, 10), "auto"])
@pytest.mark.parametrize("range_", [None, (-4, 4)])
def test_histogram_results_1d(block_size, density, axis, bins, range_):
    """1D results (per-row via axis=1, or flattened) must match numpy.histogram."""
    nrows, ncols = 5, 20
    # Setting the random seed here prevents np.testing.assert_allclose
    # from failing below. We should investigate this further.
    np.random.seed(2)
    data = np.random.randn(nrows, ncols)
    h, bin_edges = histogram(
        data, bins=bins, range=range_, axis=axis, block_size=block_size, density=density
    )
    # axis=1 keeps the row dimension; axis=None collapses everything.
    expected_shape = (
        (nrows, len(bin_edges[0]) - 1) if axis == 1 else (len(bin_edges[0]) - 1,)
    )
    assert h.shape == expected_shape
    # make sure we get the same thing as numpy.histogram
    if axis:
        bins_np = np.histogram_bin_edges(
            data, bins=bins, range=range_
        )  # Use same bins for all slices below
        expected = np.stack(
            [
                np.histogram(data[i], bins=bins_np, range=range_, density=density)[0]
                for i in range(nrows)
            ]
        )
    else:
        expected = np.histogram(data, bins=bins, range=range_, density=density)[0]
    # With density=True and an axis, the per-row numpy densities are divided
    # by nrows to match h — presumably histogram normalizes jointly over rows;
    # TODO confirm against the core implementation.
    norm = nrows if (density and axis) else 1
    np.testing.assert_allclose(h, expected / norm)
    if density:
        # A density histogram must integrate to one.
        widths = np.diff(bin_edges)
        integral = np.sum(h * widths)
        np.testing.assert_allclose(integral, 1.0)
@pytest.mark.parametrize("block_size", [None, 1, 2])
def test_histogram_results_1d_weighted(block_size):
    """Weighting every sample by 2 must exactly double each bin count."""
    n_rows, n_cols = 5, 20
    samples = np.random.randn(n_rows, n_cols)
    edges = np.linspace(-4, 4, 10)
    unweighted, _ = histogram(samples, bins=edges, axis=1, block_size=block_size)
    twos = 2 * np.ones_like(samples)
    weighted, _ = histogram(
        samples, bins=edges, axis=1, weights=twos, block_size=block_size
    )
    np.testing.assert_array_equal(2 * unweighted, weighted)
# @pytest.mark.skip(reason="Weight broadcasting on numpy arrays is not yet implemented")
@pytest.mark.parametrize("block_size", [None, 1, 2, "auto"])
def test_histogram_results_1d_weighted_broadcasting(block_size):
    """A (1, ncols) weight array of 2s must broadcast and double every bin."""
    n_rows, n_cols = 5, 20
    samples = np.random.randn(n_rows, n_cols)
    edges = np.linspace(-4, 4, 10)
    unweighted, _ = histogram(samples, bins=edges, axis=1, block_size=block_size)
    row_of_twos = 2 * np.ones((1, n_cols))
    weighted, _ = histogram(
        samples, bins=edges, axis=1, weights=row_of_twos, block_size=block_size
    )
    np.testing.assert_array_equal(2 * unweighted, weighted)
@pytest.mark.parametrize("block_size", [None, 1, 2])
def test_histogram_right_edge(block_size):
    """The final bin must be closed on both sides, matching numpy.histogram."""
    n_rows, n_cols = 5, 20
    samples = np.ones((n_rows, n_cols))
    edges = np.array([0, 0.5, 1])  # every sample sits on the rightmost edge
    counts, _ = histogram(samples, bins=edges, axis=1, block_size=block_size)
    assert counts.shape == (n_rows, len(edges) - 1)
    # numpy places all of the data in the last bin; we must agree
    reference, _ = np.histogram(samples, bins=edges)
    np.testing.assert_array_equal(reference, counts.sum(axis=0))
    # repeat without an axis argument
    counts_flat, _ = histogram(samples, bins=edges, block_size=block_size)
    np.testing.assert_array_equal(reference, counts_flat)
def test_histogram_results_2d():
    """Joint histogram of two random fields agrees with numpy.histogram2d."""
    n_x, n_y = 9, 10
    shape = (5, 20)
    x = np.random.randn(*shape)
    y = np.random.randn(*shape)
    edges_x = np.linspace(-4, 4, n_x + 1)
    edges_y = np.linspace(-4, 4, n_y + 1)
    counts, _ = histogram(x, y, bins=[edges_x, edges_y])
    assert counts.shape == (n_x, n_y)
    reference, _, _ = np.histogram2d(x.ravel(), y.ravel(), bins=[edges_x, edges_y])
    np.testing.assert_array_equal(reference, counts)
def test_histogram_results_2d_density():
    """A 2D density histogram matches numpy.histogram2d and integrates to 1."""
    n_x, n_y = 9, 10
    shape = (5, 20)
    x = np.random.randn(*shape)
    y = np.random.randn(*shape)
    edges_x = np.linspace(-4, 4, n_x + 1)
    edges_y = np.linspace(-4, 4, n_y + 1)
    dens, _ = histogram(x, y, bins=[edges_x, edges_y], density=True)
    assert dens.shape == (n_x, n_y)
    reference, _, _ = np.histogram2d(
        x.ravel(), y.ravel(), bins=[edges_x, edges_y], density=True
    )
    np.testing.assert_allclose(reference, dens)
    # the density times the cell areas must sum to one
    cell_areas = np.outer(np.diff(edges_x), np.diff(edges_y))
    total = np.sum(reference * cell_areas)
    np.testing.assert_allclose(total, 1.0)
def test_histogram_results_3d_density():
    """A 3D density histogram matches numpy.histogramdd and integrates to 1."""
    n_x, n_y, n_z = 9, 10, 9
    shape = (5, 20)
    x = np.random.randn(*shape)
    y = np.random.randn(*shape)
    z = np.random.randn(*shape)
    edges_x = np.linspace(-4, 4, n_x + 1)
    edges_y = np.linspace(-4, 4, n_y + 1)
    edges_z = np.linspace(-4, 4, n_z + 1)
    dens, _ = histogram(
        x, y, z, bins=[edges_x, edges_y, edges_z], density=True
    )
    assert dens.shape == (n_x, n_y, n_z)
    reference, _ = np.histogramdd(
        (x.ravel(), y.ravel(), z.ravel()),
        bins=[edges_x, edges_y, edges_z],
        density=True,
    )
    np.testing.assert_allclose(reference, dens)
    # density * cell volume summed over every cell must equal one
    volumes = np.einsum(
        "i,j,k", np.diff(edges_x), np.diff(edges_y), np.diff(edges_z)
    )
    total = np.sum(reference * volumes)
    np.testing.assert_allclose(total, 1.0)
@pytest.mark.parametrize("block_size", [None, 5, "auto"])
@pytest.mark.parametrize("use_dask", [False, True])
def test_histogram_shape(use_dask, block_size):
    """These tests just verify that arrays with the right shape come out.
    They don't verify correctness."""
    shape = 10, 15, 12, 20
    if use_dask:
        # chunk only the leading dimension so every other axis is contiguous
        b = empty_dask_array(shape, chunks=(1,) + shape[1:])
    else:
        b = np.random.randn(*shape)
    bins = np.linspace(-4, 4, 27)
    # no axis
    c, _ = histogram(b, bins=bins, block_size=block_size)
    assert c.shape == (len(bins) - 1,)
    # same thing
    for axis in [(0, 1, 2, 3), (0, 1, 3, 2), (3, 2, 1, 0), (3, 2, 0, 1)]:
        # reducing over all four axes (in any order) collapses to a 1D result
        c, _ = histogram(b, bins=bins, axis=axis)
        assert c.shape == (len(bins) - 1,)
        if use_dask:
            assert isinstance(c, dsa.Array)
    # scalar axis (check positive and negative)
    for axis in list(range(4)) + list(range(-1, -5, -1)):
        c, _ = histogram(b, bins=bins, axis=axis, block_size=block_size)
        # the reduced axis disappears; the bin axis is appended last
        shape = list(b.shape)
        del shape[axis]
        expected_shape = tuple(shape) + (len(bins) - 1,)
        assert c.shape == expected_shape
        if use_dask:
            assert isinstance(c, dsa.Array)
    # two axes
    for i, j in combinations(range(4), 2):
        axis = (i, j)
        c, _ = histogram(b, bins=bins, axis=axis, block_size=block_size)
        # keep the axes not reduced over, then append the bin axis
        shape = list(b.shape)
        partial_shape = [shape[k] for k in range(b.ndim) if k not in axis]
        expected_shape = tuple(partial_shape) + (len(bins) - 1,)
        assert c.shape == expected_shape
        if use_dask:
            assert isinstance(c, dsa.Array)
def test_histogram_dask():
    """histogram must reject dask input unless bins are fully specified arrays."""
    shape = 10, 15, 12, 20
    darr = empty_dask_array(shape, chunks=(1,) + shape[1:])
    # fully specified numpy bin edges are the only accepted form
    histogram(darr, bins=bins_arr)
    with pytest.raises(TypeError):  # anything else must fail
        histogram(darr, bins=bins_int)
        histogram(darr, bins=bins_str)
        histogram(darr, darr, bins=[bins_arr, bins_int])
@pytest.mark.parametrize(
    "in_out",
    [
        (bins_int, 1, [bins_int]),  # ( bins_in, n_args, bins_out )
        (bins_str, 1, [bins_str]),
        (bins_arr, 1, [bins_arr]),
        ([bins_int], 1, [bins_int]),
        (bins_int, 2, 2 * [bins_int]),
        (bins_str, 2, 2 * [bins_str]),
        (bins_arr, 2, 2 * [bins_arr]),
        ([bins_int, bins_str, bins_arr], 3, [bins_int, bins_str, bins_arr]),
        ([bins_arr], 2, None),
        (None, 1, None),
        ([bins_arr, bins_arr], 1, None),
    ],
)
def test_ensure_correctly_formatted_bins(in_out):
    """Exercise the helper _ensure_correctly_formatted_bins.

    A ``None`` expectation means the input is malformed and must raise.
    """
    bins_in, n, bins_expected = in_out
    if bins_expected is None:
        with pytest.raises((ValueError, TypeError)):
            _ensure_correctly_formatted_bins(bins_in, n)
    else:
        assert _ensure_correctly_formatted_bins(bins_in, n) == bins_expected
@pytest.mark.parametrize(
    "in_out",
    [
        (range_, 1, [range_]),  # ( range_in, n_args, range_out )
        (range_, 2, [range_, range_]),
        ([range_, range_], 2, [range_, range_]),
        ([(range_[0],)], 1, None),
        ([range_], 2, None),
        ([range_, range_], 1, None),
    ],
)
def test_ensure_correctly_formatted_range(in_out):
    """Exercise the helper _ensure_correctly_formatted_range.

    A ``None`` expectation means the input is malformed and must raise.
    """
    range_in, n, range_expected = in_out
    if range_expected is None:
        with pytest.raises(ValueError):
            _ensure_correctly_formatted_range(range_in, n)
    else:
        assert _ensure_correctly_formatted_range(range_in, n) == range_expected
| [
"numpy.ones_like",
"numpy.histogram",
"numpy.ones",
"numpy.testing.assert_allclose",
"numpy.diff",
"pytest.mark.parametrize",
"numpy.linspace",
"numpy.array",
"numpy.outer",
"numpy.random.seed",
"numpy.sum",
"numpy.einsum",
"numpy.histogram_bin_edges",
"pytest.raises",
"numpy.random.rand... | [((295, 317), 'numpy.linspace', 'np.linspace', (['(-4)', '(4)', '(10)'], {}), '(-4, 4, 10)\n', (306, 317), True, 'import numpy as np\n'), ((337, 386), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""density"""', '[False, True]'], {}), "('density', [False, True])\n", (360, 386), False, 'import pytest\n'), ((388, 439), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""block_size"""', '[None, 1, 2]'], {}), "('block_size', [None, 1, 2])\n", (411, 439), False, 'import pytest\n'), ((441, 483), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""axis"""', '[1, None]'], {}), "('axis', [1, None])\n", (464, 483), False, 'import pytest\n'), ((556, 606), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""range_"""', '[None, (-4, 4)]'], {}), "('range_', [None, (-4, 4)])\n", (579, 606), False, 'import pytest\n'), ((1902, 1953), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""block_size"""', '[None, 1, 2]'], {}), "('block_size', [None, 1, 2])\n", (1925, 1953), False, 'import pytest\n'), ((2438, 2497), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""block_size"""', "[None, 1, 2, 'auto']"], {}), "('block_size', [None, 1, 2, 'auto'])\n", (2461, 2497), False, 'import pytest\n'), ((2907, 2958), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""block_size"""', '[None, 1, 2]'], {}), "('block_size', [None, 1, 2])\n", (2930, 2958), False, 'import pytest\n'), ((5925, 5981), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""block_size"""', "[None, 5, 'auto']"], {}), "('block_size', [None, 5, 'auto'])\n", (5948, 5981), False, 'import pytest\n'), ((5983, 6033), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""use_dask"""', '[False, True]'], {}), "('use_dask', [False, True])\n", (6006, 6033), False, 'import pytest\n'), ((8011, 8413), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""in_out"""', '[(bins_int, 1, [bins_int]), (bins_str, 1, [bins_str]), (bins_arr, 1, [\n 
bins_arr]), ([bins_int], 1, [bins_int]), (bins_int, 2, 2 * [bins_int]),\n (bins_str, 2, 2 * [bins_str]), (bins_arr, 2, 2 * [bins_arr]), ([\n bins_int, bins_str, bins_arr], 3, [bins_int, bins_str, bins_arr]), ([\n bins_arr], 2, None), (None, 1, None), ([bins_arr, bins_arr], 1, None)]'], {}), "('in_out', [(bins_int, 1, [bins_int]), (bins_str, 1,\n [bins_str]), (bins_arr, 1, [bins_arr]), ([bins_int], 1, [bins_int]), (\n bins_int, 2, 2 * [bins_int]), (bins_str, 2, 2 * [bins_str]), (bins_arr,\n 2, 2 * [bins_arr]), ([bins_int, bins_str, bins_arr], 3, [bins_int,\n bins_str, bins_arr]), ([bins_arr], 2, None), (None, 1, None), ([\n bins_arr, bins_arr], 1, None)])\n", (8034, 8413), False, 'import pytest\n'), ((8943, 9160), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""in_out"""', '[(range_, 1, [range_]), (range_, 2, [range_, range_]), ([range_, range_], 2,\n [range_, range_]), ([(range_[0],)], 1, None), ([range_], 2, None), ([\n range_, range_], 1, None)]'], {}), "('in_out', [(range_, 1, [range_]), (range_, 2, [\n range_, range_]), ([range_, range_], 2, [range_, range_]), ([(range_[0]\n ,)], 1, None), ([range_], 2, None), ([range_, range_], 1, None)])\n", (8966, 9160), False, 'import pytest\n'), ((840, 857), 'numpy.random.seed', 'np.random.seed', (['(2)'], {}), '(2)\n', (854, 857), True, 'import numpy as np\n'), ((869, 898), 'numpy.random.randn', 'np.random.randn', (['nrows', 'ncols'], {}), '(nrows, ncols)\n', (884, 898), True, 'import numpy as np\n'), ((1711, 1757), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['h', '(expected / norm)'], {}), '(h, expected / norm)\n', (1737, 1757), True, 'import numpy as np\n'), ((2042, 2071), 'numpy.random.randn', 'np.random.randn', (['nrows', 'ncols'], {}), '(nrows, ncols)\n', (2057, 2071), True, 'import numpy as np\n'), ((2083, 2105), 'numpy.linspace', 'np.linspace', (['(-4)', '(4)', '(10)'], {}), '(-4, 4, 10)\n', (2094, 2105), True, 'import numpy as np\n'), ((2304, 2345), 
'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['(2 * h)', 'h_w'], {}), '(2 * h, h_w)\n', (2333, 2345), True, 'import numpy as np\n'), ((2599, 2628), 'numpy.random.randn', 'np.random.randn', (['nrows', 'ncols'], {}), '(nrows, ncols)\n', (2614, 2628), True, 'import numpy as np\n'), ((2640, 2662), 'numpy.linspace', 'np.linspace', (['(-4)', '(4)', '(10)'], {}), '(-4, 4, 10)\n', (2651, 2662), True, 'import numpy as np\n'), ((2862, 2903), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['(2 * h)', 'h_w'], {}), '(2 * h, h_w)\n', (2891, 2903), True, 'import numpy as np\n'), ((3144, 3167), 'numpy.ones', 'np.ones', (['(nrows, ncols)'], {}), '((nrows, ncols))\n', (3151, 3167), True, 'import numpy as np\n'), ((3179, 3200), 'numpy.array', 'np.array', (['[0, 0.5, 1]'], {}), '([0, 0.5, 1])\n', (3187, 3200), True, 'import numpy as np\n'), ((3439, 3468), 'numpy.histogram', 'np.histogram', (['data'], {'bins': 'bins'}), '(data, bins=bins)\n', (3451, 3468), True, 'import numpy as np\n'), ((3620, 3661), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['hist', 'h_na'], {}), '(hist, h_na)\n', (3649, 3661), True, 'import numpy as np\n'), ((3735, 3764), 'numpy.random.randn', 'np.random.randn', (['nrows', 'ncols'], {}), '(nrows, ncols)\n', (3750, 3764), True, 'import numpy as np\n'), ((3778, 3807), 'numpy.random.randn', 'np.random.randn', (['nrows', 'ncols'], {}), '(nrows, ncols)\n', (3793, 3807), True, 'import numpy as np\n'), ((3837, 3868), 'numpy.linspace', 'np.linspace', (['(-4)', '(4)', '(nbins_a + 1)'], {}), '(-4, 4, nbins_a + 1)\n', (3848, 3868), True, 'import numpy as np\n'), ((3899, 3930), 'numpy.linspace', 'np.linspace', (['(-4)', '(4)', '(nbins_b + 1)'], {}), '(-4, 4, nbins_b + 1)\n', (3910, 3930), True, 'import numpy as np\n'), ((4125, 4163), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['hist', 'h'], {}), '(hist, h)\n', (4154, 4163), True, 'import numpy as np\n'), ((4245, 4274), 
'numpy.random.randn', 'np.random.randn', (['nrows', 'ncols'], {}), '(nrows, ncols)\n', (4260, 4274), True, 'import numpy as np\n'), ((4288, 4317), 'numpy.random.randn', 'np.random.randn', (['nrows', 'ncols'], {}), '(nrows, ncols)\n', (4303, 4317), True, 'import numpy as np\n'), ((4347, 4378), 'numpy.linspace', 'np.linspace', (['(-4)', '(4)', '(nbins_a + 1)'], {}), '(-4, 4, nbins_a + 1)\n', (4358, 4378), True, 'import numpy as np\n'), ((4409, 4440), 'numpy.linspace', 'np.linspace', (['(-4)', '(4)', '(nbins_b + 1)'], {}), '(-4, 4, nbins_b + 1)\n', (4420, 4440), True, 'import numpy as np\n'), ((4677, 4712), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['hist', 'h'], {}), '(hist, h)\n', (4703, 4712), True, 'import numpy as np\n'), ((4755, 4770), 'numpy.diff', 'np.diff', (['bins_a'], {}), '(bins_a)\n', (4762, 4770), True, 'import numpy as np\n'), ((4786, 4801), 'numpy.diff', 'np.diff', (['bins_b'], {}), '(bins_b)\n', (4793, 4801), True, 'import numpy as np\n'), ((4814, 4842), 'numpy.outer', 'np.outer', (['widths_a', 'widths_b'], {}), '(widths_a, widths_b)\n', (4822, 4842), True, 'import numpy as np\n'), ((4858, 4878), 'numpy.sum', 'np.sum', (['(hist * areas)'], {}), '(hist * areas)\n', (4864, 4878), True, 'import numpy as np\n'), ((4883, 4924), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['integral', '(1.0)'], {}), '(integral, 1.0)\n', (4909, 4924), True, 'import numpy as np\n'), ((5006, 5035), 'numpy.random.randn', 'np.random.randn', (['nrows', 'ncols'], {}), '(nrows, ncols)\n', (5021, 5035), True, 'import numpy as np\n'), ((5049, 5078), 'numpy.random.randn', 'np.random.randn', (['nrows', 'ncols'], {}), '(nrows, ncols)\n', (5064, 5078), True, 'import numpy as np\n'), ((5092, 5121), 'numpy.random.randn', 'np.random.randn', (['nrows', 'ncols'], {}), '(nrows, ncols)\n', (5107, 5121), True, 'import numpy as np\n'), ((5151, 5182), 'numpy.linspace', 'np.linspace', (['(-4)', '(4)', '(nbins_a + 1)'], {}), '(-4, 4, nbins_a + 1)\n', (5162, 
5182), True, 'import numpy as np\n'), ((5213, 5244), 'numpy.linspace', 'np.linspace', (['(-4)', '(4)', '(nbins_b + 1)'], {}), '(-4, 4, nbins_b + 1)\n', (5224, 5244), True, 'import numpy as np\n'), ((5274, 5305), 'numpy.linspace', 'np.linspace', (['(-4)', '(4)', '(nbins_c + 1)'], {}), '(-4, 4, nbins_c + 1)\n', (5285, 5305), True, 'import numpy as np\n'), ((5623, 5658), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['hist', 'h'], {}), '(hist, h)\n', (5649, 5658), True, 'import numpy as np\n'), ((5701, 5716), 'numpy.diff', 'np.diff', (['bins_a'], {}), '(bins_a)\n', (5708, 5716), True, 'import numpy as np\n'), ((5732, 5747), 'numpy.diff', 'np.diff', (['bins_b'], {}), '(bins_b)\n', (5739, 5747), True, 'import numpy as np\n'), ((5763, 5778), 'numpy.diff', 'np.diff', (['bins_c'], {}), '(bins_c)\n', (5770, 5778), True, 'import numpy as np\n'), ((5791, 5839), 'numpy.einsum', 'np.einsum', (['"""i,j,k"""', 'widths_a', 'widths_b', 'widths_c'], {}), "('i,j,k', widths_a, widths_b, widths_c)\n", (5800, 5839), True, 'import numpy as np\n'), ((5855, 5875), 'numpy.sum', 'np.sum', (['(hist * areas)'], {}), '(hist * areas)\n', (5861, 5875), True, 'import numpy as np\n'), ((5880, 5921), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['integral', '(1.0)'], {}), '(integral, 1.0)\n', (5906, 5921), True, 'import numpy as np\n'), ((6357, 6379), 'numpy.linspace', 'np.linspace', (['(-4)', '(4)', '(27)'], {}), '(-4, 4, 27)\n', (6368, 6379), True, 'import numpy as np\n'), ((1263, 1316), 'numpy.histogram_bin_edges', 'np.histogram_bin_edges', (['data'], {'bins': 'bins', 'range': 'range_'}), '(data, bins=bins, range=range_)\n', (1285, 1316), True, 'import numpy as np\n'), ((1792, 1810), 'numpy.diff', 'np.diff', (['bin_edges'], {}), '(bin_edges)\n', (1799, 1810), True, 'import numpy as np\n'), ((1830, 1848), 'numpy.sum', 'np.sum', (['(h * widths)'], {}), '(h * widths)\n', (1836, 1848), True, 'import numpy as np\n'), ((1857, 1898), 'numpy.testing.assert_allclose', 
'np.testing.assert_allclose', (['integral', '(1.0)'], {}), '(integral, 1.0)\n', (1883, 1898), True, 'import numpy as np\n'), ((522, 544), 'numpy.linspace', 'np.linspace', (['(-4)', '(4)', '(10)'], {}), '(-4, 4, 10)\n', (533, 544), True, 'import numpy as np\n'), ((2193, 2211), 'numpy.ones_like', 'np.ones_like', (['data'], {}), '(data)\n', (2205, 2211), True, 'import numpy as np\n'), ((2750, 2769), 'numpy.ones', 'np.ones', (['(1, ncols)'], {}), '((1, ncols))\n', (2757, 2769), True, 'import numpy as np\n'), ((6322, 6345), 'numpy.random.randn', 'np.random.randn', (['*shape'], {}), '(*shape)\n', (6337, 6345), True, 'import numpy as np\n'), ((7834, 7858), 'pytest.raises', 'pytest.raises', (['TypeError'], {}), '(TypeError)\n', (7847, 7858), False, 'import pytest\n'), ((1597, 1657), 'numpy.histogram', 'np.histogram', (['data'], {'bins': 'bins', 'range': 'range_', 'density': 'density'}), '(data, bins=bins, range=range_, density=density)\n', (1609, 1657), True, 'import numpy as np\n'), ((8843, 8881), 'pytest.raises', 'pytest.raises', (['(ValueError, TypeError)'], {}), '((ValueError, TypeError))\n', (8856, 8881), False, 'import pytest\n'), ((9576, 9601), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (9589, 9601), False, 'import pytest\n'), ((1436, 1502), 'numpy.histogram', 'np.histogram', (['data[i]'], {'bins': 'bins_np', 'range': 'range_', 'density': 'density'}), '(data[i], bins=bins_np, range=range_, density=density)\n', (1448, 1502), True, 'import numpy as np\n')] |
# Define and solve equality constrained QP
from cvxpy.reductions.solvers.qp_solvers.qp_solver import QpSolver
import cvxpy.settings as cps
import cvxpy.interface as intf
import cvxpy.settings as s
import scipy.sparse as spa
from cvxpy.reductions import Solution
from cvxpy.constraints import Zero
import numpy as np
# from pypardiso import spsolve
# from pypardiso.pardiso_wrapper import PyPardisoError
from scipy.sparse.linalg import spsolve
from scipy.sparse.linalg import factorized
from scikits.umfpack import UmfpackWarning
import time
import warnings
import mlopt.settings as stg
KKT = "KKT"
class CatchSingularMatrixWarnings(object):
    """Context manager silencing warnings raised while factorizing
    (near-)singular KKT matrices.

    Suppresses ``UmfpackWarning`` and the numpy "divide by zero encountered
    in double_scalars" warning that UMFPACK triggers when estimating the
    condition number of a singular matrix.  Warning filters are restored on
    exit via the wrapped ``warnings.catch_warnings``.
    """

    def __init__(self):
        self.catcher = warnings.catch_warnings()

    def __enter__(self):
        self.catcher.__enter__()
        warnings.simplefilter("ignore", UmfpackWarning)
        warnings.filterwarnings(
            "ignore",
            message="divide by zero encountered in double_scalars"
        )
        return self  # allow `with CatchSingularMatrixWarnings() as w:` usage

    def __exit__(self, *exc_info):
        # Forward the exception details (the original discarded them) and
        # propagate the inner manager's return value so exception handling
        # semantics match `warnings.catch_warnings` exactly.
        return self.catcher.__exit__(*exc_info)
def create_kkt_matrix(data):
    """Assemble the KKT matrix ``[[P, A^T], [A, 0]]`` in CSC format.

    ``data`` is the CVXPY problem-data dict; only the reduced (equality)
    constraint matrix is used.
    """
    A_eq = data[cps.A + "_red"]
    # bmat fills the `None` block with an (m, m) zero block, matching the
    # explicit empty csc_matrix of the hand-stacked formulation.
    return spa.bmat(
        [[data[cps.P], A_eq.T], [A_eq, None]], format='csc'
    )
def create_kkt_rhs(data):
    """Stack the KKT right-hand side: negated linear cost over reduced bounds."""
    negative_gradient = -data[cps.Q]
    reduced_bounds = data[cps.B + "_red"]
    return np.concatenate((negative_gradient, reduced_bounds))
def create_kkt_system(data):
    """Return the ``(matrix, rhs)`` pair describing the KKT linear system."""
    return create_kkt_matrix(data), create_kkt_rhs(data)
def factorize_kkt_matrix(KKT):
    """Pre-factorize the KKT matrix, returning a solve callable.

    The returned function maps a right-hand side to the solution of
    ``KKT x = rhs``; singular-matrix warnings from UMFPACK are suppressed.
    """
    with CatchSingularMatrixWarnings():
        return factorized(KKT)
class KKTSolver(QpSolver):
    """KKT solver for equality constrained QPs.

    Solves the QP by forming and solving the (indefinite, symmetric) KKT
    linear system directly, instead of calling an iterative QP solver.
    """

    SUPPORTED_CONSTRAINTS = [Zero]  # Support only equality constraints

    def name(self):
        # Registered solver name ("KKT").
        return KKT

    def import_solver(self):
        # No external solver package to import; linear algebra comes from scipy.
        pass

    def invert(self, solution, inverse_data):
        """Map the raw linear-system results back to a CVXPY ``Solution``.

        Reconstructs the primal variables from ``solution['x']`` and the
        dual variables from ``solution['y']``: equality duals come first,
        followed by duals for the inequality constraints that were tight
        (all other inequality duals are zero).
        """
        attr = {s.SOLVE_TIME: solution['time']}
        status = solution['status']
        if status in s.SOLUTION_PRESENT:
            opt_val = solution['cost']
            primal_vars = {
                KKTSolver.VAR_ID:
                intf.DEFAULT_INTF.const_to_matrix(np.array(solution['x']))
            }
            # Build dual variables
            n_eq, n_ineq = inverse_data['n_eq'], inverse_data['n_ineq']
            # equalities
            y_eq = solution['y'][:n_eq]
            # only dual variables for inequalities (not integer variables)
            y_ineq = np.zeros(n_ineq)
            # scatter the tight-constraint duals into their original slots
            n_tight = np.sum(inverse_data['tight_constraints'])
            y_ineq[inverse_data['tight_constraints']] = \
                solution['y'][n_eq:n_eq + n_tight]
            y = np.concatenate([y_eq, y_ineq])
            dual_vars = {KKTSolver.DUAL_VAR_ID: y}
        else:
            # No solution available: report +/- infinity as appropriate.
            primal_vars = None
            dual_vars = None
            opt_val = np.inf
            if status == s.UNBOUNDED:
                opt_val = -np.inf
        return Solution(status, opt_val, primal_vars, dual_vars, attr)

    def solve_via_data(self, data, warm_start, verbose,
                       solver_opts,
                       solver_cache=None):
        """Solve the KKT linear system built from the CVXPY problem data.

        If ``solver_cache`` provides a prefactorized matrix (under the
        'factors' key, built elsewhere via ``factorize_kkt_matrix``), only
        the right-hand side is rebuilt; otherwise the full system is
        assembled and solved with a sparse direct solve.
        """
        n_var = data[cps.P].shape[0]
        n_con = len(data[cps.B + "_red"])  # Only equality constraints
        stg.logger.debug("Solving %d x %d linear system A x = b " %
                         (n_var + n_con, n_var + n_con))
        if solver_cache is None:
            stg.logger.debug("Not using KKT solver cache")
            KKT, rhs = create_kkt_system(data)
            t_start = time.time()
            with CatchSingularMatrixWarnings():
                x = spsolve(KKT, rhs, use_umfpack=True)
            t_end = time.time()
        else:
            stg.logger.debug("Using KKT solver cache")
            rhs = create_kkt_rhs(data)
            t_start = time.time()
            with CatchSingularMatrixWarnings():
                x = solver_cache['factors'](rhs)
            t_end = time.time()
        # Get results
        results = {}
        # First n_var entries are the primal solution, the rest the duals.
        results['x'] = x[:n_var]
        results['y'] = x[n_var:]
        if np.any(np.isnan(results['x'])):
            # A singular KKT matrix yields NaNs; report infeasibility.
            results['status'] = s.INFEASIBLE
        else:
            results['status'] = s.OPTIMAL
            # Objective value 0.5 x'Px + q'x at the computed solution.
            results['cost'] = \
                .5 * results['x'].T.dot(data['P'].dot(results['x'])) \
                + data['q'].dot(results['x'])
        results['time'] = t_end - t_start
        return results
# # Add solver to CVXPY solvers
# QP_SOLVERS.insert(0, KKT)
# SOLVER_MAP_QP[KKT] = KKTSolver()
# INSTALLED_SOLVERS.append(KKT)
| [
"cvxpy.reductions.Solution",
"scipy.sparse.linalg.spsolve",
"warnings.catch_warnings",
"numpy.sum",
"numpy.zeros",
"scipy.sparse.linalg.factorized",
"scipy.sparse.hstack",
"numpy.isnan",
"numpy.concatenate",
"numpy.array",
"warnings.simplefilter",
"scipy.sparse.csc_matrix",
"time.time",
"w... | [((1175, 1205), 'scipy.sparse.csc_matrix', 'spa.csc_matrix', (['(n_con, n_con)'], {}), '((n_con, n_con))\n', (1189, 1205), True, 'import scipy.sparse as spa\n'), ((1451, 1503), 'numpy.concatenate', 'np.concatenate', (["(-data[cps.Q], data[cps.B + '_red'])"], {}), "((-data[cps.Q], data[cps.B + '_red']))\n", (1465, 1503), True, 'import numpy as np\n'), ((695, 720), 'warnings.catch_warnings', 'warnings.catch_warnings', ([], {}), '()\n', (718, 720), False, 'import warnings\n'), ((788, 835), 'warnings.simplefilter', 'warnings.simplefilter', (['"""ignore"""', 'UmfpackWarning'], {}), "('ignore', UmfpackWarning)\n", (809, 835), False, 'import warnings\n'), ((845, 939), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {'message': '"""divide by zero encountered in double_scalars"""'}), "('ignore', message=\n 'divide by zero encountered in double_scalars')\n", (868, 939), False, 'import warnings\n'), ((1757, 1772), 'scipy.sparse.linalg.factorized', 'factorized', (['KKT'], {}), '(KKT)\n', (1767, 1772), False, 'from scipy.sparse.linalg import factorized\n'), ((3121, 3176), 'cvxpy.reductions.Solution', 'Solution', (['status', 'opt_val', 'primal_vars', 'dual_vars', 'attr'], {}), '(status, opt_val, primal_vars, dual_vars, attr)\n', (3129, 3176), False, 'from cvxpy.reductions import Solution\n'), ((3431, 3526), 'mlopt.settings.logger.debug', 'stg.logger.debug', (["('Solving %d x %d linear system A x = b ' % (n_var + n_con, n_var + n_con))"], {}), "('Solving %d x %d linear system A x = b ' % (n_var + n_con,\n n_var + n_con))\n", (3447, 3526), True, 'import mlopt.settings as stg\n'), ((1260, 1294), 'scipy.sparse.hstack', 'spa.hstack', (['[data[cps.P], A_con.T]'], {}), '([data[cps.P], A_con.T])\n', (1270, 1294), True, 'import scipy.sparse as spa\n'), ((1318, 1344), 'scipy.sparse.hstack', 'spa.hstack', (['[A_con, O_con]'], {}), '([A_con, O_con])\n', (1328, 1344), True, 'import scipy.sparse as spa\n'), ((2641, 2657), 'numpy.zeros', 'np.zeros', 
(['n_ineq'], {}), '(n_ineq)\n', (2649, 2657), True, 'import numpy as np\n'), ((2681, 2722), 'numpy.sum', 'np.sum', (["inverse_data['tight_constraints']"], {}), "(inverse_data['tight_constraints'])\n", (2687, 2722), True, 'import numpy as np\n'), ((2848, 2878), 'numpy.concatenate', 'np.concatenate', (['[y_eq, y_ineq]'], {}), '([y_eq, y_ineq])\n', (2862, 2878), True, 'import numpy as np\n'), ((3594, 3640), 'mlopt.settings.logger.debug', 'stg.logger.debug', (['"""Not using KKT solver cache"""'], {}), "('Not using KKT solver cache')\n", (3610, 3640), True, 'import mlopt.settings as stg\n'), ((3712, 3723), 'time.time', 'time.time', ([], {}), '()\n', (3721, 3723), False, 'import time\n'), ((3848, 3859), 'time.time', 'time.time', ([], {}), '()\n', (3857, 3859), False, 'import time\n'), ((3887, 3929), 'mlopt.settings.logger.debug', 'stg.logger.debug', (['"""Using KKT solver cache"""'], {}), "('Using KKT solver cache')\n", (3903, 3929), True, 'import mlopt.settings as stg\n'), ((3993, 4004), 'time.time', 'time.time', ([], {}), '()\n', (4002, 4004), False, 'import time\n'), ((4123, 4134), 'time.time', 'time.time', ([], {}), '()\n', (4132, 4134), False, 'import time\n'), ((4264, 4286), 'numpy.isnan', 'np.isnan', (["results['x']"], {}), "(results['x'])\n", (4272, 4286), True, 'import numpy as np\n'), ((3792, 3827), 'scipy.sparse.linalg.spsolve', 'spsolve', (['KKT', 'rhs'], {'use_umfpack': '(True)'}), '(KKT, rhs, use_umfpack=True)\n', (3799, 3827), False, 'from scipy.sparse.linalg import spsolve\n'), ((2333, 2356), 'numpy.array', 'np.array', (["solution['x']"], {}), "(solution['x'])\n", (2341, 2356), True, 'import numpy as np\n')] |
import numpy as np
import azure.functions as func
import json
from .app import upload
from azure.storage.blob import BlobServiceClient, BlobClient, ContentSettings
import uuid
def main(req: func.HttpRequest) -> func.HttpResponse:
    """Azure Function entry point: store an uploaded image and run a model on it.

    Expects a multipart form upload under the key ``file`` and a ``model``
    query parameter selecting which model to run.  Returns a JSON array with
    the prediction, its confidence, and URLs of the plain and superimposed
    images.
    """
    headers = {
        "Content-type": "application/json",
        "Access-Control-Allow-Origin": "*"
    }
    # Fetch the file passed in through form data.
    fetched_file = req.files['file']
    # Unique filename so concurrent or repeated uploads never overwrite each other.
    filename = str(uuid.uuid4()) + ".jpg"
    # Rewind the upload's byte stream before reading it.
    filestream = fetched_file.stream
    filestream.seek(0)
    # SECURITY: the connection string embeds the storage account key; it should
    # be moved to an application setting / Key Vault rather than live in source.
    # Creating the client outside the try block guarantees `blob` is defined
    # even if the upload below fails (the original left it undefined on error).
    blob = BlobClient.from_connection_string(conn_str= "DefaultEndpointsProtocol=https;AccountName=breastmodelsdgp;AccountKey=Z4feRa5pxvpxsD7MUwatkHD/977VCcUiT9g5OmqFVzp1nqmYER0wHwpLQfHxIAEF3pyntsTuB2ZWKY3YRQ8ojw==", container_name="images", blob_name=filename)
    try:
        cnt_settings = ContentSettings(content_type="image/jpeg")
        blob.upload_blob(filestream.read(), blob_type="BlockBlob",
                         content_settings=cnt_settings)
    except Exception as exc:  # narrow from bare `except:`; upload is best-effort
        print(f"blob upload failed (possibly duplicate image): {exc}")
    which_model = req.params.get('model')
    blob_data = blob.download_blob()
    blob_data_as_bytes = blob_data.content_as_bytes()
    # np.fromstring is deprecated for binary input; frombuffer is the
    # zero-copy replacement for turning the blob bytes into a uint8 array.
    np_blob_array = np.frombuffer(blob_data_as_bytes, dtype='uint8')
    # Embed the generated blob name in the URL (the original f-string had no
    # placeholder and pointed at a literal path).
    regular_image_url = f"https://breastmodelsdgp.blob.core.windows.net/images/{filename}"
    prediction, prediction_percentage, superimposed_image_url = upload(np_blob_array, which_model)
    # NOTE: "predition" is a typo but part of the public API contract; preserved.
    return func.HttpResponse(
        json.dumps([{
            "predition": prediction,
            "prediction_percentage": prediction_percentage,
            "regular_image_url": regular_image_url,
            "superimposed_image_url": superimposed_image_url,
        }]),
        status_code=200,
        headers=headers,
    )
| [
"json.dumps",
"uuid.uuid4",
"azure.storage.blob.ContentSettings",
"azure.storage.blob.BlobClient.from_connection_string",
"numpy.fromstring"
] | [((1496, 1544), 'numpy.fromstring', 'np.fromstring', (['blob_data_as_bytes'], {'dtype': '"""uint8"""'}), "(blob_data_as_bytes, dtype='uint8')\n", (1509, 1544), True, 'import numpy as np\n'), ((804, 1063), 'azure.storage.blob.BlobClient.from_connection_string', 'BlobClient.from_connection_string', ([], {'conn_str': '"""DefaultEndpointsProtocol=https;AccountName=breastmodelsdgp;AccountKey=Z4feRa5pxvpxsD7MUwatkHD/977VCcUiT9g5OmqFVzp1nqmYER0wHwpLQfHxIAEF3pyntsTuB2ZWKY3YRQ8ojw=="""', 'container_name': '"""images"""', 'blob_name': 'filename'}), "(conn_str=\n 'DefaultEndpointsProtocol=https;AccountName=breastmodelsdgp;AccountKey=Z4feRa5pxvpxsD7MUwatkHD/977VCcUiT9g5OmqFVzp1nqmYER0wHwpLQfHxIAEF3pyntsTuB2ZWKY3YRQ8ojw=='\n , container_name='images', blob_name=filename)\n", (837, 1063), False, 'from azure.storage.blob import BlobServiceClient, BlobClient, ContentSettings\n'), ((1078, 1120), 'azure.storage.blob.ContentSettings', 'ContentSettings', ([], {'content_type': '"""image/jpeg"""'}), "(content_type='image/jpeg')\n", (1093, 1120), False, 'from azure.storage.blob import BlobServiceClient, BlobClient, ContentSettings\n'), ((1765, 1950), 'json.dumps', 'json.dumps', (["[{'predition': prediction, 'prediction_percentage': prediction_percentage,\n 'regular_image_url': regular_image_url, 'superimposed_image_url':\n superimposed_image_url}]"], {}), "([{'predition': prediction, 'prediction_percentage':\n prediction_percentage, 'regular_image_url': regular_image_url,\n 'superimposed_image_url': superimposed_image_url}])\n", (1775, 1950), False, 'import json\n'), ((522, 534), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (532, 534), False, 'import uuid\n')] |
import os
from math import pi
import numpy as np
import scipy as sp
from scipy import stats as spst
import matplotlib.pyplot as plt
import cifar10_data
import classifier_cnn
def ensemble_part_norm():
    """Train/evaluate an ensemble of CNN dog/cat classifiers that predict a
    mean and variance, then plot accuracy, ROC curves and uncertainty plots
    into the 'result_ensemble' directory."""
    # ####################################################
    # make data
    # ####################################################
    # dog and cat images in cifar10
    # dog label = 1, cat label = 0
    dog_cat = cifar10_data.Cifar10_Dog_Cat()
    dog_cat.make_binary_data()
    # ####################################################
    # make cnn model classifing dog and cat
    # build the CNN that classifies dogs vs. cats
    # ####################################################
    ENSEMBLE_NUM = 10
    SAVED_MODEL_NAMES = [os.path.join(os.getcwd(),'saved_models_ensemble','trained_model' + str(i) + '.h5') for i in range(ENSEMBLE_NUM)]
    ## train model
    ## train acurracy 0.9227, loss 0.2634
    ## test acurracy 0.8630, loss 0.3913
    cnns = []
    do_training = True  # flip to False to load previously saved models instead
    if do_training:
        for i in range(ENSEMBLE_NUM):
            train_sample_num = len(dog_cat.x_train)
            # bagging: each ensemble member trains on a bootstrap resample
            if ENSEMBLE_NUM != 1:
                bagging_idx = np.random.choice(np.arange(train_sample_num), size=train_sample_num, replace=True)
            else:
                bagging_idx = np.arange(train_sample_num)
            cnn = classifier_cnn.BinaryClassifierCnnWithPartNormDist()
            cnn.built_model()
            cnn.train_model(dog_cat.x_train[bagging_idx], dog_cat.y_train[bagging_idx],
                            dog_cat.x_test, dog_cat.y_test,
                            epochs=100, batch_size=64, alpha=None)
            cnn.save_model(save_file_name=SAVED_MODEL_NAMES[i])
            cnns.append(cnn)
    ## load trained model
    if not do_training:
        for i in range(ENSEMBLE_NUM):
            cnn = classifier_cnn.BinaryClassifierCnnWithPartNormDist()
            cnn.load_model(SAVED_MODEL_NAMES[i])
            cnns.append(cnn)
    # ####################################################
    # predicted result
    # ####################################################
    def ensemble_expec_unc(_x):
        # Combine the per-member (mean, variance) outputs into an ensemble
        # expectation and uncertainty. The erf/pdf terms appear to implement
        # the moments of a normal distribution truncated to [0, 1] — TODO
        # confirm against the model's training objective.
        _expecs_pred = []
        _vars_pred = []
        for i in range(ENSEMBLE_NUM):
            _y_pred = cnns[i].model.predict(_x)
            # column 0 is the predicted mean, column 1 the predicted variance
            _expec = (_y_pred[:,0])[:,np.newaxis]
            _var = (_y_pred[:,1])[:,np.newaxis]
            _expecs_pred.append(_expec)
            _vars_pred.append(_var)
        _expecs_pred = np.array(_expecs_pred)
        _vars_pred = np.array(_vars_pred)
        # normalization constant of the distribution restricted to [0, 1]
        _I = 0.5 * (sp.special.erf((1.0 - _expecs_pred) / np.sqrt(2.0 * _vars_pred)) - sp.special.erf((0.0 - _expecs_pred) / np.sqrt(2.0 * _vars_pred)))
        _f1 = spst.norm.pdf(1.0, loc=_expecs_pred, scale=np.sqrt(_vars_pred))
        _f0 = spst.norm.pdf(0.0, loc=_expecs_pred, scale=np.sqrt(_vars_pred))
        _mean_x = np.average(_expecs_pred - _vars_pred / _I * (_f1 - _f0), axis=0)
        _mean_x2 = np.average(np.square(_expecs_pred) + _vars_pred - _vars_pred / _I * ((_expecs_pred + 1) * _f1 - _expecs_pred * _f0), axis=0)
        _ave_expec = np.average(_expecs_pred, axis=0)
        _ave_std = np.sqrt(_mean_x2 - np.square(_mean_x))
        return _ave_expec, _ave_std
    # train, test, y
    y_train_cnn, unc_train_cnn = ensemble_expec_unc(dog_cat.x_train)
    y_test_cnn, unc_test_cnn = ensemble_expec_unc(dog_cat.x_test)
    # another label: run the ensemble on the other CIFAR-10 classes to probe
    # its uncertainty on out-of-distribution inputs
    label_dict = {
        0 : 'airplane',
        1 : 'automobile',
        2 : 'bird',
        3 : 'cat',
        4 : 'deer',
        5 : 'dog',
        6 : 'frog',
        7 : 'horse',
        8 : 'ship',
        9 : 'truck',
    }
    ys_another_cnn = []
    uncs_another_cnn = []
    for key in label_dict.keys():
        _prob, _unc = ensemble_expec_unc(cifar10_data.Cifar10_1Label(label=key).x_train)
        ys_another_cnn.append(_prob.flatten())
        uncs_another_cnn.append(_unc.flatten())
    # save result dir
    SAVE_RESULT_DIR = os.path.join(os.getcwd(),'result_ensemble')
    if not os.path.isdir(SAVE_RESULT_DIR):
        os.makedirs(SAVE_RESULT_DIR)
    # ######################
    # accuracy
    # ######################
    # accuracy
    def calc_acc(_y, _pre_y):
        # fraction of samples where prediction and label fall on the same
        # side of the 0.5 decision threshold
        return 1 - np.average(np.logical_xor(_y > 0.5, _pre_y > 0.5))
    def print_calc_acc(_y_train, _pre_y_train, _y_test, _pre_y_test):
        print(' train acc, test acc : {0:.3f}, {1:.3f}'.format(
            calc_acc(_y_train, _pre_y_train), calc_acc(_y_test, _pre_y_test)))
    # normal cnn train acc, test acc : 0.923, 0.863
    print_calc_acc(dog_cat.y_train, y_train_cnn, dog_cat.y_test, y_test_cnn)
    # ##########################
    # ROC and AUC of test data
    # ##########################
    def roc_and_auc(_y, _pre_y, _std, save_file_name):
        # Plot two ROC curves: one sweeping a plain probability threshold,
        # one sweeping an uncertainty-scaled threshold 0.5 + a * std.
        _posi_num = np.sum(_y > 0.5)
        _nega_num = np.sum(_y < 0.5)
        # tpf and fpr using y threshold
        _threshold_y = np.linspace(0, 1, num=100)
        _tpr1, _fpr1 = [], []
        for _thre_y in _threshold_y:
            _tpr1.append(np.sum(np.logical_and(_y > 0.5, _pre_y > _thre_y)) / _posi_num)
            _fpr1.append(np.sum(np.logical_and(_y < 0.5, _pre_y > _thre_y)) / _nega_num)
        # tpf and fpr using y + std threshold
        # tan of a uniform angle grid sweeps the slope a over (-inf, inf)
        _threshold_a = np.tan(np.linspace(-pi*0.5+1e-6, pi*0.5-1e-6, num=100))
        _tpr2, _fpr2 = [], []
        for _thre_a in _threshold_a:
            _tpr2.append(np.sum(np.logical_and(_y > 0.5, _pre_y > 0.5 + _thre_a * _std)) / _posi_num)
            _fpr2.append(np.sum(np.logical_and(_y < 0.5, _pre_y > 0.5 + _thre_a * _std)) / _nega_num)
        # fig
        _fig = plt.figure()
        _ax = _fig.add_subplot(111)
        _ax.plot(_fpr1, _tpr1, label='threshold = y')
        _ax.plot(_fpr2, _tpr2, label='threshold = 0.5 + a * std')
        _ax.set_xlim(0, 1)
        _ax.set_ylim(0, 1)
        _ax.set_title('ROC curve')
        _ax.set_xlabel('FPR')
        _ax.set_ylabel('TPR')
        _ax.grid(which='major',color='black',linestyle='-')
        _ax.plot([0,1], [0,1], color = "black")
        _ax.legend()
        _fig.savefig(save_file_name)
        plt.clf()
        return
    roc_and_auc(dog_cat.y_test, y_test_cnn, unc_test_cnn, os.path.join(SAVE_RESULT_DIR, 'roc_curve.png'))
    # ###############################
    # histgram of std of predicted y
    # ###############################
    def plot_std_histgram(_data_list, _label_list, save_dir):
        # Normalized histogram of the uncertainty coefficients per data set.
        fig = plt.figure()
        ax = fig.add_subplot(111)
        ax.hist(_data_list, label=_label_list, density=True, stacked=False)
        ax.set_title('Normalized histgram of uncertainty coef')
        ax.set_xlabel('uncertainty coef')
        ax.set_ylabel('Normalized frequency')
        ax.legend()
        #plt.show()
        fig.savefig(save_dir)
        plt.clf()
    plot_std_histgram([unc_train_cnn, unc_test_cnn], ['train', 'test'], os.path.join(SAVE_RESULT_DIR, 'hist_unc.png'))
    plot_std_histgram(uncs_another_cnn, list(label_dict.values()), os.path.join(SAVE_RESULT_DIR, 'hist_unc_another.png'))
    # ###############################
    # uncertainty vs predicted y
    # ###############################
    # common axis limits across all scatter plots
    max_unc = 0.0
    max_unc = np.maximum(max_unc, np.max(unc_train_cnn))
    max_unc = np.maximum(max_unc, np.max(unc_test_cnn))
    for unc_another_cnn in uncs_another_cnn:
        max_unc = np.maximum(max_unc, np.max(unc_another_cnn))
    min_unc = 0.0
    min_unc = np.minimum(min_unc, np.min(unc_train_cnn))
    min_unc = np.minimum(min_unc, np.min(unc_test_cnn))
    for unc_another_cnn in uncs_another_cnn:
        min_unc = np.minimum(min_unc, np.min(unc_another_cnn))
    def plot_unc_vs_predicted_y(_uncs, _pre_ys, _labels, save_file_name):
        # Scatter plot of uncertainty coefficient vs. predicted probability.
        _fig = plt.figure()
        _ax = _fig.add_subplot(111)
        for _unc, _pre_y, _label in zip(_uncs, _pre_ys, _labels):
            _ax.scatter(_unc, _pre_y, s=10, alpha=0.1, label=_label)
        _ax.set_xlim(min_unc, max_unc)
        _ax.set_ylim(0, 1.0)
        _ax.set_title('uncertainty coef vs predicted probability')
        _ax.set_xlabel('uncertainty coef')
        _ax.set_ylabel('predicted probability')
        _ax.grid(which='major',color='black',linestyle='-')
        _ax.legend()
        #plt.show()
        _fig.savefig(save_file_name)
        plt.clf()
    plot_unc_vs_predicted_y([unc_train_cnn], [y_train_cnn], ['train'],
                            os.path.join(SAVE_RESULT_DIR, 'unc_vs_prob_train.png'))
    plot_unc_vs_predicted_y([unc_test_cnn], [y_test_cnn], ['test'],
                            os.path.join(SAVE_RESULT_DIR, 'unc_vs_prob_test.png'))
    for _y_ano, _unc_ano, _label in zip(ys_another_cnn, uncs_another_cnn, list(label_dict.values())):
        plot_unc_vs_predicted_y([_unc_ano], [_y_ano], [_label],
                                os.path.join(SAVE_RESULT_DIR, 'unc_vs_prob_' + _label + '.png'))
# NOTE(review): this runs on import as well; consider guarding the call with
# `if __name__ == '__main__':`.
ensemble_part_norm()
| [
"numpy.sqrt",
"numpy.array",
"cifar10_data.Cifar10_1Label",
"numpy.arange",
"numpy.max",
"numpy.linspace",
"os.path.isdir",
"numpy.min",
"numpy.average",
"numpy.logical_xor",
"numpy.square",
"cifar10_data.Cifar10_Dog_Cat",
"classifier_cnn.BinaryClassifierCnnWithPartNormDist",
"os.makedirs"... | [((441, 471), 'cifar10_data.Cifar10_Dog_Cat', 'cifar10_data.Cifar10_Dog_Cat', ([], {}), '()\n', (469, 471), False, 'import cifar10_data\n'), ((2541, 2563), 'numpy.array', 'np.array', (['_expecs_pred'], {}), '(_expecs_pred)\n', (2549, 2563), True, 'import numpy as np\n'), ((2586, 2606), 'numpy.array', 'np.array', (['_vars_pred'], {}), '(_vars_pred)\n', (2594, 2606), True, 'import numpy as np\n'), ((2944, 3008), 'numpy.average', 'np.average', (['(_expecs_pred - _vars_pred / _I * (_f1 - _f0))'], {'axis': '(0)'}), '(_expecs_pred - _vars_pred / _I * (_f1 - _f0), axis=0)\n', (2954, 3008), True, 'import numpy as np\n'), ((3178, 3210), 'numpy.average', 'np.average', (['_expecs_pred'], {'axis': '(0)'}), '(_expecs_pred, axis=0)\n', (3188, 3210), True, 'import numpy as np\n'), ((4075, 4086), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (4084, 4086), False, 'import os\n'), ((4118, 4148), 'os.path.isdir', 'os.path.isdir', (['SAVE_RESULT_DIR'], {}), '(SAVE_RESULT_DIR)\n', (4131, 4148), False, 'import os\n'), ((4159, 4187), 'os.makedirs', 'os.makedirs', (['SAVE_RESULT_DIR'], {}), '(SAVE_RESULT_DIR)\n', (4170, 4187), False, 'import os\n'), ((4917, 4933), 'numpy.sum', 'np.sum', (['(_y > 0.5)'], {}), '(_y > 0.5)\n', (4923, 4933), True, 'import numpy as np\n'), ((4955, 4971), 'numpy.sum', 'np.sum', (['(_y < 0.5)'], {}), '(_y < 0.5)\n', (4961, 4971), True, 'import numpy as np\n'), ((5039, 5065), 'numpy.linspace', 'np.linspace', (['(0)', '(1)'], {'num': '(100)'}), '(0, 1, num=100)\n', (5050, 5065), True, 'import numpy as np\n'), ((5752, 5764), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (5762, 5764), True, 'import matplotlib.pyplot as plt\n'), ((6257, 6266), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (6264, 6266), True, 'import matplotlib.pyplot as plt\n'), ((6346, 6392), 'os.path.join', 'os.path.join', (['SAVE_RESULT_DIR', '"""roc_curve.png"""'], {}), "(SAVE_RESULT_DIR, 'roc_curve.png')\n", (6358, 6392), False, 'import os\n'), ((6598, 
6610), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (6608, 6610), True, 'import matplotlib.pyplot as plt\n'), ((6960, 6969), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (6967, 6969), True, 'import matplotlib.pyplot as plt\n'), ((7045, 7090), 'os.path.join', 'os.path.join', (['SAVE_RESULT_DIR', '"""hist_unc.png"""'], {}), "(SAVE_RESULT_DIR, 'hist_unc.png')\n", (7057, 7090), False, 'import os\n'), ((7160, 7213), 'os.path.join', 'os.path.join', (['SAVE_RESULT_DIR', '"""hist_unc_another.png"""'], {}), "(SAVE_RESULT_DIR, 'hist_unc_another.png')\n", (7172, 7213), False, 'import os\n'), ((7385, 7406), 'numpy.max', 'np.max', (['unc_train_cnn'], {}), '(unc_train_cnn)\n', (7391, 7406), True, 'import numpy as np\n'), ((7443, 7463), 'numpy.max', 'np.max', (['unc_test_cnn'], {}), '(unc_test_cnn)\n', (7449, 7463), True, 'import numpy as np\n'), ((7631, 7652), 'numpy.min', 'np.min', (['unc_train_cnn'], {}), '(unc_train_cnn)\n', (7637, 7652), True, 'import numpy as np\n'), ((7689, 7709), 'numpy.min', 'np.min', (['unc_test_cnn'], {}), '(unc_test_cnn)\n', (7695, 7709), True, 'import numpy as np\n'), ((7914, 7926), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (7924, 7926), True, 'import matplotlib.pyplot as plt\n'), ((8485, 8494), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (8492, 8494), True, 'import matplotlib.pyplot as plt\n'), ((8599, 8653), 'os.path.join', 'os.path.join', (['SAVE_RESULT_DIR', '"""unc_vs_prob_train.png"""'], {}), "(SAVE_RESULT_DIR, 'unc_vs_prob_train.png')\n", (8611, 8653), False, 'import os\n'), ((8754, 8807), 'os.path.join', 'os.path.join', (['SAVE_RESULT_DIR', '"""unc_vs_prob_test.png"""'], {}), "(SAVE_RESULT_DIR, 'unc_vs_prob_test.png')\n", (8766, 8807), False, 'import os\n'), ((756, 767), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (765, 767), False, 'import os\n'), ((1368, 1420), 'classifier_cnn.BinaryClassifierCnnWithPartNormDist', 'classifier_cnn.BinaryClassifierCnnWithPartNormDist', ([], {}), '()\n', 
(1418, 1420), False, 'import classifier_cnn\n'), ((1879, 1931), 'classifier_cnn.BinaryClassifierCnnWithPartNormDist', 'classifier_cnn.BinaryClassifierCnnWithPartNormDist', ([], {}), '()\n', (1929, 1931), False, 'import classifier_cnn\n'), ((5395, 5452), 'numpy.linspace', 'np.linspace', (['(-pi * 0.5 + 1e-06)', '(pi * 0.5 - 1e-06)'], {'num': '(100)'}), '(-pi * 0.5 + 1e-06, pi * 0.5 - 1e-06, num=100)\n', (5406, 5452), True, 'import numpy as np\n'), ((7550, 7573), 'numpy.max', 'np.max', (['unc_another_cnn'], {}), '(unc_another_cnn)\n', (7556, 7573), True, 'import numpy as np\n'), ((7796, 7819), 'numpy.min', 'np.min', (['unc_another_cnn'], {}), '(unc_another_cnn)\n', (7802, 7819), True, 'import numpy as np\n'), ((9009, 9072), 'os.path.join', 'os.path.join', (['SAVE_RESULT_DIR', "('unc_vs_prob_' + _label + '.png')"], {}), "(SAVE_RESULT_DIR, 'unc_vs_prob_' + _label + '.png')\n", (9021, 9072), False, 'import os\n'), ((1319, 1346), 'numpy.arange', 'np.arange', (['train_sample_num'], {}), '(train_sample_num)\n', (1328, 1346), True, 'import numpy as np\n'), ((2823, 2842), 'numpy.sqrt', 'np.sqrt', (['_vars_pred'], {}), '(_vars_pred)\n', (2830, 2842), True, 'import numpy as np\n'), ((2902, 2921), 'numpy.sqrt', 'np.sqrt', (['_vars_pred'], {}), '(_vars_pred)\n', (2909, 2921), True, 'import numpy as np\n'), ((3250, 3268), 'numpy.square', 'np.square', (['_mean_x'], {}), '(_mean_x)\n', (3259, 3268), True, 'import numpy as np\n'), ((3869, 3907), 'cifar10_data.Cifar10_1Label', 'cifar10_data.Cifar10_1Label', ([], {'label': 'key'}), '(label=key)\n', (3896, 3907), False, 'import cifar10_data\n'), ((4344, 4382), 'numpy.logical_xor', 'np.logical_xor', (['(_y > 0.5)', '(_pre_y > 0.5)'], {}), '(_y > 0.5, _pre_y > 0.5)\n', (4358, 4382), True, 'import numpy as np\n'), ((1203, 1230), 'numpy.arange', 'np.arange', (['train_sample_num'], {}), '(train_sample_num)\n', (1212, 1230), True, 'import numpy as np\n'), ((3040, 3063), 'numpy.square', 'np.square', (['_expecs_pred'], {}), '(_expecs_pred)\n', 
(3049, 3063), True, 'import numpy as np\n'), ((2670, 2695), 'numpy.sqrt', 'np.sqrt', (['(2.0 * _vars_pred)'], {}), '(2.0 * _vars_pred)\n', (2677, 2695), True, 'import numpy as np\n'), ((2737, 2762), 'numpy.sqrt', 'np.sqrt', (['(2.0 * _vars_pred)'], {}), '(2.0 * _vars_pred)\n', (2744, 2762), True, 'import numpy as np\n'), ((5168, 5210), 'numpy.logical_and', 'np.logical_and', (['(_y > 0.5)', '(_pre_y > _thre_y)'], {}), '(_y > 0.5, _pre_y > _thre_y)\n', (5182, 5210), True, 'import numpy as np\n'), ((5258, 5300), 'numpy.logical_and', 'np.logical_and', (['(_y < 0.5)', '(_pre_y > _thre_y)'], {}), '(_y < 0.5, _pre_y > _thre_y)\n', (5272, 5300), True, 'import numpy as np\n'), ((5546, 5601), 'numpy.logical_and', 'np.logical_and', (['(_y > 0.5)', '(_pre_y > 0.5 + _thre_a * _std)'], {}), '(_y > 0.5, _pre_y > 0.5 + _thre_a * _std)\n', (5560, 5601), True, 'import numpy as np\n'), ((5649, 5704), 'numpy.logical_and', 'np.logical_and', (['(_y < 0.5)', '(_pre_y > 0.5 + _thre_a * _std)'], {}), '(_y < 0.5, _pre_y > 0.5 + _thre_a * _std)\n', (5663, 5704), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
"""pre_processamento.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1pIAmElMKuzHOV9YnGVRT7CCD-99bMydI
Importação das bibliotecas utilizadas
"""
from numpy import any as numpy_any
"""Criação da funções para o pré-processamento de dados"""
def bfill_ffill_table(rows):
    '''
    Fill missing float values with past or future data of the same patient
    using backward fill then forward fill, on the assumption that the
    patient's measurements did not vary much within the 2-hour admission
    window.

    Parameters
    ----------
    rows : pandas.DataFrame
        Rows that belong to the same patient.

    Returns
    -------
    pandas.DataFrame
        The rows with float64 columns filled by bfill/ffill.
    '''
    # Compute the float columns once instead of twice.
    float_cols = rows.select_dtypes('float64').columns
    # .bfill()/.ffill() replace the fillna(method=...) form, which is
    # deprecated in pandas 2.x; the result is identical.
    rows[float_cols] = rows[float_cols].bfill().ffill()
    return rows
def select_window(rows, window='0-2', target_variable='ICU'):
    '''
    Keep only the rows of the requested admission window for one patient.

    If the patient was admitted to the ICU at any point, the target variable
    of the kept window rows is set to 1 so that earlier windows reflect the
    eventual outcome.

    Parameters
    ----------
    rows : pandas.DataFrame
        Rows that belong to the same patient.
    window : str
        Value of the 'WINDOW' column to keep (default '0-2').
    target_variable : str
        Name of the dependent variable (default 'ICU').

    Returns
    -------
    pandas.DataFrame
        The rows of the selected window.
    '''
    in_window = rows["WINDOW"] == window
    went_to_icu = numpy_any(rows[target_variable])
    if went_to_icu:
        rows.loc[in_window, target_variable] = 1
    return rows.loc[in_window]
"numpy.any"
] | [((1539, 1571), 'numpy.any', 'numpy_any', (['rows[target_variable]'], {}), '(rows[target_variable])\n', (1548, 1571), True, 'from numpy import any as numpy_any\n')] |
import sys
import numpy as np
from seisflows.tools import unix
from seisflows.tools.array import loadnpy, savenpy
from seisflows.tools.array import grid2mesh, mesh2grid, stack
from seisflows.tools.tools import exists
from seisflows.config import ParameterError, custom_import
from seisflows.tools.math import nabla, tv
PAR = sys.modules['seisflows_parameters']
PATH = sys.modules['seisflows_paths']
system = sys.modules['seisflows_system']
solver = sys.modules['seisflows_solver']
class total_variation(custom_import('postprocess', 'regularize')):
    """ Adds total-variation regularization options to the base class.

    So far, can only be used for 2D inversion, because the required spatial
    derivative operator "nabla" is not yet available for 3D grids.
    """

    def check(self):
        """ Checks parameters and paths required for TV regularization.
        """
        super(total_variation, self).check()

        # A regularization weight must be supplied.
        if not PAR.LAMBDA:
            raise ValueError

        # Default smoothing parameter for the TV operator.
        if not hasattr(PAR, 'EPSILON'):
            setattr(PAR, 'EPSILON', 0.)

    def nabla(self, mesh, m, g):
        """ Evaluates the total-variation term of the gradient g on the
            given mesh, normalized by the mean of the model m.
        """
        gradient_grid, grid = mesh2grid(g, mesh)
        tv_grid = tv(gradient_grid, epsilon=PAR.EPSILON)
        tv_mesh = grid2mesh(tv_grid, grid, mesh)
        return tv_mesh / np.mean(m)
| [
"numpy.mean",
"seisflows.config.custom_import",
"seisflows.tools.math.tv",
"seisflows.tools.array.mesh2grid",
"seisflows.tools.array.grid2mesh"
] | [((510, 552), 'seisflows.config.custom_import', 'custom_import', (['"""postprocess"""', '"""regularize"""'], {}), "('postprocess', 'regularize')\n", (523, 552), False, 'from seisflows.config import ParameterError, custom_import\n'), ((1075, 1093), 'seisflows.tools.array.mesh2grid', 'mesh2grid', (['g', 'mesh'], {}), '(g, mesh)\n', (1084, 1093), False, 'from seisflows.tools.array import grid2mesh, mesh2grid, stack\n'), ((1107, 1133), 'seisflows.tools.math.tv', 'tv', (['M'], {'epsilon': 'PAR.EPSILON'}), '(M, epsilon=PAR.EPSILON)\n', (1109, 1133), False, 'from seisflows.tools.math import nabla, tv\n'), ((1147, 1172), 'seisflows.tools.array.grid2mesh', 'grid2mesh', (['DM', 'grid', 'mesh'], {}), '(DM, grid, mesh)\n', (1156, 1172), False, 'from seisflows.tools.array import grid2mesh, mesh2grid, stack\n'), ((1191, 1201), 'numpy.mean', 'np.mean', (['m'], {}), '(m)\n', (1198, 1201), True, 'import numpy as np\n')] |
# Copyright All Rights Reserved.
"""Generates data for training/validation and save it to disk."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import itertools
import multiprocessing
import os
import yaml
from absl import app
from absl import flags
from absl import logging
from torch.utils import data
import numpy as np
import imageio
import torch
import torchvision
from tqdm import tqdm
from options import DataGenOptions
from datasets import ProcessedImageFolder
from datasets.data_prep.video_loader import Video
from datasets.data_prep.kitti_loader import KittiRaw
FLAGS = DataGenOptions().parse()
NUM_CHUNKS = 100
def _generate_data():
    r"""
    Extract sequences from dataset_dir and store them in save_dir.

    Builds the dataset loader selected by FLAGS.dataset_name, writes every
    example to disk (optionally in parallel), records train/val splits in
    train_files.txt / val_files.txt, and finally generates segmentation
    masks when FLAGS.mask is enabled.
    """
    os.makedirs(FLAGS.save_dir, exist_ok=True)
    if FLAGS.to_yaml:
        # Save the options to a YAML configuration in save_dir
        yaml_filename = os.path.join(FLAGS.save_dir, 'config.yaml')
        with open(yaml_filename, 'w') as f:
            yaml.dump(vars(FLAGS), f, default_flow_style=False)
    # The loader is made global so worker processes (_gen_example) can use it.
    global dataloader  # pylint: disable=global-variable-undefined
    if FLAGS.dataset_name == 'video':
        dataloader = Video(
            FLAGS.dataset_dir,
            img_height=FLAGS.img_height,
            img_width=FLAGS.img_width,
            seq_length=FLAGS.seq_length,
            data_format=FLAGS.data_format,
            mask=FLAGS.mask,
            batch_size=FLAGS.batch_size,
            threshold=FLAGS.threshold,
            intrinsics=FLAGS.intrinsics,
            trim=FLAGS.trim,
            crop=FLAGS.crop,
            del_static_frames=FLAGS.del_static_frames,
            augment_strategy=FLAGS.augment_strategy,
            augment_shift_h=FLAGS.augment_shift_h,
            fps=FLAGS.fps,
            video_start=FLAGS.video_start,
            video_end=FLAGS.video_end,
            img_ext=FLAGS.save_img_ext
        )
    elif FLAGS.dataset_name == 'kitti_raw_eigen':
        dataloader = KittiRaw(
            FLAGS.dataset_dir,
            split='eigen',
            img_height=FLAGS.img_height,
            img_width=FLAGS.img_width,
            seq_length=FLAGS.seq_length,
            data_format=FLAGS.data_format,
            mask=FLAGS.mask,
            batch_size=FLAGS.batch_size,
            threshold=FLAGS.threshold
        )
    elif FLAGS.dataset_name == 'kitti_raw_stereo':
        dataloader = KittiRaw(
            FLAGS.dataset_dir,
            split='stereo',
            img_height=FLAGS.img_height,
            img_width=FLAGS.img_width,
            seq_length=FLAGS.seq_length,
            data_format=FLAGS.data_format,
            mask=FLAGS.mask,
            batch_size=FLAGS.batch_size,
            threshold=FLAGS.threshold
        )
    else:
        raise ValueError('Unknown dataset')
    all_frames = range(dataloader.num_train)
    # Split into training/validation sets. Fixed seed for repeatability.
    np.random.seed(8964)
    num_cores = multiprocessing.cpu_count()
    # number of processes while using multiple processes
    # number of workers for using either a single or multiple processes
    num_threads = num_cores if FLAGS.num_threads is None else FLAGS.num_threads
    if FLAGS.single_process:
        # Single-process mode handles frames one at a time.
        frame_chunks = list(all_frames)
    else:
        frame_chunks = np.array_split(all_frames, NUM_CHUNKS)
    # NOTE(review): the manager/pool are created even in single-process mode
    # (and only closed in the multi-process branch below).
    manager = multiprocessing.Manager()
    all_examples = manager.dict()
    pool = multiprocessing.Pool(num_threads)
    with open(os.path.join(FLAGS.save_dir, 'train_files.txt'), 'w') as train_f:
        with open(os.path.join(FLAGS.save_dir, 'val_files.txt'), 'w') as val_f:
            logging.info('Generating data...')
            for index, frame_chunk in enumerate(frame_chunks):
                if FLAGS.single_process:
                    all_examples = _gen_example(frame_chunk, {})
                    if all_examples is None:
                        continue
                else:
                    # Workers fill the shared dict; clear it per chunk.
                    all_examples.clear()
                    pool.map(
                        _gen_example_star,
                        zip(frame_chunk, itertools.repeat(all_examples))
                    )
                logging.info(
                    'Chunk %d/%d: saving %s entries...',
                    index + 1, NUM_CHUNKS, len(all_examples)
                )
                # ~10% of examples go to the validation split.
                for _, example in all_examples.items():
                    if example:
                        s = example['folder_name']
                        frame = example['file_name']
                        if np.random.random() < 0.1:
                            val_f.write('%s %s\n' % (s, frame))
                        else:
                            train_f.write('%s %s\n' % (s, frame))
    if not FLAGS.single_process:
        pool.close()
        pool.join()
    if FLAGS.mask != 'none':
        # Collect filenames of all processed images
        img_dataset = ProcessedImageFolder(FLAGS.save_dir,
                                             FLAGS.save_img_ext)
        img_loader = torch.utils.data.DataLoader(
            img_dataset,
            batch_size=FLAGS.batch_size,
            num_workers=num_threads
        )
        # Generate masks by batch
        logging.info('Generating masks...')
        device = 'cuda' if torch.cuda.is_available() else 'cpu'
        for imgs, img_filepaths in tqdm(img_loader):
            mrcnn_results = dataloader.run_mrcnn_model(imgs.to(device))
            for i in range(len(imgs)):
                _gen_mask(mrcnn_results[i], img_filepaths[i], FLAGS.save_img_ext)
    if FLAGS.dataset_name=='video' and FLAGS.delete_temp:
        dataloader.delete_temp_images()
def _gen_example(i, all_examples=None):
    r"""
    Save one example to file. Also adds it to all_examples dict.
    """
    add_to_file, example = dataloader.get_example_with_index(i)
    if not example or dataloader.is_bad_sample(i):
        return
    # Stack the frames side by side, then drop the raw sequence to free memory.
    stacked_frames = _stack_image_seq(example['image_seq'])
    example.pop('image_seq', None)
    # Pull the pinhole camera parameters out of the intrinsics matrix.
    intrinsics = example['intrinsics']
    fx, fy = intrinsics[0, 0], intrinsics[1, 1]
    cx, cy = intrinsics[0, 2], intrinsics[1, 2]
    folder = example['folder_name']
    name = example['file_name']
    out_dir = os.path.join(FLAGS.save_dir, folder)
    os.makedirs(out_dir, exist_ok=True)
    # Write the stacked image next to its camera file.
    imageio.imsave(os.path.join(out_dir, f'{name}.{FLAGS.save_img_ext}'),
                   stacked_frames.astype(np.uint8))
    example['cam'] = '%f,0.,%f,0.,%f,%f,0.,0.,1.' % (fx, cx, fy, cy)
    with open(os.path.join(out_dir, '%s_cam.txt' % name), 'w') as cam_f:
        cam_f.write(example['cam'])
    if not add_to_file:
        return
    all_examples[folder + '_' + name] = example
    return all_examples
def _gen_example_star(params):
    """Helper for Pool.map: unpack the (frame_index, shared_dict) tuple."""
    frame_index, shared_examples = params
    return _gen_example(frame_index, shared_examples)
def _gen_mask(mrcnn_result, img_filepath, save_img_ext):
    """
    Generate a segmentation mask for one image and save it next to the
    image with a '-fseg' suffix.

    The original string was an f-string (``f\"\"\"``) with no placeholders,
    which Python treats as a plain expression rather than a docstring;
    fixed to a real docstring.

    Args:
        mrcnn_result: per-image output of the Mask R-CNN model.
        img_filepath: path of the source image the mask is derived from.
        save_img_ext: image file extension without the dot.
    """
    mask_img = dataloader.generate_mask(mrcnn_result)
    # Replace the trailing '.<ext>' of the image path with '-fseg.<ext>'.
    mask_filepath = img_filepath[:-(len(save_img_ext)+1)] + f'-fseg.{save_img_ext}'
    imageio.imsave(mask_filepath, mask_img.astype(np.uint8))
def _gen_mask_star(params):
    """Helper for Pool.map: unpack the argument tuple for _gen_mask."""
    mrcnn_result, img_filepath, save_img_ext = params
    return _gen_mask(mrcnn_result, img_filepath, save_img_ext)
def _stack_image_seq(seq):
for i, im in enumerate(seq):
if i == 0:
res = im
else:
res = np.hstack((res, im))
return res
# Run data generation only when executed as a script, not on import.
if __name__ == '__main__':
    _generate_data()
| [
"os.makedirs",
"datasets.ProcessedImageFolder",
"numpy.hstack",
"numpy.random.random",
"tqdm.tqdm",
"os.path.join",
"absl.logging.info",
"multiprocessing.cpu_count",
"datasets.data_prep.kitti_loader.KittiRaw",
"numpy.array_split",
"torch.cuda.is_available",
"numpy.random.seed",
"multiprocess... | [((803, 845), 'os.makedirs', 'os.makedirs', (['FLAGS.save_dir'], {'exist_ok': '(True)'}), '(FLAGS.save_dir, exist_ok=True)\n', (814, 845), False, 'import os\n'), ((2981, 3001), 'numpy.random.seed', 'np.random.seed', (['(8964)'], {}), '(8964)\n', (2995, 3001), True, 'import numpy as np\n'), ((3019, 3046), 'multiprocessing.cpu_count', 'multiprocessing.cpu_count', ([], {}), '()\n', (3044, 3046), False, 'import multiprocessing\n'), ((6262, 6314), 'os.path.join', 'os.path.join', (['FLAGS.save_dir', "example['folder_name']"], {}), "(FLAGS.save_dir, example['folder_name'])\n", (6274, 6314), False, 'import os\n'), ((6319, 6355), 'os.makedirs', 'os.makedirs', (['save_dir'], {'exist_ok': '(True)'}), '(save_dir, exist_ok=True)\n', (6330, 6355), False, 'import os\n'), ((6375, 6445), 'os.path.join', 'os.path.join', (['save_dir', 'f"""{example[\'file_name\']}.{FLAGS.save_img_ext}"""'], {}), '(save_dir, f"{example[\'file_name\']}.{FLAGS.save_img_ext}")\n', (6387, 6445), False, 'import os\n'), ((6532, 6591), 'os.path.join', 'os.path.join', (['save_dir', "('%s_cam.txt' % example['file_name'])"], {}), "(save_dir, '%s_cam.txt' % example['file_name'])\n", (6544, 6591), False, 'import os\n'), ((649, 665), 'options.DataGenOptions', 'DataGenOptions', ([], {}), '()\n', (663, 665), False, 'from options import DataGenOptions\n'), ((956, 999), 'os.path.join', 'os.path.join', (['FLAGS.save_dir', '"""config.yaml"""'], {}), "(FLAGS.save_dir, 'config.yaml')\n", (968, 999), False, 'import os\n'), ((1235, 1766), 'datasets.data_prep.video_loader.Video', 'Video', (['FLAGS.dataset_dir'], {'img_height': 'FLAGS.img_height', 'img_width': 'FLAGS.img_width', 'seq_length': 'FLAGS.seq_length', 'data_format': 'FLAGS.data_format', 'mask': 'FLAGS.mask', 'batch_size': 'FLAGS.batch_size', 'threshold': 'FLAGS.threshold', 'intrinsics': 'FLAGS.intrinsics', 'trim': 'FLAGS.trim', 'crop': 'FLAGS.crop', 'del_static_frames': 'FLAGS.del_static_frames', 'augment_strategy': 'FLAGS.augment_strategy', 
'augment_shift_h': 'FLAGS.augment_shift_h', 'fps': 'FLAGS.fps', 'video_start': 'FLAGS.video_start', 'video_end': 'FLAGS.video_end', 'img_ext': 'FLAGS.save_img_ext'}), '(FLAGS.dataset_dir, img_height=FLAGS.img_height, img_width=FLAGS.\n img_width, seq_length=FLAGS.seq_length, data_format=FLAGS.data_format,\n mask=FLAGS.mask, batch_size=FLAGS.batch_size, threshold=FLAGS.threshold,\n intrinsics=FLAGS.intrinsics, trim=FLAGS.trim, crop=FLAGS.crop,\n del_static_frames=FLAGS.del_static_frames, augment_strategy=FLAGS.\n augment_strategy, augment_shift_h=FLAGS.augment_shift_h, fps=FLAGS.fps,\n video_start=FLAGS.video_start, video_end=FLAGS.video_end, img_ext=FLAGS\n .save_img_ext)\n', (1240, 1766), False, 'from datasets.data_prep.video_loader import Video\n'), ((3359, 3397), 'numpy.array_split', 'np.array_split', (['all_frames', 'NUM_CHUNKS'], {}), '(all_frames, NUM_CHUNKS)\n', (3373, 3397), True, 'import numpy as np\n'), ((3416, 3441), 'multiprocessing.Manager', 'multiprocessing.Manager', ([], {}), '()\n', (3439, 3441), False, 'import multiprocessing\n'), ((3495, 3528), 'multiprocessing.Pool', 'multiprocessing.Pool', (['num_threads'], {}), '(num_threads)\n', (3515, 3528), False, 'import multiprocessing\n'), ((4983, 5039), 'datasets.ProcessedImageFolder', 'ProcessedImageFolder', (['FLAGS.save_dir', 'FLAGS.save_img_ext'], {}), '(FLAGS.save_dir, FLAGS.save_img_ext)\n', (5003, 5039), False, 'from datasets import ProcessedImageFolder\n'), ((5104, 5202), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['img_dataset'], {'batch_size': 'FLAGS.batch_size', 'num_workers': 'num_threads'}), '(img_dataset, batch_size=FLAGS.batch_size,\n num_workers=num_threads)\n', (5131, 5202), False, 'import torch\n'), ((5288, 5323), 'absl.logging.info', 'logging.info', (['"""Generating masks..."""'], {}), "('Generating masks...')\n", (5300, 5323), False, 'from absl import logging\n'), ((5423, 5439), 'tqdm.tqdm', 'tqdm', (['img_loader'], {}), '(img_loader)\n', (5427, 5439), False, 'from 
tqdm import tqdm\n'), ((2033, 2277), 'datasets.data_prep.kitti_loader.KittiRaw', 'KittiRaw', (['FLAGS.dataset_dir'], {'split': '"""eigen"""', 'img_height': 'FLAGS.img_height', 'img_width': 'FLAGS.img_width', 'seq_length': 'FLAGS.seq_length', 'data_format': 'FLAGS.data_format', 'mask': 'FLAGS.mask', 'batch_size': 'FLAGS.batch_size', 'threshold': 'FLAGS.threshold'}), "(FLAGS.dataset_dir, split='eigen', img_height=FLAGS.img_height,\n img_width=FLAGS.img_width, seq_length=FLAGS.seq_length, data_format=\n FLAGS.data_format, mask=FLAGS.mask, batch_size=FLAGS.batch_size,\n threshold=FLAGS.threshold)\n", (2041, 2277), False, 'from datasets.data_prep.kitti_loader import KittiRaw\n'), ((3544, 3591), 'os.path.join', 'os.path.join', (['FLAGS.save_dir', '"""train_files.txt"""'], {}), "(FLAGS.save_dir, 'train_files.txt')\n", (3556, 3591), False, 'import os\n'), ((3702, 3736), 'absl.logging.info', 'logging.info', (['"""Generating data..."""'], {}), "('Generating data...')\n", (3714, 3736), False, 'from absl import logging\n'), ((5351, 5376), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (5374, 5376), False, 'import torch\n'), ((7471, 7491), 'numpy.hstack', 'np.hstack', (['(res, im)'], {}), '((res, im))\n', (7480, 7491), True, 'import numpy as np\n'), ((2454, 2699), 'datasets.data_prep.kitti_loader.KittiRaw', 'KittiRaw', (['FLAGS.dataset_dir'], {'split': '"""stereo"""', 'img_height': 'FLAGS.img_height', 'img_width': 'FLAGS.img_width', 'seq_length': 'FLAGS.seq_length', 'data_format': 'FLAGS.data_format', 'mask': 'FLAGS.mask', 'batch_size': 'FLAGS.batch_size', 'threshold': 'FLAGS.threshold'}), "(FLAGS.dataset_dir, split='stereo', img_height=FLAGS.img_height,\n img_width=FLAGS.img_width, seq_length=FLAGS.seq_length, data_format=\n FLAGS.data_format, mask=FLAGS.mask, batch_size=FLAGS.batch_size,\n threshold=FLAGS.threshold)\n", (2462, 2699), False, 'from datasets.data_prep.kitti_loader import KittiRaw\n'), ((3628, 3673), 'os.path.join', 'os.path.join', 
(['FLAGS.save_dir', '"""val_files.txt"""'], {}), "(FLAGS.save_dir, 'val_files.txt')\n", (3640, 3673), False, 'import os\n'), ((4162, 4192), 'itertools.repeat', 'itertools.repeat', (['all_examples'], {}), '(all_examples)\n', (4178, 4192), False, 'import itertools\n'), ((4618, 4636), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (4634, 4636), True, 'import numpy as np\n')] |
import numpy
from fvm import utils
from fvm import BoundaryConditions
from fvm import Discretization
class CylindricalDiscretization(Discretization):
    '''Finite volume discretization of the incompressible Navier-Stokes
    equations on a (possibly non-uniform) Arakawa C-grid in a
    cylindrical coordinate system. For details on the implementation
    and the ordering of the variables, see the Discretization class.

    Throughout this class the base-class x/y/z directions play the roles
    of r/theta/z. The theta direction is always periodic.
    '''

    def __init__(self, parameters, nr, ntheta, nz, dim, dof, r=None, theta=None, z=None):
        '''Construct the cylindrical discretization.

        The radial coordinate vector is generated from the parameter
        dictionary when not supplied explicitly (stretched when grid
        stretching is requested, uniform otherwise). The theta coordinate
        is always uniform and defaults to the range [0, 2*pi]. The z
        coordinate is passed through to the base class unchanged.
        '''
        self.parameters = parameters

        # Radial coordinate: stretched when 'Grid Stretching' is enabled or a
        # stretching factor is present, uniform otherwise.
        if self.parameters.get('Grid Stretching', False) or 'Grid Stretching Factor' in self.parameters.keys():
            r = utils.create_stretched_coordinate_vector(
                self.parameters.get('R-min', 0.0), self.parameters.get('R-max', 1.0), nr,
                self.parameters.get('Grid Stretching Factor', 1.5)) if r is None else r
        else:
            r = utils.create_uniform_coordinate_vector(
                self.parameters.get('R-min', 0.0), self.parameters.get('R-max', 1.0), nr) if r is None else r

        # Azimuthal coordinate: uniform over [Theta-min, Theta-max].
        theta = utils.create_uniform_coordinate_vector(
            self.parameters.get('Theta-min', 0.0), self.parameters.get('Theta-max', 2 * numpy.pi), ntheta) \
            if theta is None else theta

        Discretization.__init__(self, parameters, nr, ntheta, nz, dim, dof, r, theta, z)

        # theta (the base-class y direction) is always periodic; z only when
        # explicitly requested.
        self.y_periodic = True

        if self.parameters.get('Z-periodic', False):
            self.z_periodic = True

    def _linear_part_2D(self):
        '''Compute the linear part of the equation in case the domain is 2D.
        In case Re = 0 we instead compute the linear part for the Stokes
        problem.'''
        Re = self.get_parameter('Reynolds Number')

        if Re == 0:
            Re = 1

        # Viscous terms including the cylindrical curvature corrections
        # (-u/r^2 - 2/r^2 * dv/dtheta in the u-equation and
        #  -v/r^2 + 2/r^2 * du/dtheta in the v-equation),
        # minus the pressure gradient, plus the divergence constraint.
        return 1 / Re * (self.iruscale(self.u_rr()) + self.iru2scale(self.u_tt() - self.value_u() - 2 * self.v_t_u())
                         + self.irvscale(self.v_rr()) + self.irv2scale(self.v_tt() - self.value_v() + 2 * self.u_t_v())) \
            - (self.p_r() + self.irvscale(self.p_t())) \
            + self.div()

    def _linear_part_3D(self):
        '''Compute the linear part of the equation in case the domain is 3D.
        In case Re = 0 we instead compute the linear part for the Stokes
        problem.'''
        Re = self.get_parameter('Reynolds Number')

        if Re == 0:
            Re = 1

        # NOTE(review): unlike the 2D case, the theta-theta diffusion terms
        # (u_tt, v_tt) and the 2/r^2 cross-derivative couplings are absent
        # here -- confirm this is intentional.
        return 1 / Re * (self.iruscale(self.u_rr()) + self.iru2scale(- self.value_u())
                         + self.u_zz()
                         + self.irvscale(self.v_rr()) + self.irv2scale(- self.value_v())
                         + self.v_zz()
                         + self.irvscale(self.w_rr()) + self.w_zz()) \
            - (self.p_r() + self.irvscale(self.p_t()) + self.p_z()) \
            + self.div()

    def nonlinear_part(self, state):
        '''Compute the nonlinear part of the equation. In case Re = 0 this
        does nothing.'''
        # Pad the state with ghost cells so the stencils below can index
        # neighbours without special-casing the boundaries.
        state_mtx = utils.create_padded_state_mtx(state, self.nx, self.ny, self.nz, self.dof,
                                                  self.x_periodic, self.y_periodic, self.z_periodic)

        atomJ = numpy.zeros([self.nx, self.ny, self.nz, self.dof, self.dof, 3, 3, 3])
        atomF = numpy.zeros([self.nx, self.ny, self.nz, self.dof, self.dof, 3, 3, 3])

        Re = self.get_parameter('Reynolds Number')
        if Re == 0:
            # Stokes problem: no convective terms.
            return (atomJ, atomF)

        # Convective terms; the last two add the cylindrical coupling terms.
        self.u_u_r(atomJ, atomF, state_mtx)
        self.u_v_r(atomJ, atomF, state_mtx)
        self.v_u_t(atomJ, atomF, state_mtx)
        self.v_v_t(atomJ, atomF, state_mtx)
        self.v_v(atomJ, atomF, state_mtx)
        self.u_v(atomJ, atomF, state_mtx)

        if self.dim > 2:
            self.u_w_r(atomJ, atomF, state_mtx)
            self.v_w_t(atomJ, atomF, state_mtx)
            self.w_u_z(atomJ, atomF, state_mtx)
            self.w_v_z(atomJ, atomF, state_mtx)
            self.w_w_z(atomJ, atomF, state_mtx)

        atomJ += atomF

        return (atomJ, atomF)

    def boundaries(self, atom):
        '''Compute boundary conditions for the currently defined problem type.'''
        # TODO: Make it possible to interface this from the outside.

        boundary_conditions = BoundaryConditions(self.nx, self.ny, self.nz, self.dim, self.dof, self.x, self.y, self.z)

        frc = numpy.zeros(self.nx * self.ny * self.nz * self.dof)

        if self.problem_type_equals('Taylor-Couette'):
            # Rotating outer (east) and inner (west) cylinder walls.
            vo = self.get_parameter('Outer Angular Velocity', 2)
            vi = self.get_parameter('Inner Angular Velocity', 1)
            frc += boundary_conditions.moving_lid_east(atom, vo * self.x[self.nx-1])
            frc += boundary_conditions.moving_lid_west(atom, vi * self.x[-1])

            if self.dim <= 2 or self.nz <= 1:
                return frc

            # Optional asymmetric forcing: a cosine profile in z applied to
            # the w component (dof 2) at the outer radius, first theta index.
            asym = self.get_parameter('Asymmetry Parameter')
            frc2 = numpy.zeros([self.nx, self.ny, self.nz, self.dof])
            frc2[self.nx-1, 0, :, 2] = asym * numpy.cos(self.z[0:self.nz] / self.z[self.nz-1] * numpy.pi)
            frc += utils.create_state_vec(frc2, self.nx, self.ny, self.nz, self.dof)

            if not self.z_periodic:
                boundary_conditions.no_slip_top(atom)
                boundary_conditions.no_slip_bottom(atom)
        else:
            raise Exception('Invalid problem type %s' % self.get_parameter('Problem Type'))

        return frc

    # Below are all of the discretizations of separate parts of
    # equations that we can solve using FVM. This takes into account
    # non-uniform grids. New discretizations such as derivatives have
    # to be implemented in a similar way.

    def iruscale(self, atom):
        '''Scale atom by 1/r at the location of u (the cell face x[i]).'''
        for i in range(self.nx):
            atom[i, :, :, :, :, :, :, :] /= self.x[i]
        return atom

    def irvscale(self, atom):
        '''Scale atom by 1/r at the location of v (the cell centre
        (x[i] + x[i-1]) / 2).'''
        for i in range(self.nx):
            atom[i, :, :, :, :, :, :, :] /= (self.x[i] + self.x[i-1]) / 2
        return atom

    def iru2scale(self, atom):
        '''Scale atom by 1/r^2 at the location of u'''
        for i in range(self.nx):
            atom[i, :, :, :, :, :, :, :] /= self.x[i] * self.x[i]
        return atom

    def irv2scale(self, atom):
        '''Scale atom by 1/r^2 at the location of v'''
        for i in range(self.nx):
            atom[i, :, :, :, :, :, :, :] /= (self.x[i] + self.x[i-1]) * (self.x[i] + self.x[i-1]) / 4
        return atom

    @staticmethod
    def _u_rr(atom, i, j, k, x, y, z):
        # distance between u[i] and u[i-1]
        dx = x[i] - x[i-1]
        # radius at the midpoint between u[i-1] and u[i]
        rv = x[i-1] + dx / 2
        # distance between u[i+1] and u[i]
        dxp1 = x[i+1] - x[i]
        rvp1 = x[i] + dxp1 / 2
        # volume size in the y direction
        dy = y[j] - y[j-1]
        # volume size in the z direction
        dz = z[k] - z[k-1]

        # second order finite difference
        atom[0] = rv / dx * dy * dz
        atom[2] = rvp1 / dxp1 * dy * dz
        atom[1] = -atom[0] - atom[2]

    def u_rr(self):
        '''Radial diffusion stencil d/dr(r du/dr) for the u-equation.'''
        atom = numpy.zeros([self.nx, self.ny, self.nz, self.dof, self.dof, 3, 3, 3])
        for i in range(self.nx):
            for j in range(self.ny):
                for k in range(self.nz):
                    CylindricalDiscretization._u_rr(atom[i, j, k, 0, 0, :, 1, 1], i, j, k, self.x, self.y, self.z)
        return atom

    def v_tt(self):
        '''Second derivative of v in theta (the base-class y direction).'''
        return self.v_yy()

    @staticmethod
    def _v_rr(atom, i, j, k, x, y, z):
        # distance between v[i] and v[i-1]
        dx = (x[i] - x[i-2]) / 2
        # distance between v[i+1] and v[i]
        dxp1 = (x[i+1] - x[i-1]) / 2
        # volume size in the y direction
        dy = (y[j+1] - y[j-1]) / 2
        # volume size in the z direction
        dz = z[k] - z[k-1]

        # second order finite difference
        atom[0] = x[i-1] / dx * dy * dz
        atom[2] = x[i] / dxp1 * dy * dz
        atom[1] = -atom[0] - atom[2]

    def u_tt(self):
        '''Second derivative of u in theta (the base-class y direction).'''
        return self.u_yy()

    def v_rr(self):
        '''Radial diffusion stencil d/dr(r dv/dr) for the v-equation.'''
        atom = numpy.zeros([self.nx, self.ny, self.nz, self.dof, self.dof, 3, 3, 3])
        for i in range(self.nx):
            for j in range(self.ny):
                for k in range(self.nz):
                    CylindricalDiscretization._v_rr(atom[i, j, k, 1, 1, :, 1, 1], i, j, k, self.x, self.y, self.z)
        return atom

    def w_tt(self):
        '''Second derivative of w in theta (the base-class y direction).'''
        return self.w_yy()

    def w_rr(self):
        '''Radial diffusion stencil for the w-equation. Reuses the v stencil
        with the theta and z index/coordinate arguments interchanged.'''
        atom = numpy.zeros([self.nx, self.ny, self.nz, self.dof, self.dof, 3, 3, 3])
        for i in range(self.nx):
            for j in range(self.ny):
                for k in range(self.nz):
                    CylindricalDiscretization._v_rr(atom[i, j, k, 2, 2, :, 1, 1], i, k, j, self.x, self.z, self.y)
        return atom

    def p_r(self):
        '''Pressure gradient in the radial direction.'''
        return self.p_x()

    def p_t(self):
        '''Pressure gradient in the theta direction.'''
        return self.p_y()

    def v_t_u(self):
        '''Backward theta-derivative of v averaged onto the u-location
        (mean of the stencils at the two theta-neighbouring v positions).'''
        atom = numpy.zeros([self.nx, self.ny, self.nz, self.dof, self.dof, 3, 3, 3])
        for i in range(self.nx):
            for j in range(self.ny):
                for k in range(self.nz):
                    Discretization._backward_u_y(atom[i, j, k, 0, 1, 1, :, 1], i, j, k, self.x, self.y, self.z)
                    Discretization._backward_u_y(atom[i, j, k, 0, 1, 2, :, 1], i, j, k, self.x, self.y, self.z)
                    atom[i, j, k, 0, 1, :, :, :] /= 2
        return atom

    def u_t_v(self):
        '''Forward theta-derivative of u averaged onto the v-location.
        Index and coordinate arguments are swapped (j, i and y, x) because
        the base-class stencil is written for the x direction.'''
        atom = numpy.zeros([self.nx, self.ny, self.nz, self.dof, self.dof, 3, 3, 3])
        for i in range(self.nx):
            for j in range(self.ny):
                for k in range(self.nz):
                    Discretization._forward_u_x(atom[i, j, k, 1, 0, 0, :, 1], j, i, k, self.y, self.x, self.z)
                    Discretization._forward_u_x(atom[i, j, k, 1, 0, 1, :, 1], j, i, k, self.y, self.x, self.z)
                    atom[i, j, k, 1, 0, :, :, :] /= 2
        return atom

    @staticmethod
    def _value_u(atom, i, j, k, x, y, z):
        # volume size in the x direction
        dx = (x[i+1] - x[i-1]) / 2
        # volume size in the y direction
        dy = y[j] - y[j-1]
        # volume size in the z direction
        dz = z[k] - z[k-1]

        atom[1] = dx * dy * dz

    def value_u(self):
        '''Integral of u over its control volume (used in the -u/r^2
        curvature term).'''
        atom = numpy.zeros([self.nx, self.ny, self.nz, self.dof, self.dof, 3, 3, 3])
        for i in range(self.nx):
            for j in range(self.ny):
                for k in range(self.nz):
                    CylindricalDiscretization._value_u(atom[i, j, k, 0, 0, :, 1, 1], i, j, k, self.x, self.y, self.z)
        return atom

    def value_v(self):
        '''Integral of v over its control volume (used in the -v/r^2
        curvature term). Index roles of i and j are swapped.'''
        atom = numpy.zeros([self.nx, self.ny, self.nz, self.dof, self.dof, 3, 3, 3])
        for i in range(self.nx):
            for j in range(self.ny):
                for k in range(self.nz):
                    CylindricalDiscretization._value_u(atom[i, j, k, 1, 1, :, 1, 1], j, i, k, self.y, self.x, self.z)
        return atom

    @staticmethod
    def _backward_u_r(atom, i, j, k, x, y, z):
        # volume size in the y direction
        dy = y[j] - y[j-1]
        # volume size in the z direction
        dz = z[k] - z[k-1]

        # backward difference of r*u
        atom[1] = x[i] * dy * dz
        atom[0] = -x[i-1] * dy * dz

    def u_r(self):
        '''Backward difference of r*u in the radial direction, assembled
        into the continuity equation row (dof index self.dim).'''
        atom = numpy.zeros([self.nx, self.ny, self.nz, self.dof, self.dof, 3, 3, 3])
        for i in range(self.nx):
            for j in range(self.ny):
                for k in range(self.nz):
                    CylindricalDiscretization._backward_u_r(atom[i, j, k, self.dim, 0, :, 1, 1], i, j, k,
                                                            self.x, self.y, self.z)
        return atom

    def div(self):
        '''Divergence in cylindrical coordinates:
        1/r * d(r*u)/dr + 1/r * dv/dtheta (+ dw/dz in 3D).'''
        if self.dim == 2:
            return self.irvscale(self.u_r() + self.v_y())
        return self.irvscale(self.u_r() + self.v_y()) + self.w_z()

    def u_u_r(self, atomJ, atomF, state):
        '''Convective u * du/dr term; identical to the Cartesian u * du/dx.'''
        Discretization.u_u_x(self, atomJ, atomF, state)

    def u_v_r(self, atomJ, atomF, state):
        '''Convective u * dv/dr term; identical to the Cartesian u * dv/dx.'''
        Discretization.u_v_x(self, atomJ, atomF, state)

    def u_w_r(self, atomJ, atomF, state):
        '''Convective u * dw/dr term; identical to the Cartesian u * dw/dx.'''
        Discretization.u_w_x(self, atomJ, atomF, state)

    def v_u_t(self, atomJ_in, atomF_in, state):
        '''Convective v/r * du/dtheta term: the Cartesian v * du/dy scaled
        by 1/r at the u-location.'''
        atomJ = numpy.zeros([self.nx, self.ny, self.nz, self.dof, self.dof, 3, 3, 3])
        atomF = numpy.zeros([self.nx, self.ny, self.nz, self.dof, self.dof, 3, 3, 3])

        Discretization.v_u_y(self, atomJ, atomF, state)
        self.iruscale(atomJ)
        self.iruscale(atomF)

        atomJ_in += atomJ
        atomF_in += atomF

    def v_v_t(self, atomJ_in, atomF_in, state):
        '''Convective v/r * dv/dtheta term: the Cartesian v * dv/dy scaled
        by 1/r at the v-location.'''
        atomJ = numpy.zeros([self.nx, self.ny, self.nz, self.dof, self.dof, 3, 3, 3])
        atomF = numpy.zeros([self.nx, self.ny, self.nz, self.dof, self.dof, 3, 3, 3])

        Discretization.v_v_y(self, atomJ, atomF, state)
        self.irvscale(atomJ)
        self.irvscale(atomF)

        atomJ_in += atomJ
        atomF_in += atomF

    def v_w_t(self, atomJ_in, atomF_in, state):
        '''Convective v/r * dw/dtheta term: the Cartesian v * dw/dy scaled
        by 1/r.'''
        atomJ = numpy.zeros([self.nx, self.ny, self.nz, self.dof, self.dof, 3, 3, 3])
        atomF = numpy.zeros([self.nx, self.ny, self.nz, self.dof, self.dof, 3, 3, 3])

        Discretization.v_w_y(self, atomJ, atomF, state)
        self.irvscale(atomJ)
        self.irvscale(atomF)

        atomJ_in += atomJ
        atomF_in += atomF

    def v_v(self, atomJ_in, atomF_in, state):
        '''Coupling of v with itself into the u-equation, scaled by 1/r at
        the u-location (atom indices [..., 0, 1, ...] add a v contribution
        to the u row; presumably the centrifugal v^2/r term -- confirm).'''
        # Average v onto the u-location in both r and theta.
        averages_v = self.weighted_average_x(state[:, :, :, 1])
        averages_v = (averages_v[:, 0:self.ny, :] + averages_v[:, 1:self.ny+1, :]) / 2

        atom = numpy.zeros([self.nx, self.ny, self.nz, self.dof, self.dof, 3, 3, 3])
        atom_value = numpy.zeros(1)
        atom_average = numpy.zeros(2)
        for i in range(self.nx):
            for j in range(self.ny):
                for k in range(self.nz):
                    Discretization._mass_x(atom_value, i, j, k, self.x, self.y, self.z)
                    Discretization._weighted_average(atom_average, i, self.x)
                    atom[i, j, k, 0, 1, 1:3, 0, 1] += atom_value * atom_average * averages_v[i, j, k+1] * 1 / 2
                    atom[i, j, k, 0, 1, 1:3, 1, 1] += atom_value * atom_average * averages_v[i, j, k+1] * 1 / 2
        self.iruscale(atom)

        atomJ_in += atom
        atomF_in += atom

    def u_v(self, atomJ_in, atomF_in, state):
        '''Coupling of u and v into the v-equation, scaled by 1/r at the
        v-location (presumably the u*v/r curvature term -- confirm).'''
        # Average u onto the v-location in both theta and r.
        averages_u = self.weighted_average_y(state[:, :, :, 0])
        averages_u = (averages_u[0:self.nx, :, :] + averages_u[1:self.nx+1, :, :]) / 2

        # v at the interior (non-ghost) grid points.
        averages_v = state[1:self.nx+1, 1:self.ny+1, 1:self.nz+1, 1]

        atomJ = numpy.zeros([self.nx, self.ny, self.nz, self.dof, self.dof, 3, 3, 3])
        atomF = numpy.zeros([self.nx, self.ny, self.nz, self.dof, self.dof, 3, 3, 3])
        atom_value = numpy.zeros(1)
        atom_average = numpy.zeros(2)
        for i in range(self.nx):
            for j in range(self.ny):
                for k in range(self.nz):
                    Discretization._mass_x(atom_value, j, i, k, self.y, self.x, self.z)
                    atomF[i, j, k, 1, 1, 1, 1, 1] -= atom_value * averages_u[i, j, k+1]

                    Discretization._weighted_average(atom_average, j, self.y)
                    atomJ[i, j, k, 1, 0, 0, 1:3, 1] -= atom_value * atom_average * averages_v[i, j, k] * 1 / 2
                    atomJ[i, j, k, 1, 0, 1, 1:3, 1] -= atom_value * atom_average * averages_v[i, j, k] * 1 / 2

        self.irvscale(atomF)
        self.irvscale(atomJ)

        atomJ_in += atomJ
        atomF_in += atomF
| [
"fvm.Discretization.u_v_x",
"fvm.Discretization._mass_x",
"fvm.Discretization.u_w_x",
"fvm.utils.create_state_vec",
"fvm.utils.create_padded_state_mtx",
"fvm.BoundaryConditions",
"fvm.Discretization.v_v_y",
"fvm.Discretization.__init__",
"numpy.zeros",
"fvm.Discretization.v_w_y",
"fvm.Discretiza... | [((1305, 1390), 'fvm.Discretization.__init__', 'Discretization.__init__', (['self', 'parameters', 'nr', 'ntheta', 'nz', 'dim', 'dof', 'r', 'theta', 'z'], {}), '(self, parameters, nr, ntheta, nz, dim, dof, r, theta, z\n )\n', (1328, 1390), False, 'from fvm import Discretization\n'), ((2998, 3126), 'fvm.utils.create_padded_state_mtx', 'utils.create_padded_state_mtx', (['state', 'self.nx', 'self.ny', 'self.nz', 'self.dof', 'self.x_periodic', 'self.y_periodic', 'self.z_periodic'], {}), '(state, self.nx, self.ny, self.nz, self.dof,\n self.x_periodic, self.y_periodic, self.z_periodic)\n', (3027, 3126), False, 'from fvm import utils\n'), ((3190, 3259), 'numpy.zeros', 'numpy.zeros', (['[self.nx, self.ny, self.nz, self.dof, self.dof, 3, 3, 3]'], {}), '([self.nx, self.ny, self.nz, self.dof, self.dof, 3, 3, 3])\n', (3201, 3259), False, 'import numpy\n'), ((3276, 3345), 'numpy.zeros', 'numpy.zeros', (['[self.nx, self.ny, self.nz, self.dof, self.dof, 3, 3, 3]'], {}), '([self.nx, self.ny, self.nz, self.dof, self.dof, 3, 3, 3])\n', (3287, 3345), False, 'import numpy\n'), ((4251, 4344), 'fvm.BoundaryConditions', 'BoundaryConditions', (['self.nx', 'self.ny', 'self.nz', 'self.dim', 'self.dof', 'self.x', 'self.y', 'self.z'], {}), '(self.nx, self.ny, self.nz, self.dim, self.dof, self.x,\n self.y, self.z)\n', (4269, 4344), False, 'from fvm import BoundaryConditions\n'), ((4356, 4407), 'numpy.zeros', 'numpy.zeros', (['(self.nx * self.ny * self.nz * self.dof)'], {}), '(self.nx * self.ny * self.nz * self.dof)\n', (4367, 4407), False, 'import numpy\n'), ((7112, 7181), 'numpy.zeros', 'numpy.zeros', (['[self.nx, self.ny, self.nz, self.dof, self.dof, 3, 3, 3]'], {}), '([self.nx, self.ny, self.nz, self.dof, self.dof, 3, 3, 3])\n', (7123, 7181), False, 'import numpy\n'), ((8077, 8146), 'numpy.zeros', 'numpy.zeros', (['[self.nx, self.ny, self.nz, self.dof, self.dof, 3, 3, 3]'], {}), '([self.nx, self.ny, self.nz, self.dof, self.dof, 3, 3, 3])\n', (8088, 8146), False, 'import 
numpy\n'), ((8477, 8546), 'numpy.zeros', 'numpy.zeros', (['[self.nx, self.ny, self.nz, self.dof, self.dof, 3, 3, 3]'], {}), '([self.nx, self.ny, self.nz, self.dof, self.dof, 3, 3, 3])\n', (8488, 8546), False, 'import numpy\n'), ((8922, 8991), 'numpy.zeros', 'numpy.zeros', (['[self.nx, self.ny, self.nz, self.dof, self.dof, 3, 3, 3]'], {}), '([self.nx, self.ny, self.nz, self.dof, self.dof, 3, 3, 3])\n', (8933, 8991), False, 'import numpy\n'), ((9438, 9507), 'numpy.zeros', 'numpy.zeros', (['[self.nx, self.ny, self.nz, self.dof, self.dof, 3, 3, 3]'], {}), '([self.nx, self.ny, self.nz, self.dof, self.dof, 3, 3, 3])\n', (9449, 9507), False, 'import numpy\n'), ((10259, 10328), 'numpy.zeros', 'numpy.zeros', (['[self.nx, self.ny, self.nz, self.dof, self.dof, 3, 3, 3]'], {}), '([self.nx, self.ny, self.nz, self.dof, self.dof, 3, 3, 3])\n', (10270, 10328), False, 'import numpy\n'), ((10617, 10686), 'numpy.zeros', 'numpy.zeros', (['[self.nx, self.ny, self.nz, self.dof, self.dof, 3, 3, 3]'], {}), '([self.nx, self.ny, self.nz, self.dof, self.dof, 3, 3, 3])\n', (10628, 10686), False, 'import numpy\n'), ((11273, 11342), 'numpy.zeros', 'numpy.zeros', (['[self.nx, self.ny, self.nz, self.dof, self.dof, 3, 3, 3]'], {}), '([self.nx, self.ny, self.nz, self.dof, self.dof, 3, 3, 3])\n', (11284, 11342), False, 'import numpy\n'), ((11886, 11933), 'fvm.Discretization.u_u_x', 'Discretization.u_u_x', (['self', 'atomJ', 'atomF', 'state'], {}), '(self, atomJ, atomF, state)\n', (11906, 11933), False, 'from fvm import Discretization\n'), ((11985, 12032), 'fvm.Discretization.u_v_x', 'Discretization.u_v_x', (['self', 'atomJ', 'atomF', 'state'], {}), '(self, atomJ, atomF, state)\n', (12005, 12032), False, 'from fvm import Discretization\n'), ((12084, 12131), 'fvm.Discretization.u_w_x', 'Discretization.u_w_x', (['self', 'atomJ', 'atomF', 'state'], {}), '(self, atomJ, atomF, state)\n', (12104, 12131), False, 'from fvm import Discretization\n'), ((12197, 12266), 'numpy.zeros', 'numpy.zeros', (['[self.nx, 
self.ny, self.nz, self.dof, self.dof, 3, 3, 3]'], {}), '([self.nx, self.ny, self.nz, self.dof, self.dof, 3, 3, 3])\n', (12208, 12266), False, 'import numpy\n'), ((12283, 12352), 'numpy.zeros', 'numpy.zeros', (['[self.nx, self.ny, self.nz, self.dof, self.dof, 3, 3, 3]'], {}), '([self.nx, self.ny, self.nz, self.dof, self.dof, 3, 3, 3])\n', (12294, 12352), False, 'import numpy\n'), ((12362, 12409), 'fvm.Discretization.v_u_y', 'Discretization.v_u_y', (['self', 'atomJ', 'atomF', 'state'], {}), '(self, atomJ, atomF, state)\n', (12382, 12409), False, 'from fvm import Discretization\n'), ((12586, 12655), 'numpy.zeros', 'numpy.zeros', (['[self.nx, self.ny, self.nz, self.dof, self.dof, 3, 3, 3]'], {}), '([self.nx, self.ny, self.nz, self.dof, self.dof, 3, 3, 3])\n', (12597, 12655), False, 'import numpy\n'), ((12672, 12741), 'numpy.zeros', 'numpy.zeros', (['[self.nx, self.ny, self.nz, self.dof, self.dof, 3, 3, 3]'], {}), '([self.nx, self.ny, self.nz, self.dof, self.dof, 3, 3, 3])\n', (12683, 12741), False, 'import numpy\n'), ((12751, 12798), 'fvm.Discretization.v_v_y', 'Discretization.v_v_y', (['self', 'atomJ', 'atomF', 'state'], {}), '(self, atomJ, atomF, state)\n', (12771, 12798), False, 'from fvm import Discretization\n'), ((12975, 13044), 'numpy.zeros', 'numpy.zeros', (['[self.nx, self.ny, self.nz, self.dof, self.dof, 3, 3, 3]'], {}), '([self.nx, self.ny, self.nz, self.dof, self.dof, 3, 3, 3])\n', (12986, 13044), False, 'import numpy\n'), ((13061, 13130), 'numpy.zeros', 'numpy.zeros', (['[self.nx, self.ny, self.nz, self.dof, self.dof, 3, 3, 3]'], {}), '([self.nx, self.ny, self.nz, self.dof, self.dof, 3, 3, 3])\n', (13072, 13130), False, 'import numpy\n'), ((13140, 13187), 'fvm.Discretization.v_w_y', 'Discretization.v_w_y', (['self', 'atomJ', 'atomF', 'state'], {}), '(self, atomJ, atomF, state)\n', (13160, 13187), False, 'from fvm import Discretization\n'), ((13513, 13582), 'numpy.zeros', 'numpy.zeros', (['[self.nx, self.ny, self.nz, self.dof, self.dof, 3, 3, 3]'], {}), 
'([self.nx, self.ny, self.nz, self.dof, self.dof, 3, 3, 3])\n', (13524, 13582), False, 'import numpy\n'), ((13605, 13619), 'numpy.zeros', 'numpy.zeros', (['(1)'], {}), '(1)\n', (13616, 13619), False, 'import numpy\n'), ((13643, 13657), 'numpy.zeros', 'numpy.zeros', (['(2)'], {}), '(2)\n', (13654, 13657), False, 'import numpy\n'), ((14523, 14592), 'numpy.zeros', 'numpy.zeros', (['[self.nx, self.ny, self.nz, self.dof, self.dof, 3, 3, 3]'], {}), '([self.nx, self.ny, self.nz, self.dof, self.dof, 3, 3, 3])\n', (14534, 14592), False, 'import numpy\n'), ((14609, 14678), 'numpy.zeros', 'numpy.zeros', (['[self.nx, self.ny, self.nz, self.dof, self.dof, 3, 3, 3]'], {}), '([self.nx, self.ny, self.nz, self.dof, self.dof, 3, 3, 3])\n', (14620, 14678), False, 'import numpy\n'), ((14701, 14715), 'numpy.zeros', 'numpy.zeros', (['(1)'], {}), '(1)\n', (14712, 14715), False, 'import numpy\n'), ((14739, 14753), 'numpy.zeros', 'numpy.zeros', (['(2)'], {}), '(2)\n', (14750, 14753), False, 'import numpy\n'), ((4912, 4962), 'numpy.zeros', 'numpy.zeros', (['[self.nx, self.ny, self.nz, self.dof]'], {}), '([self.nx, self.ny, self.nz, self.dof])\n', (4923, 4962), False, 'import numpy\n'), ((5088, 5153), 'fvm.utils.create_state_vec', 'utils.create_state_vec', (['frc2', 'self.nx', 'self.ny', 'self.nz', 'self.dof'], {}), '(frc2, self.nx, self.ny, self.nz, self.dof)\n', (5110, 5153), False, 'from fvm import utils\n'), ((5009, 5070), 'numpy.cos', 'numpy.cos', (['(self.z[0:self.nz] / self.z[self.nz - 1] * numpy.pi)'], {}), '(self.z[0:self.nz] / self.z[self.nz - 1] * numpy.pi)\n', (5018, 5070), False, 'import numpy\n'), ((9123, 9218), 'fvm.Discretization._backward_u_y', 'Discretization._backward_u_y', (['atom[i, j, k, 0, 1, 1, :, 1]', 'i', 'j', 'k', 'self.x', 'self.y', 'self.z'], {}), '(atom[i, j, k, 0, 1, 1, :, 1], i, j, k, self.x,\n self.y, self.z)\n', (9151, 9218), False, 'from fvm import Discretization\n'), ((9235, 9330), 'fvm.Discretization._backward_u_y', 'Discretization._backward_u_y', 
(['atom[i, j, k, 0, 1, 2, :, 1]', 'i', 'j', 'k', 'self.x', 'self.y', 'self.z'], {}), '(atom[i, j, k, 0, 1, 2, :, 1], i, j, k, self.x,\n self.y, self.z)\n', (9263, 9330), False, 'from fvm import Discretization\n'), ((9639, 9733), 'fvm.Discretization._forward_u_x', 'Discretization._forward_u_x', (['atom[i, j, k, 1, 0, 0, :, 1]', 'j', 'i', 'k', 'self.y', 'self.x', 'self.z'], {}), '(atom[i, j, k, 1, 0, 0, :, 1], j, i, k, self.y,\n self.x, self.z)\n', (9666, 9733), False, 'from fvm import Discretization\n'), ((9750, 9844), 'fvm.Discretization._forward_u_x', 'Discretization._forward_u_x', (['atom[i, j, k, 1, 0, 1, :, 1]', 'j', 'i', 'k', 'self.y', 'self.x', 'self.z'], {}), '(atom[i, j, k, 1, 0, 1, :, 1], j, i, k, self.y,\n self.x, self.z)\n', (9777, 9844), False, 'from fvm import Discretization\n'), ((13789, 13856), 'fvm.Discretization._mass_x', 'Discretization._mass_x', (['atom_value', 'i', 'j', 'k', 'self.x', 'self.y', 'self.z'], {}), '(atom_value, i, j, k, self.x, self.y, self.z)\n', (13811, 13856), False, 'from fvm import Discretization\n'), ((13877, 13934), 'fvm.Discretization._weighted_average', 'Discretization._weighted_average', (['atom_average', 'i', 'self.x'], {}), '(atom_average, i, self.x)\n', (13909, 13934), False, 'from fvm import Discretization\n'), ((14885, 14952), 'fvm.Discretization._mass_x', 'Discretization._mass_x', (['atom_value', 'j', 'i', 'k', 'self.y', 'self.x', 'self.z'], {}), '(atom_value, j, i, k, self.y, self.x, self.z)\n', (14907, 14952), False, 'from fvm import Discretization\n'), ((15062, 15119), 'fvm.Discretization._weighted_average', 'Discretization._weighted_average', (['atom_average', 'j', 'self.y'], {}), '(atom_average, j, self.y)\n', (15094, 15119), False, 'from fvm import Discretization\n')] |
import numpy as np
from test.runtime.frontend_test.onnx_test.util import make_node, make_tensor_value_info, make_model
from test.util import wrap_template, generate_kernel_test_case
from webdnn.frontend.onnx import ONNXConverter
@wrap_template
def template(x_shape, axes, keepdims=None, description: str = ""):
    """Build a ReduceMean ONNX model, convert it, and register a kernel test.

    Computes the reference result with numpy.mean over the same axes,
    checks the converted graph's output shape against it, and emits a
    kernel test case for every backend.
    """
    vx = np.random.rand(*x_shape)
    keep = True if keepdims is None else keepdims
    vy = np.mean(vx, axis=tuple(axes), keepdims=keep)

    x = make_tensor_value_info("x", vx.shape)
    y = make_tensor_value_info("y", vy.shape)

    # Only forward keepdims to the operator when the caller supplied it,
    # so the ONNX default is exercised otherwise.
    attrs = {"axes": axes}
    if keepdims is not None:
        attrs["keepdims"] = keepdims
    operator = make_node("ReduceMean", ["x"], ["y"], **attrs)

    graph = ONNXConverter().convert(make_model([operator], [x], [y]))

    assert tuple(vy.shape) == tuple(graph.outputs[0].shape), f"vy: {vy.shape}, graph.outputs[0]: {graph.outputs[0].shape}"

    generate_kernel_test_case(
        description=f"[ONNX] ReduceMean {description}",
        graph=graph,
        backend=["webgpu", "webgl", "webassembly"],
        inputs={graph.inputs[0]: vx},
        expected={graph.outputs[0]: vy},
    )
def test():
    """ReduceMean over a single axis with the default keepdims behaviour."""
    template(x_shape=[1, 3, 4, 5], axes=[2])
def test_keepdim():
    """ReduceMean over a single axis with keepdims explicitly enabled."""
    template(x_shape=[1, 3, 4, 5], axes=[2], keepdims=True)
def test_not_keepdim():
    """ReduceMean over a single axis with keepdims disabled (axis removed)."""
    template(x_shape=[1, 3, 4, 5], axes=[2], keepdims=False)
def test_multi_axes():
    """ReduceMean over two axes at once."""
    template(x_shape=[1, 3, 4, 5], axes=[2, 3])
def test_all_axes():
    """ReduceMean over every axis (full reduction)."""
    template(x_shape=[1, 3, 4, 5], axes=[0, 1, 2, 3])
| [
"test.runtime.frontend_test.onnx_test.util.make_tensor_value_info",
"numpy.random.rand",
"test.util.generate_kernel_test_case",
"test.runtime.frontend_test.onnx_test.util.make_model",
"webdnn.frontend.onnx.ONNXConverter",
"test.runtime.frontend_test.onnx_test.util.make_node"
] | [((323, 347), 'numpy.random.rand', 'np.random.rand', (['*x_shape'], {}), '(*x_shape)\n', (337, 347), True, 'import numpy as np\n'), ((445, 482), 'test.runtime.frontend_test.onnx_test.util.make_tensor_value_info', 'make_tensor_value_info', (['"""x"""', 'vx.shape'], {}), "('x', vx.shape)\n", (467, 482), False, 'from test.runtime.frontend_test.onnx_test.util import make_node, make_tensor_value_info, make_model\n'), ((491, 528), 'test.runtime.frontend_test.onnx_test.util.make_tensor_value_info', 'make_tensor_value_info', (['"""y"""', 'vy.shape'], {}), "('y', vy.shape)\n", (513, 528), False, 'from test.runtime.frontend_test.onnx_test.util import make_node, make_tensor_value_info, make_model\n'), ((640, 687), 'test.runtime.frontend_test.onnx_test.util.make_node', 'make_node', (['"""ReduceMean"""', "['x']", "['y']"], {}), "('ReduceMean', ['x'], ['y'], **kwargs)\n", (649, 687), False, 'from test.runtime.frontend_test.onnx_test.util import make_node, make_tensor_value_info, make_model\n'), ((701, 733), 'test.runtime.frontend_test.onnx_test.util.make_model', 'make_model', (['[operator]', '[x]', '[y]'], {}), '([operator], [x], [y])\n', (711, 733), False, 'from test.runtime.frontend_test.onnx_test.util import make_node, make_tensor_value_info, make_model\n'), ((906, 1108), 'test.util.generate_kernel_test_case', 'generate_kernel_test_case', ([], {'description': 'f"""[ONNX] ReduceMean {description}"""', 'graph': 'graph', 'backend': "['webgpu', 'webgl', 'webassembly']", 'inputs': '{graph.inputs[0]: vx}', 'expected': '{graph.outputs[0]: vy}'}), "(description=f'[ONNX] ReduceMean {description}',\n graph=graph, backend=['webgpu', 'webgl', 'webassembly'], inputs={graph.\n inputs[0]: vx}, expected={graph.outputs[0]: vy})\n", (931, 1108), False, 'from test.util import wrap_template, generate_kernel_test_case\n'), ((747, 762), 'webdnn.frontend.onnx.ONNXConverter', 'ONNXConverter', ([], {}), '()\n', (760, 762), False, 'from webdnn.frontend.onnx import ONNXConverter\n')] |
import dash_html_components as html
import string
import os
import json
import urllib
import numpy as np
from dash.dependencies import Input, Output, State
from matscholar.rest import Rester
# Module-level state created at import time: the REST client and the
# sample abstracts served by the "random document" button.
rester = Rester()
local_dir = os.path.dirname(__file__)
with open(os.path.join(local_dir, "../static/data/sample_docs.json"), "r") as f:
    sample_docs = json.load(f)

# Mapping from NER tag codes to the human-readable names shown in the legend.
label_mapping = {
    "MAT": "Material",
    "APL": "Application",
    "PRO": "Property",
    "SPL": "Phase",
    "SMT": "Synthesis",
    "CMT": "Characterization",
    "DSC": "Descriptor",
    "PVL": "Property value",
    "PUT": "Property unit"}
def highlight_entities(tagged_doc):
    """Turn a tagged document into a list of styled ``html.Span`` elements.

    ``tagged_doc[0]`` is a list of sentences, each a list of
    ``(token, tag)`` pairs. Every token becomes a Span whose CSS class
    encodes its tag; the right padding is dropped when the next token is
    punctuation so no gap appears before it.
    """
    tokens = [pair for sentence in tagged_doc[0] for pair in sentence]
    spans = []
    last = len(tokens) - 1
    for position, (word, label) in enumerate(tokens):
        # True when the following token is a punctuation character.
        followed_by_punct = position < last and tokens[position + 1][0] in string.punctuation
        spans.append(html.Span(
            word,
            className="highlighted {}".format(label),
            style={
                "padding-right": "0px" if followed_by_punct else "4px",
                "background-clip": "content-box",
            },
        ))
    return spans
def get_labels():
    """Return one legend Span per entity tag, styled like the highlights."""
    legend = []
    for tag in label_mapping:
        legend.append(html.Span(
            label_mapping[tag],
            className="highlighted {}".format(tag),
            style={"padding-right": "10px", "background-clip": "content-box"},
        ))
    return legend
def bind(app):
    """Register the extract-app callbacks on the given Dash app."""

    ### Extract App Callbacks ###
    @app.callback(
        Output("extract-highlighted", "children"),
        [Input("extract-button", "n_clicks")],
        [State("extract-textarea", "value"),
         State("normalize-radio", "value")])
    def highlight_extracted(n_clicks, text, normalize):
        """Run NER on the textarea contents and render the highlighted result.

        Returns a Div with the highlighted tokens, a legend, and a JSON
        download link. Returns None (no update) before the first click.
        """
        if n_clicks is not None:
            # Run the NER request; the radio button selects whether entities
            # are normalized or concatenated in the response.
            return_type = "normalized" if normalize == "yes" else "concatenated"
            result = rester.get_ner_tags([text], return_type=return_type)
            tagged_doc = result["tags"]
            relevance = result["relevance"][0]
            highlighted = highlight_entities(tagged_doc)

            # Show a warning when the relevance classifier flags the document.
            if not relevance:
                warning = "WARNING!!! Our classifier has flagged this document as not relevant" \
                          " to inorganic materials science. Expect lower than optimum performance."
            else:
                warning = ""

            # Build the downloadable JSON payload as a data URI.
            doc = {"sentences": []}
            for sent in tagged_doc[0]:
                new_sent = []
                for token, tag in sent:
                    new_sent.append({
                        "token": token,
                        "tag": tag
                    })
                doc["sentences"].append(new_sent)
            json_string = json.dumps(doc)
            json_string = "data:text/csv;charset=utf-8," + urllib.parse.quote(json_string)

            return html.Div([html.Div(html.Label("Extracted Entity Tags:")),
                             html.Div(warning, style={"padding-bottom": "20px", "color": "red"}),
                             html.Div(highlighted),
                             html.Div(html.Label("Labels"), style={"padding-top": "15px"}),
                             html.Div(get_labels()),
                             html.Div(html.A("Download entities as json",
                                             id="entity-download-link",
                                             href=json_string,
                                             download="tagged_docs.json",
                                             target="_blank"),
                                      style={"padding-top": "15px"})])

    @app.callback(
        Output('extract-textarea', 'value'),
        [Input("extract-random", 'n_clicks')])
    def get_random(n_clicks):
        """Fill the textarea with a random sample abstract on button click."""
        if n_clicks is not None:
            return np.random.choice(sample_docs)
        return ""
| [
"numpy.random.choice",
"dash.dependencies.Output",
"json.dumps",
"os.path.join",
"urllib.parse.quote",
"dash.dependencies.Input",
"os.path.dirname",
"dash_html_components.Div",
"dash_html_components.Label",
"json.load",
"dash.dependencies.State",
"matscholar.rest.Rester",
"dash_html_componen... | [((244, 252), 'matscholar.rest.Rester', 'Rester', ([], {}), '()\n', (250, 252), False, 'from matscholar.rest import Rester\n'), ((265, 290), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (280, 290), False, 'import os\n'), ((390, 402), 'json.load', 'json.load', (['f'], {}), '(f)\n', (399, 402), False, 'import json\n'), ((301, 359), 'os.path.join', 'os.path.join', (['local_dir', '"""../static/data/sample_docs.json"""'], {}), "(local_dir, '../static/data/sample_docs.json')\n", (313, 359), False, 'import os\n'), ((1672, 1713), 'dash.dependencies.Output', 'Output', (['"""extract-highlighted"""', '"""children"""'], {}), "('extract-highlighted', 'children')\n", (1678, 1713), False, 'from dash.dependencies import Input, Output, State\n'), ((3867, 3902), 'dash.dependencies.Output', 'Output', (['"""extract-textarea"""', '"""value"""'], {}), "('extract-textarea', 'value')\n", (3873, 3902), False, 'from dash.dependencies import Input, Output, State\n'), ((2972, 2987), 'json.dumps', 'json.dumps', (['doc'], {}), '(doc)\n', (2982, 2987), False, 'import json\n'), ((1724, 1759), 'dash.dependencies.Input', 'Input', (['"""extract-button"""', '"""n_clicks"""'], {}), "('extract-button', 'n_clicks')\n", (1729, 1759), False, 'from dash.dependencies import Input, Output, State\n'), ((1771, 1805), 'dash.dependencies.State', 'State', (['"""extract-textarea"""', '"""value"""'], {}), "('extract-textarea', 'value')\n", (1776, 1805), False, 'from dash.dependencies import Input, Output, State\n'), ((1816, 1849), 'dash.dependencies.State', 'State', (['"""normalize-radio"""', '"""value"""'], {}), "('normalize-radio', 'value')\n", (1821, 1849), False, 'from dash.dependencies import Input, Output, State\n'), ((4033, 4062), 'numpy.random.choice', 'np.random.choice', (['sample_docs'], {}), '(sample_docs)\n', (4049, 4062), True, 'import numpy as np\n'), ((3913, 3948), 'dash.dependencies.Input', 'Input', (['"""extract-random"""', '"""n_clicks"""'], 
{}), "('extract-random', 'n_clicks')\n", (3918, 3948), False, 'from dash.dependencies import Input, Output, State\n'), ((3047, 3078), 'urllib.parse.quote', 'urllib.parse.quote', (['json_string'], {}), '(json_string)\n', (3065, 3078), False, 'import urllib\n'), ((3185, 3252), 'dash_html_components.Div', 'html.Div', (['warning'], {'style': "{'padding-bottom': '20px', 'color': 'red'}"}), "(warning, style={'padding-bottom': '20px', 'color': 'red'})\n", (3193, 3252), True, 'import dash_html_components as html\n'), ((3283, 3304), 'dash_html_components.Div', 'html.Div', (['highlighted'], {}), '(highlighted)\n', (3291, 3304), True, 'import dash_html_components as html\n'), ((3117, 3153), 'dash_html_components.Label', 'html.Label', (['"""Extracted Entity Tags:"""'], {}), "('Extracted Entity Tags:')\n", (3127, 3153), True, 'import dash_html_components as html\n'), ((3344, 3364), 'dash_html_components.Label', 'html.Label', (['"""Labels"""'], {}), "('Labels')\n", (3354, 3364), True, 'import dash_html_components as html\n'), ((3489, 3620), 'dash_html_components.A', 'html.A', (['"""Download entities as json"""'], {'id': '"""entity-download-link"""', 'href': 'json_string', 'download': '"""tagged_docs.json"""', 'target': '"""_blank"""'}), "('Download entities as json', id='entity-download-link', href=\n json_string, download='tagged_docs.json', target='_blank')\n", (3495, 3620), True, 'import dash_html_components as html\n')] |
#!/usr/bin/env python
"""Implementation of Closed-Form Matting.
This module implements natural image matting method described in:
<NAME>, <NAME>, and <NAME>. "A closed-form solution to natural image matting."
IEEE Transactions on Pattern Analysis and Machine Intelligence 30.2 (2008): 228-242.
The code can be used in two ways:
1. By importing solve_foregound_background in your code:
```
import closed_form_matting
...
# For scribles input
alpha = closed_form_matting.closed_form_matting_with_scribbles(image, scribbles)
# For trimap input
alpha = closed_form_matting.closed_form_matting_with_trimap(image, trimap)
# For prior with confidence
alpha = closed_form_matting.closed_form_matting_with_prior(
image, prior, prior_confidence, optional_const_mask)
# To get Matting Laplacian for image
laplacian = compute_laplacian(image, optional_const_mask)
```
2. From command line:
```
# Scribbles input
./closed_form_matting.py input_image.png -s scribbles_image.png -o output_alpha.png
# Trimap input
./closed_form_matting.py input_image.png -t scribbles_image.png -o output_alpha.png
# Add flag --solve-fg to compute foreground color and output RGBA image instead
# of alpha.
```
"""
from __future__ import division
import logging
import cv2
import numpy as np
from numpy.lib.stride_tricks import as_strided
import scipy.sparse
import scipy.sparse.linalg
def _rolling_block(A, block=(3, 3)):
"""Applies sliding window to given matrix."""
shape = (A.shape[0] - block[0] + 1, A.shape[1] - block[1] + 1) + block
strides = (A.strides[0], A.strides[1]) + A.strides
return as_strided(A, shape=shape, strides=strides)
def compute_laplacian(img, mask=None, eps=10**(-7), win_rad=1):
    """Computes the Matting Laplacian for a given image (Levin et al., Eq. 12).

    Args:
        img: 3-dim numpy matrix with input image (h, w, 3).
        mask: mask of pixels for which Laplacian will be computed.
            If not set Laplacian will be computed for all pixels.
        eps: regularization parameter controlling alpha smoothness
            from Eq. 12 of the original paper. Defaults to 1e-7.
        win_rad: radius of window used to build Matting Laplacian (i.e.
            radius of omega_k in Eq. 12).

    Returns: sparse (h*w, h*w) matrix holding the Matting Laplacian.
    """
    win_size = (win_rad * 2 + 1) ** 2
    h, w, d = img.shape
    # Number of window centre indices in h, w axes
    c_h, c_w = h - 2 * win_rad, w - 2 * win_rad
    win_diam = win_rad * 2 + 1
    # Pixel index grid; each window below holds the flat indices of its pixels.
    indsM = np.arange(h * w).reshape((h, w))
    ravelImg = img.reshape(h * w, d)
    win_inds = _rolling_block(indsM, block=(win_diam, win_diam))
    win_inds = win_inds.reshape(c_h, c_w, win_size)
    if mask is not None:
        # Dilate so every window overlapping a masked pixel is kept.
        # BUG FIX: `np.bool` (deprecated alias) was removed in NumPy 1.24;
        # the builtin `bool` is the documented replacement.
        mask = cv2.dilate(
            mask.astype(np.uint8),
            np.ones((win_diam, win_diam), np.uint8)
        ).astype(bool)
        win_mask = np.sum(mask.ravel()[win_inds], axis=2)
        win_inds = win_inds[win_mask > 0, :]
    else:
        win_inds = win_inds.reshape(-1, win_size)
    # Per-window colour statistics (mean and covariance), vectorised via einsum.
    winI = ravelImg[win_inds]
    win_mu = np.mean(winI, axis=1, keepdims=True)
    win_var = np.einsum('...ji,...jk ->...ik', winI, winI) / win_size - np.einsum('...ji,...jk ->...ik', win_mu, win_mu)
    # Regularised inverse covariance (eps/|w| * I keeps it well-conditioned).
    inv = np.linalg.inv(win_var + (eps/win_size)*np.eye(3))
    X = np.einsum('...ij,...jk->...ik', winI - win_mu, inv)
    # Per-window Laplacian contributions (Eq. 12); each row sums to zero.
    vals = np.eye(win_size) - (1.0/win_size)*(1 + np.einsum('...ij,...kj->...ik', X, winI - win_mu))
    # Scatter window contributions into a sparse (h*w, h*w) matrix;
    # coo_matrix sums duplicate (row, col) entries from overlapping windows.
    nz_indsCol = np.tile(win_inds, win_size).ravel()
    nz_indsRow = np.repeat(win_inds, win_size).ravel()
    nz_indsVal = vals.ravel()
    L = scipy.sparse.coo_matrix((nz_indsVal, (nz_indsRow, nz_indsCol)), shape=(h*w, h*w))
    return L
def closed_form_matting_with_prior(image, prior, prior_confidence, consts_map=None):
    """Solves for an alpha matte given an a-priori alpha map and its confidence.

    Args:
        image: 3-dim numpy matrix with input image.
        prior: 2-dim matrix (same height/width as image) with the prior alpha.
        prior_confidence: 2-dim matrix with per-pixel confidence of `prior`.
        consts_map: optional binary mask of pixels considered constant
            (high-confidence) and excluded from Laplacian computation.

    Returns: 2-dim matrix holding the computed alpha map, clipped to [0, 1].
    """
    assert image.shape[:2] == prior.shape, ('prior must be 2D matrix with height and width equal '
                                            'to image.')
    assert image.shape[:2] == prior_confidence.shape, ('prior_confidence must be 2D matrix with '
                                                       'height and width equal to image.')
    assert (consts_map is None) or image.shape[:2] == consts_map.shape, (
        'consts_map must be 2D matrix with height and width equal to image.')

    logging.info('Computing Matting Laplacian.')
    variable_pixels = ~consts_map if consts_map is not None else None
    laplacian = compute_laplacian(image, variable_pixels)

    logging.info('Solving for alpha.')
    # Minimise alpha^T L alpha + (alpha - prior)^T C (alpha - prior),
    # whose normal equations are (L + C) alpha = C * prior.
    confidence_diag = scipy.sparse.diags(prior_confidence.ravel())
    rhs = prior.ravel() * prior_confidence.ravel()
    solution = scipy.sparse.linalg.spsolve(laplacian + confidence_diag, rhs)
    return np.clip(solution.reshape(prior.shape), 0, 1)
def closed_form_matting_with_trimap(image, trimap, trimap_confidence=100.0):
    """Runs Closed-Form matting on `image` guided by a `trimap` prior."""
    assert image.shape[:2] == trimap.shape, ('trimap must be 2D matrix with height and width equal '
                                             'to image.')
    # Pixels the trimap labels as definite background (<0.1) or foreground (>0.9).
    known_bg = trimap < 0.1
    known_fg = trimap > 0.9
    consts_map = known_bg | known_fg
    # Known pixels carry full confidence; unknown pixels carry none.
    return closed_form_matting_with_prior(
        image, trimap, trimap_confidence * consts_map, consts_map)
def closed_form_matting_with_scribbles(image, scribbles, scribbles_confidence=100.0):
    """Runs Closed-Form matting on `image` guided by a scribbles overlay."""
    assert image.shape == scribbles.shape, 'scribbles must have exactly same shape as image.'
    # The sign of the scribble-vs-image difference marks strokes: positive maps
    # to a foreground prior of 1, negative to 0, untouched pixels to 0.5.
    channel_diff = np.sum(scribbles - image, axis=2)
    prior = 0.5 + np.sign(channel_diff) / 2
    scribbled_mask = prior != 0.5
    return closed_form_matting_with_prior(
        image,
        prior,
        scribbles_confidence * scribbled_mask,
        scribbled_mask
    )


# Default entry point kept for backwards compatibility with older callers.
closed_form_matting = closed_form_matting_with_trimap
def main():
    """Command-line entry point: parses args, runs matting, writes the result.

    Requires exactly one of --trimap / --scribbles; with --solve-fg the output
    is an RGBA image (foreground + alpha) instead of a plain alpha map.
    """
    import argparse

    logging.basicConfig(level=logging.INFO)
    arg_parser = argparse.ArgumentParser(description=__doc__)
    arg_parser.add_argument('image', type=str, help='input image')
    arg_parser.add_argument('-t', '--trimap', type=str, help='input trimap')
    arg_parser.add_argument('-s', '--scribbles', type=str, help='input scribbles')
    arg_parser.add_argument('-o', '--output', type=str, required=True, help='output image')
    arg_parser.add_argument(
        '--solve-fg', dest='solve_fg', action='store_true',
        help='compute foreground color and output RGBA image'
    )
    args = arg_parser.parse_args()

    # Images are processed as floats in [0, 1].
    image = cv2.imread(args.image, cv2.IMREAD_COLOR) / 255.0

    if args.scribbles:
        scribbles = cv2.imread(args.scribbles, cv2.IMREAD_COLOR) / 255.0
        alpha = closed_form_matting_with_scribbles(image, scribbles)
    elif args.trimap:
        trimap = cv2.imread(args.trimap, cv2.IMREAD_GRAYSCALE) / 255.0
        alpha = closed_form_matting_with_trimap(image, trimap)
    else:
        logging.error('Either trimap or scribbles must be specified.')
        arg_parser.print_help()
        # BUG FIX: `exit()` is injected by the `site` module for interactive
        # use and is not guaranteed in scripts; raise SystemExit instead.
        raise SystemExit(-1)

    if args.solve_fg:
        from solve_foreground_background import solve_foreground_background
        foreground, _ = solve_foreground_background(image, alpha)
        # Stack RGB foreground with alpha as the fourth channel.
        output = np.concatenate((foreground, alpha[:, :, np.newaxis]), axis=2)
    else:
        output = alpha

    cv2.imwrite(args.output, output * 255.0)


if __name__ == "__main__":
    main()
| [
"logging.basicConfig",
"numpy.mean",
"cv2.imwrite",
"numpy.eye",
"numpy.tile",
"numpy.repeat",
"argparse.ArgumentParser",
"solve_foreground_background.solve_foreground_background",
"numpy.ones",
"numpy.lib.stride_tricks.as_strided",
"numpy.sum",
"numpy.einsum",
"numpy.concatenate",
"loggin... | [((1853, 1896), 'numpy.lib.stride_tricks.as_strided', 'as_strided', (['A'], {'shape': 'shape', 'strides': 'strides'}), '(A, shape=shape, strides=strides)\n', (1863, 1896), False, 'from numpy.lib.stride_tricks import as_strided\n'), ((3289, 3325), 'numpy.mean', 'np.mean', (['winI'], {'axis': '(1)', 'keepdims': '(True)'}), '(winI, axis=1, keepdims=True)\n', (3296, 3325), True, 'import numpy as np\n'), ((3517, 3568), 'numpy.einsum', 'np.einsum', (['"""...ij,...jk->...ik"""', '(winI - win_mu)', 'inv'], {}), "('...ij,...jk->...ik', winI - win_mu, inv)\n", (3526, 3568), True, 'import numpy as np\n'), ((4993, 5037), 'logging.info', 'logging.info', (['"""Computing Matting Laplacian."""'], {}), "('Computing Matting Laplacian.')\n", (5005, 5037), False, 'import logging\n'), ((5195, 5229), 'logging.info', 'logging.info', (['"""Solving for alpha."""'], {}), "('Solving for alpha.')\n", (5207, 5229), False, 'import logging\n'), ((6491, 6530), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.INFO'}), '(level=logging.INFO)\n', (6510, 6530), False, 'import logging\n'), ((6548, 6592), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '__doc__'}), '(description=__doc__)\n', (6571, 6592), False, 'import argparse\n'), ((7901, 7941), 'cv2.imwrite', 'cv2.imwrite', (['args.output', '(output * 255.0)'], {}), '(args.output, output * 255.0)\n', (7912, 7941), False, 'import cv2\n'), ((3398, 3446), 'numpy.einsum', 'np.einsum', (['"""...ji,...jk ->...ik"""', 'win_mu', 'win_mu'], {}), "('...ji,...jk ->...ik', win_mu, win_mu)\n", (3407, 3446), True, 'import numpy as np\n'), ((3580, 3596), 'numpy.eye', 'np.eye', (['win_size'], {}), '(win_size)\n', (3586, 3596), True, 'import numpy as np\n'), ((7118, 7158), 'cv2.imread', 'cv2.imread', (['args.image', 'cv2.IMREAD_COLOR'], {}), '(args.image, cv2.IMREAD_COLOR)\n', (7128, 7158), False, 'import cv2\n'), ((7742, 7783), 'solve_foreground_background.solve_foreground_background', 
'solve_foreground_background', (['image', 'alpha'], {}), '(image, alpha)\n', (7769, 7783), False, 'from solve_foreground_background import solve_foreground_background\n'), ((7801, 7862), 'numpy.concatenate', 'np.concatenate', (['(foreground, alpha[:, :, np.newaxis])'], {'axis': '(2)'}), '((foreground, alpha[:, :, np.newaxis]), axis=2)\n', (7815, 7862), True, 'import numpy as np\n'), ((2723, 2739), 'numpy.arange', 'np.arange', (['(h * w)'], {}), '(h * w)\n', (2732, 2739), True, 'import numpy as np\n'), ((3340, 3384), 'numpy.einsum', 'np.einsum', (['"""...ji,...jk ->...ik"""', 'winI', 'winI'], {}), "('...ji,...jk ->...ik', winI, winI)\n", (3349, 3384), True, 'import numpy as np\n'), ((3688, 3715), 'numpy.tile', 'np.tile', (['win_inds', 'win_size'], {}), '(win_inds, win_size)\n', (3695, 3715), True, 'import numpy as np\n'), ((3741, 3770), 'numpy.repeat', 'np.repeat', (['win_inds', 'win_size'], {}), '(win_inds, win_size)\n', (3750, 3770), True, 'import numpy as np\n'), ((7211, 7255), 'cv2.imread', 'cv2.imread', (['args.scribbles', 'cv2.IMREAD_COLOR'], {}), '(args.scribbles, cv2.IMREAD_COLOR)\n', (7221, 7255), False, 'import cv2\n'), ((7507, 7569), 'logging.error', 'logging.error', (['"""Either trimap or scribbles must be specified."""'], {}), "('Either trimap or scribbles must be specified.')\n", (7520, 7569), False, 'import logging\n'), ((3497, 3506), 'numpy.eye', 'np.eye', (['(3)'], {}), '(3)\n', (3503, 3506), True, 'import numpy as np\n'), ((3619, 3668), 'numpy.einsum', 'np.einsum', (['"""...ij,...kj->...ik"""', 'X', '(winI - win_mu)'], {}), "('...ij,...kj->...ik', X, winI - win_mu)\n", (3628, 3668), True, 'import numpy as np\n'), ((6181, 6214), 'numpy.sum', 'np.sum', (['(scribbles - image)'], {'axis': '(2)'}), '(scribbles - image, axis=2)\n', (6187, 6214), True, 'import numpy as np\n'), ((7372, 7417), 'cv2.imread', 'cv2.imread', (['args.trimap', 'cv2.IMREAD_GRAYSCALE'], {}), '(args.trimap, cv2.IMREAD_GRAYSCALE)\n', (7382, 7417), False, 'import cv2\n'), ((3010, 
3049), 'numpy.ones', 'np.ones', (['(win_diam, win_diam)', 'np.uint8'], {}), '((win_diam, win_diam), np.uint8)\n', (3017, 3049), True, 'import numpy as np\n')] |
import numpy as np
import pandas as pd
import os
import re
import tensorflow as tf
import seaborn as sns
import matplotlib.pyplot as plt
import keras
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import OneHotEncoder
from keras.preprocessing import image
from tensorflow.keras.models import Model
from tensorflow.keras.layers import Dense, Input, Dropout, Conv2D, MaxPool2D, GlobalAveragePooling2D, Concatenate, BatchNormalization
from tensorflow.keras import regularizers
from load_data import labels, X_train, X_test, X_val, y_train, y_test, y_val
def _build_cnn(num_outputs, output_activation):
    """Builds the shared two-conv-block CNN used by every label head.

    Args:
        num_outputs: number of units in the final Dense layer.
        output_activation: final-layer activation ('sigmoid' for the binary
            CC head, 'softmax' for the multi-class D and Y heads).

    Returns: an uncompiled tf.keras Model taking (84, 150, 3) images.
    """
    inputs = Input(shape=(84, 150, 3))
    y = Conv2D(32, 5, activation='relu')(inputs)
    y = MaxPool2D(pool_size=(2, 2), strides=(2, 2))(y)
    y = Conv2D(64, 5, activation='relu')(y)
    y = MaxPool2D(pool_size=(2, 2), strides=(2, 2))(y)
    x = keras.layers.Flatten()(y)
    x = Dense(128, activation='relu')(x)
    outputs = Dense(num_outputs, activation=output_activation)(x)
    return Model(inputs, outputs)


# Defining the model for CC (binary label, sigmoid + binary crossentropy)
ConvMod_CC = _build_cnn(1, 'sigmoid')
ConvMod_CC.summary()
ConvMod_CC.compile(optimizer='adam',
                   loss='binary_crossentropy',
                   metrics=['accuracy'])
CC_fit = ConvMod_CC.fit(X_train, labels.CC[y_train], epochs=25, batch_size=32,
                        validation_data=(X_val, labels.CC[y_val]))
ConvMod_CC.save('models/plain_CC.h5')
np.save('plain_CC_training.npy', CC_fit.history)

# Defining the model for D (5 classes, one-hot targets + categorical crossentropy)
ConvMod_D = _build_cnn(5, 'softmax')
ConvMod_D.summary()
# one hot encode labels for categorical_crossentropy loss
cat = OneHotEncoder()
one_hot_D_train = cat.fit_transform(np.array(labels.D[y_train]).reshape(-1, 1)).toarray()
print(one_hot_D_train.shape)
# BUG FIX: validation labels must be encoded with the encoder fitted on the
# training labels (transform, not fit_transform) — refitting on the validation
# split could reorder or shrink the category set and scramble the columns.
one_hot_D_val = cat.transform(np.array(labels.D[y_val]).reshape(-1, 1)).toarray()
ConvMod_D.compile(optimizer='adam',
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])
D_fit = ConvMod_D.fit(X_train, one_hot_D_train, epochs=25, batch_size=32,
                      validation_data=(X_val, one_hot_D_val))
ConvMod_D.save('models/plain_D.h5')
np.save('plain_D_training.npy', D_fit.history)

# Defining the model for Y (11 classes, integer targets + sparse crossentropy)
ConvMod_Y = _build_cnn(11, 'softmax')
ConvMod_Y.summary()
ConvMod_Y.compile(optimizer='adam',
                  loss='sparse_categorical_crossentropy',
                  metrics=['accuracy'])
Y_fit = ConvMod_Y.fit(X_train, labels.Y[y_train], epochs=25, batch_size=32,
                      validation_data=(X_val, labels.Y[y_val]))
ConvMod_Y.save('models/plain_Y.h5')
np.save('plain_Y_training.npy', Y_fit.history) | [
"tensorflow.keras.layers.Input",
"tensorflow.keras.layers.Conv2D",
"keras.layers.Flatten",
"sklearn.preprocessing.OneHotEncoder",
"numpy.array",
"tensorflow.keras.layers.Dense",
"tensorflow.keras.models.Model",
"numpy.save",
"tensorflow.keras.layers.MaxPool2D"
] | [((623, 648), 'tensorflow.keras.layers.Input', 'Input', ([], {'shape': '(84, 150, 3)'}), '(shape=(84, 150, 3))\n', (628, 648), False, 'from tensorflow.keras.layers import Dense, Input, Dropout, Conv2D, MaxPool2D, GlobalAveragePooling2D, Concatenate, BatchNormalization\n'), ((960, 982), 'tensorflow.keras.models.Model', 'Model', (['inputs', 'outputs'], {}), '(inputs, outputs)\n', (965, 982), False, 'from tensorflow.keras.models import Model\n'), ((1294, 1342), 'numpy.save', 'np.save', (['"""plain_CC_training.npy"""', 'CC_fit.history'], {}), "('plain_CC_training.npy', CC_fit.history)\n", (1301, 1342), True, 'import numpy as np\n'), ((1380, 1405), 'tensorflow.keras.layers.Input', 'Input', ([], {'shape': '(84, 150, 3)'}), '(shape=(84, 150, 3))\n', (1385, 1405), False, 'from tensorflow.keras.layers import Dense, Input, Dropout, Conv2D, MaxPool2D, GlobalAveragePooling2D, Concatenate, BatchNormalization\n'), ((1716, 1738), 'tensorflow.keras.models.Model', 'Model', (['inputs', 'outputs'], {}), '(inputs, outputs)\n', (1721, 1738), False, 'from tensorflow.keras.models import Model\n'), ((1823, 1838), 'sklearn.preprocessing.OneHotEncoder', 'OneHotEncoder', ([], {}), '()\n', (1836, 1838), False, 'from sklearn.preprocessing import OneHotEncoder\n'), ((2327, 2373), 'numpy.save', 'np.save', (['"""plain_D_training.npy"""', 'D_fit.history'], {}), "('plain_D_training.npy', D_fit.history)\n", (2334, 2373), True, 'import numpy as np\n'), ((2411, 2436), 'tensorflow.keras.layers.Input', 'Input', ([], {'shape': '(84, 150, 3)'}), '(shape=(84, 150, 3))\n', (2416, 2436), False, 'from tensorflow.keras.layers import Dense, Input, Dropout, Conv2D, MaxPool2D, GlobalAveragePooling2D, Concatenate, BatchNormalization\n'), ((2748, 2770), 'tensorflow.keras.models.Model', 'Model', (['inputs', 'outputs'], {}), '(inputs, outputs)\n', (2753, 2770), False, 'from tensorflow.keras.models import Model\n'), ((3086, 3132), 'numpy.save', 'np.save', (['"""plain_Y_training.npy"""', 'Y_fit.history'], {}), 
"('plain_Y_training.npy', Y_fit.history)\n", (3093, 3132), True, 'import numpy as np\n'), ((654, 686), 'tensorflow.keras.layers.Conv2D', 'Conv2D', (['(32)', '(5)'], {'activation': '"""relu"""'}), "(32, 5, activation='relu')\n", (660, 686), False, 'from tensorflow.keras.layers import Dense, Input, Dropout, Conv2D, MaxPool2D, GlobalAveragePooling2D, Concatenate, BatchNormalization\n'), ((699, 742), 'tensorflow.keras.layers.MaxPool2D', 'MaxPool2D', ([], {'pool_size': '(2, 2)', 'strides': '(2, 2)'}), '(pool_size=(2, 2), strides=(2, 2))\n', (708, 742), False, 'from tensorflow.keras.layers import Dense, Input, Dropout, Conv2D, MaxPool2D, GlobalAveragePooling2D, Concatenate, BatchNormalization\n'), ((748, 780), 'tensorflow.keras.layers.Conv2D', 'Conv2D', (['(64)', '(5)'], {'activation': '"""relu"""'}), "(64, 5, activation='relu')\n", (754, 780), False, 'from tensorflow.keras.layers import Dense, Input, Dropout, Conv2D, MaxPool2D, GlobalAveragePooling2D, Concatenate, BatchNormalization\n'), ((788, 831), 'tensorflow.keras.layers.MaxPool2D', 'MaxPool2D', ([], {'pool_size': '(2, 2)', 'strides': '(2, 2)'}), '(pool_size=(2, 2), strides=(2, 2))\n', (797, 831), False, 'from tensorflow.keras.layers import Dense, Input, Dropout, Conv2D, MaxPool2D, GlobalAveragePooling2D, Concatenate, BatchNormalization\n'), ((838, 860), 'keras.layers.Flatten', 'keras.layers.Flatten', ([], {}), '()\n', (858, 860), False, 'import keras\n'), ((868, 897), 'tensorflow.keras.layers.Dense', 'Dense', (['(128)'], {'activation': '"""relu"""'}), "(128, activation='relu')\n", (873, 897), False, 'from tensorflow.keras.layers import Dense, Input, Dropout, Conv2D, MaxPool2D, GlobalAveragePooling2D, Concatenate, BatchNormalization\n'), ((912, 942), 'tensorflow.keras.layers.Dense', 'Dense', (['(1)'], {'activation': '"""sigmoid"""'}), "(1, activation='sigmoid')\n", (917, 942), False, 'from tensorflow.keras.layers import Dense, Input, Dropout, Conv2D, MaxPool2D, GlobalAveragePooling2D, Concatenate, 
BatchNormalization\n'), ((1411, 1443), 'tensorflow.keras.layers.Conv2D', 'Conv2D', (['(32)', '(5)'], {'activation': '"""relu"""'}), "(32, 5, activation='relu')\n", (1417, 1443), False, 'from tensorflow.keras.layers import Dense, Input, Dropout, Conv2D, MaxPool2D, GlobalAveragePooling2D, Concatenate, BatchNormalization\n'), ((1456, 1499), 'tensorflow.keras.layers.MaxPool2D', 'MaxPool2D', ([], {'pool_size': '(2, 2)', 'strides': '(2, 2)'}), '(pool_size=(2, 2), strides=(2, 2))\n', (1465, 1499), False, 'from tensorflow.keras.layers import Dense, Input, Dropout, Conv2D, MaxPool2D, GlobalAveragePooling2D, Concatenate, BatchNormalization\n'), ((1505, 1537), 'tensorflow.keras.layers.Conv2D', 'Conv2D', (['(64)', '(5)'], {'activation': '"""relu"""'}), "(64, 5, activation='relu')\n", (1511, 1537), False, 'from tensorflow.keras.layers import Dense, Input, Dropout, Conv2D, MaxPool2D, GlobalAveragePooling2D, Concatenate, BatchNormalization\n'), ((1545, 1588), 'tensorflow.keras.layers.MaxPool2D', 'MaxPool2D', ([], {'pool_size': '(2, 2)', 'strides': '(2, 2)'}), '(pool_size=(2, 2), strides=(2, 2))\n', (1554, 1588), False, 'from tensorflow.keras.layers import Dense, Input, Dropout, Conv2D, MaxPool2D, GlobalAveragePooling2D, Concatenate, BatchNormalization\n'), ((1595, 1617), 'keras.layers.Flatten', 'keras.layers.Flatten', ([], {}), '()\n', (1615, 1617), False, 'import keras\n'), ((1625, 1654), 'tensorflow.keras.layers.Dense', 'Dense', (['(128)'], {'activation': '"""relu"""'}), "(128, activation='relu')\n", (1630, 1654), False, 'from tensorflow.keras.layers import Dense, Input, Dropout, Conv2D, MaxPool2D, GlobalAveragePooling2D, Concatenate, BatchNormalization\n'), ((1669, 1699), 'tensorflow.keras.layers.Dense', 'Dense', (['(5)'], {'activation': '"""softmax"""'}), "(5, activation='softmax')\n", (1674, 1699), False, 'from tensorflow.keras.layers import Dense, Input, Dropout, Conv2D, MaxPool2D, GlobalAveragePooling2D, Concatenate, BatchNormalization\n'), ((2442, 2474), 
'tensorflow.keras.layers.Conv2D', 'Conv2D', (['(32)', '(5)'], {'activation': '"""relu"""'}), "(32, 5, activation='relu')\n", (2448, 2474), False, 'from tensorflow.keras.layers import Dense, Input, Dropout, Conv2D, MaxPool2D, GlobalAveragePooling2D, Concatenate, BatchNormalization\n'), ((2487, 2530), 'tensorflow.keras.layers.MaxPool2D', 'MaxPool2D', ([], {'pool_size': '(2, 2)', 'strides': '(2, 2)'}), '(pool_size=(2, 2), strides=(2, 2))\n', (2496, 2530), False, 'from tensorflow.keras.layers import Dense, Input, Dropout, Conv2D, MaxPool2D, GlobalAveragePooling2D, Concatenate, BatchNormalization\n'), ((2536, 2568), 'tensorflow.keras.layers.Conv2D', 'Conv2D', (['(64)', '(5)'], {'activation': '"""relu"""'}), "(64, 5, activation='relu')\n", (2542, 2568), False, 'from tensorflow.keras.layers import Dense, Input, Dropout, Conv2D, MaxPool2D, GlobalAveragePooling2D, Concatenate, BatchNormalization\n'), ((2576, 2619), 'tensorflow.keras.layers.MaxPool2D', 'MaxPool2D', ([], {'pool_size': '(2, 2)', 'strides': '(2, 2)'}), '(pool_size=(2, 2), strides=(2, 2))\n', (2585, 2619), False, 'from tensorflow.keras.layers import Dense, Input, Dropout, Conv2D, MaxPool2D, GlobalAveragePooling2D, Concatenate, BatchNormalization\n'), ((2626, 2648), 'keras.layers.Flatten', 'keras.layers.Flatten', ([], {}), '()\n', (2646, 2648), False, 'import keras\n'), ((2656, 2685), 'tensorflow.keras.layers.Dense', 'Dense', (['(128)'], {'activation': '"""relu"""'}), "(128, activation='relu')\n", (2661, 2685), False, 'from tensorflow.keras.layers import Dense, Input, Dropout, Conv2D, MaxPool2D, GlobalAveragePooling2D, Concatenate, BatchNormalization\n'), ((2700, 2731), 'tensorflow.keras.layers.Dense', 'Dense', (['(11)'], {'activation': '"""softmax"""'}), "(11, activation='softmax')\n", (2705, 2731), False, 'from tensorflow.keras.layers import Dense, Input, Dropout, Conv2D, MaxPool2D, GlobalAveragePooling2D, Concatenate, BatchNormalization\n'), ((1875, 1902), 'numpy.array', 'np.array', (['labels.D[y_train]'], 
{}), '(labels.D[y_train])\n', (1883, 1902), True, 'import numpy as np\n'), ((1992, 2017), 'numpy.array', 'np.array', (['labels.D[y_val]'], {}), '(labels.D[y_val])\n', (2000, 2017), True, 'import numpy as np\n')] |
#K-means Clustering
#%reset -f
#Importing Libraries
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.cluster import DBSCAN
from pyclustertend import hopkins
from sklearn.cluster import MeanShift, estimate_bandwidth
from sklearn.metrics import silhouette_score
# --- Importing Dataset -------------------------------------------------------
df = pd.read_csv('appdata10.csv')
# Prepend a sequential user-id column.
df_user = pd.DataFrame(np.arange(0, len(df)), columns=['user'])
df = pd.concat([df_user, df], axis=1)
df.info()
df.head()
df.tail()
df.columns.values
# --- Converting columns to Datetime ------------------------------------------
df['Timestamp'] = pd.to_datetime(df['Timestamp'])
time_new = df['Timestamp'].iloc[0]
df['Hour'] = df['Timestamp'].apply(lambda ts: ts.hour)
df['Month'] = df['Timestamp'].apply(lambda ts: ts.month)
df['Day'] = df['Timestamp'].apply(lambda ts: ts.dayofweek)
# NOTE(review): assumes the CSV ships a *string* 'hour' column (e.g. "h02:00")
# distinct from the 'Hour' column built above — confirm against appdata10.csv;
# if the column is absent or numeric, this line raises.
df["hour"] = df.hour.str.slice(1, 3).astype(int)
# --- Data analysis -----------------------------------------------------------
statistical = df.describe()
# Verifying null values
sns.heatmap(df.isnull(), yticklabels=False, cbar=False, cmap='viridis')
df.isna().any()
df.isna().sum()
# --- Define feature matrix X -------------------------------------------------
features = ['tipo_de_negociacao', 'percentual_venda', 'quantas_correcoes',
            'quantos_pontos_avancou', 'quantos_pontos_retornados', 'amplitude']
X = df[features]
# Taking care of missing data (template, intentionally disabled)
'''
from sklearn.preprocessing import Imputer
imputer = Imputer(missing_values='NaN', strategy='mean', axis=0)
imputer = imputer.fit(X[:, 1:3])
X[:, 1:3] = imputer.transform(X[:, 1:3] )
'''
# Encoding categorical data (template, intentionally disabled)
'''
from sklearn.preprocessing import LabelEncoder, OneHotEncoder
labelenconder_x = LabelEncoder()
X.iloc[:, 1] = labelenconder_x.fit_transform(X.iloc[:, 1])
onehotencoder_x = OneHotEncoder(categorical_features=[1])
X2 = pd.DataFrame(onehotencoder_x.fit_transform(X).toarray())
y = pd.DataFrame(labelenconder_x.fit_transform(y))
#Dummies Trap
X2 = X2.iloc[:, 1:]
X2 = X2.iloc[:,[0,1,2]]
X2 = X2.rename(columns={1:'pay_schedule_1', 2:'pay_schedule_2', 3:'pay_schedule_3'})
X = pd.concat([X,X2], axis=1)
X = X.drop(['pay_schedule'], axis=1)
'''
# Visualizing data
# NOTE(review): the columns below ('target_names', 'mean radius', ...) are not
# created anywhere above and look copied from a breast-cancer example — confirm
# they exist in appdata10.csv or drop these plot calls.
sns.pairplot(data=df, hue='target_names', vars= ['mean radius', 'mean texture', 'mean area', 'mean perimeter', 'mean smoothness'])
sns.countplot(x='target_names', data=df, label='Count')
sns.scatterplot(x='mean area', y='mean smoothness',hue='target_names', data=df)
plt.figure(figsize=(20,10))
sns.heatmap(data=df.corr(), annot=True, cmap='viridis')
# Hopkins Test
'''
the null hypothesis (no meaningfull cluster) happens when the hopkins test is
around 0.5 and the hopkins test tends to 0 when meaningful cluster exists in
the space. Usually, we can believe in the existence of clusters when the
hopkins score is bellow 0.25.
Here the value of the hopkins test is quite high but one could think there is
cluster in our subspace. BUT the hopkins test is highly influenced by outliers,
let's try once again with normalised data.
'''
hopkins(X, X.shape[0])
# Building the DBSCAN model
dbscan = DBSCAN(eps = 0.2, min_samples = 5, metric = 'euclidean')
y_pred = dbscan.fit_predict(X)
# Building the mean-shift model
# bandwidth = interaction length between samples (the algorithm's bandwidth).
bandwidth = estimate_bandwidth(X, quantile = .1, n_samples = 500)
mean_shift = MeanShift(bandwidth = bandwidth, bin_seeding = True)
mean_shift.fit(X)
# Using the Elbow Method to find the optimal number of clusters
from sklearn.cluster import KMeans
wcss = []
for i in range(1,11):
    kmeans = KMeans(n_clusters=i, init='k-means++', n_init=10, max_iter=300, random_state=0)  # n_init and max_iter are the defaults.
    kmeans.fit(X)
    wcss.append(kmeans.inertia_)
plt.plot(range(1,11), wcss)
plt.title('The Elbow Method')
plt.xlabel('Number of Clusters')
plt.ylabel('WCSS')
plt.show()  # Pick k where the curve stops dropping sharply — here, 5 clusters.
# Applying the K-means to Dataset
kmeans = KMeans(n_clusters=5, init='k-means++', n_init=10, max_iter=300, random_state=0)
kmeans.fit(X)
print(90*'_')
print("\nCount of features in each cluster")
print(90*'_')
pd.value_counts(kmeans.labels_, sort=False)
# Silhouette Score
# BUG FIX: the original referenced the undefined names `modelo_v1` and `pca`
# (NameError); score the fitted k-means labels on the feature matrix X.
labels = kmeans.labels_
silhouette_score(X, labels, metric = 'euclidean')
# Function that creates a DataFrame with a column for Cluster Number
def pd_centers(featuresUsed, centers):
    """Returns a DataFrame of cluster centers with an int 'prediction' column.

    Each row is one cluster center; 'prediction' holds the cluster index.
    """
    column_names = list(featuresUsed) + ['prediction']
    # Tack the cluster index onto each center row.
    rows = [np.append(center, cluster_idx)
            for cluster_idx, center in enumerate(centers)]
    table = pd.DataFrame(rows, columns=column_names)
    table['prediction'] = table['prediction'].astype(int)
    return table
# Function that creates Parallel Plots
from itertools import cycle, islice
from pandas.plotting import parallel_coordinates
def parallel_plot(data):
    """Draws a parallel-coordinates plot of `data`, one colour per row."""
    # Repeat the five base colours to cover every row of `data`.
    colour_cycle = cycle(['b', 'r', 'g', 'y', 'k'])
    row_colours = list(islice(colour_cycle, None, len(data)))
    plt.figure(figsize=(15, 8)).gca().axes.set_ylim([-3, +3])
    parallel_coordinates(data, 'prediction', color=row_colours, marker='o')
# Build the cluster-centers table and draw its parallel-coordinates view.
P = pd_centers(featuresUsed=features, centers=kmeans.cluster_centers_)
P  # no-op expression outside a REPL; kept from the original
parallel_plot(P)

y_kmeans = kmeans.fit_predict(X)

# Visualising the clusters: one scatter per cluster, then the centroids.
points = np.array(X)
cluster_styles = [('red', 'Careful'), ('blue', 'Standard'), ('green', 'Target'),
                  ('cyan', 'Careless'), ('magenta', 'Sensible')]
for cluster_id, (colour, label) in enumerate(cluster_styles):
    members = points[y_kmeans == cluster_id]
    plt.scatter(members[:, 0], members[:, 1], s=100, c=colour, label=label)
plt.scatter(kmeans.cluster_centers_[:, 0], kmeans.cluster_centers_[:, 1],
            s=300, c='yellow', label='Centroids')
plt.title('Clusters of Clients')
plt.xlabel('Annual Income (R$)')
plt.ylabel('Spending Score (1 - 100)')
plt.legend()
plt.show()
| [
"pandas.plotting.parallel_coordinates",
"pandas.read_csv",
"matplotlib.pyplot.ylabel",
"pandas.value_counts",
"numpy.array",
"seaborn.scatterplot",
"sklearn.cluster.MeanShift",
"seaborn.pairplot",
"sklearn.cluster.DBSCAN",
"pandas.to_datetime",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.sc... | [((342, 370), 'pandas.read_csv', 'pd.read_csv', (['"""appdata10.csv"""'], {}), "('appdata10.csv')\n", (353, 370), True, 'import pandas as pd\n'), ((439, 471), 'pandas.concat', 'pd.concat', (['[df_user, df]'], {'axis': '(1)'}), '([df_user, df], axis=1)\n', (448, 471), True, 'import pandas as pd\n'), ((572, 603), 'pandas.to_datetime', 'pd.to_datetime', (["df['Timestamp']"], {}), "(df['Timestamp'])\n", (586, 603), True, 'import pandas as pd\n'), ((2055, 2188), 'seaborn.pairplot', 'sns.pairplot', ([], {'data': 'df', 'hue': '"""target_names"""', 'vars': "['mean radius', 'mean texture', 'mean area', 'mean perimeter',\n 'mean smoothness']"}), "(data=df, hue='target_names', vars=['mean radius',\n 'mean texture', 'mean area', 'mean perimeter', 'mean smoothness'])\n", (2067, 2188), True, 'import seaborn as sns\n'), ((2186, 2241), 'seaborn.countplot', 'sns.countplot', ([], {'x': '"""target_names"""', 'data': 'df', 'label': '"""Count"""'}), "(x='target_names', data=df, label='Count')\n", (2199, 2241), True, 'import seaborn as sns\n'), ((2242, 2327), 'seaborn.scatterplot', 'sns.scatterplot', ([], {'x': '"""mean area"""', 'y': '"""mean smoothness"""', 'hue': '"""target_names"""', 'data': 'df'}), "(x='mean area', y='mean smoothness', hue='target_names', data=df\n )\n", (2257, 2327), True, 'import seaborn as sns\n'), ((2322, 2350), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(20, 10)'}), '(figsize=(20, 10))\n', (2332, 2350), True, 'import matplotlib.pyplot as plt\n'), ((2894, 2916), 'pyclustertend.hopkins', 'hopkins', (['X', 'X.shape[0]'], {}), '(X, X.shape[0])\n', (2901, 2916), False, 'from pyclustertend import hopkins\n'), ((2957, 3007), 'sklearn.cluster.DBSCAN', 'DBSCAN', ([], {'eps': '(0.2)', 'min_samples': '(5)', 'metric': '"""euclidean"""'}), "(eps=0.2, min_samples=5, metric='euclidean')\n", (2963, 3007), False, 'from sklearn.cluster import DBSCAN\n'), ((3205, 3255), 'sklearn.cluster.estimate_bandwidth', 'estimate_bandwidth', 
(['X'], {'quantile': '(0.1)', 'n_samples': '(500)'}), '(X, quantile=0.1, n_samples=500)\n', (3223, 3255), False, 'from sklearn.cluster import MeanShift, estimate_bandwidth\n'), ((3272, 3320), 'sklearn.cluster.MeanShift', 'MeanShift', ([], {'bandwidth': 'bandwidth', 'bin_seeding': '(True)'}), '(bandwidth=bandwidth, bin_seeding=True)\n', (3281, 3320), False, 'from sklearn.cluster import MeanShift, estimate_bandwidth\n'), ((3678, 3707), 'matplotlib.pyplot.title', 'plt.title', (['"""The Elbow Method"""'], {}), "('The Elbow Method')\n", (3687, 3707), True, 'import matplotlib.pyplot as plt\n'), ((3708, 3740), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Number of Clusters"""'], {}), "('Number of Clusters')\n", (3718, 3740), True, 'import matplotlib.pyplot as plt\n'), ((3741, 3759), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""WCSS"""'], {}), "('WCSS')\n", (3751, 3759), True, 'import matplotlib.pyplot as plt\n'), ((3760, 3770), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3768, 3770), True, 'import matplotlib.pyplot as plt\n'), ((3923, 4002), 'sklearn.cluster.KMeans', 'KMeans', ([], {'n_clusters': '(5)', 'init': '"""k-means++"""', 'n_init': '(10)', 'max_iter': '(300)', 'random_state': '(0)'}), "(n_clusters=5, init='k-means++', n_init=10, max_iter=300, random_state=0)\n", (3929, 4002), False, 'from sklearn.cluster import KMeans\n'), ((4090, 4133), 'pandas.value_counts', 'pd.value_counts', (['kmeans.labels_'], {'sort': '(False)'}), '(kmeans.labels_, sort=False)\n', (4105, 4133), True, 'import pandas as pd\n'), ((4181, 4230), 'sklearn.metrics.silhouette_score', 'silhouette_score', (['pca', 'labels'], {'metric': '"""euclidean"""'}), "(pca, labels, metric='euclidean')\n", (4197, 4230), False, 'from sklearn.metrics import silhouette_score\n'), ((5743, 5859), 'matplotlib.pyplot.scatter', 'plt.scatter', (['kmeans.cluster_centers_[:, 0]', 'kmeans.cluster_centers_[:, 1]'], {'s': '(300)', 'c': '"""yellow"""', 'label': '"""Centroids"""'}), 
"(kmeans.cluster_centers_[:, 0], kmeans.cluster_centers_[:, 1], s\n =300, c='yellow', label='Centroids')\n", (5754, 5859), True, 'import matplotlib.pyplot as plt\n'), ((5860, 5892), 'matplotlib.pyplot.title', 'plt.title', (['"""Clusters of Clients"""'], {}), "('Clusters of Clients')\n", (5869, 5892), True, 'import matplotlib.pyplot as plt\n'), ((5893, 5925), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Annual Income (R$)"""'], {}), "('Annual Income (R$)')\n", (5903, 5925), True, 'import matplotlib.pyplot as plt\n'), ((5926, 5964), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Spending Score (1 - 100)"""'], {}), "('Spending Score (1 - 100)')\n", (5936, 5964), True, 'import matplotlib.pyplot as plt\n'), ((5965, 5977), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (5975, 5977), True, 'import matplotlib.pyplot as plt\n'), ((5980, 5990), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5988, 5990), True, 'import matplotlib.pyplot as plt\n'), ((3487, 3566), 'sklearn.cluster.KMeans', 'KMeans', ([], {'n_clusters': 'i', 'init': '"""k-means++"""', 'n_init': '(10)', 'max_iter': '(300)', 'random_state': '(0)'}), "(n_clusters=i, init='k-means++', n_init=10, max_iter=300, random_state=0)\n", (3493, 3566), False, 'from sklearn.cluster import KMeans\n'), ((4567, 4600), 'pandas.DataFrame', 'pd.DataFrame', (['Z'], {'columns': 'colNames'}), '(Z, columns=colNames)\n', (4579, 4600), True, 'import pandas as pd\n'), ((4943, 5012), 'pandas.plotting.parallel_coordinates', 'parallel_coordinates', (['data', '"""prediction"""'], {'color': 'my_colors', 'marker': '"""o"""'}), "(data, 'prediction', color=my_colors, marker='o')\n", (4963, 5012), False, 'from pandas.plotting import parallel_coordinates\n'), ((4460, 4479), 'numpy.append', 'np.append', (['A', 'index'], {}), '(A, index)\n', (4469, 4479), True, 'import numpy as np\n'), ((5178, 5189), 'numpy.array', 'np.array', (['X'], {}), '(X)\n', (5186, 5189), True, 'import numpy as np\n'), ((5209, 5220), 'numpy.array', 
'np.array', (['X'], {}), '(X)\n', (5217, 5220), True, 'import numpy as np\n'), ((5291, 5302), 'numpy.array', 'np.array', (['X'], {}), '(X)\n', (5299, 5302), True, 'import numpy as np\n'), ((5322, 5333), 'numpy.array', 'np.array', (['X'], {}), '(X)\n', (5330, 5333), True, 'import numpy as np\n'), ((5406, 5417), 'numpy.array', 'np.array', (['X'], {}), '(X)\n', (5414, 5417), True, 'import numpy as np\n'), ((5437, 5448), 'numpy.array', 'np.array', (['X'], {}), '(X)\n', (5445, 5448), True, 'import numpy as np\n'), ((5520, 5531), 'numpy.array', 'np.array', (['X'], {}), '(X)\n', (5528, 5531), True, 'import numpy as np\n'), ((5551, 5562), 'numpy.array', 'np.array', (['X'], {}), '(X)\n', (5559, 5562), True, 'import numpy as np\n'), ((5635, 5646), 'numpy.array', 'np.array', (['X'], {}), '(X)\n', (5643, 5646), True, 'import numpy as np\n'), ((5666, 5677), 'numpy.array', 'np.array', (['X'], {}), '(X)\n', (5674, 5677), True, 'import numpy as np\n'), ((4833, 4865), 'itertools.cycle', 'cycle', (["['b', 'r', 'g', 'y', 'k']"], {}), "(['b', 'r', 'g', 'y', 'k'])\n", (4838, 4865), False, 'from itertools import cycle, islice\n'), ((4886, 4913), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(15, 8)'}), '(figsize=(15, 8))\n', (4896, 4913), True, 'import matplotlib.pyplot as plt\n')] |
"""Evaluation function handler for sequential or cascaded."""
import numpy as np
import sys
import torch
import torch.nn.functional as F
class SequentialEvalLoop:
    """Evaluation loop for a sequential (single-pass) model.

    Calling an instance runs one full evaluation epoch over ``loader`` and
    returns ``(batch_losses, batch_accs, logged_data)`` where ``logged_data``
    optionally carries concatenated logits/targets and penultimate-layer
    embeddings (captured via a forward hook on ``net.fc``).
    """
    def __init__(
        self,
        num_classes,
        keep_logits=False,
        keep_embeddings=False,
        verbose=False
    ):
        # num_classes: size of the one-hot target encoding.
        self.num_classes = num_classes
        # keep_logits: also return per-sample logits and targets.
        self.keep_logits = keep_logits
        # keep_embeddings: also return the inputs to net.fc (captured by hook).
        self.keep_embeddings = keep_embeddings
        # verbose: print per-batch progress to stdout.
        self.verbose = verbose
    def __call__(self, net, loader, criterion, epoch_i, device):
        """Run one evaluation epoch; ``epoch_i`` is unused here.

        Returns:
            batch_losses: list of per-batch loss floats.
            batch_accs: overall accuracy over the epoch (scalar).
            logged_data: dict with optional "logits"/"y"/"embeddings".
        """
        net.eval()
        batch_losses = []
        batch_correct = []
        batch_logits = []
        batch_embeddings = []
        ys = []
        global embedding
        sample_count = 0
        # Forward hook stashes the *input* to net.fc (the embedding) in a
        # module-level global so it can be read after each forward pass.
        # NOTE(review): the hook handle is discarded and never removed, so
        # repeated __call__ invocations accumulate hooks on net.fc.
        def embedding_hook_fn(module, x, output):  # pylint: disable=unused-argument
            global embedding  # pylint: disable=global-variable-undefined
            embedding = x[0]
        _ = net.fc.register_forward_hook(embedding_hook_fn)
        for batch_i, (data, targets) in enumerate(loader):
            if self.verbose:
                sys.stdout.write(f"\rBatch {batch_i+1}/{len(loader)}")
                sys.stdout.flush()
            # One-hot-ify targets (criterion expects one-hot vectors).
            y = torch.eye(self.num_classes)[targets]
            sample_count += y.shape[0]
            # Determine device placement
            data = data.to(device, non_blocking=True)
            # Forward pass; t=0 selects the model's first/only timestep.
            with torch.no_grad():
                logits = net(data, t=0).cpu()
            if self.keep_logits:
                batch_logits.append(logits)
                ys.append(targets.cpu())
            if self.keep_embeddings:
                batch_embeddings.append(embedding.cpu())
            # Compute loss
            loss = criterion(logits, y)
            batch_losses.append(loss.item())
            # Predictions
            softmax = F.softmax(logits, dim=1)
            y_pred = torch.argmax(softmax, dim=1)
            # Updates running statistics
            n_correct = torch.eq(targets, y_pred).sum().item()
            batch_correct.append(n_correct)
        # Epoch accuracy = total correct / total samples seen.
        batch_accs = np.sum(batch_correct) / float(sample_count)
        logged_data = {}
        if self.keep_logits:
            logged_data["logits"] = torch.cat(batch_logits)
            logged_data["y"] = torch.cat(ys)
        if self.keep_embeddings:
            logged_data["embeddings"] = torch.cat(batch_embeddings)
        return batch_losses, batch_accs, logged_data
class CascadedEvalLoop(object):
    """Evaluation loop for a cascaded model evaluated over multiple timesteps.

    For every batch the model is run for ``n_timesteps`` forward passes
    (``net(x, t)``), and losses/accuracies are tracked per timestep.
    Returns ``(batch_losses, batch_accs, logged_data)`` where both arrays
    are indexed by timestep.
    """
    def __init__(self, n_timesteps, num_classes,
                 keep_logits=False, keep_embeddings=False, verbose=False):
        # n_timesteps: number of cascaded forward passes per batch.
        self.n_timesteps = n_timesteps
        # num_classes: size of the one-hot target encoding.
        self.num_classes = num_classes
        # keep_logits / keep_embeddings: extra data to return per timestep.
        self.keep_logits = keep_logits
        self.keep_embeddings = keep_embeddings
        self.verbose = verbose
    def __call__(self, net, loader, criterion, epoch_i, device):
        """Run one evaluation epoch; ``epoch_i`` is unused here.

        Returns:
            batch_losses: ndarray of shape (n_batches, n_timesteps).
            batch_accs: per-timestep accuracy over the whole epoch.
            logged_data: dict with optional "logits"/"y"/"embeddings",
                shaped (time, batch, ...).
        """
        net.eval()
        batch_logits = []
        batch_embeddings = []
        ys = []
        global embedding
        # Forward hook stores the input of the final fc layer(s) in a
        # module-level global; read back after each forward pass below.
        # NOTE(review): hooks are registered on every __call__ and never
        # removed, so they accumulate across invocations.
        def embedding_hook_fn(module, x, output):  # pylint: disable=unused-argument
            global embedding  # pylint: disable=global-variable-undefined
            embedding = x[0]
        if net._multiple_fcs:
            for i, fc in enumerate(net.fcs):
                fc.register_forward_hook(embedding_hook_fn)
        else:
            net.fc.register_forward_hook(embedding_hook_fn)
        batch_losses = []
        batch_correct = []
        sample_count = 0
        for batch_i, (x, targets) in enumerate(loader):
            if self.verbose:
                sys.stdout.write(f"\rBatch {batch_i+1}/{len(loader)}")
                sys.stdout.flush()
            # One-hot-ify targets
            y = torch.eye(self.num_classes)[targets]
            sample_count += y.shape[0]
            if self.keep_logits:
                ys.append(targets)
            # Determine device placement
            x = x.to(device, non_blocking=True)
            timestep_correct = []
            timestep_losses = torch.zeros(self.n_timesteps)
            timestep_logits = []
            timestep_embeddings = []
            for t in range(self.n_timesteps):
                # Forward pass at timestep t.
                with torch.no_grad():
                    logits_t = net(x, t).cpu()
                if self.keep_logits:
                    timestep_logits.append(logits_t)
                if self.keep_embeddings:
                    global embedding
                    timestep_embeddings.append(embedding)
                # Compute loss
                loss_i = criterion(logits_t, y)
                # Log loss
                timestep_losses[t] = loss_i.item()
                # Predictions
                softmax_t = F.softmax(logits_t, dim=1)
                y_pred = torch.argmax(softmax_t, dim=1)
                # Updates running accuracy statistics
                n_correct = torch.eq(targets, y_pred).sum()
                timestep_correct.append(n_correct)
            # Update batch loss and compute average
            batch_losses.append(timestep_losses)
            batch_correct.append(torch.stack(timestep_correct))
            if self.keep_logits:
                # stack into shape=(time, batch, n_classes)
                timestep_logits = torch.stack(timestep_logits)
                batch_logits.append(timestep_logits)
            if self.keep_embeddings:
                timestep_embeddings = torch.stack(timestep_embeddings)
                batch_embeddings.append(timestep_embeddings)
        # Average over the batches per timestep
        batch_losses = torch.stack(batch_losses).detach().numpy()
        batch_correct = torch.stack(batch_correct).sum(dim=0)
        batch_accs = batch_correct.cpu().detach().numpy() / float(sample_count)
        # Compute loss and accuracy
        logged_data = {}
        if self.keep_logits:
            # concat over batch dim into shape=(time, batch, n_classes)
            batch_logits = torch.cat(batch_logits, dim=1)
            ys = torch.cat(ys)
            logged_data["logits"] = batch_logits
            logged_data["y"] = ys
        if self.keep_embeddings:
            # concat over batch dim into shape=(time, batch, n_features, spatial_dim)
            batch_embeddings = torch.cat(batch_embeddings, dim=1)
            logged_data["embeddings"] = batch_embeddings
        return batch_losses, batch_accs, logged_data
def get_eval_loop(n_timesteps, num_classes, cascaded, flags,
                  keep_logits=False, keep_embeddings=False,
                  verbose=False, tau_handler=None):
    """Retrieve a sequential or cascaded evaluation function.

    Args:
        n_timesteps: Number of timesteps for the cascaded loops.
        num_classes: Number of target classes (one-hot encoding size).
        cascaded: Unused; kept for backward compatibility with callers.
        flags: Config object whose ``train_mode`` attribute selects the
            loop type ("baseline", "cascaded" or "cascaded_seq").
        keep_logits: Whether the loop should also return logits/targets.
        keep_embeddings: Whether the loop should also return embeddings.
        verbose: Whether the loop prints per-batch progress.
        tau_handler: Unused; kept for backward compatibility with callers.

    Returns:
        A callable evaluation loop instance.

    Raises:
        ValueError: If ``flags.train_mode`` is not a supported mode.
    """
    if flags.train_mode == "baseline":
        return SequentialEvalLoop(
            num_classes,
            keep_logits,
            keep_embeddings,
            verbose
        )
    # "cascaded" and "cascaded_seq" previously had two byte-identical
    # branches; they construct the same loop, so handle them together.
    if flags.train_mode in ("cascaded", "cascaded_seq"):
        return CascadedEvalLoop(
            n_timesteps,
            num_classes,
            keep_logits,
            keep_embeddings,
            verbose
        )
    # Previously an unrecognised mode fell through and raised an opaque
    # UnboundLocalError on `eval_fxn`; fail fast with a clear message.
    raise ValueError(f"Unsupported train_mode: {flags.train_mode!r}")
| [
"torch.nn.functional.softmax",
"torch.eye",
"torch.stack",
"torch.eq",
"numpy.sum",
"torch.no_grad",
"sys.stdout.flush",
"torch.zeros",
"torch.cat",
"torch.argmax"
] | [((1739, 1763), 'torch.nn.functional.softmax', 'F.softmax', (['logits'], {'dim': '(1)'}), '(logits, dim=1)\n', (1748, 1763), True, 'import torch.nn.functional as F\n'), ((1779, 1807), 'torch.argmax', 'torch.argmax', (['softmax'], {'dim': '(1)'}), '(softmax, dim=1)\n', (1791, 1807), False, 'import torch\n'), ((1956, 1977), 'numpy.sum', 'np.sum', (['batch_correct'], {}), '(batch_correct)\n', (1962, 1977), True, 'import numpy as np\n'), ((2081, 2104), 'torch.cat', 'torch.cat', (['batch_logits'], {}), '(batch_logits)\n', (2090, 2104), False, 'import torch\n'), ((2130, 2143), 'torch.cat', 'torch.cat', (['ys'], {}), '(ys)\n', (2139, 2143), False, 'import torch\n'), ((2218, 2245), 'torch.cat', 'torch.cat', (['batch_embeddings'], {}), '(batch_embeddings)\n', (2227, 2245), False, 'import torch\n'), ((3762, 3791), 'torch.zeros', 'torch.zeros', (['self.n_timesteps'], {}), '(self.n_timesteps)\n', (3773, 3791), False, 'import torch\n'), ((5456, 5486), 'torch.cat', 'torch.cat', (['batch_logits'], {'dim': '(1)'}), '(batch_logits, dim=1)\n', (5465, 5486), False, 'import torch\n'), ((5498, 5511), 'torch.cat', 'torch.cat', (['ys'], {}), '(ys)\n', (5507, 5511), False, 'import torch\n'), ((5722, 5756), 'torch.cat', 'torch.cat', (['batch_embeddings'], {'dim': '(1)'}), '(batch_embeddings, dim=1)\n', (5731, 5756), False, 'import torch\n'), ((1111, 1129), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (1127, 1129), False, 'import sys\n'), ((1177, 1204), 'torch.eye', 'torch.eye', (['self.num_classes'], {}), '(self.num_classes)\n', (1186, 1204), False, 'import torch\n'), ((1364, 1379), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (1377, 1379), False, 'import torch\n'), ((3449, 3467), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (3465, 3467), False, 'import sys\n'), ((3506, 3533), 'torch.eye', 'torch.eye', (['self.num_classes'], {}), '(self.num_classes)\n', (3515, 3533), False, 'import torch\n'), ((4350, 4376), 'torch.nn.functional.softmax', 'F.softmax', 
(['logits_t'], {'dim': '(1)'}), '(logits_t, dim=1)\n', (4359, 4376), True, 'import torch.nn.functional as F\n'), ((4394, 4424), 'torch.argmax', 'torch.argmax', (['softmax_t'], {'dim': '(1)'}), '(softmax_t, dim=1)\n', (4406, 4424), False, 'import torch\n'), ((4684, 4713), 'torch.stack', 'torch.stack', (['timestep_correct'], {}), '(timestep_correct)\n', (4695, 4713), False, 'import torch\n'), ((4821, 4849), 'torch.stack', 'torch.stack', (['timestep_logits'], {}), '(timestep_logits)\n', (4832, 4849), False, 'import torch\n'), ((4963, 4995), 'torch.stack', 'torch.stack', (['timestep_embeddings'], {}), '(timestep_embeddings)\n', (4974, 4995), False, 'import torch\n'), ((5176, 5202), 'torch.stack', 'torch.stack', (['batch_correct'], {}), '(batch_correct)\n', (5187, 5202), False, 'import torch\n'), ((3926, 3941), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (3939, 3941), False, 'import torch\n'), ((4492, 4517), 'torch.eq', 'torch.eq', (['targets', 'y_pred'], {}), '(targets, y_pred)\n', (4500, 4517), False, 'import torch\n'), ((5113, 5138), 'torch.stack', 'torch.stack', (['batch_losses'], {}), '(batch_losses)\n', (5124, 5138), False, 'import torch\n'), ((1862, 1887), 'torch.eq', 'torch.eq', (['targets', 'y_pred'], {}), '(targets, y_pred)\n', (1870, 1887), False, 'import torch\n')] |
import sys
import numpy as np
# Read the whole input at once: n (array length), q (query count),
# then q pairs (l, r), all whitespace-separated integers.
n, q, *lr = map(int, sys.stdin.read().split())
# Reshape the flat pair list into two vectors of query bounds.
l, r = np.array(lr).reshape(q, 2).T
# Shift the 1-based inclusive bounds to 0-based (r stays inclusive).
l -= 1
r -= 1
def main():
    """Print, as a 0/1 string, the parity of how many query intervals
    [l_i, r_i] cover each index of the length-n array."""
    # Difference array: +1 at every interval start, -1 one past every end.
    delta = np.zeros(n + 1, dtype=np.int32)
    np.add.at(delta, l, 1)
    np.subtract.at(delta, r + 1, 1)
    # Prefix-summing the difference array yields per-index coverage counts.
    coverage = np.cumsum(delta)
    # Emit the least-significant bit of each count (drop the sentinel slot).
    print("".join((coverage[:-1] % 2).astype(str)))
if __name__ == "__main__":
    # Script entry point: reads stdin, prints the answer.
    main()
| [
"numpy.subtract.at",
"numpy.array",
"numpy.zeros",
"numpy.add.at",
"numpy.cumsum",
"sys.stdin.read"
] | [((164, 195), 'numpy.zeros', 'np.zeros', (['(n + 1)'], {'dtype': 'np.int32'}), '(n + 1, dtype=np.int32)\n', (172, 195), True, 'import numpy as np\n'), ((201, 221), 'numpy.add.at', 'np.add.at', (['res', 'l', '(1)'], {}), '(res, l, 1)\n', (210, 221), True, 'import numpy as np\n'), ((227, 256), 'numpy.subtract.at', 'np.subtract.at', (['res', '(r + 1)', '(1)'], {}), '(res, r + 1, 1)\n', (241, 256), True, 'import numpy as np\n'), ((262, 285), 'numpy.cumsum', 'np.cumsum', (['res'], {'out': 'res'}), '(res, out=res)\n', (271, 285), True, 'import numpy as np\n'), ((57, 73), 'sys.stdin.read', 'sys.stdin.read', ([], {}), '()\n', (71, 73), False, 'import sys\n'), ((91, 103), 'numpy.array', 'np.array', (['lr'], {}), '(lr)\n', (99, 103), True, 'import numpy as np\n')] |
import sys
import time
import yaml
import math
import signal
import datetime
import threading
import traceback
import numpy as np
from cvxopt import matrix, solvers
#from scipy.spatial import ConvexHull
import matplotlib.patches as ptc
import matplotlib.pyplot as plt
import matplotlib.animation as animation
# from actions import *
# Per-agent plot colours; agent "uavN" picks COLORS[N] (see Agent.__init__).
COLORS = [(0.0, 0.0, 0.0), (0.99, 0.0, 0.0), (0.0, 0.99, 0.0), (0.0, 0.0, 0.99), (0.99, 0.99, 0.0), (0.99, 0.0, 0.99), (0.0, 0.99, 0.99)]
# Module-level shared state, populated by Simulator.loadParams().
global_boundary = []  # workspace boundary polygon vertices
xlim = []             # workspace x extent [min, max]
ylim = []             # workspace y extent [min, max]
test_type = 0         # 0: buffered-Voronoi-cell QP step, 1: Lloyd/centroid step
world = None          # occupancy-grid placeholder; assigned but not read here
def is_in_space(p, tol):
    """Return True when point ``p`` lies inside the global workspace
    rectangle, expanded on every side by ``tol``."""
    global xlim, ylim
    within_x = xlim[0] - tol <= p[0] <= xlim[1] + tol
    within_y = ylim[0] - tol <= p[1] <= ylim[1] + tol
    return within_x and within_y
def is_in_bounding_polygon(p, tol):
    """Point-in-polygon test against ``global_boundary``.

    NOTE(review): unimplemented stub — it always returns None, so callers
    must not rely on it yet.
    """
    global global_boundary
    pass
def angle_in_2pi(v):
    """Polar angle of the 2-D vector ``v``.

    Despite the name, the result stays in ``np.arctan2``'s range
    (-pi, pi]; the wrap into [0, 2*pi) was deliberately disabled.
    """
    return np.arctan2(v[1], v[0])
def to_grid(x, y, x_off, y_off):
    """Translate world coordinates into grid coordinates by removing
    the grid origin offset."""
    return x - x_off, y - y_off
#def get_convex_hull(V):
# hull = ConvexHull(V)
# return [V[vertex] for vertex in hull.vertices]
def appendGlobalBoundaries(B):
    """Extend *B* with axis-aligned half-plane constraints anchored at the
    bottom-left (index 0) and top-right (index 3) corners of the global
    boundary polygon.

    Each entry is a ``(normal, point)`` pair, matching the bisector format
    produced by Agent.computeBisectors().
    """
    lower_corner = globals()['global_boundary'][0]
    upper_corner = globals()['global_boundary'][3]
    for corner in (lower_corner, upper_corner):
        B.append((np.array([1., 0.], dtype=float), np.array(corner, dtype=float)))
        B.append((np.array([0., 1.], dtype=float), np.array(corner, dtype=float)))
def angularSort(reference, vertices):
    """Return *vertices* sorted by their polar angle around *reference*
    (ties keep their original relative order)."""
    offsets = [vertex - reference for vertex in vertices]
    order = sorted(range(len(vertices)), key=lambda k: angle_in_2pi(offsets[k]))
    return [vertices[k] for k in order]
class StateBuffer:
    """Shared registry of the most recently advertised state per agent."""

    def __init__(self):
        # Maps agent name -> latest state dict published by that agent.
        self.buffers = {}

    def getState(self, name):
        """Return the newest state stored for *name* (KeyError if absent)."""
        return self.buffers[name]

    def getAllStates(self):
        """Return a shallow copy of the whole name -> state mapping."""
        return {agent: state for agent, state in self.buffers.items()}

    def updateState(self, name, s):
        """Overwrite the stored state for *name* with *s*."""
        self.buffers[name] = s
class Agent:
    """A single UAV running decentralized coverage control in its own thread.

    Each agent periodically advertises its state to the global StateBuffer,
    reads its neighbours' states back, computes its (buffered) Voronoi cell
    from perpendicular bisectors, and takes one velocity step — either a
    buffered-Voronoi-cell QP step (test_type 0) or a Lloyd/centroid step
    (test_type 1).
    """
    def __init__(self, name, init, goal, vmax):
        # name: identifier of the form "uavN"; N indexes the COLORS palette.
        self.name = name
        # Background control-loop thread (started by initialize()).
        self.move_thread = threading.Thread(name="{}_move".format(self.name), target=self.move)
        # Per-agent log file; closed at the end of move().
        self.sim_log = open('LOG_{}.txt'.format(self.name), 'w+')
        self.terminate = False
        self.phys_radius = 2.0   # physical body radius
        self.safe_radius = 3.0   # preferred clearance (see commented line in computeBisectors)
        self.comm_radius = 10.0  # communication range (not used in this file)
        self.dt = 0.1            # control-loop period [s]
        self.vmax = vmax
        self.vmin = 0.5
        self.velocity = np.zeros(2)
        self.position = np.array(init, dtype=float)
        self.voronoi_graph = []  # vertices of the current Voronoi cell
        #self.color = tuple(np.random.rand(3))
        self.color = globals()['COLORS'][int(self.name[3:])]
        self.inter_sort_type = [('angle', float), ('vector', np.ndarray)]
        self.world = None
        self.world_offset = (globals()['xlim'][0], globals()['ylim'][0])
        self.frontier = set()
        self._B = np.array([[1., 0.], [0., 1.], [1., 0.], [0., 1.]], dtype=float)
        self.neighbours = dict()  # name -> last known state of each other agent
        # self.path = []
        # self.curves = []
        self.xhistory = []  # trajectory history for plotting
        self.yhistory = []
        self.goal = np.array(goal, dtype=float)
        self.goal_change = 10.   # distance the goal moved on the last setGoal()
        self.converged = False   # True once successive goals stop moving
        # Constant Hessian of the QP objective ||p - goal||^2 (cvxopt form).
        self.H = matrix([[2., 0.], [0., 2.]], tc='d')
        # STATE: shared snapshot advertised to other agents via StateBuffer.
        self.state = {'pos': self.position, 'vel': self.velocity, 'end': False}
        self.advertiseState()
    def initialize_world(self):
        """Planned occupancy-grid initialisation (disabled stub)."""
        #global xlim, ylim
        #W = xlim[1] - xlim[0]
        #H = ylim[1] - ylim[0]
        #self.world = np.zeros((H, W))
        #grid_node = to_grid(self.position[0], self.position[1], xlim[1], ylim[1])
        #v_act = valid_actions(self.world, grid_node)
        #for act in v_act:
        #    applied_coord = apply_action_to_node(grid_node, act)
        #    pass
        pass
    def initialize(self):
        """Start the agent's control thread."""
        #print("Initializing agent {}".format(self.name))
        #print("Agent {} --> {}".format(self.name, self.goal))
        self.move_thread.start()
    def setGoal(self, g):
        """Set a new goal; mark the agent converged when it barely moved."""
        self.goal_change = np.linalg.norm(g - self.goal)
        self.converged = self.goal_change <= 0.1
        self.goal = np.array(g, dtype=float)
    def hasReachedGoal(self):
        """True when the agent sits within 0.1 of a converged goal."""
        return np.linalg.norm(self.goal - self.state['pos']) <= 0.1 and self.converged
    def getCentroid(self):
        """Centroid of the (closed) Voronoi-cell polygon in voronoi_graph.

        Assumes the polygon is closed, i.e. last vertex == first vertex
        (solveStep appends the first vertex before calling this).
        """
        ### SOURCE: https://en.wikipedia.org/wiki/Centroid
        # Calculate area with Shoelace Formula
        area = 0
        for i in range(len(self.voronoi_graph) - 1):
            x_i, y_i = self.voronoi_graph[i]
            x_j, y_j = self.voronoi_graph[i + 1]
            area += x_i * y_j - x_j * y_i
        area *= 0.5
        # Calculate centroid of voronoi cell
        Cx, Cy = 0, 0
        for i in range(len(self.voronoi_graph) - 1):
            x_i, y_i = self.voronoi_graph[i]
            x_j, y_j = self.voronoi_graph[i + 1]
            product = (x_i * y_j - x_j * y_i)
            Cx += (x_i + x_j) * product
            Cy += (y_i + y_j) * product
        return np.array([Cx, Cy], dtype=float) / (6. * area)
    def computeBisectors(self):
        """Build the agent's Voronoi cell from neighbour bisectors.

        For every known neighbour, compute the perpendicular bisector of the
        segment between the two agents as a (normal, midpoint) half-plane;
        add the global workspace boundaries; intersect all pairs of
        half-plane boundary lines and keep intersections that satisfy every
        constraint — these are the cell's vertices (stored angularly sorted
        in self.voronoi_graph).

        Returns:
            (A_iq, b_iq): cvxopt matrices of the neighbour half-plane
            constraints, used as inequality constraints by solveStep().
        """
        bisectors = []  # (normal, point)
        cons, vals = [], []
        tol = 0.1
        for a, st in self.neighbours.items():
            if st is None:
                continue
            if np.any(np.isnan(st['pos'])):
                print(f'Agent {self.name} neighbour {a} has NaN!')
            # Normal points from this agent toward the neighbour; the
            # bisector passes through the midpoint m.
            normal = (st['pos'] - self.state['pos']).round(4)
            m = ((st['pos'] + self.state['pos']) * 0.5).round(4)
            bisectors.append((normal, m))
            cons.append(normal)
            #vals.append(m.dot(normal) - self.safe_radius)
            vals.append((m.dot(normal)).round(4))
        # bottom_left = globals()['global_boundary'][0]
        # top_right = globals()['global_boundary'][3]
        # bisectors.append((np.array([1., 0.], dtype=float), np.array(bottom_left, dtype=float)))
        # bisectors.append((np.array([0., 1.], dtype=float), np.array(bottom_left, dtype=float)))
        # bisectors.append((np.array([1., 0.], dtype=float), np.array(top_right, dtype=float)))
        # bisectors.append((np.array([0., 1.], dtype=float), np.array(top_right, dtype=float)))
        appendGlobalBoundaries(bisectors)
        A = np.array(cons, dtype=float)
        b = np.array(vals, dtype=float)
        self.voronoi_graph = []
        # Pairwise line intersections of all half-plane boundaries.
        for i in range(len(bisectors)):
            n_i, m_i = bisectors[i]
            d_i = m_i.dot(n_i)
            for j in range(i + 1, len(bisectors)):
                n_j, m_j = bisectors[j]
                d_j = m_j.dot(n_j)
                try:
                    A_ = np.array([n_i.round(4), n_j.round(4)], dtype=float)
                    b_ = np.array([d_i.round(4), d_j.round(4)], dtype=float)
                    p = (np.linalg.solve(A_, b_)).round(4)
                except np.linalg.LinAlgError:
                    # Parallel boundaries never intersect — skip the pair.
                    continue
                except:
                    print(traceback.format_exc())
                    continue
                # Keep only intersections inside the workspace that also
                # satisfy every neighbour constraint (with slack 0.1).
                if is_in_space(p, tol) and np.all(A.dot(p) <= b + 0.1):
                    self.voronoi_graph.append(p)
        A_iq = matrix(np.array(cons), tc='d')
        b_iq = matrix(np.array(vals), tc='d')
        self.voronoi_graph = angularSort(self.position, self.voronoi_graph)
        #self.voronoi_graph = get_convex_hull(self.voronoi_graph)
        return A_iq, b_iq
    def solveStep(self, A_iq, b_iq, _t=0):
        """Compute the next velocity command.

        _t == 0: buffered-Voronoi-cell mode — QP that moves toward the goal
                 while staying inside the cell (constraints A_iq, b_iq).
        _t == 1: Lloyd mode — move toward the centroid of the Voronoi cell.
        Falls through to a zero velocity when neither branch produces one.
        """
        v_next = self.state['vel']
        if _t == 0:
            ## Buffered Voronoi Cell
            if A_iq and b_iq:
                solvers.options['show_progress'] = False
                # Minimize ||p - goal||^2 subject to A_iq p <= b_iq.
                sol = solvers.qp(self.H, matrix(-2. * self.goal, tc='d'), A_iq, b_iq)
                #print("Agent {} SOLN: {}".format(self.name, sol['x']))
                v_next = (np.array(sol['x'][0]) - self.state['pos']) / self.dt
                _norm = np.linalg.norm(v_next)
                if _norm > self.vmax:
                    # Saturate to the speed limit.
                    v_next = self.vmax * v_next / _norm
                return v_next
        elif _t == 1:
            ## <NAME>
            if len(self.voronoi_graph):
                # Close the polygon so getCentroid() can integrate over it.
                self.voronoi_graph.append(self.voronoi_graph[0])
                self.setGoal(self.getCentroid())
                v_next = self.goal - self.state['pos']
                _norm = np.linalg.norm(v_next)
                if _norm > self.vmax:
                    v_next *= self.vmax / np.linalg.norm(v_next)
                return v_next
        print(f'Agent {self.name} stopped momentarily.')
        return np.zeros(2)
    def doStep(self, v_next):
        """Apply one Euler integration step and record the trajectory."""
        x_, y_ = self.state['pos'][0], self.state['pos'][1]
        self.xhistory.append(x_)
        self.yhistory.append(y_)
        self.state['pos'] = self.state['pos'] + self.dt * v_next
        self.state['vel'] = v_next
    def stepLog(self, _t=0):
        """Write a per-step log entry (mode 1 logging is disabled)."""
        if _t == 0:
            self.sim_log.write('{} - pos: {} - vel: {} - at: {}\n'.format(self.name, self.position, self.velocity, datetime.datetime.now()))
        elif _t == 1:
            # Agent name; current position; next goal
            #self.sim_log.write('{};{};{}\n'.format(self.name, self.position, self.goal))
            #self.sim_log.write(f'{self.name};{self.voronoi_graph.dfs_traversal()}\n')
            #self.sim_log.write(f'{self.name};{self.voronoi_graph}\n')
            pass
    def updateNeighbours(self):
        """Snapshot every other agent's advertised state from the buffer."""
        for uav, st in globals()['buf'].buffers.items():
            if uav == self.name or st is None:
                continue
            self.neighbours[uav] = dict(st)
    def advertiseState(self):
        """Publish this agent's state dict to the shared StateBuffer."""
        globals()['buf'].updateState(self.name, self.state)
    def stop(self):
        """Request the control loop to terminate."""
        self.terminate = True
    def move(self):
        """Control-loop body run by move_thread at (soft) period dt.

        Skips the first 20 iterations as a pre-flight phase so all agents
        have advertised their state before constraints are computed.
        """
        test = globals()['test_type']
        pre_flight_count = 20
        #while not self.terminate and not self.hasReachedGoal():
        while not self.terminate:
            _start = time.time()
            self.advertiseState()
            self.updateNeighbours()
            if pre_flight_count < 1:
                A, b = self.computeBisectors()
                v_next = self.solveStep(A, b, test)
                self.doStep(v_next)
                self.stepLog(test)
            else:
                pre_flight_count -= 1
            _elapsed = time.time() - _start
            # Soft real-time: sleep out the remainder of the period, or
            # note (silently) that the deadline was missed.
            fail_hard = _elapsed >= self.dt
            if fail_hard:
                #print('Agent {} failed hard real-time constraint at {}'.format(self.name, datetime.datetime.now()))
                pass
            else:
                time.sleep(self.dt - _elapsed)
        self.state['end'] = True
        if self.hasReachedGoal():
            print("Agent {} has reached goal at {}".format(self.name, datetime.datetime.now()))
        self.sim_log.close()
class Simulator:
    """Owns the agents, the matplotlib animation and the collision monitor.

    Configuration is read from a YAML parameter file; run() starts every
    agent thread, animates their motion, and shuts everything down when the
    window closes or all agents finish.
    """
    def __init__(self, pfile):
        # Defaults; overwritten by loadParams() from the YAML file.
        self.xlim = [-20, 80]
        self.ylim = [-20, 80]
        self.count = 0
        self.agents = dict()
        self.vmax = 0
        self.iteration = 0
        self.loadParams(pfile)
        #self.logfile = open('SimulatorLog.txt', 'w+')
        self.terminate = False
        # Background thread that watches pairwise distances for collisions.
        self.distance_thread = threading.Thread(name='distance_thread', target=self.checkCollision)
        self.fig = plt.figure()
        self.ax = self.fig.add_subplot(1, 1, 1)
        #self.fig, self.axs = plt.subplots(2)
        self.ani = None
    def loadParams(self, pfile):
        """Load the YAML config and publish shared values into module globals."""
        params = None
        with open(pfile) as P:
            params = yaml.load(P, Loader=yaml.FullLoader)
        self.xlim = np.array(params['xlim'], dtype=float)
        self.ylim = np.array(params['ylim'], dtype=float)
        self.count = params['count']
        self.vmax = params['vmax']
        # Globals consumed by the free functions and by every Agent.
        globals()['test_type'] = params['test_type']
        globals()['world'] = np.zeros((int(self.ylim[1] - self.ylim[0]), int(self.xlim[1] - self.xlim[0])), dtype=int)
        globals()['xlim'] = np.array(self.xlim, dtype=float)
        globals()['ylim'] = np.array(self.ylim, dtype=float)
        #globals()['global_boundary'] = np.array([[i, j] for i in self.xlim for j in self.ylim], dtype=float)
        globals()['global_boundary'] = np.array([vertex for vertex in params['bounding_polygon']], dtype=float)
        #sorted_boundary = angularSort(np.mean(globals()['global_boundary'], axis=0), globals()['global_boundary'])
        # Boundary polygon patch for the animation (vertices sorted CCW).
        self.bounding_poly_plt = ptc.Polygon(angularSort(np.mean(globals()['global_boundary'], axis=0), globals()['global_boundary']),
                                            color=(0, 0, 0), fill=False)
        # Each YAML 'uav' entry is (name, initial position, goal).
        for entry in params['uav']:
            self.agents[entry[0]] = Agent(entry[0], entry[1], entry[2], self.vmax)
    def isDone(self):
        """True once every agent's control loop has exited."""
        return all([a.state['end'] for _, a in self.agents.items()])
    def checkCollision(self):
        """Poll all pairwise agent distances once per second; print on overlap."""
        if not self.agents:
            return
        try:
            while not self.terminate:
                ax, ay = list(zip(*[tuple(a.state['pos']) for _, a in self.agents.items()]))
                X = np.array(ax, dtype=float)
                Y = np.array(ay, dtype=float)
                # Full pairwise distance matrix via meshgrid broadcasting.
                XX1, XX2 = np.meshgrid(X, X)
                YY1, YY2 = np.meshgrid(Y, Y)
                pairwise_dists = np.sqrt((XX2 - XX1) ** 2 + (YY2 - YY1) ** 2)
                R, C = pairwise_dists.shape
                for i in range(R):
                    for j in range(C):
                        # j < i: check each unordered pair only once.
                        if j < i and pairwise_dists[i, j] <= 2.0:
                            print('COLLISION between agents uav{} and uav{} at {}'.format(i, j, datetime.datetime.now()))
                time.sleep(1)
        except Exception:
            print(traceback.format_exc())
    def animate_motion(self, i):
        """FuncAnimation callback: redraw agents, trails and Voronoi cells."""
        self.ax.clear()
        self.ax.set_xlim(self.xlim[0] - 5, self.xlim[1] + 5)
        self.ax.set_ylim(self.ylim[0] - 5, self.ylim[1] + 5)
        self.iteration += 1
        for _, a in self.agents.items():
            pos = a.state['pos']
            vel = a.state['vel']
            # Heading arrow from the current velocity direction.
            angle = np.arctan2(vel[1], vel[0])
            circle = plt.Circle(tuple(pos), 2., color=a.color)
            self.ax.quiver(pos[0], pos[1], np.cos(angle), np.sin(angle), color=a.color)
            self.ax.add_artist(circle)
            self.ax.plot(a.xhistory, a.yhistory, color=a.color)
            self.ax.add_patch(self.bounding_poly_plt)
            polygon = a.voronoi_graph
            # A valid polygon needs at least three vertices.
            if len(polygon) < 3:
                continue
            poly = plt.Polygon(polygon, alpha=0.4, color=a.color)
            self.ax.add_patch(poly)
    def stop(self):
        """Request shutdown of the run loop and the collision monitor."""
        self.terminate = True
    def run(self):
        """Start all threads, show the animation, then join everything."""
        print("Run starts at {}".format(datetime.datetime.now()))
        for _, a in self.agents.items():
            a.initialize()
        self.ani = animation.FuncAnimation(self.fig, self.animate_motion, interval=100)
        #self.ani = animation.FuncAnimation(self.fig, self.animate_motion, frames=3000, interval=100)
        #self.ani.save(f'lloyd_{self.count}_uav.mp4', writer='ffmpeg', fps=30)
        self.distance_thread.start()
        # Blocks until the matplotlib window is closed.
        plt.show()
        while not self.terminate and not self.isDone():
            time.sleep(1)
        for _, a in self.agents.items():
            a.stop()
        self.distance_thread.join()
        #self.logfile.close()
        print("Run done at {}".format(datetime.datetime.now()))
def ctrl_c_handler(signum, frame):
    """SIGINT handler: request a clean shutdown of the running simulator."""
    # signum/frame are mandated by the signal-handler signature but unused.
    simulator = globals()['sim']
    simulator.stop()
    print('Closing...')
if __name__ == '__main__':
    # Shared buffer through which agents advertise and read states.
    buf = StateBuffer()
    # Simulator configuration comes from the YAML file given on the CLI.
    sim = Simulator(sys.argv[1])
    # Ctrl-C triggers a cooperative shutdown instead of killing threads.
    signal.signal(signal.SIGINT, ctrl_c_handler)
sim.run() | [
"numpy.sqrt",
"matplotlib.pyplot.Polygon",
"yaml.load",
"time.sleep",
"numpy.array",
"numpy.arctan2",
"numpy.linalg.norm",
"numpy.sin",
"cvxopt.matrix",
"numpy.meshgrid",
"numpy.isnan",
"numpy.cos",
"time.time",
"matplotlib.pyplot.show",
"traceback.format_exc",
"signal.signal",
"nump... | [((824, 846), 'numpy.arctan2', 'np.arctan2', (['v[1]', 'v[0]'], {}), '(v[1], v[0])\n', (834, 846), True, 'import numpy as np\n'), ((16127, 16171), 'signal.signal', 'signal.signal', (['signal.SIGINT', 'ctrl_c_handler'], {}), '(signal.SIGINT, ctrl_c_handler)\n', (16140, 16171), False, 'import signal\n'), ((2765, 2776), 'numpy.zeros', 'np.zeros', (['(2)'], {}), '(2)\n', (2773, 2776), True, 'import numpy as np\n'), ((2802, 2829), 'numpy.array', 'np.array', (['init'], {'dtype': 'float'}), '(init, dtype=float)\n', (2810, 2829), True, 'import numpy as np\n'), ((3201, 3272), 'numpy.array', 'np.array', (['[[1.0, 0.0], [0.0, 1.0], [1.0, 0.0], [0.0, 1.0]]'], {'dtype': 'float'}), '([[1.0, 0.0], [0.0, 1.0], [1.0, 0.0], [0.0, 1.0]], dtype=float)\n', (3209, 3272), True, 'import numpy as np\n'), ((3432, 3459), 'numpy.array', 'np.array', (['goal'], {'dtype': 'float'}), '(goal, dtype=float)\n', (3440, 3459), True, 'import numpy as np\n'), ((3542, 3582), 'cvxopt.matrix', 'matrix', (['[[2.0, 0.0], [0.0, 2.0]]'], {'tc': '"""d"""'}), "([[2.0, 0.0], [0.0, 2.0]], tc='d')\n", (3548, 3582), False, 'from cvxopt import matrix, solvers\n'), ((4392, 4421), 'numpy.linalg.norm', 'np.linalg.norm', (['(g - self.goal)'], {}), '(g - self.goal)\n', (4406, 4421), True, 'import numpy as np\n'), ((4493, 4517), 'numpy.array', 'np.array', (['g'], {'dtype': 'float'}), '(g, dtype=float)\n', (4501, 4517), True, 'import numpy as np\n'), ((6654, 6681), 'numpy.array', 'np.array', (['cons'], {'dtype': 'float'}), '(cons, dtype=float)\n', (6662, 6681), True, 'import numpy as np\n'), ((6695, 6722), 'numpy.array', 'np.array', (['vals'], {'dtype': 'float'}), '(vals, dtype=float)\n', (6703, 6722), True, 'import numpy as np\n'), ((11661, 11729), 'threading.Thread', 'threading.Thread', ([], {'name': '"""distance_thread"""', 'target': 'self.checkCollision'}), "(name='distance_thread', target=self.checkCollision)\n", (11677, 11729), False, 'import threading\n'), ((11752, 11764), 'matplotlib.pyplot.figure', 
'plt.figure', ([], {}), '()\n', (11762, 11764), True, 'import matplotlib.pyplot as plt\n'), ((12059, 12096), 'numpy.array', 'np.array', (["params['xlim']"], {'dtype': 'float'}), "(params['xlim'], dtype=float)\n", (12067, 12096), True, 'import numpy as np\n'), ((12118, 12155), 'numpy.array', 'np.array', (["params['ylim']"], {'dtype': 'float'}), "(params['ylim'], dtype=float)\n", (12126, 12155), True, 'import numpy as np\n'), ((12433, 12465), 'numpy.array', 'np.array', (['self.xlim'], {'dtype': 'float'}), '(self.xlim, dtype=float)\n', (12441, 12465), True, 'import numpy as np\n'), ((12495, 12527), 'numpy.array', 'np.array', (['self.ylim'], {'dtype': 'float'}), '(self.ylim, dtype=float)\n', (12503, 12527), True, 'import numpy as np\n'), ((12679, 12751), 'numpy.array', 'np.array', (["[vertex for vertex in params['bounding_polygon']]"], {'dtype': 'float'}), "([vertex for vertex in params['bounding_polygon']], dtype=float)\n", (12687, 12751), True, 'import numpy as np\n'), ((15344, 15412), 'matplotlib.animation.FuncAnimation', 'animation.FuncAnimation', (['self.fig', 'self.animate_motion'], {'interval': '(100)'}), '(self.fig, self.animate_motion, interval=100)\n', (15367, 15412), True, 'import matplotlib.animation as animation\n'), ((15643, 15653), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (15651, 15653), True, 'import matplotlib.pyplot as plt\n'), ((1243, 1276), 'numpy.array', 'np.array', (['[1.0, 0.0]'], {'dtype': 'float'}), '([1.0, 0.0], dtype=float)\n', (1251, 1276), True, 'import numpy as np\n'), ((1276, 1310), 'numpy.array', 'np.array', (['bottom_left'], {'dtype': 'float'}), '(bottom_left, dtype=float)\n', (1284, 1310), True, 'import numpy as np\n'), ((1328, 1361), 'numpy.array', 'np.array', (['[0.0, 1.0]'], {'dtype': 'float'}), '([0.0, 1.0], dtype=float)\n', (1336, 1361), True, 'import numpy as np\n'), ((1361, 1395), 'numpy.array', 'np.array', (['bottom_left'], {'dtype': 'float'}), '(bottom_left, dtype=float)\n', (1369, 1395), True, 'import numpy as 
np\n'), ((1413, 1446), 'numpy.array', 'np.array', (['[1.0, 0.0]'], {'dtype': 'float'}), '([1.0, 0.0], dtype=float)\n', (1421, 1446), True, 'import numpy as np\n'), ((1446, 1478), 'numpy.array', 'np.array', (['top_right'], {'dtype': 'float'}), '(top_right, dtype=float)\n', (1454, 1478), True, 'import numpy as np\n'), ((1496, 1529), 'numpy.array', 'np.array', (['[0.0, 1.0]'], {'dtype': 'float'}), '([0.0, 1.0], dtype=float)\n', (1504, 1529), True, 'import numpy as np\n'), ((1529, 1561), 'numpy.array', 'np.array', (['top_right'], {'dtype': 'float'}), '(top_right, dtype=float)\n', (1537, 1561), True, 'import numpy as np\n'), ((5381, 5412), 'numpy.array', 'np.array', (['[Cx, Cy]'], {'dtype': 'float'}), '([Cx, Cy], dtype=float)\n', (5389, 5412), True, 'import numpy as np\n'), ((7574, 7588), 'numpy.array', 'np.array', (['cons'], {}), '(cons)\n', (7582, 7588), True, 'import numpy as np\n'), ((7621, 7635), 'numpy.array', 'np.array', (['vals'], {}), '(vals)\n', (7629, 7635), True, 'import numpy as np\n'), ((10398, 10409), 'time.time', 'time.time', ([], {}), '()\n', (10407, 10409), False, 'import time\n'), ((11999, 12035), 'yaml.load', 'yaml.load', (['P'], {'Loader': 'yaml.FullLoader'}), '(P, Loader=yaml.FullLoader)\n', (12008, 12035), False, 'import yaml\n'), ((14560, 14586), 'numpy.arctan2', 'np.arctan2', (['vel[1]', 'vel[0]'], {}), '(vel[1], vel[0])\n', (14570, 14586), True, 'import numpy as np\n'), ((15023, 15069), 'matplotlib.pyplot.Polygon', 'plt.Polygon', (['polygon'], {'alpha': '(0.4)', 'color': 'a.color'}), '(polygon, alpha=0.4, color=a.color)\n', (15034, 15069), True, 'import matplotlib.pyplot as plt\n'), ((15726, 15739), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (15736, 15739), False, 'import time\n'), ((4567, 4612), 'numpy.linalg.norm', 'np.linalg.norm', (["(self.goal - self.state['pos'])"], {}), "(self.goal - self.state['pos'])\n", (4581, 4612), True, 'import numpy as np\n'), ((5680, 5699), 'numpy.isnan', 'np.isnan', (["st['pos']"], {}), "(st['pos'])\n", 
(5688, 5699), True, 'import numpy as np\n'), ((8317, 8339), 'numpy.linalg.norm', 'np.linalg.norm', (['v_next'], {}), '(v_next)\n', (8331, 8339), True, 'import numpy as np\n'), ((9000, 9011), 'numpy.zeros', 'np.zeros', (['(2)'], {}), '(2)\n', (9008, 9011), True, 'import numpy as np\n'), ((10784, 10795), 'time.time', 'time.time', ([], {}), '()\n', (10793, 10795), False, 'import time\n'), ((11055, 11085), 'time.sleep', 'time.sleep', (['(self.dt - _elapsed)'], {}), '(self.dt - _elapsed)\n', (11065, 11085), False, 'import time\n'), ((13551, 13576), 'numpy.array', 'np.array', (['ax'], {'dtype': 'float'}), '(ax, dtype=float)\n', (13559, 13576), True, 'import numpy as np\n'), ((13598, 13623), 'numpy.array', 'np.array', (['ay'], {'dtype': 'float'}), '(ay, dtype=float)\n', (13606, 13623), True, 'import numpy as np\n'), ((13652, 13669), 'numpy.meshgrid', 'np.meshgrid', (['X', 'X'], {}), '(X, X)\n', (13663, 13669), True, 'import numpy as np\n'), ((13698, 13715), 'numpy.meshgrid', 'np.meshgrid', (['Y', 'Y'], {}), '(Y, Y)\n', (13709, 13715), True, 'import numpy as np\n'), ((13750, 13794), 'numpy.sqrt', 'np.sqrt', (['((XX2 - XX1) ** 2 + (YY2 - YY1) ** 2)'], {}), '((XX2 - XX1) ** 2 + (YY2 - YY1) ** 2)\n', (13757, 13794), True, 'import numpy as np\n'), ((14127, 14140), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (14137, 14140), False, 'import time\n'), ((14695, 14708), 'numpy.cos', 'np.cos', (['angle'], {}), '(angle)\n', (14701, 14708), True, 'import numpy as np\n'), ((14710, 14723), 'numpy.sin', 'np.sin', (['angle'], {}), '(angle)\n', (14716, 14723), True, 'import numpy as np\n'), ((15224, 15247), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (15245, 15247), False, 'import datetime\n'), ((15915, 15938), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (15936, 15938), False, 'import datetime\n'), ((8092, 8124), 'cvxopt.matrix', 'matrix', (['(-2.0 * self.goal)'], {'tc': '"""d"""'}), "(-2.0 * self.goal, tc='d')\n", (8098, 8124), False, 
'from cvxopt import matrix, solvers\n'), ((8753, 8775), 'numpy.linalg.norm', 'np.linalg.norm', (['v_next'], {}), '(v_next)\n', (8767, 8775), True, 'import numpy as np\n'), ((9445, 9468), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (9466, 9468), False, 'import datetime\n'), ((11228, 11251), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (11249, 11251), False, 'import datetime\n'), ((14189, 14211), 'traceback.format_exc', 'traceback.format_exc', ([], {}), '()\n', (14209, 14211), False, 'import traceback\n'), ((8239, 8260), 'numpy.array', 'np.array', (["sol['x'][0]"], {}), "(sol['x'][0])\n", (8247, 8260), True, 'import numpy as np\n'), ((7203, 7226), 'numpy.linalg.solve', 'np.linalg.solve', (['A_', 'b_'], {}), '(A_, b_)\n', (7218, 7226), True, 'import numpy as np\n'), ((7370, 7392), 'traceback.format_exc', 'traceback.format_exc', ([], {}), '()\n', (7390, 7392), False, 'import traceback\n'), ((8860, 8882), 'numpy.linalg.norm', 'np.linalg.norm', (['v_next'], {}), '(v_next)\n', (8874, 8882), True, 'import numpy as np\n'), ((14082, 14105), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (14103, 14105), False, 'import datetime\n')] |
# Copyright (c) 2020, <NAME>. All rights reserved.
#
# This work is licensed under the MIT License.
# To view a copy of this license, visit https://opensource.org/licenses/MIT
import numpy as np
class BasicAction(object):
    """Base class for stroke actions.

    Provides static helpers that map between physical parameters (lengths,
    turn angles, arc angles) and the normalized [0, 1] encodings used in
    action strings, plus a parser for the action-type prefix.
    """

    @staticmethod
    def normalize_line_length(line_length, line_length_max):
        """Map a length in [0, line_length_max] to [0, 1]."""
        assert line_length >= 0, "line_length should not be negative!"
        return line_length / line_length_max

    @staticmethod
    def denormalize_line_length(normalized_line_length, line_length_max):
        """Map a normalized length in [0, 1] back to [0, line_length_max]."""
        # Fixed assertion message: the valid input range IS [0, 1].
        assert 0 <= normalized_line_length <= 1, "normalized_line_length should be in [0, 1]!"
        return normalized_line_length * line_length_max

    @staticmethod
    def normalize_turn_angle(turn_direction, turn_angle):
        """Map a (direction, angle) turn to [0, 1].

        Encoding convention: L180 -> +180 degrees and R180 -> -180 degrees,
        so the signed range [-180, 180] maps linearly onto [0, 1].

        Raises:
            Exception: if turn_direction is neither "L" nor "R".
        """
        assert 0 <= turn_angle <= 180, "angle should be in [0, 180]!"
        if turn_direction == "L":
            return (turn_angle + 180) / 360
        elif turn_direction == "R":
            return (180 - turn_angle) / 360
        raise Exception("Unsupported direction!")

    @staticmethod
    def denormalize_turn_angle(normalized_turn_angle):
        """Map a normalized turn in [0, 1] back to a (direction, angle) pair.

        Values >= 0.5 decode as left ("L") turns, values below as right ("R").
        """
        assert 0 <= normalized_turn_angle <= 1, "normalized_turn_angle should be in [0, 1]!"
        if normalized_turn_angle >= 0.5:
            return "L", normalized_turn_angle * 360 - 180
        return "R", 180 - normalized_turn_angle * 360

    @staticmethod
    def normalize_arc_angle(arc_angle):
        """Map an arc angle in [-360, 360] to [0, 1]."""
        # Fixed assertion message: the valid input range IS [-360, 360].
        assert -360 <= arc_angle <= 360, "arc_angle should be in [-360, 360]!"
        return (arc_angle + 360) / 720

    @staticmethod
    def denormalize_arc_angle(normalized_arc_angle):
        """Map a normalized arc angle in [0, 1] back to [-360, 360]."""
        # Fixed assertion message: the valid input range IS [0, 1].
        assert 0 <= normalized_arc_angle <= 1, "normalized_arc_angle should be in [0, 1]!"
        return normalized_arc_angle * 720 - 360

    @staticmethod
    def get_action_type(action_string):
        """Return the action-type prefix of an action string, e.g. "line" or "arc"."""
        return action_string.split("_")[0]
class ArcAction(BasicAction):
    """A stroke action that draws an arc and then turns in place."""

    def __init__(self, arc_angle, arc_type, turn_direction, turn_angle, arc_radius=0.5):
        super(ArcAction, self).__init__()
        self.arc_radius = arc_radius
        self.arc_angle = arc_angle
        self.arc_type = arc_type
        self.turn_direction = turn_direction
        self.turn_angle = turn_angle
        self.name = "arc"

    def __eq__(self, other):
        # Equality is defined through the serialized form.
        if not isinstance(other, ArcAction):
            return False
        return self.export_to_action_string() == other.export_to_action_string()

    def export_to_action_string(self, arc_radius_normalizaton_factor=None):
        """Serialize as "arc_<type>_<radius>_<arc-angle>-<turn-angle>".

        The radius is normalized only when a normalization factor is given;
        the two angles are always normalized to [0, 1].
        """
        if arc_radius_normalizaton_factor is None:
            radius = self.arc_radius
        else:
            radius = BasicAction.normalize_line_length(
                line_length=self.arc_radius, line_length_max=arc_radius_normalizaton_factor)
        arc_angle = BasicAction.normalize_arc_angle(arc_angle=self.arc_angle)
        turn_angle = BasicAction.normalize_turn_angle(
            turn_direction=self.turn_direction, turn_angle=self.turn_angle)
        return "{}_{}_{:.3f}_{:.3f}-{:.3f}".format(
            self.name, self.arc_type, radius, arc_angle, turn_angle)

    @classmethod
    def import_from_action_string(cls, action_string, arc_radius_normalizaton_factor=None):
        """Parse an arc action string, e.g. "arc_zigzag_0.5_0.7905-0.7500"."""
        movement, raw_turn = action_string.split("-")
        raw_turn = float(raw_turn)
        action_name, arc_type, raw_radius, raw_arc = movement.split("_")
        raw_radius = float(raw_radius)
        raw_arc = float(raw_arc)
        if action_name != "arc":
            raise Exception("The action string imported is not an arc action string!")
        if arc_radius_normalizaton_factor is None:
            radius = raw_radius
        else:
            radius = BasicAction.denormalize_line_length(
                normalized_line_length=raw_radius, line_length_max=arc_radius_normalizaton_factor)
        arc_angle = BasicAction.denormalize_arc_angle(normalized_arc_angle=raw_arc)
        turn_direction, turn_angle = BasicAction.denormalize_turn_angle(normalized_turn_angle=raw_turn)
        return cls(arc_angle=arc_angle, arc_type=arc_type, turn_direction=turn_direction,
                   turn_angle=turn_angle, arc_radius=radius)

    def __str__(self):
        return self.export_to_action_string()
class LineAction(BasicAction):
    """A stroke action that draws a line segment and then turns in place."""

    def __init__(self, line_length, line_type, turn_direction, turn_angle):
        super(LineAction, self).__init__()
        self.line_length = line_length
        self.line_type = line_type
        self.turn_direction = turn_direction
        self.turn_angle = turn_angle
        self.name = "line"

    def __eq__(self, other):
        # Equality is defined through the serialized form.
        if not isinstance(other, LineAction):
            return False
        return self.export_to_action_string() == other.export_to_action_string()

    def export_to_action_string(self, line_length_normalization_factor=None):
        """Serialize as "line_<type>_<length>-<turn-angle>".

        The length is normalized only when a normalization factor is given;
        the turn angle is always normalized to [0, 1].
        """
        if line_length_normalization_factor is None:
            length = self.line_length
        else:
            length = BasicAction.normalize_line_length(
                line_length=self.line_length, line_length_max=line_length_normalization_factor)
        turn_angle = BasicAction.normalize_turn_angle(
            turn_direction=self.turn_direction, turn_angle=self.turn_angle)
        return "{}_{}_{:.3f}-{:.3f}".format(self.name, self.line_type, length, turn_angle)

    @classmethod
    def import_from_action_string(cls, action_string, line_length_normalization_factor=None):
        """Parse a line action string, e.g. "line_straight_0.7905-0.7500"."""
        movement, raw_turn = action_string.split("-")
        raw_turn = float(raw_turn)
        action_name, line_type, raw_length = movement.split("_")
        raw_length = float(raw_length)
        if action_name != "line":
            raise Exception("The action string imported is not a line action string!")
        if line_length_normalization_factor is None:
            length = raw_length
        else:
            length = BasicAction.denormalize_line_length(
                normalized_line_length=raw_length,
                line_length_max=line_length_normalization_factor)
        turn_direction, turn_angle = BasicAction.denormalize_turn_angle(normalized_turn_angle=raw_turn)
        return cls(line_length=length, line_type=line_type, turn_direction=turn_direction,
                   turn_angle=turn_angle)

    def __str__(self):
        return self.export_to_action_string()
class OneStrokeShape(object):
    """An ordered sequence of basic actions forming one pen stroke.

    Optionally carries placement information: start coordinates, start
    orientation, and one scaling factor per action.
    """

    def __init__(self, basic_actions, start_coordinates=None, start_orientation=None, scaling_factors=None):
        self.basic_actions = basic_actions
        self.start_coordinates = start_coordinates
        self.start_orientation = start_orientation
        self.scaling_factors = scaling_factors

    def __eq__(self, other):
        # Shapes compare by their action sequences only; placement is ignored.
        if isinstance(other, OneStrokeShape):
            return self.basic_actions == other.basic_actions
        return False

    def __str__(self):
        # Bug fix: actions are objects, so they must be converted to str
        # before joining (str.join raises TypeError on non-str elements).
        return "[" + ", ".join(str(action) for action in self.basic_actions) + "]" + ", " + "{}, {}, {}".format(
            self.start_coordinates[0], self.start_coordinates[1], self.start_orientation)

    def get_num_actions(self):
        """Return the number of actions in this stroke."""
        return len(self.basic_actions)

    def get_actions(self):
        return self.basic_actions

    def get_action_string_list(self):
        """Serialize every action (without normalization factors)."""
        return [action.export_to_action_string() for action in self.basic_actions]

    def set_start_coordinates(self, start_coordinates):
        self.start_coordinates = start_coordinates

    def set_start_orientation(self, start_orientation):
        self.start_orientation = start_orientation

    def set_scaling_factors(self, scaling_factors):
        self.scaling_factors = scaling_factors

    def set_consistent_scaling_factors(self, scaling_factor):
        """Apply one scalar scaling factor to every action of the shape."""
        # np.isscalar already returns a bool; comparing it to True was redundant.
        assert np.isscalar(scaling_factor), "Setting consistent scaling factors only requires one scalar!"
        self.scaling_factors = [scaling_factor] * len(self.basic_actions)

    def get_start_coordinates(self):
        return self.start_coordinates

    def get_start_orientation(self):
        return self.start_orientation

    def get_scaling_factors(self):
        return self.scaling_factors

    @classmethod
    def import_from_action_string_list(cls, action_string_list, line_length_normalization_factor=None,
                                       arc_radius_normalizaton_factor=None):
        """Build a shape from serialized action strings.

        Raises:
            Exception: if an action string has an unknown type prefix.
        """
        basic_actions = []
        for action_string in action_string_list:
            action_type = BasicAction.get_action_type(action_string)
            if action_type == "line":
                action = LineAction.import_from_action_string(
                    action_string=action_string, line_length_normalization_factor=line_length_normalization_factor)
            elif action_type == "arc":
                action = ArcAction.import_from_action_string(
                    action_string=action_string, arc_radius_normalizaton_factor=arc_radius_normalizaton_factor)
            else:
                raise Exception("Unknown action type!")
            basic_actions.append(action)
        return cls(basic_actions=basic_actions, start_coordinates=None, start_orientation=None, scaling_factors=None)
class BongardImage(object):
    """A Bongard image: a list of one-stroke shapes drawn together."""

    def __init__(self, one_stroke_shapes):
        self.one_stroke_shapes = one_stroke_shapes

    def __eq__(self, other):
        # Images compare by their shape lists.
        if not isinstance(other, BongardImage):
            return False
        return self.one_stroke_shapes == other.one_stroke_shapes

    def get_num_shapes(self):
        return len(self.one_stroke_shapes)

    def get_actions(self):
        return [shape.get_actions() for shape in self.one_stroke_shapes]

    def get_action_string_list(self):
        return [shape.get_action_string_list() for shape in self.one_stroke_shapes]

    def get_start_coordinates(self):
        return [shape.get_start_coordinates() for shape in self.one_stroke_shapes]

    def _check_per_shape_list(self, values, label):
        # Shared validation for the setters below: exactly one entry per shape.
        assert isinstance(values, list), "{} should be a list!".format(label)
        assert len(values) == len(self.one_stroke_shapes), \
            "The number of {} should be the same as the number of one_stroke_shapes!".format(label)

    def set_start_coordinates(self, start_coordinates):
        self._check_per_shape_list(start_coordinates, "start_coordinates")
        for shape, coords in zip(self.one_stroke_shapes, start_coordinates):
            shape.set_start_coordinates(start_coordinates=coords)

    def get_start_orientations(self):
        return [shape.get_start_orientation() for shape in self.one_stroke_shapes]

    def set_start_orientations(self, start_orientations):
        self._check_per_shape_list(start_orientations, "start_orientations")
        for shape, orientation in zip(self.one_stroke_shapes, start_orientations):
            shape.set_start_orientation(start_orientation=orientation)

    def get_scaling_factors(self):
        return [shape.get_scaling_factors() for shape in self.one_stroke_shapes]

    def set_scaling_factors(self, scaling_factors):
        self._check_per_shape_list(scaling_factors, "scaling_factors")
        for shape, factors in zip(self.one_stroke_shapes, scaling_factors):
            shape.set_scaling_factors(scaling_factors=factors)

    def set_consistent_scaling_factors(self, scaling_factors):
        # One scalar per shape; each shape broadcasts it to all its actions.
        self._check_per_shape_list(scaling_factors, "scaling_factors")
        for shape, factor in zip(self.one_stroke_shapes, scaling_factors):
            shape.set_consistent_scaling_factors(scaling_factor=factor)

    @classmethod
    def import_from_action_string_list(cls, action_string_list, line_length_normalization_factor=None,
                                       arc_radius_normalizaton_factor=None):
        """Build an image from a list of per-shape action-string lists."""
        shapes = [OneStrokeShape.import_from_action_string_list(
                      shape_action_strings,
                      line_length_normalization_factor=line_length_normalization_factor,
                      arc_radius_normalizaton_factor=arc_radius_normalizaton_factor)
                  for shape_action_strings in action_string_list]
        return cls(one_stroke_shapes=shapes)
class BongardProblem(object):
    """A Bongard problem: positive and negative image sets plus metadata."""

    def __init__(self, positive_bongard_images, negative_bongard_images, problem_name=None,
                 problem_description=None, positive_rules=None, negative_rules=None):
        self.positive_bongard_images = positive_bongard_images
        self.negative_bongard_images = negative_bongard_images
        self.problem_name = problem_name
        self.problem_description = problem_description
        self.positive_rules = positive_rules
        self.negative_rules = negative_rules

    def __eq__(self, other):
        # Problems compare by their two image sets; metadata is ignored.
        if not isinstance(other, BongardProblem):
            return False
        return (self.positive_bongard_images == other.positive_bongard_images
                and self.negative_bongard_images == other.negative_bongard_images)

    def get_problem_name(self):
        return self.problem_name

    def get_problem_description(self):
        return self.problem_description

    def get_positive_rules(self):
        return self.positive_rules

    def get_negative_rules(self):
        return self.negative_rules

    def get_positive_bongard_images(self):
        return self.positive_bongard_images

    def get_negative_bongard_images(self):
        return self.negative_bongard_images

    def _paired(self, method_name):
        # Apply the named getter to every positive and negative image,
        # returning a [positives, negatives] pair of result lists.
        positives = [getattr(image, method_name)() for image in self.positive_bongard_images]
        negatives = [getattr(image, method_name)() for image in self.negative_bongard_images]
        return [positives, negatives]

    def get_actions(self):
        return self._paired("get_actions")

    def get_action_string_list(self):
        return self._paired("get_action_string_list")

    def get_start_coordinates(self):
        return self._paired("get_start_coordinates")

    def get_start_orientations(self):
        return self._paired("get_start_orientations")

    def get_scaling_factors(self):
        return self._paired("get_scaling_factors")

    @classmethod
    def import_from_action_string_list(cls, action_string_list, line_length_normalization_factor=None,
                                       arc_radius_normalizaton_factor=None):
        """Build a problem from [positive, negative] per-image action strings."""
        positive_strings = action_string_list[0]
        negative_strings = action_string_list[1]
        positive_images = [BongardImage.import_from_action_string_list(
                               strings,
                               line_length_normalization_factor=line_length_normalization_factor,
                               arc_radius_normalizaton_factor=arc_radius_normalizaton_factor)
                           for strings in positive_strings]
        negative_images = [BongardImage.import_from_action_string_list(
                               strings,
                               line_length_normalization_factor=line_length_normalization_factor,
                               arc_radius_normalizaton_factor=arc_radius_normalizaton_factor)
                           for strings in negative_strings]
        return cls(positive_bongard_images=positive_images, negative_bongard_images=negative_images,
                   problem_name=None, problem_description=None, positive_rules=None, negative_rules=None)
| [
"numpy.isscalar"
] | [((9288, 9315), 'numpy.isscalar', 'np.isscalar', (['scaling_factor'], {}), '(scaling_factor)\n', (9299, 9315), True, 'import numpy as np\n')] |
from humpday.objectives.classic import CLASSIC_OBJECTIVES
import logging
import numpy as np
import math
import warnings
try:
from hebo.design_space.design_space import DesignSpace
from hebo.optimizers.hebo import HEBO
using_hebo = True
except ImportError:
using_hebo = False
if using_hebo:
logging.getLogger('hebo').setLevel(logging.ERROR)
def hebo_cube_factory(objective, n_trials, n_dim, with_count,n_suggestions=5):
    """Minimize `objective` over the unit cube [0, 1]^n_dim with HEBO.

    Suggestions are requested in batches of `n_suggestions`, with a final
    one-at-a-time remainder loop so exactly `n_trials` evaluations occur.
    Returns (best_val, best_x), plus the evaluation count when `with_count`
    is truthy.
    """
    # Module-level counter of objective evaluations, updated by _objective.
    global feval_count
    feval_count = 0
    # One numeric variable per dimension, each bounded to [0, 1].
    variables = [{'name': 'u' + str(i), 'type': 'num', 'lb': 0., 'ub': 1.} for i in range(n_dim)]
    space = DesignSpace().parse(variables)
    opt = HEBO(space)
    def _objective(params) -> np.ndarray:
        # params appears to be DataFrame-like (has .index and .values);
        # each row is one candidate point — TODO confirm against HEBO docs.
        global feval_count
        feval_count += len(params.index)
        return np.array([ objective(ui) for ui in params.values ])
    n_batches = int(math.floor(n_trials/n_suggestions))
    n_remainder = n_trials - n_suggestions*n_batches
    for i in range(n_batches):
        rec = opt.suggest(n_suggestions=n_suggestions) # <-- don't change this
        opt.observe(rec, _objective(rec))
    # Finish the leftover trials one suggestion at a time.
    for i in range(n_remainder):
        rec = opt.suggest(n_suggestions=1) # <-- don't change this
        opt.observe(rec, _objective(rec))
    best_val = opt.y.min()
    # opt.y holds per-observation one-element arrays; argmin over the raw
    # values recovers the index of the best observation so the matching
    # input point can be read back from opt.X.
    best_ndx = np.argmin([y[0] for y in opt.y])
    best_x = list(opt.X.values[best_ndx])
    return (best_val, best_x, feval_count) if with_count else (best_val, best_x)
def hebo_sequential_cube(objective, n_trials, n_dim, with_count):
    """Run HEBO purely sequentially: a single suggestion per observe step."""
    return hebo_cube_factory(objective=objective, n_trials=n_trials,
                             n_dim=n_dim, with_count=with_count,
                             n_suggestions=1)
def hebo_batch_cube(objective, n_trials, n_dim, with_count):
    """Run HEBO in batch mode, requesting ten suggestions at a time."""
    return hebo_cube_factory(objective=objective, n_trials=n_trials,
                             n_dim=n_dim, with_count=with_count,
                             n_suggestions=10)
HEBO_OPTIMIZERS = [hebo_sequential_cube, hebo_batch_cube]
else:
HEBO_OPTIMIZERS = []
if __name__=='__main__':
    # Smoke test: run every available HEBO optimizer on each classic
    # objective and report the best value found plus wall-clock time.
    for objective in CLASSIC_OBJECTIVES:
        print(' ')
        print(objective.__name__)
        import time
        for optimizer in HEBO_OPTIMIZERS:
            print(optimizer.__name__+'...')
            st = time.time()
            # 12 trials in 4 dimensions keeps the smoke test quick.
            print((optimizer(objective, n_trials=12, n_dim=4, with_count=True)))
            print(' ... took '+str(time.time()-st)+' seconds.') | [
"logging.getLogger",
"hebo.optimizers.hebo.HEBO",
"math.floor",
"hebo.design_space.design_space.DesignSpace",
"numpy.argmin",
"time.time"
] | [((664, 675), 'hebo.optimizers.hebo.HEBO', 'HEBO', (['space'], {}), '(space)\n', (668, 675), False, 'from hebo.optimizers.hebo import HEBO\n'), ((1358, 1390), 'numpy.argmin', 'np.argmin', (['[y[0] for y in opt.y]'], {}), '([y[0] for y in opt.y])\n', (1367, 1390), True, 'import numpy as np\n'), ((314, 339), 'logging.getLogger', 'logging.getLogger', (['"""hebo"""'], {}), "('hebo')\n", (331, 339), False, 'import logging\n'), ((895, 931), 'math.floor', 'math.floor', (['(n_trials / n_suggestions)'], {}), '(n_trials / n_suggestions)\n', (905, 931), False, 'import math\n'), ((2300, 2311), 'time.time', 'time.time', ([], {}), '()\n', (2309, 2311), False, 'import time\n'), ((619, 632), 'hebo.design_space.design_space.DesignSpace', 'DesignSpace', ([], {}), '()\n', (630, 632), False, 'from hebo.design_space.design_space import DesignSpace\n'), ((2430, 2441), 'time.time', 'time.time', ([], {}), '()\n', (2439, 2441), False, 'import time\n')] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.