| repo (string, 2–99 chars) | file (string, 13–225 chars) | code (string, 0–18.3M chars) | file_length (int64, 0–18.3M) | avg_line_length (float64, 0–1.36M) | max_line_length (int64, 0–4.26M) | extension_type (string, 1 class) |
|---|---|---|---|---|---|---|
| enpheeph | enpheeph-main/tests/test_enpheeph/unit_test/test_utils/__init__.py |
# -*- coding: utf-8 -*-
# enpheeph - Neural Fault Injection Framework
# Copyright (C) 2020-2023 Alessio "Alexei95" Colucci
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
| 781 | 45 | 77 | py |
| enpheeph | enpheeph-main/tests/test_enpheeph/unit_test/test_utils/test_classes.py |
# -*- coding: utf-8 -*-
# enpheeph - Neural Fault Injection Framework
# Copyright (C) 2020-2023 Alessio "Alexei95" Colucci
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
import collections
import random
import string
import pytest
import enpheeph.utils.classes
class TestIDGeneratorClass(object):
def test_increasing_id_for_same_class_objects(self):
class A(
enpheeph.utils.classes.IDGenerator,
use_shared=False,
reset_value=0,
shared_root_flag=True,
):
pass
a1 = A()
a2 = A()
assert a1.unique_instance_id == 0
assert a2.unique_instance_id == 1
def test_increasing_id_for_shared_subclasses(self):
class A(
enpheeph.utils.classes.IDGenerator,
use_shared=True,
reset_value=0,
shared_root_flag=True,
):
pass
class B(A, use_shared=True, reset_value=0, shared_root_flag=False):
pass
a1 = A()
a2 = A()
b1 = B()
b2 = B()
a3 = A()
b3 = B()
assert a1.unique_instance_id == 0
assert a2.unique_instance_id == a1.unique_instance_id + 1
assert b1.unique_instance_id == a2.unique_instance_id + 1
assert b2.unique_instance_id == b1.unique_instance_id + 1
assert a3.unique_instance_id == b2.unique_instance_id + 1
assert b3.unique_instance_id == a3.unique_instance_id + 1
def test_increasing_independent_id_for_shared_subclasses(self):
class A(
enpheeph.utils.classes.IDGenerator,
use_shared=True,
reset_value=0,
shared_root_flag=True,
):
pass
class B(A, use_shared=False, reset_value=0, shared_root_flag=False):
pass
a1 = A()
a2 = A()
b1 = B()
b2 = B()
a3 = A()
b3 = B()
assert a1.unique_instance_id == 0
assert a2.unique_instance_id == 1
assert b1.unique_instance_id == 0
assert b2.unique_instance_id == 1
assert a3.unique_instance_id == 2
assert b3.unique_instance_id == 2
def test_different_reset_value_id(self):
class A(
enpheeph.utils.classes.IDGenerator,
use_shared=True,
reset_value=10,
shared_root_flag=True,
):
pass
class B(A, use_shared=False, reset_value=23, shared_root_flag=False):
pass
a1 = A()
a2 = A()
b1 = B()
b2 = B()
a3 = A()
b3 = B()
assert a1.unique_instance_id == 10
assert a2.unique_instance_id == 11
assert b1.unique_instance_id == 23
assert b2.unique_instance_id == 24
assert a3.unique_instance_id == 12
assert b3.unique_instance_id == 25
def test_get_root_with_counter(self):
class A(
enpheeph.utils.classes.IDGenerator, use_shared=False, shared_root_flag=True
):
pass
class B(
enpheeph.utils.classes.IDGenerator, use_shared=True, shared_root_flag=False
):
pass
class C(A, use_shared=False, shared_root_flag=False):
pass
class D(C, use_shared=True, shared_root_flag=False):
pass
class E(
enpheeph.utils.classes.IDGenerator, use_shared=False, shared_root_flag=False
):
pass
class F(
enpheeph.utils.classes.IDGenerator, use_shared=True, shared_root_flag=False
):
pass
class G(
enpheeph.utils.classes.IDGenerator, use_shared=True, shared_root_flag=True
):
pass
assert A._get_root_with_id() == A
assert B._get_root_with_id() == enpheeph.utils.classes.IDGenerator
assert C._get_root_with_id() == C
assert D._get_root_with_id() == A
assert E._get_root_with_id() == E
assert F._get_root_with_id() == enpheeph.utils.classes.IDGenerator
assert G._get_root_with_id() == G
def test_setup_id_counter(self):
class A(
enpheeph.utils.classes.IDGenerator,
reset_value=10,
use_shared=False,
shared_root_flag=True,
):
pass
class B(A, use_shared=True, shared_root_flag=False):
pass
class C(A, use_shared=False, shared_root_flag=False):
pass
A._setup_id_counter(reset=True)
assert A().unique_instance_id == 10
assert B().unique_instance_id == 11
A._setup_id_counter(reset=False)
B._setup_id_counter(reset=False)
assert A().unique_instance_id == 12
assert B().unique_instance_id == 13
B._setup_id_counter(reset=True)
assert B().unique_instance_id == 10
assert A().unique_instance_id == 11
C._setup_id_counter(reset=True)
assert C().unique_instance_id == 0
C._setup_id_counter(reset=False)
assert C().unique_instance_id == 1
def test_update_id_counter(self):
class A(
enpheeph.utils.classes.IDGenerator,
reset_value=10,
use_shared=False,
shared_root_flag=True,
):
pass
class B(A, use_shared=True, shared_root_flag=False):
pass
class C(A, use_shared=False, shared_root_flag=False):
pass
assert A().unique_instance_id == 10
A._setup_id_counter(reset=True)
A._update_id_counter()
assert A().unique_instance_id == 11
A._update_id_counter()
A._update_id_counter()
assert A().unique_instance_id == 14
assert B().unique_instance_id == 15
B._update_id_counter()
assert A().unique_instance_id == 17
assert B().unique_instance_id == 18
assert C().unique_instance_id == 0
C._update_id_counter()
assert C().unique_instance_id == 2
def test_get_id_counter(self):
class A(
enpheeph.utils.classes.IDGenerator,
reset_value=10,
use_shared=False,
shared_root_flag=True,
):
pass
class B(A, use_shared=True, shared_root_flag=False):
pass
class C(A, use_shared=False, shared_root_flag=False):
pass
assert A._get_id_counter() == 10
assert A().unique_instance_id + 1 == A._get_id_counter() # 11
assert B().unique_instance_id + 1 == A._get_id_counter() # 12
assert B._get_id_counter() == 12
assert C._get_id_counter() == 0
assert C().unique_instance_id + 1 == C._get_id_counter() # 1
assert C().unique_instance_id + 1 == C._get_id_counter() # 2
assert C._get_id_counter() == 2
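# A standalone sketch (hypothetical, for illustration only; not the actual
# enpheeph.utils.classes.IDGenerator) of the counter contract the tests above
# exercise: a class declared with use_shared=False, or with
# shared_root_flag=True, owns a counter starting at reset_value, while a
# use_shared=True class without the root flag draws its ids from the nearest
# ancestor marked as a shared root.
class _SketchIDGenerator:
    _counter = 0
    _use_shared = False
    _shared_root_flag = True

    def __init_subclass__(
        cls, use_shared=False, reset_value=0, shared_root_flag=False, **kwargs
    ):
        super().__init_subclass__(**kwargs)
        cls._use_shared = use_shared
        cls._shared_root_flag = shared_root_flag
        if shared_root_flag or not use_shared:
            # this class owns its own counter ("root with id")
            cls._counter = reset_value

    @classmethod
    def _sketch_root_with_id(cls):
        if not cls._use_shared:
            return cls
        # shared classes walk the MRO up to the nearest shared root
        for klass in cls.__mro__:
            if klass is _SketchIDGenerator or klass._shared_root_flag:
                return klass

    def __init__(self):
        root = type(self)._sketch_root_with_id()
        self.unique_instance_id = root._counter
        root._counter += 1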
class TestSkipIfErrorContextManagerClass(object):
@pytest.mark.parametrize(
argnames=("error", "string_param"),
argvalues=[
pytest.param(
ValueError,
"parameter",
id="ValueError_parameter",
),
pytest.param(
TypeError,
"test",
id="TypeError_test",
),
pytest.param(
IndexError,
None,
id="IndexError_no_string_to_check",
),
pytest.param(
(TypeError, ValueError),
"parameter",
id="tuple",
),
pytest.param(
[ValueError, TypeError, OSError],
"hello",
id="list",
),
],
)
def test_skip_if_error(self, error, string_param):
with enpheeph.utils.classes.SkipIfErrorContextManager(
error=error,
string_to_check=string_param,
):
a = False
error = (
random.choice(error)
if isinstance(error, collections.abc.Sequence)
else error
)
raise error(
(
"".join(
random.choice(string.ascii_letters)
for _ in range(random.randint(0, 100))
)
+ string_param
)
if string_param is not None
else (
random.choice(string.ascii_letters)
+ "".join(
random.choice(string.ascii_letters)
for _ in range(random.randint(0, 100))
)
)
)
a = True
assert not a
@pytest.mark.parametrize(
argnames=("error", "string_param"),
argvalues=[
pytest.param(
ValueError,
"parameter",
id="ValueError",
),
pytest.param(
TypeError,
"test",
id="TypeError",
),
pytest.param(
(TypeError, ValueError),
"parameter",
id="tuple",
),
pytest.param(
[TypeError, ValueError],
"parameter",
id="list",
),
pytest.param(
BaseException,
"parameter",
id="subclass_should_not_work",
),
],
)
def test_skip_if_error_raising(self, error, string_param):
with pytest.raises(KeyboardInterrupt):
with enpheeph.utils.classes.SkipIfErrorContextManager(
error=error,
string_to_check=string_param,
):
a = False
raise KeyboardInterrupt(string_param)
a = True
assert not a
@pytest.mark.parametrize(
argnames=("error", "string_param"),
argvalues=[
pytest.param(
["a", TypeError],
"test",
id="a_TypeError_test",
),
pytest.param(
"a",
"parameter",
id="a_parameter",
),
pytest.param(
1,
"parameter",
id="1_parameter",
),
],
)
def test_skip_if_error_init_validation_raising(self, error, string_param):
with pytest.raises(TypeError):
enpheeph.utils.classes.SkipIfErrorContextManager(
error=error,
string_to_check=string_param,
)
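# A usage sketch inferred from the assertions above (hedged: not taken from
# enpheeph documentation): the managed block is suppressed when the raised
# error matches the configured type(s) and, when given, the substring to
# check in the message; any other exception propagates unchanged.
if __name__ == "__main__":
    with enpheeph.utils.classes.SkipIfErrorContextManager(
        error=ValueError,
        string_to_check="parameter",
    ):
        raise ValueError("bad parameter")  # matches type and substring
    print("the failing block was skipped")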
| 11,929 | 29.356234 | 88 | py |
| enpheeph | enpheeph-main/tests/test_enpheeph/unit_test/test_injections/__init__.py |
# -*- coding: utf-8 -*-
# enpheeph - Neural Fault Injection Framework
# Copyright (C) 2020-2023 Alessio "Alexei95" Colucci
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
| 1,539 | 45.666667 | 77 | py |
| enpheeph | enpheeph-main/tests/test_enpheeph/unit_test/test_injections/test_abc/test_faultabc.py |
# -*- coding: utf-8 -*-
# enpheeph - Neural Fault Injection Framework
# Copyright (C) 2020-2023 Alessio "Alexei95" Colucci
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
import pytest
import enpheeph.injections.abc.faultabc
class TestFaultABC(object):
def test_abstract_class(self):
# the instantiation of an abstract class leads to TypeError
with pytest.raises(TypeError, match="abstract method"):
enpheeph.injections.abc.faultabc.FaultABC()
| 1,849 | 40.111111 | 77 | py |
| enpheeph | enpheeph-main/tests/test_enpheeph/unit_test/test_injections/test_abc/test_injectionabc.py |
# -*- coding: utf-8 -*-
# enpheeph - Neural Fault Injection Framework
# Copyright (C) 2020-2023 Alessio "Alexei95" Colucci
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
import enpheeph.injections.abc.injectionabc
class TestInjectionABC(object):
def test_abstract_method_setup(self):
assert getattr(
enpheeph.injections.abc.injectionabc.InjectionABC.setup,
"__isabstractmethod__",
False,
)
def test_abstract_method_teardown(self):
assert getattr(
enpheeph.injections.abc.injectionabc.InjectionABC.teardown,
"__isabstractmethod__",
False,
)
def test_attributes(self):
# __annotations__ returns the annotated attributes in the class
assert (
"location"
in enpheeph.injections.abc.injectionabc.InjectionABC.__annotations__
)
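# A minimal, generic-Python illustration (not enpheeph-specific) of the
# __annotations__ lookup used in test_attributes: class-level annotations are
# recorded even when no value is ever assigned to the attribute.
if __name__ == "__main__":
    class _Annotated:
        location: str  # annotated, never assigned

    assert "location" in _Annotated.__annotations__
    assert not hasattr(_Annotated, "location")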
| 2,261 | 37.338983 | 80 | py |
| enpheeph | enpheeph-main/tests/test_enpheeph/unit_test/test_injections/test_abc/test_pytorchinjectionabc.py |
# -*- coding: utf-8 -*-
# enpheeph - Neural Fault Injection Framework
# Copyright (C) 2020-2023 Alessio "Alexei95" Colucci
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
import torch
import enpheeph.injections.abc.pytorchinjectionabc
class TestPyTorchInjectionABC(object):
def test_abstract_method_setup(self):
assert getattr(
enpheeph.injections.abc.pytorchinjectionabc.PyTorchInjectionABC.setup,
"__isabstractmethod__",
False,
)
def test_teardown_not_abstract(self):
assert not getattr(
enpheeph.injections.abc.pytorchinjectionabc.PyTorchInjectionABC.teardown,
"__isabstractmethod__",
False,
)
def test_teardown(self):
class Implementation(
enpheeph.injections.abc.pytorchinjectionabc.PyTorchInjectionABC
):
def setup(self):
pass
def module_name(self):
pass
instance = Implementation()
module = torch.nn.ReLU()
module = instance.teardown(module)
assert module(torch.tensor([1])) == torch.tensor([1])
instance.handle = module.register_forward_hook(lambda m, i, o: o + 1)
assert module(torch.tensor([1])) == torch.tensor([2])
module = instance.teardown(module)
assert module(torch.tensor([1])) == torch.tensor([1])
module = instance.teardown(module)
assert module(torch.tensor([1])) == torch.tensor([1])
def test_abstract_module_name(self):
assert getattr(
enpheeph.injections.abc.pytorchinjectionabc.PyTorchInjectionABC.module_name,
"__isabstractmethod__",
False,
)
# we check whether the method is a property
assert isinstance(
enpheeph.injections.abc.pytorchinjectionabc.PyTorchInjectionABC.module_name,
property,
)
def test_attributes(self):
class_ = enpheeph.injections.abc.pytorchinjectionabc.PyTorchInjectionABC
# __annotations__ returns the annotated attributes in the class
assert "handle" in class_.__annotations__
| 3,525 | 34.26 | 88 | py |
| enpheeph | enpheeph-main/tests/test_enpheeph/unit_test/test_injections/test_abc/test_monitorabc.py |
# -*- coding: utf-8 -*-
# enpheeph - Neural Fault Injection Framework
# Copyright (C) 2020-2023 Alessio "Alexei95" Colucci
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
import pytest
import enpheeph.injections.abc.monitorabc
class TestMonitorABC(object):
def test_abstract_class(self):
# the instantiation of an abstract class leads to TypeError
with pytest.raises(TypeError, match="abstract method"):
enpheeph.injections.abc.monitorabc.MonitorABC()
| 1,857 | 40.288889 | 77 | py |
| enpheeph | enpheeph-main/tests/test_enpheeph/unit_test/test_injections/test_abc/__init__.py |
# -*- coding: utf-8 -*-
# enpheeph - Neural Fault Injection Framework
# Copyright (C) 2020-2023 Alessio "Alexei95" Colucci
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
| 1,539 | 45.666667 | 77 | py |
| enpheeph | enpheeph-main/tests/test_enpheeph/unit_test/test_injections/test_plugins/__init__.py |
# -*- coding: utf-8 -*-
# enpheeph - Neural Fault Injection Framework
# Copyright (C) 2020-2023 Alessio "Alexei95" Colucci
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
| 1,539 | 45.666667 | 77 | py |
| enpheeph | enpheeph-main/tests/test_enpheeph/unit_test/test_injections/test_plugins/test_indexing/test_indexingpluginabc.py |
# -*- coding: utf-8 -*-
# enpheeph - Neural Fault Injection Framework
# Copyright (C) 2020-2023 Alessio "Alexei95" Colucci
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
| 1,539 | 45.666667 | 77 | py |
| enpheeph | enpheeph-main/tests/test_enpheeph/unit_test/test_injections/test_plugins/test_indexing/__init__.py |
# -*- coding: utf-8 -*-
# enpheeph - Neural Fault Injection Framework
# Copyright (C) 2020-2023 Alessio "Alexei95" Colucci
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
| 1,539 | 45.666667 | 77 | py |
| enpheeph | enpheeph-main/notebooks/pruning_distribution_analysis/pruning_distribution_analysis_v2.2023_04_18__11_46_UTC.py |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# enpheeph - Neural Fault Injection Framework
# Copyright (C) 2020-2023 Alessio "Alexei95" Colucci
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
# # Use the enpheeph-dev mamba environment
# The old one is enpheeph-dev-old-lightning-flash
# In[1]:
import math
import os
import pathlib
import time
import captum
import lightning
import numpy
import pandas
import torch
import torch.optim
import torchmetrics
import torchvision
import torchvision.datasets
import torchvision.transforms
# In[2]:
class Model(lightning.LightningModule):
@property
def LAYER_LIST(self):
return {
"vgg11": {
"model.features.0": [64, 32, 32],
"model.features.1": [64, 32, 32],
"model.features.2": [64, 16, 16],
"model.features.3": [128, 16, 16],
"model.features.4": [128, 16, 16],
"model.features.5": [128, 8, 8],
"model.features.6": [256, 8, 8],
"model.features.7": [256, 8, 8],
"model.features.8": [256, 8, 8],
"model.features.9": [256, 8, 8],
"model.features.10": [256, 4, 4],
"model.features.11": [512, 4, 4],
"model.features.12": [512, 4, 4],
"model.features.13": [512, 4, 4],
"model.features.14": [512, 4, 4],
"model.features.15": [512, 2, 2],
"model.features.16": [512, 2, 2],
"model.features.17": [512, 2, 2],
"model.features.18": [512, 2, 2],
"model.features.19": [512, 2, 2],
"model.features.20": [512, 1, 1],
"model.avgpool": [512, 7, 7],
"model.classifier.0": [4096],
"model.classifier.1": [4096],
"model.classifier.2": [4096],
"model.classifier.3": [4096],
"model.classifier.4": [4096],
"model.classifier.5": [4096],
"model.classifier.6": [self.num_classes],
},
"resnet18": {
"model.conv1": [64, 16, 16],
"model.bn1": [64, 16, 16],
"model.relu": [64, 16, 16],
"model.maxpool": [64, 8, 8],
"model.layer1.0.conv1": [64, 8, 8],
"model.layer1.0.bn1": [64, 8, 8],
"model.layer1.0.relu": [64, 8, 8],
"model.layer1.0.conv2": [64, 8, 8],
"model.layer1.0.bn2": [64, 8, 8],
"model.layer1.1.conv1": [64, 8, 8],
"model.layer1.1.bn1": [64, 8, 8],
"model.layer1.1.relu": [64, 8, 8],
"model.layer1.1.conv2": [64, 8, 8],
"model.layer1.1.bn2": [64, 8, 8],
"model.layer2.0.conv1": [128, 4, 4],
"model.layer2.0.bn1": [128, 4, 4],
"model.layer2.0.relu": [128, 4, 4],
"model.layer2.0.conv2": [128, 4, 4],
"model.layer2.0.bn2": [128, 4, 4],
"model.layer2.0.downsample.0": [128, 4, 4],
"model.layer2.0.downsample.1": [128, 4, 4],
"model.layer2.1.conv1": [128, 4, 4],
"model.layer2.1.bn1": [128, 4, 4],
"model.layer2.1.relu": [128, 4, 4],
"model.layer2.1.conv2": [128, 4, 4],
"model.layer2.1.bn2": [128, 4, 4],
"model.layer3.0.conv1": [256, 2, 2],
"model.layer3.0.bn1": [256, 2, 2],
"model.layer3.0.relu": [256, 2, 2],
"model.layer3.0.conv2": [256, 2, 2],
"model.layer3.0.bn2": [256, 2, 2],
"model.layer3.0.downsample.0": [256, 2, 2],
"model.layer3.0.downsample.1": [256, 2, 2],
"model.layer3.1.conv1": [256, 2, 2],
"model.layer3.1.bn1": [256, 2, 2],
"model.layer3.1.relu": [256, 2, 2],
"model.layer3.1.conv2": [256, 2, 2],
"model.layer3.1.bn2": [256, 2, 2],
"model.layer4.0.conv1": [512, 1, 1],
"model.layer4.0.bn1": [512, 1, 1],
"model.layer4.0.relu": [512, 1, 1],
"model.layer4.0.conv2": [512, 1, 1],
"model.layer4.0.bn2": [512, 1, 1],
"model.layer4.0.downsample.0": [512, 1, 1],
"model.layer4.0.downsample.1": [512, 1, 1],
"model.layer4.1.conv1": [512, 1, 1],
"model.layer4.1.bn1": [512, 1, 1],
"model.layer4.1.relu": [512, 1, 1],
"model.layer4.1.conv2": [512, 1, 1],
"model.layer4.1.bn2": [512, 1, 1],
"model.avgpool": [512, 1, 1],
"model.fc": [10],
},
}
def __init__(
self,
model_name,
num_classes,
accuracy_fn,
loss_fn,
dataframe_path,
optimizer_class,
learning_rate,
dataset_name=None,
):
super().__init__()
self.save_hyperparameters()
self.model_name = model_name.lower()
self.num_classes = num_classes
self.accuracy = accuracy_fn
self.loss = loss_fn
self.optimizer_class = optimizer_class
self.learning_rate = learning_rate
self.dataframe_path = pathlib.Path(dataframe_path)
self.setup_model(model_name=self.model_name, num_classes=self.num_classes)
self.handles = []
self.reset_dataframe()
self.init_model()
def setup_model(self, model_name, num_classes):
if model_name == "vgg11":
self.model = torchvision.models.vgg11(num_classes=num_classes)
elif model_name == "resnet18":
self.model = torchvision.models.resnet18(num_classes=num_classes)
elif model_name == "mlp":
self.model = torch.nn.Sequential(
torch.nn.Flatten(),
torch.nn.Linear(28 * 28, 100),
torch.nn.ReLU(),
torch.nn.Linear(100, num_classes),
)
else:
raise ValueError("unknown model")
def init_model(self):
for m in self.modules():
if isinstance(m, torch.nn.Conv2d):
torch.nn.init.kaiming_normal_(
m.weight, mode="fan_out", nonlinearity="relu"
)
elif isinstance(m, (torch.nn.BatchNorm2d, torch.nn.GroupNorm)):
torch.nn.init.constant_(m.weight, 1)
torch.nn.init.constant_(m.bias, 0)
def reset_dataframe(self):
self.dataframe = pandas.DataFrame(
columns=[
"module_name",
"tensor_type",
"batch_index",
"element_in_batch_index",
"location",
"neuron_attribution_sorting",
"value",
"accuracy",
"loss",
]
)
@staticmethod
def join_saved_dataframe(dataframe, dataframe_path: os.PathLike):
dataframe_path = pathlib.Path(dataframe_path)
if not dataframe_path.exists():
dataframe_path.parent.mkdir(parents=True, exist_ok=True)
dataframe.to_csv(dataframe_path, sep="|")
else:
df = pandas.read_csv(dataframe_path, sep="|", index_col=[0], header=[0])
new_df = pandas.concat([df, dataframe], axis=0)
new_df.reset_index(drop=True, inplace=True)
new_df.to_csv(dataframe_path, sep="|")
def forward(self, x):
return self.model(x)
def configure_optimizers(self):
optimizer = self.optimizer_class(self.parameters(), lr=self.learning_rate)
return optimizer
def make_neuron_output_function(
self, module_name, location, neuron_attribution_sorting
):
def save_neuron_output(module, args, output) -> None:
for b_idx, b in enumerate(output):
self.dataframe.loc[len(self.dataframe)] = [
module_name,
"output",
None,
b_idx,
location,
neuron_attribution_sorting,
b[location].item(),
None,
None,
]
return save_neuron_output
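# (descriptive note) the closure above is registered as a forward hook: on
# every forward pass it appends one dataframe row per element in the batch,
# recording the tracked neuron's output at `location`; batch_index, accuracy
# and loss stay None here and are filled in later by on_test_batch_end.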
def add_hooks(self, attributions, topk=1, bottomk=1):
for layer_name, layer_attributions_and_deltas in attributions.items():
layer_attributions_cat = torch.cat(
tuple(l_attr for l_attr, _ in layer_attributions_and_deltas),
dim=0,
)
summed_layer_attributions = torch.sum(
layer_attributions_cat,
(0,),
)
topk_values, topk_indices = torch.topk(
abs(
summed_layer_attributions.flatten(),
),
k=topk,
largest=True,
sorted=True,
)
bottomk_values, bottomk_indices = torch.topk(
abs(
summed_layer_attributions.flatten(),
),
k=bottomk,
largest=False,
sorted=True,
)
indices = [
{"neuron_attribution_sorting": f"top{i}", "index": idx}
for i, idx in enumerate(topk_indices)
] + [
{"neuron_attribution_sorting": f"bottom{i}", "index": idx}
for i, idx in enumerate(bottomk_indices)
]
for index in indices:
target_neuron_location = numpy.unravel_index(
index["index"],
summed_layer_attributions.size(),
order="C",
)
module = self.get_layer_from_full_name(
self,
layer_name,
separator=".",
main_model_is_in_the_layer_name=False,
)
self.handles.append(
module.register_forward_hook(
self.make_neuron_output_function(
layer_name,
tuple(target_neuron_location),
neuron_attribution_sorting=index[
"neuron_attribution_sorting"
],
)
)
)
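# (descriptive note) add_hooks sums each layer's attributions over the batch
# dimension, takes the top-k and bottom-k neurons by |attribution| on the
# flattened map, and numpy.unravel_index converts each flat index back into a
# per-dimension coordinate before a forward hook is attached to the module.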
@staticmethod
def get_full_layer_name_from_summary(layer_summary, skip_main_model=True):
parent_info = layer_summary.parent_info
layer_full_name = layer_summary.var_name
while parent_info is not None and (
not skip_main_model
or skip_main_model
and parent_info.parent_info is not None
):
layer_full_name = f"{parent_info.var_name}.{layer_full_name}"
parent_info = parent_info.parent_info
return layer_full_name
@staticmethod
def get_layer_from_full_name(
model, layer_name, separator=".", main_model_is_in_the_layer_name=False
):
module = model
if main_model_is_in_the_layer_name:
layer_name = separator.join(layer_name.split(separator)[1:])
for l_n in layer_name.split(separator):
module = getattr(module, l_n)
return module
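# (descriptive note) get_layer_from_full_name resolves dotted names such as
# "layer1.0.conv1" one attribute at a time; getattr with "0" works because
# torch.nn containers register child modules under string keys.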
def get_attributions(
self,
dataloader,
layer_name_list,
attributions_checkpoint_path,
attribution=captum.attr.LayerConductance,
save_checkpoint=True,
load_checkpoint=True,
):
if attributions_checkpoint_path.exists() and load_checkpoint:
attributions = torch.load(str(attributions_checkpoint_path))
return attributions
elif save_checkpoint:
attributions_checkpoint_path.parent.mkdir(exist_ok=True, parents=True)
model = self.train(False).to(torch.device("cpu"))
attributions = {}
for layer_name in layer_name_list:
print(layer_name)
layer_attributions = []
attr_instance = attribution(
model, model.get_layer_from_full_name(model, layer_name)
)
for idx, b in enumerate(dataloader):
x, y = b
attr, delta = attr_instance.attribute(
inputs=x.to(torch.device("cpu")),
target=y.to(torch.device("cpu")),
return_convergence_delta=True,
)
layer_attributions.append(
[
attr.detach(),
delta.detach(),
],
)
if idx % 10 == 0:
print(f"Batches done: {idx}")
attributions[layer_name] = layer_attributions
if save_checkpoint:
torch.save(attributions, str(attributions_checkpoint_path))
if save_checkpoint:
torch.save(attributions, str(attributions_checkpoint_path))
return attributions
def inference_step(self, batch, only_x=False):
if only_x:
x = batch
else:
x, y = batch
y_hat = self(x)
if only_x:
d = {"loss": None, "accuracy": None, "predictions": y_hat}
else:
d = {
"loss": self.loss(y_hat, y),
"accuracy": self.accuracy(y_hat, y),
"predictions": y_hat,
}
return d
def training_step(self, batch, batch_idx):
metrics = self.inference_step(batch)
self.log_dict(
{"train_loss": metrics["loss"], "train_accuracy": metrics["accuracy"]},
on_step=True,
on_epoch=True,
prog_bar=True,
logger=True,
)
return metrics["loss"]
def test_step(self, batch, batch_idx, dataloader_idx=0):
metrics = self.inference_step(batch)
self.log_dict(
{"test_loss": metrics["loss"], "test_accuracy": metrics["accuracy"]},
on_step=True,
on_epoch=True,
prog_bar=True,
logger=True,
)
return metrics["predictions"]
def validation_step(self, batch, batch_idx):
metrics = self.inference_step(batch)
self.log_dict(
{"val_loss": metrics["loss"], "val_accuracy": metrics["accuracy"]},
on_step=True,
on_epoch=True,
prog_bar=True,
logger=True,
)
return metrics["predictions"]
def predict_step(self, batch, batch_idx, dataloader_idx=0):
metrics = self.inference_step(batch, only_x=True)
# self.log({"val_loss": metrics["loss"], "val_accuracy": metrics["accuracy"]}, on_step=True, on_epoch=True, prog_bar=True, logger=True)
return metrics["predictions"]
def on_test_batch_end(self, outputs, batch, batch_idx, dataloader_idx=0):
# super().on_test_batch_end(outputs, batch, batch_idx, dataloader_idx)
_, y = batch
row_selector = (
self.dataframe["accuracy"].isnull() & self.dataframe["loss"].isnull()
)
self.dataframe.loc[row_selector, "batch_index"] = batch_idx
# assert len(self.dataframe.loc[row_selector]) / len(self.handles) == y.size()[0] == outputs.size()[0]
for bindex, (by_hat, by) in enumerate(zip(outputs, y)):
by_hat = by_hat.unsqueeze(0)
by = by.unsqueeze(0)
extra_row_selector = row_selector & (
self.dataframe["element_in_batch_index"] == bindex
)
self.dataframe.loc[extra_row_selector, "loss"] = self.loss(
by_hat, by
).item()
self.dataframe.loc[extra_row_selector, "accuracy"] = self.accuracy(
by_hat, by
).item()
self.dataframe_path.parent.mkdir(parents=True, exist_ok=True)
if batch_idx % 10 == 0:
self.join_saved_dataframe(self.dataframe, self.dataframe_path)
self.reset_dataframe()
# print(self.dataframe)
def on_test_end(self):
self.join_saved_dataframe(self.dataframe, self.dataframe_path)
class DataModule(lightning.LightningDataModule):
MNIST_DEFAULT_TRANSFORM = torchvision.transforms.Compose(
[
torchvision.transforms.Resize((28, 28)),
torchvision.transforms.ToTensor(),
torchvision.transforms.Normalize(
(0.1307,),
(0.3081,),
),
]
)
CIFAR10_DEFAULT_TRANSFORM = torchvision.transforms.Compose(
[
torchvision.transforms.Resize((32, 32)),
torchvision.transforms.ToTensor(),
torchvision.transforms.Normalize(
mean=[x / 255.0 for x in [125.3, 123.0, 113.9]],
std=[x / 255.0 for x in [63.0, 62.1, 66.7]],
),
]
)
GTSRB_DEFAULT_TRANSFORM = torchvision.transforms.Compose(
[
torchvision.transforms.Resize((48, 48)),
torchvision.transforms.ToTensor(),
torchvision.transforms.Normalize(
(0.3337, 0.3064, 0.3171),
(0.2672, 0.2564, 0.2629),
),
]
)
@staticmethod
def gtsrb_wrapper(data_dir, train: bool, transform=None, download: bool = True):
if train is True:
split = "train"
elif train is False:
split = "test"
else:
raise ValueError()
return torchvision.datasets.GTSRB(
str(data_dir),
split=split,
download=download,
transform=transform,
)
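# (descriptive note) gtsrb_wrapper adapts torchvision.datasets.GTSRB, which
# expects split="train"/"test", to the train=True/False signature that
# prepare_data() and setup() use for the other datasets.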
def __init__(
self,
dataset_name,
data_dir: str = "/shared/ml/datasets/vision/",
train_transform=None,
test_transform=None,
batch_size=64,
num_workers=32,
train_val_split=0.8,
seed=42,
dataset_class=None,
):
super().__init__()
self.dataset_name = dataset_name.lower()
self.dataset_class = dataset_class
self.data_dir = data_dir
self.batch_size = batch_size
self.train_val_split = train_val_split
self.num_workers = num_workers
self.seed = seed
self.train_transform = train_transform
self.test_transform = test_transform
self.num_classes = None
self.setup_dataset()
def setup_dataset(self):
if self.dataset_class is None:
if self.dataset_name == "cifar10":
self.dataset_class = torchvision.datasets.CIFAR10
elif self.dataset_name == "mnist":
self.dataset_class = torchvision.datasets.MNIST
elif self.dataset_name == "gtsrb":
self.dataset_class = self.__class__.gtsrb_wrapper
if self.dataset_class == self.__class__.gtsrb_wrapper:
if self.train_transform is None:
self.train_transform = torchvision.transforms.Compose(
[
torchvision.transforms.Resize((48, 48)),
torchvision.transforms.RandomCrop(48),
torchvision.transforms.RandomHorizontalFlip(),
torchvision.transforms.RandomVerticalFlip(),
self.GTSRB_DEFAULT_TRANSFORM,
]
)
if self.test_transform is None:
self.test_transform = self.GTSRB_DEFAULT_TRANSFORM
self.num_classes = 43
elif self.dataset_class == torchvision.datasets.MNIST or issubclass(
self.dataset_class, torchvision.datasets.MNIST
):
if self.train_transform is None:
self.train_transform = torchvision.transforms.Compose(
[
torchvision.transforms.Resize((28, 28)),
torchvision.transforms.RandomCrop(28, padding=4),
self.MNIST_DEFAULT_TRANSFORM,
]
)
if self.test_transform is None:
self.test_transform = self.MNIST_DEFAULT_TRANSFORM
self.num_classes = 10
elif self.dataset_class == torchvision.datasets.CIFAR10 or issubclass(
self.dataset_class, torchvision.datasets.CIFAR10
):
if self.train_transform is None:
self.train_transform = torchvision.transforms.Compose(
[
torchvision.transforms.Resize((32, 32)),
torchvision.transforms.RandomCrop(32, padding=4),
torchvision.transforms.RandomHorizontalFlip(),
self.CIFAR10_DEFAULT_TRANSFORM,
]
)
if self.test_transform is None:
self.test_transform = self.CIFAR10_DEFAULT_TRANSFORM
self.num_classes = 10
else:
raise ValueError("unknown dataset")
def prepare_data(self):
# download
self.dataset_class(self.data_dir, train=True, download=True)
self.dataset_class(self.data_dir, train=False, download=True)
def setup(self, stage: str):
# Assign train/val datasets for use in dataloaders
if stage == "fit":
dataset_train_transform = self.dataset_class(
self.data_dir, train=True, transform=self.train_transform
)
n_train_elements = math.floor(
len(dataset_train_transform) * self.train_val_split
)
self.dataset_train, _ = torch.utils.data.random_split(
dataset_train_transform,
[n_train_elements, len(dataset_train_transform) - n_train_elements],
generator=torch.Generator().manual_seed(self.seed),
)
dataset_test_transform = self.dataset_class(
self.data_dir, train=True, transform=self.test_transform
)
_, self.dataset_val = torch.utils.data.random_split(
dataset_test_transform,
[n_train_elements, len(dataset_train_transform) - n_train_elements],
generator=torch.Generator().manual_seed(self.seed),
)
# Assign test dataset for use in dataloader(s)
if stage == "test":
self.dataset_test = self.dataset_class(
self.data_dir, train=False, transform=self.test_transform
)
if stage == "predict":
self.dataset_predict = self.dataset_class(
self.data_dir, train=False, transform=self.test_transform
)
def train_dataloader(self):
return torch.utils.data.DataLoader(
self.dataset_train,
batch_size=self.batch_size,
shuffle=True,
num_workers=self.num_workers,
)
def val_dataloader(self):
return torch.utils.data.DataLoader(
self.dataset_val,
batch_size=self.batch_size,
shuffle=False,
num_workers=self.num_workers,
)
def test_dataloader(self):
return torch.utils.data.DataLoader(
self.dataset_test,
batch_size=self.batch_size,
shuffle=False,
num_workers=self.num_workers,
)
def predict_dataloader(self):
return torch.utils.data.DataLoader(
self.dataset_predict,
batch_size=self.batch_size,
shuffle=False,
num_workers=self.num_workers,
)
# In[3]:
TIME_FORMAT = "%Y_%m_%d__%H_%M_%S_%z"
time_string = time.strftime(TIME_FORMAT)
model_name = "resnet18"
dataset_name = "GTSRB"
pruned = False
sparse = False
base_path = pathlib.Path(
f"./results/trained_{model_name}_{dataset_name}_{'pruned' if pruned else 'original'}_{'sparse' if sparse else 'dense'}_earlystopping_lightning"
)
# model_checkpoint_path = base_path.with_suffix(f".{time_string}.pt")
model_checkpoint_path = pathlib.Path(
"./results/trained_resnet18_GTSRB_original_dense_earlystopping_lightning.2023_04_18__13_51_32_+0200.pt"
)
# attributions_checkpoint_path = base_path.with_suffix(f".{time_string}.attributions.pt")
attributions_checkpoint_path = pathlib.Path(
"./results/trained_resnet18_GTSRB_original_dense_earlystopping_lightning.2023_04_18__13_51_32_+0200.attributions.pt"
)
dataframe_path = base_path.with_suffix(f".{time_string}.csv")
learning_rate_finder = False
# seed = 7 # vgg11 cifar10
seed = 7 # resnet18 gtsrb
lightning.seed_everything(seed)
# In[4]:
trainer = lightning.Trainer(
accelerator="gpu",
devices=[2],
# setting max_epochs makes training stop immediately (it runs with max_epochs=0); fast_dev_run=True also breaks it
# fast_dev_run=True,
callbacks=[
lightning.pytorch.callbacks.EarlyStopping(
"val_loss",
min_delta=0.001,
patience=5,
verbose=True,
mode="min",
strict=True,
check_finite=True,
stopping_threshold=None,
divergence_threshold=None,
check_on_train_epoch_end=None,
log_rank_zero_only=False,
),
lightning.pytorch.callbacks.ModelCheckpoint(
dirpath=None,
filename=None,
monitor=None,
verbose=False,
save_last=None,
# to disable model saving
save_top_k=0,
save_weights_only=False,
mode="min",
auto_insert_metric_name=True,
every_n_train_steps=None,
train_time_interval=None,
every_n_epochs=None,
save_on_train_epoch_end=None,
),
lightning.pytorch.callbacks.RichProgressBar(
refresh_rate=10,
),
lightning.pytorch.callbacks.StochasticWeightAveraging(
swa_lrs=1e-2,
),
],
)
datamodule = DataModule(
dataset_name=dataset_name,
data_dir=f"/shared/ml/datasets/vision/{dataset_name}",
train_transform=None,
test_transform=None,
batch_size=64,
train_val_split=0.8,
seed=seed,
)
model = Model(
model_name=model_name,
num_classes=datamodule.num_classes,
accuracy_fn=torchmetrics.Accuracy(
task="multiclass",
num_classes=datamodule.num_classes,
),
loss_fn=torch.nn.CrossEntropyLoss(),
dataframe_path=dataframe_path,
optimizer_class=torch.optim.AdamW,
learning_rate=2e-3,
)
# In[ ]:
if learning_rate_finder:
tuner = lightning.pytorch.tuner.Tuner(trainer)
tuner.lr_find(model, datamodule=datamodule)
print(model.learning_rate)
raise Exception()  # intentional stop: only the learning-rate sweep was requested
if model_checkpoint_path.exists():
model = model.__class__.load_from_checkpoint(str(model_checkpoint_path))  # load_from_checkpoint returns a new instance; keep it
else:
model_checkpoint_path.parent.mkdir(parents=True, exist_ok=True)
trainer.fit(model, datamodule)
trainer.save_checkpoint(str(model_checkpoint_path))
# In[ ]:
datamodule.prepare_data()
datamodule.setup(stage="test")
attributions = model.__class__.get_attributions(
model,
datamodule.test_dataloader(),
list(model.LAYER_LIST[model.model_name].keys()),
attributions_checkpoint_path=attributions_checkpoint_path,
save_checkpoint=True,
load_checkpoint=True,
)
datamodule.teardown(stage="test")
model.add_hooks(attributions, topk=5, bottomk=5)
# In[ ]:
trainer.test(model, datamodule, ckpt_path=str(model_checkpoint_path))
| 28,259 | 34.280899 | 147 | py |
| enpheeph | enpheeph-main/notebooks/pruning_distribution_analysis/pruning_distribution_analysis_v2.2023_04_15__15_28_UTC.py |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# enpheeph - Neural Fault Injection Framework
# Copyright (C) 2020-2023 Alessio "Alexei95" Colucci
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
# # Use the enpheeph-dev mamba environment
# The old one is enpheeph-dev-old-lightning-flash
# In[1]:
import math
import os
import pathlib
import time
import captum
import lightning
import numpy
import pandas
import torch
import torch.optim
import torchmetrics
import torchvision
import torchvision.datasets
import torchvision.transforms
# In[2]:
class Model(lightning.LightningModule):
LAYER_LIST = {
"vgg11": {
"model.features.0": [64, 32, 32],
"model.features.1": [64, 32, 32],
"model.features.2": [64, 16, 16],
"model.features.3": [128, 16, 16],
"model.features.4": [128, 16, 16],
"model.features.5": [128, 8, 8],
"model.features.6": [256, 8, 8],
"model.features.7": [256, 8, 8],
"model.features.8": [256, 8, 8],
"model.features.9": [256, 8, 8],
"model.features.10": [256, 4, 4],
"model.features.11": [512, 4, 4],
"model.features.12": [512, 4, 4],
"model.features.13": [512, 4, 4],
"model.features.14": [512, 4, 4],
"model.features.15": [512, 2, 2],
"model.features.16": [512, 2, 2],
"model.features.17": [512, 2, 2],
"model.features.18": [512, 2, 2],
"model.features.19": [512, 2, 2],
"model.features.20": [512, 1, 1],
"model.avgpool": [512, 7, 7],
"model.classifier.0": [4096],
"model.classifier.1": [4096],
"model.classifier.2": [4096],
"model.classifier.3": [4096],
"model.classifier.4": [4096],
"model.classifier.5": [4096],
"model.classifier.6": [10],
},
}
def __init__(
self,
model_name,
num_classes,
accuracy_fn,
loss_fn,
dataframe_path,
optimizer_class,
learning_rate,
):
super().__init__()
self.save_hyperparameters()
self.model_name = model_name
self.num_classes = num_classes
self.accuracy = accuracy_fn
self.loss = loss_fn
self.optimizer_class = optimizer_class
self.learning_rate = learning_rate
self.dataframe_path = pathlib.Path(dataframe_path)
self.setup_model(model_name=self.model_name, num_classes=self.num_classes)
self.handles = []
self.reset_dataframe()
self.init_model()
def init_model(self):
for m in self.modules():
if isinstance(m, torch.nn.Conv2d):
torch.nn.init.kaiming_normal_(
m.weight, mode="fan_out", nonlinearity="relu"
)
elif isinstance(m, (torch.nn.BatchNorm2d, torch.nn.GroupNorm)):
torch.nn.init.constant_(m.weight, 1)
torch.nn.init.constant_(m.bias, 0)
def reset_dataframe(self):
self.dataframe = pandas.DataFrame(
columns=[
"module_name",
"tensor_type",
"batch_index",
"element_in_batch_index",
"location",
"value",
"accuracy",
"loss",
]
)
@staticmethod
def join_saved_dataframe(dataframe, dataframe_path: os.PathLike):
dataframe_path = pathlib.Path(dataframe_path)
if not dataframe_path.exists():
dataframe_path.parent.mkdir(parents=True, exist_ok=True)
dataframe.to_csv(dataframe_path, sep="|")
else:
df = pandas.read_csv(dataframe_path, sep="|", index_col=[0], header=[0])
new_df = pandas.concat([df, dataframe], axis=0)
new_df.reset_index(drop=True, inplace=True)
new_df.to_csv(dataframe_path, sep="|")
def forward(self, x):
return self.model(x)
def configure_optimizers(self):
optimizer = self.optimizer_class(self.parameters(), lr=self.learning_rate)
return optimizer
def make_neuron_output_function(self, module_name, location):
def save_neuron_output(module, args, output) -> None:
for b_idx, b in enumerate(output):
self.dataframe.loc[len(self.dataframe)] = [
module_name,
"output",
None,
b_idx,
location,
b[location].item(),
None,
None,
]
return save_neuron_output
def add_hooks(self, attributions, topk=1):
for layer_name, layer_attributions_and_deltas in attributions.items():
layer_attributions_cat = torch.cat(
tuple(l_attr for l_attr, _ in layer_attributions_and_deltas),
dim=0,
)
summed_layer_attributions = torch.sum(
layer_attributions_cat,
(0,),
)
topk_values, topk_indices = torch.topk(
abs(
summed_layer_attributions.flatten(),
),
k=topk,
largest=True,
sorted=True,
)
for top_index in topk_indices:
target_neuron_location = numpy.unravel_index(
top_index,
summed_layer_attributions.size(),
order="C",
)
module = self.get_layer_from_full_name(
self,
layer_name,
separator=".",
main_model_is_in_the_layer_name=False,
)
self.handles.append(
module.register_forward_hook(
self.make_neuron_output_function(
layer_name, tuple(target_neuron_location)
)
)
)
def setup_model(self, model_name, num_classes):
if model_name == "vgg11":
self.model = torchvision.models.vgg11(
num_classes=num_classes, init_weights=True
)
elif model_name == "mlp":
self.model = torch.nn.Sequential(
torch.nn.Flatten(),
torch.nn.Linear(28 * 28, 100),
torch.nn.ReLU(),
torch.nn.Linear(100, num_classes),
)
else:
raise ValueError("unknown model")
@staticmethod
def get_full_layer_name_from_summary(layer_summary, skip_main_model=True):
parent_info = layer_summary.parent_info
layer_full_name = layer_summary.var_name
while parent_info is not None and (
not skip_main_model
or skip_main_model
and parent_info.parent_info is not None
):
layer_full_name = f"{parent_info.var_name}.{layer_full_name}"
parent_info = parent_info.parent_info
return layer_full_name
@staticmethod
def get_layer_from_full_name(
model, layer_name, separator=".", main_model_is_in_the_layer_name=False
):
module = model
if main_model_is_in_the_layer_name:
layer_name = separator.join(layer_name.split(separator)[1:])
for l_n in layer_name.split(separator):
module = getattr(module, l_n)
return module
def get_attributions(
self,
dataloader,
layer_name_list,
attributions_checkpoint_path,
attribution=captum.attr.LayerConductance,
save_checkpoint=True,
load_checkpoint=True,
):
if attributions_checkpoint_path.exists() and load_checkpoint:
attributions = torch.load(str(attributions_checkpoint_path))
return attributions
elif save_checkpoint:
attributions_checkpoint_path.parent.mkdir(exist_ok=True, parents=True)
model = self.train(False).to(torch.device("cpu"))
attributions = {}
for layer_name in layer_name_list:
print(layer_name)
layer_attributions = []
attr_instance = attribution(
model, model.get_layer_from_full_name(model, layer_name)
)
for idx, b in enumerate(dataloader):
x, y = b
attr, delta = attr_instance.attribute(
inputs=x.to(torch.device("cpu")),
target=y.to(torch.device("cpu")),
return_convergence_delta=True,
)
layer_attributions.append(
[
attr.detach(),
delta.detach(),
],
)
if idx % 10 == 0:
print(f"Batches done: {idx}")
attributions[layer_name] = layer_attributions
if save_checkpoint:
torch.save(attributions, str(attributions_checkpoint_path))
if save_checkpoint:
torch.save(attributions, str(attributions_checkpoint_path))
return attributions
def inference_step(self, batch, only_x=False):
if only_x:
x = batch
else:
x, y = batch
y_hat = self(x)
if only_x:
d = {"loss": None, "accuracy": None, "predictions": y_hat}
else:
d = {
"loss": self.loss(y_hat, y),
"accuracy": self.accuracy(y_hat, y),
"predictions": y_hat,
}
return d
def training_step(self, batch, batch_idx):
metrics = self.inference_step(batch)
self.log_dict(
{"train_loss": metrics["loss"], "train_accuracy": metrics["accuracy"]},
on_step=True,
on_epoch=True,
prog_bar=True,
logger=True,
)
return metrics["loss"]
def test_step(self, batch, batch_idx, dataloader_idx=0):
metrics = self.inference_step(batch)
self.log_dict(
{"test_loss": metrics["loss"], "test_accuracy": metrics["accuracy"]},
on_step=True,
on_epoch=True,
prog_bar=True,
logger=True,
)
return metrics["predictions"]
def validation_step(self, batch, batch_idx):
metrics = self.inference_step(batch)
self.log_dict(
{"val_loss": metrics["loss"], "val_accuracy": metrics["accuracy"]},
on_step=True,
on_epoch=True,
prog_bar=True,
logger=True,
)
return metrics["predictions"]
def predict_step(self, batch, batch_idx, dataloader_idx=0):
metrics = self.inference_step(batch, only_x=True)
# self.log({"val_loss": metrics["loss"], "val_accuracy": metrics["accuracy"]}, on_step=True, on_epoch=True, prog_bar=True, logger=True)
return metrics["predictions"]
def on_test_batch_end(self, outputs, batch, batch_idx, dataloader_idx=0):
# super().on_test_batch_end(outputs, batch, batch_idx, dataloader_idx)
_, y = batch
row_selector = (
self.dataframe["accuracy"].isnull() & self.dataframe["loss"].isnull()
)
self.dataframe.loc[row_selector, "batch_index"] = batch_idx
assert (
len(self.dataframe.loc[row_selector]) / len(self.handles)
== y.size()[0]
== outputs.size()[0]
)
for bindex, (by_hat, by) in enumerate(zip(outputs, y)):
by_hat = by_hat.unsqueeze(0)
by = by.unsqueeze(0)
extra_row_selector = row_selector & (
self.dataframe["element_in_batch_index"] == bindex
)
self.dataframe.loc[extra_row_selector, "loss"] = self.loss(
by_hat, by
).item()
self.dataframe.loc[extra_row_selector, "accuracy"] = self.accuracy(
by_hat, by
).item()
self.dataframe_path.parent.mkdir(parents=True, exist_ok=True)
if batch_idx % 10 == 0:
self.join_saved_dataframe(self.dataframe, self.dataframe_path)
self.reset_dataframe()
# print(self.dataframe)
def on_test_end(self):
self.join_saved_dataframe(self.dataframe, self.dataframe_path)
class DataModule(lightning.LightningDataModule):
MNIST_DEFAULT_TRANSFORM = torchvision.transforms.Compose(
[
torchvision.transforms.ToTensor(),
torchvision.transforms.Normalize(
(0.1307,),
(0.3081,),
),
]
)
CIFAR10_DEFAULT_TRANSFORM = torchvision.transforms.Compose(
[
torchvision.transforms.ToTensor(),
torchvision.transforms.Normalize(
mean=[x / 255.0 for x in [125.3, 123.0, 113.9]],
std=[x / 255.0 for x in [63.0, 62.1, 66.7]],
),
]
)
def __init__(
self,
dataset_class,
data_dir: str = "/shared/ml/datasets/vision/",
train_transform=None,
test_transform=None,
batch_size=64,
num_workers=32,
train_val_split=0.8,
seed=42,
):
super().__init__()
self.data_dir = data_dir
self.batch_size = batch_size
self.train_val_split = train_val_split
self.num_workers = num_workers
self.seed = seed
if issubclass(dataset_class, torchvision.datasets.MNIST):
if train_transform is None:
train_transform = torchvision.transforms.Compose(
[
torchvision.transforms.RandomCrop(28, padding=4),
self.MNIST_DEFAULT_TRANSFORM,
]
)
if test_transform is None:
test_transform = self.MNIST_DEFAULT_TRANSFORM
self.num_classes = 10
elif issubclass(dataset_class, torchvision.datasets.CIFAR10):
if train_transform is None:
train_transform = torchvision.transforms.Compose(
[
torchvision.transforms.RandomCrop(32, padding=4),
torchvision.transforms.RandomHorizontalFlip(),
self.CIFAR10_DEFAULT_TRANSFORM,
]
)
if test_transform is None:
test_transform = self.CIFAR10_DEFAULT_TRANSFORM
self.num_classes = 10
else:
raise ValueError("unknown dataset")
self.dataset_class = dataset_class
self.train_transform = train_transform
self.test_transform = test_transform
def prepare_data(self):
# download
self.dataset_class(self.data_dir, train=True, download=True)
self.dataset_class(self.data_dir, train=False, download=True)
def setup(self, stage: str):
# Assign train/val datasets for use in dataloaders
if stage == "fit":
dataset_train_transform = self.dataset_class(
self.data_dir, train=True, transform=self.train_transform
)
n_train_elements = math.floor(
len(dataset_train_transform) * self.train_val_split
)
self.dataset_train, _ = torch.utils.data.random_split(
dataset_train_transform,
[n_train_elements, len(dataset_train_transform) - n_train_elements],
generator=torch.Generator().manual_seed(self.seed),
)
            # instantiate the same training set again with the eval-time
            # transform and split with the same seed, so the validation subset
            # gets test-time preprocessing while training keeps augmentations
            dataset_test_transform = self.dataset_class(
                self.data_dir, train=True, transform=self.test_transform
            )
            _, self.dataset_val = torch.utils.data.random_split(
                dataset_test_transform,
                [n_train_elements, len(dataset_train_transform) - n_train_elements],
                generator=torch.Generator().manual_seed(self.seed),
            )
# Assign test dataset for use in dataloader(s)
if stage == "test":
self.dataset_test = self.dataset_class(
self.data_dir, train=False, transform=self.test_transform
)
if stage == "predict":
self.dataset_predict = self.dataset_class(
self.data_dir, train=False, transform=self.test_transform
)
def train_dataloader(self):
return torch.utils.data.DataLoader(
self.dataset_train,
batch_size=self.batch_size,
shuffle=True,
num_workers=self.num_workers,
)
def val_dataloader(self):
return torch.utils.data.DataLoader(
self.dataset_val,
batch_size=self.batch_size,
shuffle=False,
num_workers=self.num_workers,
)
def test_dataloader(self):
return torch.utils.data.DataLoader(
self.dataset_test,
batch_size=self.batch_size,
shuffle=False,
num_workers=self.num_workers,
)
def predict_dataloader(self):
return torch.utils.data.DataLoader(
self.dataset_predict,
batch_size=self.batch_size,
shuffle=False,
num_workers=self.num_workers,
)
# In[3]:
TIME_FORMAT = "%Y_%m_%d__%H_%M_%S_%z"
time_string = time.strftime(TIME_FORMAT)
base_path = pathlib.Path("./results/trained_vgg11_cifar10_earlystopping_lightning")
model_checkpoint_path = base_path.with_suffix(f".{time_string}.pt")
attributions_checkpoint_path = base_path.with_suffix(f".{time_string}.attributions.pt")
dataframe_path = base_path.with_suffix(f".{time_string}.csv")
learning_rate_finder = False
seed = 7
lightning.seed_everything(seed)
# In[4]:
trainer = lightning.Trainer(
accelerator="gpu",
devices=[2],
max_epochs=-1,
callbacks=[
lightning.pytorch.callbacks.EarlyStopping(
"val_loss",
min_delta=0.001,
patience=5,
verbose=True,
mode="min",
strict=True,
check_finite=True,
stopping_threshold=None,
divergence_threshold=None,
check_on_train_epoch_end=None,
log_rank_zero_only=False,
),
lightning.pytorch.callbacks.ModelCheckpoint(
dirpath=None,
filename=None,
monitor=None,
verbose=False,
save_last=None,
# to disable model saving
save_top_k=0,
save_weights_only=False,
mode="min",
auto_insert_metric_name=True,
every_n_train_steps=None,
train_time_interval=None,
every_n_epochs=None,
save_on_train_epoch_end=None,
),
lightning.pytorch.callbacks.RichProgressBar(
refresh_rate=10,
),
lightning.pytorch.callbacks.StochasticWeightAveraging(
swa_lrs=1e-2,
),
],
)
model = Model(
model_name="vgg11",
num_classes=10,
accuracy_fn=torchmetrics.Accuracy(
task="multiclass",
num_classes=10,
),
loss_fn=torch.nn.CrossEntropyLoss(),
dataframe_path=dataframe_path,
optimizer_class=torch.optim.AdamW,
learning_rate=1e-3,
)
datamodule = DataModule(
dataset_class=torchvision.datasets.CIFAR10,
data_dir="/shared/ml/datasets/vision/CIFAR10",
train_transform=None,
test_transform=None,
batch_size=64,
train_val_split=0.8,
seed=seed,
)
tuner = lightning.pytorch.tuner.Tuner(trainer)
# In[5]:
if learning_rate_finder:
tuner.lr_find(model, datamodule=datamodule)
print(model.learning_rate)
if model_checkpoint_path.exists():
    # load_from_checkpoint returns a new instance, so keep the result
    model = model.__class__.load_from_checkpoint(str(model_checkpoint_path))
else:
model_checkpoint_path.parent.mkdir(parents=True, exist_ok=True)
trainer.fit(model, datamodule)
trainer.save_checkpoint(str(model_checkpoint_path))
# In[ ]:
datamodule.prepare_data()
datamodule.setup(stage="test")
# call the bound method directly instead of going through __class__
attributions = model.get_attributions(
    datamodule.test_dataloader(),
    list(model.LAYER_LIST[model.model_name].keys()),
    attributions_checkpoint_path=attributions_checkpoint_path,
    save_checkpoint=True,
    load_checkpoint=True,
)
datamodule.teardown(stage="test")
model.add_hooks(attributions, topk=3)
# In[ ]:
trainer.test(model, datamodule, ckpt_path=str(model_checkpoint_path))
# In[ ]:
# dataframe_path = model_checkpoint_path.with_suffix(f".{time.strftime(TIME_FORMAT)}.csv")
# model.dataframe.to_csv(dataframe_path, sep="|")
# model.dataframe
| 21,405
| 31.482549
| 143
|
py
|
autoSDC
|
autoSDC-master/setup.py
|
from setuptools import setup, find_packages
setup(
name="asdc",
version="0.1",
packages=find_packages(),
include_package_data=True,
install_requires=["Click",],
entry_points="""
[console_scripts]
asdc=asdc.scripts.cli:cli
""",
)
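# Installing in editable mode exposes the `asdc` console script declared in
# entry_points above (a sketch; the available subcommands are defined in
# asdc/scripts/cli.py):
#
#     pip install -e .
#     asdc --help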
| 274
| 18.642857
| 43
|
py
|
autoSDC
|
autoSDC-master/examples/position_step.py
|
#!/usr/bin/env python
import asdc.position
def position_step(speed=1e-5):
with asdc.position.controller(ip="192.168.10.11", speed=speed) as pos:
pos.print_status()
pos.update_x(delta=1e-4, verbose=True)
pos.print_status()
if __name__ == "__main__":
position_step()
| 304
| 16.941176
| 74
|
py
|
autoSDC
|
autoSDC-master/examples/line_scan.py
|
#!/usr/bin/env python
import json
import time
from datetime import datetime
import os
import sys
import asdc.position
import asdc.control
def line_scan(speed=1e-5, poll_interval=5):
""" perform a line scan with CV experiments, recording position, current, potential, and parameters in json log files
Position units are METERS!
"""
delta = [1e-4, 1e-4, 0.0]
initial_delta = [0.0, 0.0, -4.90e-4]
final_delta = [0.0, 0.0, 4.90e-4]
n_steps = 10
with asdc.position.controller(ip="192.168.10.11", speed=speed) as pos:
pos.print_status()
pos.update(delta=initial_delta, verbose=True)
pos.print_status()
with asdc.control.controller(start_idx=17109013) as pstat:
pstat.set_current_range("20nA")
for idx in range(n_steps):
# scan, log, take a position step
# run a CV experiment
status, params = pstat.multi_cyclic_voltammetry(
initial_potential=0.0,
vertex_potential_1=-0.25,
vertex_potential_2=0.65,
final_potential=0.0,
scan_rate=0.2,
cell_to_use="EXTERNAL",
e_filter="1Hz",
i_filter="1Hz",
)
pstat.start()
while pstat.sequence_running():
time.sleep(poll_interval)
# collect and log data
scan_data = {
"measurement": "cyclic_voltammetry",
"parameters": params,
"index_in_sequence": idx,
"timestamp": datetime.now().isoformat(),
"current": pstat.current(),
"potential": pstat.potential(),
"position": pos.current_position(),
}
logfile = "line_scan_{:03d}.json".format(idx)
with open(logfile, "w") as f:
json.dump(scan_data, f)
pstat.clear()
# update position
pos.update(delta=delta, verbose=True)
pos.print_status()
# bring the probe back up
pos.update(delta=final_delta, verbose=True)
pos.print_status()
if __name__ == "__main__":
line_scan()
| 2,365
| 28.209877
| 121
|
py
|
autoSDC
|
autoSDC-master/sphinx-docs/source/conf.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# versastat documentation build configuration file, created by
# sphinx-quickstart on Mon Jan 8 11:29:00 2018.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
sys.path.insert(0, os.path.abspath("../.."))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ["sphinx.ext.autodoc", "sphinx.ext.mathjax", "sphinx.ext.viewcode"]
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = ".rst"
# The master toctree document.
master_doc = "index"
# General information about the project.
project = "versastat"
copyright = "2018, Brian DeCost"
author = "Brian DeCost"
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = "0.1"
# The full version, including alpha/beta/rc tags.
release = "0.1"
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = []
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = "sphinx"
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = "alabaster"
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# This is required for the alabaster theme
# refs: http://alabaster.readthedocs.io/en/latest/installation.html#sidebars
html_sidebars = {
"**": [
"relations.html", # needs 'show_related': True theme option to display
"searchbox.html",
]
}
# -- Options for HTMLHelp output ------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = "versastatdoc"
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, "versastat.tex", "versastat Documentation", "Brian DeCost", "manual"),
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [(master_doc, "versastat", "versastat Documentation", [author], 1)]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(
master_doc,
"versastat",
"versastat Documentation",
author,
"versastat",
"One line description of project.",
"Miscellaneous",
),
]
| 5,159
| 29.898204
| 87
|
py
|
autoSDC
|
autoSDC-master/test/flow_mixing.py
|
import os
import sys
import json
import time
import serial
import argparse
import numpy as np
from pathlib import Path
from datetime import datetime
project_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.append(project_dir)
from asdc import sdc
PUMP_PORT = "COM12"
ORION_PORT = "COM17"
# solutions = {
# 0: {'base': 1.0},
# 1: {'water': 1.0},
# }
solutions = {0: {"KOH": 1.0}, 1: {"K2SO4": 1.0}, 2: {"H2SO4": 1.0}}
# what interface would be nice?
# current:
# rates = {'acid': 0.4, 'base': 0.6}
# pump_array.set_rates(scale(rates, 2))
#
# pump_array.set_rates(rates, 2)
# pump_array.set_rates([0.4, 0.6, 0.0], 2)
S1 = {
"mixfileVersion": 0.01,
"name": "syringe1",
"contents": [
{"name": "Na2SO4", "formula": "Na2SO4", "quantity": 0.1, "units": "mol/L"},
{
"name": "water",
"molfile": "\n\n\n 1 0 0 0 0 0 0 0 0 0999 V2000\n 0.0000 0.0000 0.0000 O 0 0 0 0 0 0 0 0 0 0 0 0\nM END",
"inchi": "InChI=1S/H2O/h1H2",
"inchiKey": "InChIKey=XLYOFNOQVPJJNP-UHFFFAOYSA-N",
},
],
}
def test_flow_mixing(
data_dir, relative_rates, total_rate=11, duration=60, dashboard=False
):
data_dir = Path(data_dir)
os.makedirs(data_dir, exist_ok=True)
phmeter = sdc.orion.PHMeter(ORION_PORT, zmq_pub=dashboard)
pump_array = sdc.pump.PumpArray(solutions, port=PUMP_PORT, timeout=1)
meta = []
for idx, setpoint in enumerate(relative_rates):
logfile = f"pH-log-{idx}.csv"
with phmeter.monitor(interval=1, logfile=data_dir / logfile):
pump_array.set_rates(setpoint, start=True, fast=True)
meta.append(
{
"logfile": logfile,
"setpoint": setpoint,
"ts": datetime.now().isoformat(),
}
)
time.sleep(duration)
pump_array.stop_all(fast=True)
with open(data_dir / "metadata.json", "w") as f:
json.dump(meta, f)
def dryrun(data_dir, relative_rates, total_rate=11, duration=30):
data_dir = Path(data_dir)
meta = []
for idx, setpoint in enumerate(relative_rates):
# setpoint = {'KOH': x * total_rate, 'K2SO4': (1-x) * total_rate}
# logfile = f'pH-log-{idx}-x{x}.csv'
logfile = f"pH-log-{idx}.csv"
print(data_dir / logfile, setpoint)
meta.append(
{"logfile": logfile, "setpoint": setpoint, "ts": datetime.now().isoformat()}
)
time.sleep(0.1)
print(json.dumps(meta))
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="flow mixing test harness")
parser.add_argument("datadir", type=str, help="data and log directory")
parser.add_argument(
"--total-rate", type=float, default=5, help="total flow rate in mL/min"
)
parser.add_argument("--duration", type=float, default=60, help="hold time in s")
parser.add_argument(
"--dashboard", action="store_true", help="set up ZMQ publisher for dashboard"
)
parser.add_argument("--dry-run", action="store_true", help="generate test output")
args = parser.parse_args()
print(args)
# relative_rates = [0, 0.001, 0.003, 0.01, 0.03, 0.1, 0.3, 1]
# base first
# relative_rates = [1, .3, .1, .03, .01, .003, .001, 0]
# relative_rates = [1, 0.1, 0.01, 0.001, 0.0001, 0]
# relative_rates = [{'KOH': x * total_rate, 'K2SO4': (1-x) * total_rate} for x in relative_rates]
# basic to neutral
total_rate = args.total_rate
# relative_rates = [1, 0.1, 0.01, 0.001]
relative_rates = [1, 0.3, 0.1, 0.03, 0.01]
basic_rates = [
{"KOH": x * total_rate, "K2SO4": (1 - x) * total_rate} for x in relative_rates
]
# switch the acid ordering to go from neutral to acidic
acidic_rates = [
{"H2SO4": x * total_rate, "K2SO4": (1 - x) * total_rate}
for x in relative_rates[::-1]
]
# relative_rates = basic_rates + [{'K2SO4': total_rate}] + acidic_rates
relative_rates = [{"K2SO4": total_rate}] + acidic_rates
if args.dry_run:
dryrun(
args.datadir,
relative_rates,
total_rate=args.total_rate,
duration=args.duration,
)
else:
test_flow_mixing(
args.datadir,
relative_rates,
args.total_rate,
duration=args.duration,
dashboard=args.dashboard,
)
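# Invocation sketch (flag names match the argparse setup above; the data
# directory path is illustrative):
#
#     python test/flow_mixing.py data/flow-test --total-rate 5 --duration 60 --dry-run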
| 4,491
| 28.748344
| 150
|
py
|
autoSDC
|
autoSDC-master/scripts/k20_single_posterior.py
|
#!/usr/bin/env python
import os
import sys
import json
import click
import gpflow
import numpy as np
import pandas as pd
from scipy import stats
from ruamel import yaml
from sklearn import metrics
import ternary
import matplotlib.pyplot as plt
from asdc import analyze
from asdc import emulation
from asdc import visualization
opt = gpflow.training.ScipyOptimizer()
def probability_of_improvement(mu, var, current_best=None, minimize=True):
""" probability of improvement: default to minimization """
dist = stats.norm(mu.flat, np.sqrt(var).flat)
if minimize:
poi = dist.cdf(current_best)
else:
poi = 1 - dist.cdf(current_best)
return poi
def confidence_bound(mu, var, current_best, kappa=2, minimize=True):
""" confidence bound acquisition """
if minimize:
bound = -(mu.flat - kappa * np.sqrt(var.flat))
else:
bound = mu.flat + kappa * np.sqrt(var.flat)
return bound
def random(mu, var, current_best, minimize=True):
""" acquisition stub for random acquisition function
just draw acquisition function from standard gaussian distribution
"""
N, _ = mu.shape
return np.random.normal(0, 1, N)
def setup_acquisition(config):
""" return a closure wrapping specific parameters of acquisition function... """
a = config["acquisition"]
if a["strategy"] == "cb":
def acquisition(mu, var, current_best, minimize=True):
return confidence_bound(
mu,
var,
current_best,
kappa=config["acquisition"]["kappa"],
minimize=minimize,
)
return acquisition
elif a["strategy"] == "random":
return random
elif a["strategy"] == "pi":
return probability_of_improvement
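# Sketch of the `acquisition` block this expects in the YAML config (field
# names taken from the lookups above; the values shown are illustrative):
#
#     acquisition:
#       strategy: cb   # one of: cb, random, pi
#       kappa: 2.0     # only read by the confidence-bound strategy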
@click.command()
@click.argument("config-file", type=click.Path())
def k20_single_objective(config_file):
""" optimize a single-objective function emulated by a GP fit to experimental data """
with open(config_file, "r") as f:
config = yaml.safe_load(f)
model_dir, _ = os.path.split(config_file)
fig_dir = os.path.join(model_dir, "figures")
os.makedirs(fig_dir, exist_ok=True)
print(config)
c = config["emulator"]
em = emulation.ExperimentEmulator(c["datafile"], components=c["components"])
task = config["task"]
target = task["target"]
evaluate_candidates = setup_acquisition(config)
# set up a discrete grid of samples to optimize over...
# randomize the grid because np.argmax takes the first value in memory order
# if there are degenerate values
domain = emulation.simplex_grid(task["domain_resolution"], buffer=task["buffer"])
domain = domain[np.random.permutation(domain.shape[0])]
visualization.ternary_scatter(domain, em(domain, target=target), label=target)
plt.savefig(
os.path.join(fig_dir, f"target_function_mean_{target}.png"), bbox_inches="tight"
)
plt.clf()
# find max
# s = emulation.simplex_grid(200, buffer=0.05)
response = em(domain, target=target, sample_posterior=True)
max_value = response.max()
min_value = response.min()
print(max_value)
visualization.ternary_scatter(domain, response, label=target)
plt.savefig(
os.path.join(fig_dir, f"target_function_{target}.png"), bbox_inches="tight"
)
plt.clf()
# initialize with the corners of the simplex subdomain...
queries = []
_s = domain.argmax(axis=0)
s = domain[_s]
v = response[_s]
mae, r2, ev, best_value = [], [], [], []
for query_idx in range(task["budget"]):
# draw a picture
visualization.ternary_scatter(s, v)
plt.savefig(
os.path.join(fig_dir, f"measured_{target}_{len(queries):02d}.png"),
bbox_inches="tight",
)
plt.clf()
# fit the surrogate model
m = emulation.model_ternary(s, v[:, None])
opt.minimize(m)
mu, var = m.predict_y(domain[:, :-1])
# assess regret...
if task["minimize"]:
current_best = v.min()
print(f"query {query_idx}: {min_value - current_best}")
else:
current_best = v.max()
print(f"query {query_idx}: {max_value - current_best}")
# evaluate predictive accuracy
best_value.append(current_best)
mae.append(np.mean(np.abs(response - mu)))
r2.append(metrics.r2_score(response.flat, mu.flat))
ev.append(metrics.explained_variance_score(response.flat, mu.flat))
print("MAE", mae[-1])
print("R2", r2[-1])
print("EV", ev[-1])
plt.scatter(response.flat, mu.flat)
plt.plot(
(response.min(), response.max()),
(response.min(), response.max()),
linestyle="--",
color="k",
)
plt.savefig(
os.path.join(fig_dir, f"parity_{target}_{len(queries):02d}.png"),
bbox_inches="tight",
)
plt.clf()
# draw the extrapolations
visualization.ternary_scatter(domain, mu.flat, label=target)
plt.savefig(
os.path.join(fig_dir, f"surrogate_{target}_{len(queries):02d}.png"),
bbox_inches="tight",
)
plt.clf()
acquisition = evaluate_candidates(
mu, var, current_best, minimize=task["minimize"]
)
acquisition[queries] = -np.inf
visualization.ternary_scatter(domain, acquisition, label="acquisition")
plt.savefig(
os.path.join(fig_dir, f"acquisition_{target}_{len(queries):02d}.png"),
bbox_inches="tight",
)
plt.clf()
# update the dataset
# index into pre-generated posterior function sample...
query_idx = np.argmax(acquisition)
queries.append(query_idx)
query = domain[queries[-1]][None, :]
s = np.vstack((s, query))
v = np.hstack((v, response[query_idx]))
# draw a picture
visualization.ternary_scatter(s, v)
plt.savefig(
os.path.join(fig_dir, f"measured_{target}_{len(queries)}.png"),
bbox_inches="tight",
)
plt.clf()
budget = (3 + np.arange(task["budget"])).tolist()
with open(os.path.join(model_dir, "trace.json"), "w") as f:
json.dump(
{
"budget": budget,
"mae": mae,
"r2": r2,
"ev": ev,
"best_value": best_value,
},
f,
)
return
if __name__ == "__main__":
k20_single_objective()
| 6,600
| 28.337778
| 90
|
py
|
autoSDC
|
autoSDC-master/scripts/test_pumps.py
|
import sys
import numpy as np
sys.path.append(".")
from asdc import sdc
def test_pump_array():
print("connecting to pumps...")
p = sdc.pump.PumpArray(port="COM6")
p.print_config()
if __name__ == "__main__":
test_pump_array()
| 246
| 14.4375
| 39
|
py
|
autoSDC
|
autoSDC-master/scripts/stop_reglo.py
|
import sys
sys.path.append(".")
from asdc.sdc import reglo
r = reglo.Reglo(address="COM16")
r.stop()
| 103
| 12
| 32
|
py
|
autoSDC
|
autoSDC-master/scripts/analyze_NiTiAl_k20.py
|
import os
import glob
import json
import gpflow
import dataset
import numpy as np
import pandas as pd
from datetime import datetime
from asdc import analyze
flags = [36, 38]
elements = ["Ni", "Al", "Ti"]
def model_composition(df, invert_x=True, convert_to_decimal=True):
    X = df.loc[:, ("0", "1")].values.astype(float)  # np.float alias removed in newer numpy
# wafer and composition measurement reference frames
# are mirrored in x
if invert_x:
X[:, 0] *= -1
Y = df.loc[:, elements].values
if convert_to_decimal:
Y *= 0.01
N, D = X.shape
m = gpflow.models.GPR(
X, Y, kern=gpflow.kernels.RBF(2), mean_function=gpflow.mean_functions.Constant()
)
opt = gpflow.train.ScipyOptimizer()
opt.minimize(m, disp=True)
return m
def model_fwhm(df, invert_x=True):
    X = df.loc[:, ("0", "1")].values.astype(float)
# wafer and composition measurement reference frames
# are mirrored in x
if invert_x:
X[:, 0] *= -1
F = df["FWHM"].values.astype(np.float)[:, None]
N, D = X.shape
m = gpflow.models.GPR(
X,
F,
kern=gpflow.kernels.Matern32(2),
mean_function=gpflow.mean_functions.Constant(),
)
opt = gpflow.train.ScipyOptimizer()
opt.minimize(m, disp=True)
return m
def extract_cv_features(data, start=1120):
# first find the right segment of the data...
I = np.array(data["current"])[start:]
V = np.array(data["potential"])[start:]
# now correct for autorange artifacts
a = analyze.model_autorange_artifacts(V, I, tau_increasing=10)
log_I = np.log10(np.abs(I)) - a
# extract features from polarization curve
log_I, m, lm, cv_features = analyze.model_polarization_curve(
V, log_I, bg_order=7, lm_method="huber", smooth=True
)
cv_features["slope"] = lm.coef_[0]
return cv_features
def analyze_NiTiAl():
datafiles = sorted(glob.glob("data/NiTiAl-K20/*.json"))
db_file = "data/k20-NiTiAl.db"
db = dataset.connect(f"sqlite:///{db_file}")
invert_x = True
convert_to_decimal = True
k20 = pd.read_csv("data/k20.csv", index_col=0)
composition = model_composition(
k20, invert_x=invert_x, convert_to_decimal=convert_to_decimal
)
fwhm = model_fwhm(k20, invert_x=invert_x)
for idx, datafile in enumerate(datafiles):
with open(datafile, "r") as ff:
d = json.load(ff)
d["flag"] = True if d["index_in_sequence"] in flags else False
d["timestamp_start"] = datetime.fromisoformat(d["timestamp_start"][0])
d["timestamp"] = datetime.fromisoformat(d["timestamp"])
if not d["flag"]:
cv_features = extract_cv_features(d)
# convert to native types
cv_features = {key: float(val) for key, val in cv_features.items()}
d.update(cv_features)
# unpack positions
d["x_combi"] = d["position_combi"][0]
d["y_combi"] = d["position_combi"][1]
d["x_versa"] = d["position_versa"][0]
d["y_versa"] = d["position_versa"][1]
d["z_versa"] = d["position_versa"][2]
X_query = np.array((d["x_combi"], d["y_combi"]))[None, :]
C, C_var = composition.predict_y(X_query)
for el, comp in zip(elements, C.flat):
d[el] = comp
F, F_var = fwhm.predict_y(X_query)
d["fwhm"] = float(F)
# pack results into json string
res = {
"current": d["current"],
"potential": d["potential"],
"error_codes": d["error_codes"],
}
d["result"] = json.dumps(res)
for key in res.keys():
del d[key]
del d["position_combi"]
del d["position_versa"]
with db as tx:
db["experiment"].insert(d)
if __name__ == "__main__":
analyze_NiTiAl()
| 3,828
| 25.047619
| 88
|
py
|
autoSDC
|
autoSDC-master/scripts/test_pump_command.py
|
import sys
import numpy as np
sys.path.append(".")
from asdc import sdc
solutions = {0: {"H2SO4": 1.0}, 1: {"Na2SO4": 1.0}, 2: {"KOH": 1.0}}
def test_pump_array():
print("connecting to pumps...")
p = sdc.pump.PumpArray(solutions, port="COM6")
p.print_config()
# p.set_pH(setpoint=2.0)
p.set_rates({"KOH": 1.0, "H2SO4": 0.5})
if __name__ == "__main__":
test_pump_array()
| 401
| 18.142857
| 68
|
py
|
autoSDC
|
autoSDC-master/scripts/counterpump.py
|
import sys
sys.path.append(".")
from asdc.sdc import microcontroller
adafruit_port = "COM9"
p = microcontroller.PeristalticPump(port=adafruit_port, timeout=1)
# parse commandline arguments
args = sys.argv
if len(args) == 2:
setpoint = args[1]
try:
setpoint = float(setpoint)
if setpoint < 0 or setpoint > 1:
raise ValueError
except ValueError:
if setpoint != "stop":
raise ("setpoint must be between 0 and 1")
elif len(args) == 1:
    print("starting the pump with default flow rate")
    setpoint = 0.3
else:
    sys.exit("usage: counterpump.py [setpoint in [0, 1] | stop]")
if setpoint == "stop":
print(f"stopping the pump!")
p.stop()
else:
print(f"setting flow rate to {setpoint}")
p.set_flow_proportion(setpoint)
p.start()
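# Invocation sketch (a positional setpoint in [0, 1], or the literal "stop"):
#
#     python scripts/counterpump.py 0.5
#     python scripts/counterpump.py stop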
| 740
| 22.15625
| 66
|
py
|
autoSDC
|
autoSDC-master/scripts/debug_reglo.py
|
import sys
import json
import time
import asyncio
import numpy as np
import regloicclib
sys.path.append(".")
from asdc import sdc
# needle defaults counterclockwise (-)
# dump (-)
# loop (+)
# source (+)
from enum import IntEnum
class Channel(IntEnum):
ALL = 0
NEEDLE = 1
DUMP = 2
LOOP = 3
SOURCE = 4
# 12 mL/min
class Reglo(regloicclib.Pump):
""" thin wrapper around the pump interface from regloicc """
def __init__(self, address="COM16", tubing_inner_diameter=1.52):
super().__init__(address=address)
self.tubing_inner_diameter = tubing_inner_diameter
for channel in range(1, 5):
self.setTubingInnerDiameter(self.tubing_inner_diameter, channel=channel)
# def stop(self):
# self.pump.stop()
def droplet(
self,
prep_height=0.004,
wetting_height=0.0011,
fill_rate=1.0,
fill_counter_ratio=0.75,
fill_time=None,
shrink_counter_ratio=1.1,
shrink_time=None,
flow_rate=0.5,
target_rate=0.05,
cleanup_duration=3,
cleanup_pulse_duration=0,
stage_speed=0.001,
):
""" slack bot command for prototyping droplet contact routine
#### json arguments
| Name | Type | Description | Default |
|------------------|-------|-----------------------------------------------------|---------|
| `prep_height` | float | z setting to grow the droplet | 4mm |
| `wetting_height` | float | z setting to wet the droplet to the surface | 1.1mm |
| `fill_rate` | float | pumping rate during droplet growth | 1 mL/min |
| `fill_counter_ratio` | float | counterpumping ratio during droplet growth | 0.75 |
| `fill_time` | float | droplet growth duration (s) | None |
| `shrink_counter_ratio` | float | counterpumping ratio during droplet wetting phase | 1.1 |
| `shrink_time` | float | droplet wetting duration (s) | None |
| `flow_rate` | float | total flow rate during droplet formation (mL/min) | 0.5 |
| `target_rate` | float | final flow rate after droplet formation (mL/min) | 0.05 |
        | `cleanup_duration` | float | duration of pre-droplet-formation cleanup siphoning | 3 |
| `stage_speed` | float | stage velocity during droplet formation op | 0.001 |
"""
# stage speed is specified in m/s
stage_speed = min(stage_speed, 1e-3)
stage_speed = max(stage_speed, 1e-5)
# start at zero
with sdc.position.sync_z_step(height=wetting_height, speed=stage_speed):
if cleanup_duration > 0:
# TODO: turn on the needle
# make an option to pulse loop and dump simultaneously, same rate opposite directions?
print("cleaning up...")
self.continuousFlow(-10.0, channel=Channel.NEEDLE.value)
self.stop(channel=Channel.SOURCE.value)
self.stop(channel=Channel.LOOP.value)
if cleanup_pulse_duration > 0:
pulse_flowrate = -1.0
# self.continuousFlow(pulse_flowrate, channel=Channel.LOOP.value)
self.continuousFlow(pulse_flowrate, channel=Channel.DUMP.value)
time.sleep(cleanup_pulse_duration)
self.stop(channel=Channel.DUMP.value)
time.sleep(cleanup_duration)
height_difference = prep_height - wetting_height
height_difference = max(0, height_difference)
with sdc.position.sync_z_step(height=height_difference, speed=stage_speed):
# counterpump slower to fill the droplet
print("filling droplet")
counter_flowrate = fill_rate * fill_counter_ratio
self.continuousFlow(fill_rate, channel=Channel.SOURCE.value)
self.continuousFlow(-fill_rate, channel=Channel.LOOP.value)
self.continuousFlow(-counter_flowrate, channel=Channel.DUMP.value)
fill_start = time.time()
if fill_time is None:
input("*filling droplet*: press enter to continue...")
else:
time.sleep(fill_time)
fill_time = time.time() - fill_start
# drop down to wetting height
# counterpump faster to shrink the droplet
print("shrinking droplet")
shrink_flowrate = fill_rate * shrink_counter_ratio
self.continuousFlow(-shrink_flowrate, channel=Channel.DUMP.value)
shrink_start = time.time()
if shrink_time is None:
input("*shrinking droplet*: press enter to continue...")
else:
time.sleep(shrink_time)
shrink_time = time.time() - shrink_start
print("equalizing differential pumping rate")
self.continuousFlow(fill_rate, channel=Channel.SOURCE.value)
self.continuousFlow(-fill_rate, channel=Channel.LOOP.value)
self.continuousFlow(-fill_rate, channel=Channel.DUMP.value)
# drop down to contact height
# instructions['fill_time'] = fill_time
# instructions['shrink_time'] = shrink_time
time.sleep(3)
# purge...
print("purging solution")
self.continuousFlow(6.0, channel=Channel.SOURCE.value)
self.continuousFlow(-6.0, channel=Channel.LOOP.value)
self.continuousFlow(-6.0, channel=Channel.DUMP.value)
time.sleep(60)
# reverse the loop direction
self.continuousFlow(6.0, channel=Channel.LOOP.value)
time.sleep(3)
# disable source and dump
self.stop(channel=Channel.SOURCE.value)
self.stop(channel=Channel.DUMP.value)
# step to target flow rate
self.continuousFlow(target_rate, channel=Channel.LOOP.value)
self.continuousFlow(-2.0, channel=Channel.NEEDLE.value)
# message = f"contact routine with {json.dumps(locals())}"
# print(message)
        print(locals())
return
| 6,330
| 35.595376
| 106
|
py
|
autoSDC
|
autoSDC-master/scripts/calibrate_ismatec.py
|
import sys
sys.path.append(".")
import json
import time
import numpy as np
import pandas as pd
from scipy import stats
from asdc.sdc import utils
from asdc.sdc import microcontroller
pump = microcontroller.PeristalticPump()
# sweep the flow proportion from 0 to 0.6 in steps of 0.05 (13 points)
proportion = np.linspace(0.0, 0.6, 13)
volts_in, volts_out, sem_volts_out = [], [], []
pump.start()
for p in proportion:
volts_in.append(p * 3.3)
pump.set_flow_proportion(p)
v = []
for iteration in range(5):
time.sleep(1)
v.append(pump.get_flow())
volts_out.append(np.mean(v))
sem_volts_out.append(stats.sem(v))
pump.stop()
df = pd.DataFrame(
{
"proportion": proportion,
"volts_in": volts_in,
"volts_out": volts_out,
"sem_volts_out": sem_volts_out,
}
)
df.to_csv("ismatec_calib.csv")
| 799
| 17.181818
| 47
|
py
|
autoSDC
|
autoSDC-master/scripts/test_pH_target.py
|
import sys
import numpy as np
sys.path.append(".")
from asdc import sdc
def test_pump_array():
print("connecting to pumps...")
p = sdc.pump.PumpArray(port="COM6")
p.print_config()
p.run_all()
for setpoint in [2.0, 3.0, 4.0, 5.0]:
print("setpoint pH:", setpoint)
p.set_pH(setpoint=setpoint)
input("Press enter to continue to the next pH setpoint...")
p.stop_all()
if __name__ == "__main__":
test_pump_array()
| 469
| 17.076923
| 67
|
py
|
autoSDC
|
autoSDC-master/scripts/copper_pourbaix.py
|
import sys
import click
import numpy as np
from ruamel import yaml
sys.path.append(".")
from asdc import sdc
@click.command()
@click.argument("config-file", type=click.Path())
def copper_pourbaix(config_file):
with open(config_file, "r") as f:
config = yaml.safe_load(f)
solutions = config.get("solutions")
print("connecting to pumps...")
p = sdc.pump.PumpArray(solutions, port="COM6")
p.print_config()
p.run_all()
for setpoint in [2.0, 3.0, 4.0, 5.0]:
print("setpoint pH:", setpoint)
p.set_pH(setpoint=setpoint)
input("Press enter to continue to the next pH setpoint...")
p.stop_all()
if __name__ == "__main__":
copper_pourbaix()
| 711
| 18.777778
| 67
|
py
|
autoSDC
|
autoSDC-master/scripts/k20_single_objective.py
|
#!/usr/bin/env python
import os
import sys
import click
import gpflow
import numpy as np
import pandas as pd
from scipy import stats
from ruamel import yaml
from sklearn import metrics
import ternary
import matplotlib.pyplot as plt
from asdc import analyze
from asdc import emulation
from asdc import visualization
opt = gpflow.training.ScipyOptimizer()
def probability_of_improvement(mu, var, current_best=None, minimize=True):
""" probability of improvement: default to minimization """
dist = stats.norm(mu.flat, np.sqrt(var).flat)
if minimize:
poi = dist.cdf(current_best)
else:
poi = 1 - dist.cdf(current_best)
return poi
def confidence_bound(mu, var, current_best, kappa=2, minimize=True):
""" confidence bound acquisition """
if minimize:
bound = -(mu.flat - kappa * np.sqrt(var.flat))
else:
bound = mu.flat + kappa * np.sqrt(var.flat)
return bound
def random(mu, var, current_best, minimize=True):
""" acquisition stub for random acquisition function
just draw acquisition function from standard gaussian distribution
"""
N, _ = mu.shape
return np.random.normal(0, 1, N)
def setup_acquisition(config):
""" return a closure wrapping specific parameters of acquisition function... """
a = config["acquisition"]
if a["strategy"] == "cb":
def acquisition(mu, var, current_best, minimize=True):
return confidence_bound(
mu,
var,
current_best,
kappa=config["acquisition"]["kappa"],
minimize=minimize,
)
return acquisition
elif a["strategy"] == "random":
return random
elif a["strategy"] == "pi":
return probability_of_improvement
@click.command()
@click.argument("config-file", type=click.Path())
def k20_single_objective(config_file):
""" optimize a single-objective function emulated by a GP fit to experimental data """
with open(config_file, "r") as f:
config = yaml.safe_load(f)
model_dir, _ = os.path.split(config_file)
fig_dir = os.path.join(model_dir, "figures")
os.makedirs(fig_dir, exist_ok=True)
print(config)
c = config["emulator"]
em = emulation.ExperimentEmulator(c["datafile"], components=c["components"])
task = config["task"]
target = task["target"]
evaluate_candidates = setup_acquisition(config)
# set up a discrete grid of samples to optimize over...
# randomize the grid because np.argmax takes the first value in memory order
# if there are degenerate values
domain = emulation.simplex_grid(task["domain_resolution"], buffer=task["buffer"])
domain = domain[np.random.permutation(domain.shape[0])]
visualization.ternary_scatter(domain, em(domain, target=target), label=target)
plt.savefig(
os.path.join(fig_dir, f"target_function_{target}.png"), bbox_inches="tight"
)
plt.clf()
# find max
# s = emulation.simplex_grid(200, buffer=0.05)
true_mu = em(domain, target=target)
    max_value = true_mu.max()
    # the second assignment previously clobbered max_value; the regret
    # printouts below need both extremes
    min_value = true_mu.min()
    print(max_value)
# initialize
queries = []
s = emulation.simplex_grid(2, buffer=task["buffer"])
v = em(s, target=target)
mae, r2, ev = [], [], []
for query_idx in range(task["budget"]):
# draw a picture
visualization.ternary_scatter(s, v)
plt.savefig(
os.path.join(fig_dir, f"measured_{target}_{len(queries):02d}.png"),
bbox_inches="tight",
)
plt.clf()
# fit the surrogate model
m = emulation.model_ternary(s, v[:, None])
opt.minimize(m)
mu, var = m.predict_y(domain[:, :-1])
# assess regret...
if task["minimize"]:
current_best = v.min()
print(f"query {query_idx}: {min_value - current_best}")
else:
current_best = v.max()
print(f"query {query_idx}: {max_value - current_best}")
# evaluate predictive accuracy
mae.append(np.mean(np.abs(true_mu - mu)))
r2.append(metrics.r2_score(true_mu.flat, mu.flat))
ev.append(metrics.explained_variance_score(true_mu.flat, mu.flat))
print("MAE", mae[-1])
print("R2", r2[-1])
print("EV", ev[-1])
plt.scatter(true_mu.flat, mu.flat)
plt.plot(
(true_mu.min(), true_mu.max()),
(true_mu.min(), true_mu.max()),
linestyle="--",
color="k",
)
plt.savefig(
os.path.join(fig_dir, f"parity_{target}_{len(queries):02d}.png"),
bbox_inches="tight",
)
plt.clf()
# draw the extrapolations
visualization.ternary_scatter(domain, mu.flat, label=target)
plt.savefig(
os.path.join(fig_dir, f"surrogate_{target}_{len(queries):02d}.png"),
bbox_inches="tight",
)
plt.clf()
acquisition = evaluate_candidates(
mu, var, current_best, minimize=task["minimize"]
)
acquisition[queries] = -np.inf
visualization.ternary_scatter(domain, acquisition, label="acquisition")
plt.savefig(
os.path.join(fig_dir, f"acquisition_{target}_{len(queries):02d}.png"),
bbox_inches="tight",
)
plt.clf()
# update the dataset
queries.append(np.argmax(acquisition))
query = domain[queries[-1]][None, :]
s = np.vstack((s, query))
v = np.hstack((v, em(query, target=target)))
# draw a picture
visualization.ternary_scatter(s, v)
plt.savefig(
os.path.join(fig_dir, f"measured_{target}_{len(queries)}.png"),
bbox_inches="tight",
)
plt.clf()
plt.plot(3 + np.arange(task["budget"]), mae)
plt.savefig(os.path.join(fig_dir, f"mae_{target}.png"), bbox_inches="tight")
plt.clf()
plt.plot(3 + np.arange(task["budget"]), r2)
plt.savefig(os.path.join(fig_dir, f"R2_{target}.png"), bbox_inches="tight")
plt.clf()
plt.plot(3 + np.arange(task["budget"]), ev)
plt.savefig(
os.path.join(fig_dir, f"explained_variance_{target}.png"), bbox_inches="tight"
)
plt.clf()
return
if __name__ == "__main__":
k20_single_objective()
| 6,297
| 28.568075
| 90
|
py
|
autoSDC
|
autoSDC-master/asdc/webcam.py
|
import time
import typing
import argparse
import cv2, platform
# import numpy as np
# https://stackoverflow.com/questions/26691189/how-to-capture-video-stream-with-opencv-python
def imagecap(camera_device_index: int = 2):
cap = cv2.VideoCapture(camera_device_index)
time.sleep(1)
if not cap:
print("!!! Failed VideoCapture: invalid parameter!")
while True:
# Capture frame-by-frame
        ret, frame = cap.read()
        # check for a failed read before touching frame.shape
        if not ret or frame is None:
            print("!!! Couldn't read frame!")
            break
        h, w, c = frame.shape
frame = cv2.drawMarker(
frame, (w // 2, h // 2), (0, 0, 0), markerSize=800, thickness=2
)
# Display the resulting frame
cv2.imshow("frame", frame)
if cv2.waitKey(1) & 0xFF == ord("q"):
break
# release the capture
cap.release()
cv2.destroyAllWindows()
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="SDC camera client")
parser.add_argument("id", type=int, nargs="?", default=2, help="camera index")
args = parser.parse_args()
imagecap(args.id)
| 1,148
| 25.113636
| 93
|
py
|
autoSDC
|
autoSDC-master/asdc/epics.py
|
import os
import re
import sys
import time
import shutil
import typing
import pathlib
import subprocess
# the epics channel used to trigger x-ray measurements
PV = "XF:06BM-ES:1{Sclr:1}.NM29"
XRF_DIR = "/home/xf06bm/Data/Visitors/Howard\ Joress/2020-01-22"
PILATUS_DIR = "/nist/xf06bm/experiments/XAS/Pilatus/Howard_Joress"
EPICS_VERSION = "CA-3.15.6"
# keep Epics DLLs and executables under ${REPOSITORY_ROOT}/lib
SDC_LIB = pathlib.Path(__file__).resolve().parent.parent / "lib"
EPICS_LIB = SDC_LIB / EPICS_VERSION
EPICS_EXTRA_BIN = pathlib.Path("home/phoebus/ANJ/win32/bin")
sys.path.append(str(EPICS_LIB))  # sys.path entries must be str, not Path
GIT_BIN = pathlib.Path("/Program Files/Git/usr/bin").resolve()
SCP = str(GIT_BIN / "scp")
print(EPICS_LIB)
CA_GET = str(EPICS_LIB / "caget.exe")
CA_PUT = str(EPICS_LIB / "caput.exe")
CA_REPEATER = str(EPICS_LIB / EPICS_EXTRA_BIN / "caRepeater.exe")
print(f"subprocess.Popen(['{CA_REPEATER}'])")
if os.path.isfile(CA_REPEATER):
subprocess.Popen([CA_REPEATER])
def caget(key: str) -> str:
response = subprocess.check_output([CA_GET, key]).decode()
    # PV names contain regex metacharacters ('{', '.', ...), so escape them
    m = re.match(re.escape(key), response)
start, end = m.span()
return response[end:].strip()
def caput(key: str, value: str):
response = subprocess.check_output([CA_PUT, key, value])
print(response)
return response
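# Minimal usage sketch for the channel-access helpers above (PV is the
# module-level trigger channel; both helpers shell out to the EPICS tools;
# the measurement name is illustrative):
#
#     caput(PV, "sample-042")   # hand a measurement name to the beamline IOC
#     status = caget(PV)        # poll until the IOC clears the value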
def scp_get_files(pattern: str, remotehost: str = "6bm", dest="./"):
"""grab file(s) with scp.
make sure `remotehost` is included in `~/.ssh/config`, and that the
`localhost` machine can use the `publickey` method to access `remotehost`
"""
print(SCP)
print(pattern)
print(f"{remotehost}:{pattern}")
subprocess.check_call([SCP, f"{remotehost}:{pattern}", dest])
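# Sketch of the ~/.ssh/config stanza the docstring above assumes; the host
# alias matches the `remotehost` default, every other value is illustrative:
#
#     Host 6bm
#         HostName beamline-host.example.gov
#         User xf06bm
#         IdentityFile ~/.ssh/id_ed25519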
def dispatch_xrays(name, data_dir):
""" signal to 06-BM automation tooling to perform XRF and XRD measurements """
os.makedirs(data_dir, exist_ok=True)
caput(PV, name)
time.sleep(5)
while True:
time.sleep(1)
response = caget(PV)
if response == "":
break
print("fetching the data")
scp_get_files(f"{XRF_DIR}/{name}*", remotehost="6bm", dest=data_dir)
scp_get_files(f"{PILATUS_DIR}/{name}*", remotehost="6bm", dest=data_dir)
return
| 2,202
| 25.865854
| 82
|
py
|
autoSDC
|
autoSDC-master/asdc/dashboard.py
|
import hvplot.pandas
import asyncio
import zmq.asyncio
import pandas as pd
from datetime import datetime
from functools import partial
import streamz
import hvplot.streamz
from streamz.dataframe import DataFrame
import panel as pn
import holoviews as hv
from bokeh.plotting import curdoc
from holoviews.streams import Pipe, Buffer
hv.extension("bokeh")
# set up ZMQ subscriber socket
DASHBOARD_PORT = 2345
DASHBOARD_ADDRESS = "127.0.0.1"
context = zmq.asyncio.Context.instance()
socket = context.socket(zmq.SUB)
socket.connect(f"tcp://{DASHBOARD_ADDRESS}:{DASHBOARD_PORT}")
socket.setsockopt(zmq.SUBSCRIBE, b"")
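# Sketch of the matching publisher side (same address/port constants; the
# payload is a pandas DataFrame chunk, mirroring recv_pyobj in loop() below):
#
#     ctx = zmq.Context.instance()
#     pub = ctx.socket(zmq.PUB)
#     pub.bind(f"tcp://{DASHBOARD_ADDRESS}:{DASHBOARD_PORT}")
#     pub.send_pyobj(pd.DataFrame(
#         {"current": [0.0], "potential": [0.0], "elapsed_time": [0.0]}
#     ))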
# use a streamz.DataFrame to stream data from zmq socket
source = streamz.Stream()
# example = pd.DataFrame({'pH': [], 'temperature': []})
example = pd.DataFrame({"current": [], "potential": [], "elapsed_time": []})
df = DataFrame(source, example=example)
# df = source.to_dataframe(example=example).window(1000).full()
# pipe = Pipe(data=example)
# source.sliding_window(2).map(pd.concat).sink(pipe.send) # Connect streamz to the Pipe
async def loop():
""" stream pandas dataframe chunks from zmq socket... """
while True:
new_data = await socket.recv_pyobj()
source.emit(new_data)
# run the zmq client loop in the background
asyncio.create_task(loop())
# set up pyviz.panel layout
options = dict(width=800, height=300, show_grid=True, xlabel="elapsed time (s)")
pn.Column(
# pn.panel(hv.DynamicMap(hv.Curve, streams=[Buffer(df.pH)]).opts(title='pH', **options)),
# pn.panel(hv.DynamicMap(hv.Curve, streams=[Buffer(df.temperature)]).opts(title='temperature', **options))
pn.panel(
hv.DynamicMap(
partial(hv.Curve, kdims=["elapsed_time"], vdims=["potential"]),
streams=[Buffer(df, index=False, length=4000)],
).opts(title="pH", **options)
),
pn.panel(
hv.DynamicMap(
partial(hv.Curve, kdims=["potential"], vdims=["current"]),
streams=[Buffer(df, index=False, length=4000)],
).opts(title="temperature", **options)
)
# pn.panel(df.hvplot(x='potential', y='current', backlog=1000).opts(title='echem', **options)),
# pn.panel(hv.DynamicMap(hv.Curve, streams=[pipe]).opts(title='temperature', **options))
).servable()
| 2,273
| 31.956522
| 110
|
py
|
autoSDC
|
autoSDC-master/asdc/gp_deposition.py
|
""" GP deposition -- deposit duplicate samples, performing a corrosion test on the second """
import os
import sys
import json
import time
import click
import asyncio
import dataset
import functools
import numpy as np
import pandas as pd
from ruamel import yaml
from aioconsole import ainput
from typing import Any, List, Dict, Optional, Tuple
import matplotlib.pyplot as plt
import gpflow
import gpflowopt
from gpflowopt import acquisition
from scipy import stats
from scipy import spatial
from scipy import integrate
from datetime import datetime
sys.path.append(".")
import cycvolt
from asdc import _slack
from asdc import slackbot
from asdc import analyze
from asdc import emulation
from asdc import visualization
from asdc import characterization
import enum
Action = enum.Enum("Action", ["QUERY", "REPEAT", "PHOTONS", "CORRODE"])
BOT_TOKEN = open("slack_bot_token.txt", "r").read().strip()
SDC_TOKEN = open("slacktoken.txt", "r").read().strip()
def deposition_instructions(query, experiment_id=0):
""" TODO: do something about deposition duration.... """
# query = pd.Series({'Ni_fraction': guess[0], 'metal_fraction': guess[1], 'potential': guess[2]})
total_flow = 0.15
relative_rates = {
"KCl": (1 - query["metal_fraction"]),
"NiCl2": query["metal_fraction"] * query["Ni_fraction"],
"ZnCl2": query["metal_fraction"] * (1 - query["Ni_fraction"]),
}
rates = {key: value * total_flow for key, value in relative_rates.items()}
instructions = [
{"intent": "deposition", "experiment_id": experiment_id},
{"op": "set_flow", "rates": rates, "hold_time": 180},
{
"op": "potentiostatic",
"potential": query["potential"],
"duration": 600,
"current_range": "2MA",
},
{"op": "post_flush", "rates": {"H2O": 1.0}, "duration": 120},
]
return instructions
def characterize_instructions(experiment_id=0):
return [
{"intent": "characterize", "experiment_id": experiment_id},
{"op": "photons"},
]
def corrosion_instructions(experiment_id=0):
instructions = [
{"intent": "corrosion", "experiment_id": experiment_id},
{"op": "set_flow", "rates": {"NaCl": 0.1}, "hold_time": 120},
{
"op": "lpr",
"initial_potential": -0.03,
"final_potential": 0.03,
"step_size": 0.0005,
"step_time": 1.0,
"current_range": "2MA",
},
# {
# "op": "lsv",
# "initial_potential": -1.0,
# "final_potential": 0.5,
# "scan_rate": 0.075,
# "current_range": "2MA"
# },
{"op": "post_flush", "rates": {"H2O": 0.5}, "duration": 90},
]
return instructions
def exp_id(db):
"""we're running two depositions followed by a corrosion experiment
return 0 if it's time for the first deposition
1 if it's time for the second deposition
2 if it's time for corrosion
"""
deps = db["experiment"].count(intent="deposition")
cors = db["experiment"].count(intent="corrosion")
if cors == 0:
phase = deps
else:
phase = deps % cors
if phase in (0, 1):
intent = "deposition"
else:
intent = "corrosion"
if phase == 0:
fit_gp = True
else:
fit_gp = False
return intent, fit_gp
def select_action(db, run_replicates=True, threshold=0.9):
"""run two depositions, followed by a corrosion experiment if the deposits are acceptable."""
prev_id = db["experiment"].count()
prev = db["experiment"].find_one(id=prev_id)
if prev["intent"] == "corrosion":
return Action.QUERY
elif prev["intent"] == "deposition":
n_repeats = db["experiment"].count(experiment_id=prev["experiment_id"])
if n_repeats == 1:
# logic to skip replicate based on quality goes here...
return Action.REPEAT
elif n_repeats == 2:
# if coverage is good enough, run a corrosion measurement.
session = pd.DataFrame(
db["experiment"].find(experiment_id=prev["experiment_id"])
)
min_coverage = session["coverage"].min()
if min_coverage >= threshold:
target = pd.DataFrame(
db["experiment"].find(experiment_id=prev["experiment_id"])
)
target = target[~(target["has_bubble"] == True)]
if target.shape[0] == 0:
print("no replicates without bubbles...")
return Action.QUERY
else:
target = target.iloc[0]
print(f"good coverage ({min_coverage})")
print("target", target["id"])
pos = {"x": target["x_combi"], "y": target["y_combi"]}
return Action.CORRODE
else:
print(f"poor coverage ({min_coverage})")
return Action.QUERY
def select_action_single(db, run_replicates=True, threshold=0.9):
"""run single depositions, followed by a corrosion experiment if the deposits are acceptable."""
prev_id = db["experiment"].count()
prev = db["experiment"].find_one(id=prev_id)
if prev["intent"] == "corrosion":
return Action.QUERY
elif prev["intent"] == "deposition":
expt = db["experiment"].find_one(experiment_id=prev["experiment_id"])
if expt["image_name"] is None:
return Action.PHOTONS
return Action.CORRODE
def load_cv(row, data_dir="data", segment=2, half=True, log=True):
""" load CV data and process it... """
cv = pd.read_csv(os.path.join(data_dir, row["datafile"]), index_col=0)
sel = cv["segment"] == segment
I = cv["current"][sel].values
V = cv["potential"][sel].values
t = cv["elapsed_time"][sel].values
if half:
# grab the length of the polarization curve
n = I.size // 2
I = I[:n]
V = V[:n]
if log:
I = cycvolt.analyze.log_abs_current(I)
return V, I, t - t[0]
def deposition_flow_rate(ins):
i = json.loads(ins)
try:
return i[0]["rates"]["CuSO4"]
except KeyError:
return None
def deposition_potential(df):
p = []
for idx, row in df.iterrows():
if row["intent"] == "deposition":
instructions = json.loads(row["instructions"])
for instruction in json.loads(row["instructions"]):
if instruction.get("op") == "potentiostatic":
p.append(instruction.get("potential"))
elif row["intent"] == "corrosion":
p.append(None)
return p
def load_experiment_files(csv_files, dir="."):
    dir, _ = os.path.split(dir)
    # resolve each csv file against the directory and keep the ones that exist
    paths = [os.path.join(dir, csv_file) for csv_file in csv_files]
    paths = [p for p in paths if os.path.isfile(p)]
    if paths:
        experiments = pd.concat(
            (pd.read_csv(p, index_col=0) for p in paths), ignore_index=True
        )
    else:
        experiments = []
    return experiments
def load_experiment_json(experiment_files, dir="."):
    """ an experiment file contains a json list of experiment definitions """
    dir, _ = os.path.split(dir)
    experiments = []
    for experiment_file in experiment_files:
        p = os.path.join(dir, experiment_file)
        if os.path.isfile(p):
            with open(p, "r") as f:
                # each file holds a list of experiments; extend the running
                # list instead of nesting it, and skip missing files
                experiments.extend(json.load(f))
    return experiments
def confidence_bound(model, candidates, sign=1, cb_beta=0.25):
# set per-model confidence bound beta
# default to lower confidence bound
t = model.X.shape[0]
cb_weight = cb_beta * np.log(2 * t + 1)
mean, var = model.predict_y(candidates)
criterion = (sign * mean) - cb_weight * np.sqrt(var)
return criterion
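# In equation form, the criterion above is a sign-adjusted lower confidence
# bound,
#     a(x) = sign * mu(x) - beta_t * sigma(x),   beta_t = cb_beta * log(2t + 1),
# where t is the number of observations so far; candidates are ranked by
# minimizing a(x).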
def composition_loss_confidence_bound(model, candidates, target, sign=1, cb_beta=0.25):
# set per-model confidence bound beta
# default to lower confidence bound
t = model.X.shape[0]
cb_weight = cb_beta * np.log(2 * t + 1)
mean, var = model.predict_y(candidates)
composition_loss = np.abs(mean - target)
criterion = composition_loss - cb_weight * np.sqrt(var)
return criterion
def classification_criterion(model, candidates):
""" compute the classification criterion from 10.1007/s11263-009-0268-3 """
loc, scale = model.predict_f(candidates)
criterion = np.abs(loc) / np.sqrt(scale + 0.001)
return criterion
def plot_map(vals, X, guess, extent, figpath):
plt.figure(figsize=(4, 4))
plt.imshow(vals, cmap="Blues", extent=extent, origin="lower")
plt.colorbar()
plt.scatter(X[:, 0], X[:, 1], color="k")
plt.scatter(guess[0], guess[1], color="r")
if "coverage" in figpath:
plt.contour(vals, levels=[0.5], extent=extent, colors="k", linestyles="--")
plt.xlim(extent[0], extent[1])
plt.ylim(extent[2], extent[3])
plt.xlabel("flow rate")
plt.ylabel("potential")
plt.tight_layout()
plt.savefig(figpath, bbox_inches="tight")
plt.clf()
def filter_experiments(instructions, num_previous):
""" filter out instructions files -- count only operations that add rows to the db """
expt_count = 0
if num_previous == 0:
return instructions
for idx, expt in enumerate(instructions):
intent = expt[0].get("intent")
if intent in ("deposition", "corrosion"):
if expt_count == num_previous:
break
expt_count += 1
print(expt_count, intent)
return instructions[idx:]
class Controller(slackbot.SlackBot):
""" autonomous scanning droplet cell client """
command = slackbot.CommandRegistry()
def __init__(self, config=None, verbose=False, logfile=None, token=BOT_TOKEN):
super().__init__(name="ctl", token=token)
self.command.update(super().command)
self.msg_id = 0
# self.update_event = asyncio.Event(loop=self.loop)
self.verbose = verbose
self.logfile = logfile
self.confirm = config.get("confirm", True)
self.notify = config.get("notify_slack", True)
self.data_dir = config.get("data_dir", os.getcwd())
self.figure_dir = config.get("figure_dir", os.getcwd())
self.domain_file = config.get("domain_file")
self.coverage_threshold = config.get("coverage_threshold", 0.9)
self.repeat_depositions = config.get("repeat_depositions", False)
self.db_file = os.path.join(self.data_dir, config.get("db_file", "test.db"))
self.db = dataset.connect(f"sqlite:///{self.db_file}")
self.experiment_table = self.db["experiment"]
self.targets = pd.read_csv(config["target_file"], index_col=0)
instructions = load_experiment_json(
config["experiment_file"], dir=self.data_dir
)
# remove experiments if there are records in the database
num_previous = self.db["experiment"].count()
self.experiments = filter_experiments(instructions, num_previous)
# gpflowopt minimizes objectives...
# UCB switches to maximizing objectives...
# classification criterion: minimization
# confidence bound using LCB variant
# swap signs for things we want to maximize (just polarization_resistance...)
self.objectives = ("Ni_loss", "Ni_variance", "polarization_resistance")
self.objective_alphas = [3, 2, 1]
# self.objective_alphas = [1, 1, 1]
self.sgn = np.array([1, 1, -1])
# set up the optimization domain
with open(os.path.join(self.data_dir, os.pardir, self.domain_file), "r") as f:
domain_data = json.load(f)
dmn = domain_data["domain"]["x1"]
# self.levels = [
# np.array([0.030, 0.050, 0.10, 0.30]),
# np.linspace(dmn['min'], dmn['max'], 50)
# ]
self.levels = [
np.linspace(0.0, 1.0, 100), # Ni fraction in solution
np.linspace(0.1, 1.0, 100), # metal fraction in solution
np.linspace(-1.5, -1.3, 50), # deposition potential
]
        self.ndim = [len(level) for level in self.levels][::-1]
self.extent = [
np.min(self.levels[0]),
np.max(self.levels[0]),
np.min(self.levels[1]),
np.max(self.levels[1]),
]
xx, yy, zz = np.meshgrid(self.levels[0], self.levels[1], self.levels[2])
self.candidates = np.c_[xx.flatten(), yy.flatten(), zz.flatten()]
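    # NOTE: np.meshgrid defaults to 'xy' indexing, which swaps the first two
    # axes relative to self.levels; after flattening, the candidate rows still
    # have columns (Ni fraction, metal fraction, potential), which is what the
    # acquisition code relies on.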
async def dm_sdc(self, web_client, text, channel="#asdc"):
# channel='DHY5REQ0H'):
web_client.chat_postMessage(
channel=channel,
text=text,
token=SDC_TOKEN,
as_user=False,
username="ctl",
icon_emoji=":robot_face:",
)
def load_experiment_indices(self):
# indices start at 0...
# sqlite integer primary keys start at 1...
df = pd.DataFrame(self.experiment_table.all())
target_idx = self.experiment_table.count()
experiment_idx = self.experiment_table.count(flag=False)
return df, target_idx, experiment_idx
def analyze_corrosion_features(self, segment=0):
rtab = self.db.get_table("result", primary_id=False)
for row in self.db["experiment"].all(intent="corrosion"):
# extract features for any data that's missing
if rtab.find_one(id=row["id"]):
continue
d = {"id": row["id"]}
# V, log_I, t = load_cv(row, data_dir=self.data_dir, segment=segment)
# cv_features, fit_data = cycvolt.analyze.model_polarization_curve(
# V, log_I, smooth=False, lm_method=None, shoulder_percentile=0.99
# )
# d.update(cv_features)
# d['passive_region'] = d['V_tp'] - d['V_pass']
V, I, t = load_cv(
row, data_dir=self.data_dir, segment=segment, log=False, half=False
)
d["integral_current"] = np.abs(integrate.trapz(I, t))
d["ts"] = datetime.now()
rtab.upsert(d, ["id"])
return
def random_scalarization_cb(self, models, candidates, cb_beta=0.25):
"""random scalarization acquisition policy function
depending on model likelihood, use different policy functions for different outputs
each criterion should be framed as a minimization problem...
"""
objective = np.zeros(candidates.shape[0])
# sample one set of weights from a dirichlet distribution
# that specifies our general preference on the objective weightings
weights = stats.dirichlet.rvs(self.objective_alphas).squeeze()
# weights = [0.0, 1.0]
mask = None
criteria = []
for idx, model in enumerate(models):
sign = self.sgn[idx]
if idx == 0:
# first model is the composition model -- target 15% Ni!
criterion = composition_loss_confidence_bound(
model, candidates, target=0.15, sign=sign, cb_beta=cb_beta
)
elif model.likelihood.name in ("Gaussian", "Beta"):
criterion = confidence_bound(
model, candidates, sign=sign, cb_beta=cb_beta
)
elif model.likelihood.name == "Bernoulli":
criterion = classification_criterion(model, candidates)
y_loc, _ = model.predict_y(candidates)
mask = (y_loc > 0.5).squeeze()
criteria.append(criterion.squeeze())
objective = np.zeros_like(criteria[0])
for weight, criterion in zip(weights, criteria):
if mask is not None:
criterion[~mask] = np.inf
drange = np.ptp(criterion[np.isfinite(criterion)])
criterion = (criterion - criterion.min()) / drange
objective += weight * criterion
return objective
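    # Sketch of the scalarization step above: each per-objective criterion is
    # rescaled to [0, 1] over its finite entries and combined with the
    # Dirichlet-sampled weights, so a single argmin over `objective` trades
    # off all objectives. Candidates masked out by the Bernoulli model are set
    # to +inf and can never be selected. Caveat: a constant criterion makes
    # `drange` zero, so non-degenerate posteriors are assumed here.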
def gp_acquisition(self, resolution=100, t=0):
df = characterization.load_characterization_results(self.db_file)
Ni_fraction = df["NiCl2"] / (df["NiCl2"] + df["ZnCl2"])
metal_fraction = (df["NiCl2"] + df["ZnCl2"]) / (
df["NiCl2"] + df["ZnCl2"] + df["KCl"]
)
X = np.vstack((Ni_fraction, metal_fraction, df["potential"])).T
Ni = df["Ni_ratio"].values[:, None]
Ni_variance = df["Ni_variance"].values[:, None]
pr = df["polarization_resistance"].values[:, None]
# reset tf graph -- long-running program!
gpflow.reset_default_graph_and_session()
# set up models
dx = 0.25 * np.ptp(self.candidates)
models = [
emulation.model_quality(X, Ni, dx=dx, likelihood="beta", optimize=True),
emulation.model_property(X, Ni_variance, dx=dx, optimize=True),
emulation.model_property(X, pr, dx=dx, optimize=True),
]
# evaluate the acquisition function on a grid
# acq = criterion.evaluate(candidates)
acq = self.random_scalarization_cb(models, self.candidates)
# remove previously measured candidates
mindist = spatial.distance.cdist(X, self.candidates).min(axis=0)
acq[mindist < 1e-5] = np.inf
query_idx = np.argmin(acq)
guess = self.candidates[query_idx]
query = pd.Series(
{"Ni_fraction": guess[0], "metal_fraction": guess[1], "potential": guess[2]}
)
print(query)
return query
@command
async def go(self, args: str, msgdata: Dict, web_client: Any):
"""keep track of target positions and experiment list
target and experiment indices start at 0
sqlite integer primary keys start at 1...
"""
previous_op = self.db["experiment"].find_one(id=self.db["experiment"].count())
print(previous_op)
if len(self.experiments) > 0:
instructions = self.experiments.pop(0)
intent = instructions[0].get("intent")
fit_gp = False
if intent == "deposition":
# correctly handle "double-tap" protocol
# assume a deposition op following another deposition is a repeat
if previous_op is None:
action = Action.QUERY
elif (self.repeat_depositions == True) and (
previous_op["intent"] == "deposition"
):
action = Action.REPEAT
else:
action = Action.QUERY
elif intent == "characterize":
action = Action.PHOTONS
elif intent == "corrosion":
action = Action.CORRODE
else:
print("selecting an action")
instructions = None
action = select_action_single(self.db, threshold=self.coverage_threshold)
print(action)
# intent, fit_gp = exp_id(self.db)
if action == Action.QUERY:
if previous_op is not None:
experiment_id = int(previous_op.get("experiment_id")) + 1
else:
experiment_id = 1
# march through target positions sequentially
target_idx = self.db["experiment"].count(intent="deposition")
target = self.targets.iloc[target_idx]
pos = {"x": target.x, "y": target.y}
elif action == Action.REPEAT:
experiment_id = int(previous_op.get("experiment_id"))
# march through target positions sequentially
target_idx = self.db["experiment"].count(intent="deposition")
target = self.targets.iloc[target_idx]
pos = {"x": target.x, "y": target.y}
elif action == Action.PHOTONS:
pos = None
experiment_id = int(previous_op.get("experiment_id"))
elif action == Action.CORRODE:
if previous_op is None:
experiment_id = 1
elif previous_op["intent"] == "deposition":
experiment_id = int(previous_op.get("experiment_id"))
else:
# if we are only doing corrosions...
experiment_id = int(previous_op.get("experiment_id")) + 1
# if action is Action.CORRODE, select a target without a bubble to corrode
if action == Action.CORRODE:
if previous_op["intent"] == "corrosion":
count = self.db["experiment"].count(intent="corrosion")
target = self.targets.iloc[count]
else:
targets = pd.DataFrame(
self.db["experiment"].find(experiment_id=experiment_id)
)
try:
target = targets[~(targets["has_bubble"] == True)].iloc[0]
except KeyError:
target = targets.iloc[0]
try:
pos = {"x": target["x_combi"], "y": target["y_combi"]}
except KeyError:
pos = {"x": target["x"], "y": target["y"]}
if instructions is None:
print("get instructions")
# get the next instruction set
if action == Action.QUERY:
query = self.gp_acquisition(t=experiment_id)
instructions = deposition_instructions(
query, experiment_id=experiment_id
)
elif action == Action.REPEAT:
instructions = json.loads(previous_op["instructions"])
instructions = [
{"intent": "deposition", "experiment_id": experiment_id}
] + instructions
elif action == Action.PHOTONS:
instructions = characterize_instructions(experiment_id=experiment_id)
elif action == Action.CORRODE:
instructions = corrosion_instructions(experiment_id=experiment_id)
# update the intent block to include the target position
if pos is not None:
instructions[0].update(pos)
instructions[0].update({"experiment_id": experiment_id})
print(instructions)
# send the experiment command
if action in {Action.QUERY, Action.REPEAT, Action.CORRODE}:
await self.dm_sdc(
web_client, f"<@UHT11TM6F> run_experiment {json.dumps(instructions)}"
)
elif action == Action.PHOTONS:
await self.dm_sdc(
web_client,
f"<@UHT11TM6F> run_characterization {json.dumps(instructions)}",
)
return
# @command
# async def update(self, args: str, msgdata: Dict, web_client: Any):
# update_type, rest = args.split(' ', 1)
# print(update_type)
# self.update_event.set()
# return
@command
async def dm(self, args: str, msgdata: Dict, web_client: Any):
""" echo random string to DM channel """
dm_channel = "DHY5REQ0H"
# dm_channel = 'DHNHM74TU'
web_client.chat_postMessage(
channel=dm_channel,
text=args,
as_user=False,
username="ctl",
token=CTL_TOKEN,
)
@command
async def abort_running_handlers(self, args: str, msgdata: Dict, web_client: Any):
"""cancel all currently running task handlers...
WARNING: does not do any checks on the potentiostat -- don't call this while an experiment is running...
we could register the coroutine address when we start it up, and broadcast that so it's cancellable...?
"""
channel = "<@UC537488J>"
text = f"sdc: {msgdata['username']} said abort_running_handlers"
print(text)
# dm UC537488J (brian)
web_client.chat_postMessage(
channel=channel, text=text, username="ctl", token=CTL_TOKEN
)
current_task = asyncio.current_task()
for task in asyncio.all_tasks():
if task._coro == current_task._coro:
continue
if task._coro.__name__ == "handle":
print(f"killing task {task._coro}")
task.cancel()
@click.command()
@click.argument("config-file", type=click.Path())
@click.option("--verbose/--no-verbose", default=False)
def sdc_controller(config_file, verbose):
with open(config_file, "r") as f:
config = yaml.safe_load(f)
experiment_root, _ = os.path.split(config_file)
# specify target file relative to config file
target_file = config.get("target_file")
config["target_file"] = os.path.join(experiment_root, target_file)
data_dir = config.get("data_dir")
if data_dir is None:
config["data_dir"] = os.path.join(experiment_root, "data")
figure_dir = config.get("figure_dir")
if figure_dir is None:
config["figure_dir"] = os.path.join(experiment_root, "figures")
os.makedirs(config["data_dir"], exist_ok=True)
os.makedirs(config["figure_dir"], exist_ok=True)
if config["step_height"] is not None:
config["step_height"] = abs(config["step_height"])
# logfile = config.get('command_logfile', 'commands.log')
logfile = "controller.log"
logfile = os.path.join(config["data_dir"], logfile)
ctl_bot = Controller(verbose=verbose, config=config, logfile=logfile)
asyncio.run(ctl_bot.main())
if __name__ == "__main__":
sdc_controller()
| 25,649
| 32.529412
| 112
|
py
|
autoSDC
|
autoSDC-master/asdc/visualization.py
|
import os
import numpy as np
import ternary
from ternary.helpers import simplex_iterator
import matplotlib
import matplotlib.pyplot as plt
from matplotlib.colors import Normalize
from mpl_toolkits.axes_grid1 import make_axes_locatable
import sys
import warnings
warnings.simplefilter("ignore", UserWarning)
sys.coinit_flags = 2
import asdc.analyze
def colorbar(mappable):
ax = mappable.axes
fig = ax.figure
divider = make_axes_locatable(ax)
cax = divider.append_axes("right", size="5%", pad=0.05)
return fig.colorbar(mappable, cax=cax)
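# Example: attach a colorbar whose height matches the parent axes,
#   im = plt.imshow(np.random.rand(10, 10))
#   colorbar(im)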
def plot_iv(I, V, figpath="iv.png"):
plt.plot(np.log10(np.abs(I)), V)
plt.xlabel("log current")
plt.ylabel("voltage")
plt.savefig(figpath, bbox_inches="tight")
plt.clf()
plt.close()
return
def plot_lpr(I, V, figpath="lpr.png"):
plt.plot(I, V)
plt.axvline(0, color="k")
plt.xlabel("current")
plt.ylabel("potential")
plt.savefig(figpath, bbox_inches="tight")
plt.clf()
plt.close()
return
def plot_vi(I, V, figpath="iv.png"):
plt.plot(V, np.log10(np.abs(I)))
plt.ylabel("log current")
plt.xlabel("voltage")
plt.savefig(figpath, bbox_inches="tight")
plt.clf()
plt.close()
return
def plot_v(t, V, figpath="v.png"):
    n = min(len(t), len(V))
plt.plot(t[:n], V[:n])
plt.xlabel("time (s)")
plt.ylabel("voltage")
plt.savefig(figpath, bbox_inches="tight")
plt.clf()
plt.close()
return
def plot_i(t, I, figpath="i.png"):
n = min(len(t), len(I))
plt.plot(t[:n], I[:n])
plt.xlabel("time (s)")
plt.ylabel("current (A)")
plt.savefig(figpath, bbox_inches="tight")
plt.clf()
plt.close()
return
def plot_cv(V, I, segment=None, segments=(2, 3), figpath="cv.png"):
for s in segments:
plt.plot(V[segment == s], I[segment == s], label=s)
plt.xlabel("potential (V)")
plt.ylabel("current (A)")
plt.legend()
plt.savefig(figpath, bbox_inches="tight")
plt.clf()
plt.close()
return
def make_circle(r):
t = np.arange(0, np.pi * 2.0, 0.01)
t = t.reshape((len(t), 1))
x = r * np.cos(t)
y = r * np.sin(t)
return np.hstack((x, y))
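# make_circle returns an (N, 2) array of points tracing a circle of radius r
# in 0.01 rad steps, e.g. make_circle(76.2 / 2) outlines a 3-inch wafer.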
def combi_plot(show_flat=False):
"""scatter plot visualizations on a 3-inch combi wafer.
coordinate system is specified in mm
"""
R = 76.2 / 2
c = make_circle(R) # 3 inch wafer --> 76.2 mm diameter
if show_flat:
sel = c[:, 1] > -35
c = c[sel]
plt.plot(c[:, 0], c[:, 1], color="k")
def scatter_wafer(X, y, label=None, figpath="wafer_plot.png"):
fig, axes = plt.subplots(figsize=(4.5, 4))
combi_plot(show_flat=True)
p = plt.scatter(X[:, 0], X[:, 1], c=y, cmap="Blues", edgecolors="k")
plt.xlabel("x (mm)")
plt.ylabel("y (mm)")
cbar = colorbar(p)
cbar.set_label(label)
plt.savefig(figpath, bbox_inches="tight")
plt.clf()
plt.close()
return
def plot_open_circuit(current, potential, segment, figpath="open_circuit.png"):
plt.figure(figsize=(4, 5))
model = asdc.analyze.extract_open_circuit_potential(
current, potential, segment, return_model=True
)
plt.plot(-model.data, model.userkws["x"], color="b")
plt.plot(-model.best_fit, model.userkws["x"], c="r", linestyle="--", alpha=0.5)
plt.axhline(model.best_values["peak_loc"], c="k", linestyle="--", alpha=0.5)
plt.xlabel("log current (log (A)")
plt.ylabel("potential (V)")
plt.savefig(figpath, bbox_inches="tight")
plt.clf()
plt.close()
return
def plot_ocp_model(x, y, ocp, gridpoints, model, query_position, figure_path=None):
N, _ = gridpoints.shape
w = int(np.sqrt(N))
mu_y, var_y = model.predict_y(gridpoints)
plt.figure(figsize=(5, 4))
combi_plot()
plt.scatter(x, y, c=ocp, edgecolors="k", cmap="Blues")
plt.axis("equal")
cmap = plt.cm.Blues
colors = Normalize(vmin=mu_y.min(), vmax=mu_y.max(), clip=True)(mu_y.flatten())
# colors = mu_y.flatten()
c = cmap(colors)
a = Normalize(var_y.min(), var_y.max(), clip=True)(var_y.flatten())
# c[...,-1] = 1-a
c[np.sqrt(np.square(gridpoints).sum(axis=1)) > 76.2 / 2, -1] = 0
c = c.reshape((w, w, 4))
extent = (
np.min(gridpoints),
np.max(gridpoints),
np.min(gridpoints),
np.max(gridpoints),
)
im = plt.imshow(c, extent=extent, origin="lower", cmap=cmap)
cbar = plt.colorbar(im, extend="both")
plt.clim(mu_y.min(), mu_y.max())
plt.scatter(query_position[0], query_position[1], c="none", edgecolors="r")
plt.axis("equal")
if figure_path is not None:
plt.savefig(figure_path, bbox_inches="tight")
plt.clf()
def ternary_scatter(
composition,
value,
components=["Ni", "Al", "Ti"],
cmap="Blues",
label=None,
cticks=None,
s=50,
):
scale = 1
grid = plt.GridSpec(10, 1, wspace=1, hspace=3)
ax = plt.subplot(grid[:9, :])
filtered = value[np.isfinite(value)]
vmin, vmax = filtered.min(), filtered.max()
figure, tax = ternary.figure(scale=scale, ax=ax)
figure.set_size_inches(6, 6)
s = tax.scatter(
composition,
marker="o",
c=value,
cmap=cmap,
edgecolors="k",
vmin=vmin,
vmax=vmax,
s=s,
)
# s = tax.scatter(composition, marker='o', c=value, cmap=cmap, edgecolors='k', s=50)
tax.boundary(linewidth=2.0)
tax.gridlines(multiple=0.1, color="k")
tax.ticks(axis="lbr", linewidth=1, multiple=0.2, tick_formats="%0.01f", offset=0.02)
tax.clear_matplotlib_ticks()
tax.get_axes().axis("off")
tax.right_corner_label(components[0], fontsize=18, offset=-0.1)
tax.top_corner_label(components[1], fontsize=18)
tax.left_corner_label(components[2], fontsize=18, offset=0.2)
ax.axis("equal")
ax = plt.subplot(grid[9:, :])
norm = matplotlib.colors.Normalize(vmin=vmin, vmax=vmax)
# norm = matplotlib.colors.Normalize(vmin=-0.9, vmax=value.max())
cb1 = matplotlib.colorbar.ColorbarBase(
ax, cmap=cmap, norm=norm, orientation="horizontal", label=label
)
if cticks is not None:
cb1.set_ticks(cticks)
# cb1.set_tick_labels([-.85, -.7, -0.5])
plt.subplots_adjust()
plt.tight_layout()
return tax
def ternary_scatter_sub(
composition,
value,
components=["Ni", "Al", "Ti"],
cmap="Blues",
label=None,
cticks=None,
s=50,
ax=None,
):
scale = 1
if ax is None:
fig, ax = plt.subplots()
# grid = plt.GridSpec(10, 1, wspace=1, hspace=3)
# ax = plt.subplot(grid[:9, :])
filtered = value[np.isfinite(value)]
vmin, vmax = filtered.min(), filtered.max()
figure, tax = ternary.figure(scale=scale, ax=ax)
# figure.set_size_inches(6, 6)
s = tax.scatter(
composition,
marker="o",
c=value,
cmap=cmap,
edgecolors="k",
vmin=vmin,
vmax=vmax,
s=s,
)
# s = tax.scatter(composition, marker='o', c=value, cmap=cmap, edgecolors='k', s=50)
tax.boundary(linewidth=2.0)
tax.gridlines(multiple=0.1, color="k")
tax.ticks(axis="lbr", linewidth=1, multiple=0.2, tick_formats="%0.01f", offset=0.02)
tax.clear_matplotlib_ticks()
tax.get_axes().axis("off")
# tax.right_corner_label(components[0], fontsize=18, offset=0.1)
# tax.top_corner_label(components[1], fontsize=18, offset=0.1)
# tax.left_corner_label(components[2], fontsize=18, offset=0.1)
tax.bottom_axis_label(components[0], fontsize=18, offset=0.1)
tax.right_axis_label(components[1], fontsize=18, offset=0.1)
tax.left_axis_label(components[2], fontsize=18, offset=0.1)
ax.axis("equal")
# ax = plt.subplot(grid[9:,:])
# norm = matplotlib.colors.Normalize(vmin=vmin, vmax=vmax)
# # norm = matplotlib.colors.Normalize(vmin=-0.9, vmax=value.max())
# cb1 = matplotlib.colorbar.ColorbarBase(ax, cmap=cmap, norm=norm, orientation='horizontal', label=label)
# if cticks is not None:
# cb1.set_ticks(cticks)
# # cb1.set_tick_labels([-.85, -.7, -0.5])
plt.subplots_adjust()
plt.tight_layout()
return tax
def ternary_heatmap(
model,
components=["Ni", "Al", "Ti"],
cmap="Blues",
label=None,
cticks=None,
scale=10,
plot_var=False,
nticks=5,
sample_posterior=False,
):
keys = [k for k in simplex_iterator(scale)]
# o_mu, o_var = model.predict_f(np.array(keys).astype(float) / scale)
if sample_posterior:
o_mu = model.predict_f_samples(
np.array(keys).astype(float)[:, :2] / scale, 1
).squeeze()
else:
o_mu, o_var = model.predict_f(np.array(keys).astype(float)[:, :2] / scale)
if plot_var:
v = o_var
else:
v = o_mu
    tdata = {key: val for key, val in zip(keys, v.flat)}
grid = plt.GridSpec(10, 1, wspace=1, hspace=3)
ax = plt.subplot(grid[:9, :])
vmin, vmax = v.min(), v.max()
figure, tax = ternary.figure(scale=scale, ax=ax)
tax.set_axis_limits({"b": (0, 1.0), "l": (0, 1.0), "r": (0, 1.0)})
tax.get_ticks_from_axis_limits(multiple=scale / nticks)
figure.set_size_inches(6, 6)
s = tax.heatmap(tdata, scale, cmap=cmap, vmin=vmin, vmax=vmax, colorbar=False)
tax.boundary(linewidth=2.0)
tax.gridlines(multiple=scale / nticks, color="k", alpha=0.5)
tax.clear_matplotlib_ticks()
tax.set_custom_ticks(tick_formats="%0.1f", offset=0.02)
tax.get_axes().axis("off")
tax.right_corner_label(components[0], fontsize=18, offset=-0.1)
tax.top_corner_label(components[1], fontsize=18)
tax.left_corner_label(components[2], fontsize=18, offset=0.2)
ax.axis("equal")
ax = plt.subplot(grid[9:, :])
norm = matplotlib.colors.Normalize(vmin=vmin, vmax=vmax)
# norm = matplotlib.colors.Normalize(vmin=-0.9, vmax=value.max())
cb1 = matplotlib.colorbar.ColorbarBase(
ax, cmap=cmap, norm=norm, orientation="horizontal", label=label
)
if cticks is not None:
cb1.set_ticks(cticks)
# cb1.set_tick_labels([-.85, -.7, -0.5])
plt.subplots_adjust()
plt.tight_layout()
return tax
| 10,146
| 26.350404
| 109
|
py
|
autoSDC
|
autoSDC-master/asdc/slackbot.py
|
import os
import re
import slack
import time
import asyncio
import concurrent
from datetime import datetime
# check if there is a proxy between localhost and the internet
PROXY = os.environ.get("https_proxy")
BOT_TOKEN = open("slacktoken.txt", "r").read().strip()
bot_patterns = {"sdc": "<@UHT11TM6F>", "ctl": "<@UHNHM7198>"}
class CommandRegistry(set):
""" wrap a set with a decorator to register commands """
def register(self, method):
self.add(method.__name__)
return method
def __call__(self, method):
""" overload __call__ instead of having an explicit register method """
return self.register(method)
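# Usage sketch: CommandRegistry doubles as a decorator, so
#   command = CommandRegistry()
#   @command
#   async def go(self, ...): ...
# adds the *name* "go" to the set; dispatch later resolves the method with
# getattr, which is why subclasses merge registries via
# self.command.update(super().command).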
class SlackBot(object):
command = CommandRegistry()
def __init__(self, name="sdc", token=None):
self.name = name
# self._pattern = f'@{self.name}'
self._pattern = bot_patterns.get(name) # '<@UHT11TM6F>'
if token is None:
self.token = BOT_TOKEN
else:
self.token = token
slack.RTMClient.run_on(event="message")(self.handle_message)
async def handle_message(self, **payload):
print(payload)
data = payload["data"]
text = data.get("text", "")
print(text)
bot_reference = re.match(self._pattern, text)
if bot_reference:
print("reference to bot!")
# handle bot commands
_, end = bot_reference.span()
m = text[end:]
try:
command, args = m.strip().split(None, 1)
except ValueError:
command, args = m.strip(), None
print(command)
if command not in self.command:
user = data.get("user")
r = f"sorry <@{user}>, I didn't understand what you meant by `{m.strip()}`..."
print(r)
web_client = payload["web_client"]
web_client.chat_postMessage(
channel=data["channel_id"], text=r, thread_ts=data["ts"]
)
else:
# msgdata = {'username': data['user'], 'channel': data['channel_id']}
# self.log(text)
# dispatch command method by name
# await getattr(self, command)(ws, msgdata, args)
web_client = payload["web_client"]
await getattr(self, command)(args, data, web_client)
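    # Dispatch example: a message like "<@UHT11TM6F> echo hello" matches the
    # bot pattern, splits into command "echo" and args "hello", and is routed
    # to the registered coroutine of the same name via getattr(self, command).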
@command
async def echo(self, text, data, web_client):
r = f"recieved command {text}"
channel = data.get("channel_id")
if channel is None:
channel = data.get("channel")
print(r)
web_client.chat_postMessage(channel=channel, text=r, thread_ts=data["ts"])
async def main(self):
if PROXY is not None:
proxy_address = f"http://{PROXY}"
else:
proxy_address = None
self.loop = asyncio.get_event_loop()
self.client = slack.RTMClient(
token=self.token, proxy=proxy_address, run_async=True, loop=self.loop
)
executor = concurrent.futures.ThreadPoolExecutor(max_workers=1)
await asyncio.ensure_future(self.client.start())
if __name__ == "__main__":
bot = SlackBot()
asyncio.run(bot.main())
| 3,260
| 28.378378
| 94
|
py
|
autoSDC
|
autoSDC-master/asdc/video.py
|
import cv2
import typing
import skvideo.io
import numpy as np
import multiprocessing as mp
from contextlib import contextmanager
FFMPEG_OUTPUT_FLAGS = {
"-vcodec": "libx264",
"-b": "300000000",
"-crf": "18",
"-vf": "format=yuv420p",
}
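# Encoder notes: libx264 at crf 18 is visually near-lossless, and
# format=yuv420p keeps the output playable in players that lack 4:4:4
# chroma support.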
def videocap(e, filename="testproc.mp4", camera_idx=0):
cap = cv2.VideoCapture(camera_idx)
videostream = skvideo.io.FFmpegWriter(filename, outputdict=FFMPEG_OUTPUT_FLAGS)
while True:
ret, frame = cap.read()
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
videostream.writeFrame(gray)
if e.is_set():
break
cap.release()
videostream.close()
@contextmanager
def ffmpeg_capture(filename="testproc.mp4", camera_idx=0):
e = mp.Event()
p = mp.Process(target=videocap, args=(e, filename, camera_idx))
try:
p.start()
yield
finally:
e.set()
p.join()
if __name__ == "__main__":
with ffmpeg_capture(filename="testproc.mp4"):
input("press enter to wrap.")
| 1,034
| 18.166667
| 83
|
py
|
autoSDC
|
autoSDC-master/asdc/localclient.py
|
import os
import sys
import json
import time
import logging
import argparse
import asyncio
import dataset
import functools
import subprocess
# import websockets
import numpy as np
import pandas as pd
from ruamel import yaml
from datetime import datetime
from aioconsole import ainput, aprint
from contextlib import contextmanager, asynccontextmanager
from typing import Any, List, Dict, Optional, Tuple, Union
import traceback
import cv2
import imageio
import matplotlib
matplotlib.use("Agg")
import matplotlib.pyplot as plt
import sympy
from sympy import geometry
from sympy.vector import express
from sympy.vector import CoordSys3D, BodyOrienter, Point
sys.path.append(".")
from asdc import sdc
from asdc import epics
from asdc import _slack
from asdc import slackbot
from asdc import visualization
from asdc.analysis import EchemData
from asdc.sdc.reglo import Channel
potentiostat_id = 17109013
asdc_channel = "CDW5JFZAR"
try:
BOT_TOKEN = open("slacktoken.txt", "r").read().strip()
except FileNotFoundError:
BOT_TOKEN = None
try:
CTL_TOKEN = open("slack_bot_token.txt", "r").read().strip()
except FileNotFoundError:
CTL_TOKEN = None
# reference to web client...
web_client = _slack.sc
# logger = logging.getLogger(__name__)
logger = logging.getLogger()
def save_plot(results: EchemData, figpath: str, post_slack: bool = True, title=None):
try:
results.plot()
except Exception as err:
logger.error(f"data check: {err}")
plt.savefig(figpath, bbox_inches="tight")
plt.clf()
plt.close()
if post_slack:
_slack.post_image(web_client, figpath, title=title)
def relative_flow(rates):
""" convert a dictionary of flow rates to ratios of each component """
total = sum(rates.values())
if total == 0.0:
return rates
return {key: rate / total for key, rate in rates.items()}
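# Example: relative_flow({"NaCl": 1.0, "KOH": 3.0})
# -> {"NaCl": 0.25, "KOH": 0.75}; an all-zero rate dict is returned unchanged
# to avoid dividing by zero.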
def to_vec(x, frame):
""" convert python iterable coordinates to vector in specified reference frame """
return x[0] * frame.i + x[1] * frame.j
def to_coords(x, frame):
""" express coordinates in specified reference frame """
return frame.origin.locate_new("P", to_vec(x, frame))
class SDC:
""" scanning droplet cell """
def __init__(
self,
config: Dict[str, Any] = None,
token: str = BOT_TOKEN,
resume: bool = False,
logfile: Optional[str] = None,
verbose: bool = False,
zmq_pub: bool = False,
):
"""scanning droplet cell client
this is a slack client that controls all of the hardware and executes experiments.
Arguments:
config: configuration dictionary
token: slack bot token
resume: toggle auto-registration of stage and sample coordinates
logfile: file to log slackbot commands to
verbose: toggle additional debugging output
"""
self.verbose = verbose
self.logfile = logfile
self.configvalues = config
with sdc.position.controller(ip="192.168.10.11") as pos:
initial_versastat_position = pos.current_position()
logger.debug(f"initial vs position: {initial_versastat_position}")
self.initial_versastat_position = initial_versastat_position
self.initial_combi_position = pd.Series(config["initial_combi_position"])
self.step_height = config.get("step_height", 0.0)
self.cleanup_pause = config.get("cleanup_pause", 0)
self.cleanup_pulse_duration = config.get("cleanup_pulse_duration", 0)
self.cell = config.get("cell", "INTERNAL")
self.speed = config.get("speed", 1e-3)
self.data_dir = config.get("data_dir", os.getcwd())
self.figure_dir = config.get("figure_dir", os.getcwd())
self.confirm = config.get("confirm", True)
self.confirm_experiment = config.get("confirm_experiment", True)
self.notify = config.get("notify_slack", True)
self.plot_slack = config.get("plot_slack", False)
self.plot_cv = config.get("plot_cv", False)
self.plot_current = config.get("plot_current", False)
self.default_experiment = config.get("default_experiment")
        if isinstance(self.default_experiment, str):
self.default_experiment = json.loads(self.default_experiment)
self.default_flowrate = config.get("default_flowrate")
# define a positive height to perform characterization
h = float(config.get("characterization_height", 0.004))
h = max(0.0, h)
self.characterization_height = h
# define a positive height to perform characterization
h = float(config.get("laser_scan_height", 0.015))
h = max(0.0, h)
self.laser_scan_height = h
self.xrays_height = self.laser_scan_height
self.camera_index = int(config.get("camera_index", 2))
# droplet workflow configuration
# TODO: document me
self.wetting_height = max(0, config.get("wetting_height", 0.0011))
self.droplet_height = max(0, config.get("droplet_height", 0.004))
self.fill_rate = config.get("fill_rate", 1.0)
self.fill_counter_ratio = config.get("fill_counter_ratio", 0.7)
self.fill_time = config.get("fill_time", 19)
self.shrink_counter_ratio = config.get("shrink_counter_ratio", 1.3)
self.shrink_time = config.get("shrink_time", 2)
self.test = config.get("test", False)
self.test_cell = config.get("test_cell", False)
self.solutions = config.get("solutions")
self.v_position = self.initial_versastat_position
self.c_position = self.initial_combi_position
self.initialize_z_position = config.get("initialize_z_position", False)
# which wafer direction is aligned with position controller +x direction?
self.frame_orientation = config.get("frame_orientation", "-y")
self.db_file = os.path.join(self.data_dir, config.get("db_file", "testb.db"))
self.db = dataset.connect(f"sqlite:///{self.db_file}")
self.location_table = self.db["location"]
self.experiment_table = self.db["experiment"]
self.current_threshold = 1e-5
self.resume = resume
# define reference frames
# load camera and laser offsets from configuration file
camera_offset = config.get("camera_offset", [38.3, -0.4])
laser_offset = config.get("laser_offset", [38, -0.3])
xray_offset = config.get("xray_offset", [44.74, -4.4035])
self.cell_frame = CoordSys3D("cell")
self.camera_frame = self.cell_frame.locate_new(
"camera",
camera_offset[0] * self.cell_frame.i + camera_offset[1] * self.cell_frame.j,
)
self.laser_frame = self.cell_frame.locate_new(
"laser",
laser_offset[0] * self.cell_frame.i + laser_offset[1] * self.cell_frame.j,
)
self.xray_frame = self.cell_frame.locate_new(
"xray",
xray_offset[0] * self.cell_frame.i + xray_offset[1] * self.cell_frame.j,
)
if self.resume:
self.stage_frame = self.sync_coordinate_systems(
orientation=self.frame_orientation,
register_initial=True,
resume=self.resume,
)
else:
self.stage_frame = self.sync_coordinate_systems(
orientation=self.frame_orientation, register_initial=False
)
reglo_port = config.get("reglo_port", "COM16")
orion_port = config.get("orion_port", "COM17")
adafruit_port = config.get("adafruit_port", "COM9")
pump_array_port = config.get("pump_array_port", "COM10")
self.backfill_duration = config.get("backfill_duration", 15)
diameter = config.get("syringe_diameter", 29.5)
try:
self.pump_array = sdc.pump.PumpArray(
self.solutions, port=pump_array_port, timeout=1, diameter=diameter
)
        except Exception:
logger.exception("could not connect to pump array")
# raise
self.pump_array = None
try:
self.reglo = sdc.reglo.Reglo(
address=reglo_port, debug=config.get("reglo_debug", False)
)
        except Exception:
logger.exception("could not connect to the Reglo peristaltic pump")
# raise
try:
self.phmeter = sdc.orion.PHMeter(orion_port, zmq_pub=zmq_pub)
        except Exception:
logger.exception("could not connect to the Orion pH meter")
# raise
try:
self.reflectometer = sdc.microcontroller.Reflectometer(port=adafruit_port)
self.light = sdc.microcontroller.Light(port=adafruit_port)
        except Exception:
logger.exception("could not connect to the adafruit board")
self.reflectometer = None
self.light = None
# keep track of OCP trace value to run relative scans
# without having to chain ametek backend calls explicitly
# better to chain experiments and split them at serialization time?
self.ocp_hold_value = None
def get_last_known_position(self, x_versa, y_versa, resume=False):
"""set up initial cell reference relative to a previous database entry if possible
If not, or if `resume` is False, set initial cell reference from config file. It is
the operator's responsibility to ensure this initial position matches the physical configuration
"""
# load last known combi position and update internal state accordingly
refs = pd.DataFrame(self.location_table.all())
if (resume == False) or (refs.size == 0):
init = self.initial_combi_position
logger.info(f"starting from {init}")
ref = pd.Series(
{
"x_versa": x_versa,
"y_versa": y_versa,
"x_combi": init.x,
"y_combi": init.y,
}
)
else:
# arbitrarily grab the first position
# TODO: verify that this record comes from the current session...
ref = refs.iloc[0].to_dict()
ref["x_versa"] *= 1e3
ref["y_versa"] *= 1e3
ref = pd.Series(ref)
logger.info(f"resuming from {ref}")
return ref
def current_versa_xy(self):
""" get current stage coords in mm """
with sdc.position.controller() as pos:
x_versa = pos.x * 1e3
y_versa = pos.y * 1e3
return x_versa, y_versa
def locate_wafer_center(self):
"""align reference frames to wafer center
identify a circumcircle corresponding three points on the wafer edge
"""
wafer_edge_coords = []
logger.info(
"identify coordinates of three points on the wafer edge. (start with the flat corners)"
)
for idx in range(3):
input("press enter to register coordinates...")
wafer_edge_coords.append(self.current_versa_xy())
# unpack triangle coordinates
tri = geometry.Triangle(*wafer_edge_coords)
# center is the versascan coordinate such that the camera frame is on the wafer origin
center = np.array(tri.circumcenter, dtype=float)
logger.debug(f"wafer edge coordinates: {wafer_edge_coords}")
logger.debug(f"center coordinate: {center}")
# move the stage to focus the camera on the center of the wafer...
current = np.array(self.current_versa_xy())
delta = center - current
# convert to meters!
delta = delta * 1e-3
logger.debug(f"moving cell to center: {delta}")
# specify updates in the stage frame...
with sdc.position.controller(speed=self.speed) as stage:
input("press enter to allow lateral cell motion...")
stage.update(delta=delta)
# set up the stage reference frame
# relative to the last recorded positions
cam = self.camera_frame
if self.frame_orientation == "-y":
_stage = cam.orient_new(
"_stage", BodyOrienter(sympy.pi / 2, sympy.pi, 0, "ZYZ")
)
else:
raise NotImplementedError
# find the origin of the combi wafer in the coincident stage frame
v = 0.0 * cam.i + 0.0 * cam.j
combi_origin = v.to_matrix(_stage)
# truncate to 2D vector
combi_origin = np.array(combi_origin).squeeze()[:-1]
# now find the origin of the stage frame
# xv_init = np.array([ref['x_versa'], ref['y_versa']])
xv_init = np.array(center)
        shift = xv_init - combi_origin
        v_origin = shift[1] * cam.i + shift[0] * cam.j
# construct the shifted stage frame
stage = _stage.locate_new("stage", v_origin)
self.stage_frame = stage
def sync_coordinate_systems(
self, orientation=None, register_initial=False, resume=False
):
""" set up stage reference frames relative to the cell coordinate system """
with sdc.position.controller() as pos:
# map m -> mm
x_versa = pos.x * 1e3
y_versa = pos.y * 1e3
ref = self.get_last_known_position(x_versa, y_versa, resume=resume)
# set up the stage reference frame
# relative to the last recorded positions
cell = self.cell_frame
if orientation == "-y":
_stage = cell.orient_new(
"_stage", BodyOrienter(sympy.pi / 2, sympy.pi, 0, "ZYZ")
)
else:
raise NotImplementedError
# find the origin of the combi wafer in the coincident stage frame
v = ref["x_combi"] * cell.i + ref["y_combi"] * cell.j
combi_origin = v.to_matrix(_stage)
# truncate to 2D vector
combi_origin = np.array(combi_origin).squeeze()[:-1]
# now find the origin of the stage frame
xv_init = np.array([ref["x_versa"], ref["y_versa"]])
if resume:
offset = np.array([x_versa, y_versa]) - xv_init
logger.debug(f"wafer offset: {offset}")
# xv_init += offset
        shift = xv_init - combi_origin
        v_origin = shift[1] * cell.i + shift[0] * cell.j
# construct the shifted stage frame
stage = _stage.locate_new("stage", v_origin)
return stage
def compute_position_update(self, x: float, y: float, frame: Any) -> np.ndarray:
"""compute frame update to map combi coordinate to the specified reference frame
Arguments:
x: wafer x coordinate (`mm`)
y: wafer y coordinate (`mm`)
frame: target reference frame (`cell`, `camera`, `laser`)
Returns:
stage frame update vector (in meters)
Important:
all reference frames are in `mm`; the position controller works with `meters`
"""
P = to_coords([x, y], frame)
target_coords = np.array(
            P.express_coordinates(self.stage_frame), dtype=float
)
logger.debug(f"target coordinites: {target_coords}")
with sdc.position.controller() as pos:
# map m -> mm
current_coords = np.array((pos.x, pos.y, 0.0)) * 1e3
delta = target_coords - current_coords
# convert from mm to m
delta = delta * 1e-3
return delta
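    # Worked example (hypothetical numbers): for a target 1 mm to the right of
    # the current cell position, target_coords - current_coords is
    # [1.0, 0.0, 0.0] in mm, so the returned stage update is
    # [0.001, 0.0, 0.0] in meters.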
def move_stage(
self,
x: float,
y: float,
frame: Any,
stage: Any = None,
threshold: float = 0.0001,
):
"""specify target positions in combi reference frame
Arguments:
x: wafer x coordinate (`mm`)
y: wafer y coordinate (`mm`)
frame: target reference frame (`cell`, `camera`, `laser`)
stage: stage control interface
threshold: distance threshold in meters
Important:
If a `stage` interface is passed, [move_stage][asdc.client.SDC.move_stage] does not traverse the `z` axis at all!
"""
def _execute_update(stage, delta, confirm, verbose):
if confirm:
input("press enter to allow lateral cell motion...")
# move horizontally
stage.update(delta=delta)
logger.debug(f"stage position: {stage.current_position()}")
# map position update to position controller frame
delta = self.compute_position_update(x, y, frame)
if np.abs(delta).sum() > threshold:
logger.debug(f"position update: {delta} (mm)")
# if self.notify:
# slack.post_message(f'*confirm update*: (delta={delta})')
if stage is None:
with sdc.position.sync_z_step(
height=self.step_height, speed=self.speed
) as stage:
_execute_update(stage, delta, self.confirm, self.verbose)
else:
_execute_update(stage, delta, self.confirm, self.verbose)
if self.initialize_z_position:
# TODO: define the lower z baseline after the first move
input("*initialize z position*: press enter to continue...")
self.initialize_z_position = False
# update internal tracking of stage position
if stage is None:
with sdc.position.controller() as stage:
self.v_position = stage.current_position()
else:
self.v_position = stage.current_position()
return
def move(self, x: float, y: float, reference_frame="cell"):
"""slack bot command to move the stage
A thin json wrapper for [move_stage][asdc.client.SDC.move_stage].
Arguments:
- `x`: wafer x coordinate (`mm`)
- `y`: wafer y coordinate (`mm`)
- `reference_frame`: target reference frame (`cell`, `camera`, `laser`)
"""
if self.verbose:
logger.debug(f"local vars (move): {locals()}")
frame = {
"cell": self.cell_frame,
"laser": self.laser_frame,
"camera": self.camera_frame,
"xray": self.xray_frame,
}[reference_frame]
self.move_stage(x, y, frame)
# @ctl -- update the semaphore in the controller process
# await self.dm_controller(web_client, '<@UHNHM7198> update position is set.')
def _scale_flow(self, rates: Dict, nominal_rate: float = 0.5) -> Dict:
""" high nominal flow_rate for running out to steady state """
total_rate = sum(rates.values())
if total_rate <= 0.0:
total_rate = 1.0
return {key: val * nominal_rate / total_rate for key, val in rates.items()}
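    # Example: _scale_flow({"NaCl": 2.0, "KCl": 2.0}, nominal_rate=0.5)
    # -> {"NaCl": 0.25, "KCl": 0.25}; relative composition is preserved while
    # the total flow is pinned to the nominal rate.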
def debug_reglo_droplet(
self,
prep_height=0.004,
wetting_height=0.0011,
fill_rate=1.0,
fill_counter_ratio=0.75,
fill_time=None,
shrink_counter_ratio=1.1,
shrink_time=None,
flow_rate=0.5,
target_rate=0.05,
cleanup_duration=3,
cleanup_pulse_duration=0,
stage_speed=0.001,
):
"""slack bot command for prototyping droplet contact routine
#### json arguments
| Name | Type | Description | Default |
|------------------|-------|-----------------------------------------------------|---------|
| `prep_height` | float | z setting to grow the droplet | 4mm |
| `wetting_height` | float | z setting to wet the droplet to the surface | 1.1mm |
| `fill_rate` | float | pumping rate during droplet growth | 1 mL/min |
| `fill_counter_ratio` | float | counterpumping ratio during droplet growth | 0.75 |
| `fill_time` | float | droplet growth duration (s) | None |
| `shrink_counter_ratio` | float | counterpumping ratio during droplet wetting phase | 1.1 |
| `shrink_time` | float | droplet wetting duration (s) | None |
| `flow_rate` | float | total flow rate during droplet formation (mL/min) | 0.5 |
| `target_rate` | float | final flow rate after droplet formation (mL/min) | 0.05 |
        | `cleanup_duration` | float | duration of pre-droplet-formation cleanup siphoning (s) | 3 |
        | `cleanup_pulse_duration` | float | duration of the initial drain pulse during cleanup (s) | 0 |
| `stage_speed` | float | stage velocity during droplet formation op | 0.001 |
"""
# stage speed is specified in m/s
stage_speed = min(stage_speed, 1e-3)
stage_speed = max(stage_speed, 1e-5)
# start at zero
with sdc.position.sync_z_step(height=wetting_height, speed=stage_speed):
if cleanup_duration > 0:
# TODO: turn on the needle
# make an option to pulse loop and dump simultaneously, same rate opposite directions?
logger.debug("cleaning up...")
self.reglo.continuousFlow(-10.0, channel=Channel.NEEDLE)
self.reglo.stop([Channel.SOURCE, Channel.LOOP])
if cleanup_pulse_duration > 0:
pulse_flowrate = -1.0
# self.reglo.continuousFlow(pulse_flowrate, channel=Channel.LOOP)
self.reglo.continuousFlow(pulse_flowrate, channel=Channel.DRAIN)
time.sleep(cleanup_pulse_duration)
self.reglo.stop(channel=Channel.DRAIN)
time.sleep(cleanup_duration)
height_difference = prep_height - wetting_height
height_difference = max(0, height_difference)
with sdc.position.sync_z_step(height=height_difference, speed=stage_speed):
# counterpump slower to fill the droplet
logger.debug("filling droplet")
self.reglo.set_rates(
{
Channel.SOURCE: fill_rate,
Channel.LOOP: -fill_rate,
Channel.DRAIN: -fill_counter_ratio * fill_rate,
}
)
fill_start = time.time()
if fill_time is None:
input("*filling droplet*: press enter to continue...")
else:
time.sleep(fill_time)
fill_time = time.time() - fill_start
# drop down to wetting height
# counterpump faster to shrink the droplet
logger.debug("shrinking droplet")
shrink_flowrate = fill_rate * shrink_counter_ratio
self.reglo.continuousFlow(-shrink_flowrate, channel=Channel.DRAIN)
shrink_start = time.time()
if shrink_time is None:
input("*shrinking droplet*: press enter to continue...")
else:
time.sleep(shrink_time)
shrink_time = time.time() - shrink_start
logger.debug("equalizing differential pumping rate")
self.reglo.continuousFlow(-fill_rate, channel=Channel.DRAIN)
# drop down to contact height
# instructions['fill_time'] = fill_time
# instructions['shrink_time'] = shrink_time
time.sleep(3)
# purge...
logger.debug("purging solution")
purge_rate = 11.0
self.reglo.set_rates(
{
Channel.SOURCE: purge_rate,
Channel.LOOP: -purge_rate,
Channel.DRAIN: -purge_rate,
}
)
time.sleep(30)
# reverse the loop direction
self.reglo.continuousFlow(6.0, channel=Channel.LOOP)
time.sleep(3)
# disable source and dump
self.reglo.stop([Channel.SOURCE, Channel.DRAIN])
# step to target flow rate
self.reglo.set_rates({Channel.LOOP: target_rate, Channel.NEEDLE: -2.0})
# message = f"contact routine with {json.dumps(locals())}"
# logger.debug(message)
logger.debug(f"local vars: {locals}")
return
def cleanup_droplet(self):
""" pick up the cell head and do a rinse and clean. """
pulse_flowrate = -10.0
# start surface flushing system
self.reglo.set_rates({Channel.RINSE: 5.0, Channel.NEEDLE: -10.0})
time.sleep(1)
# lift up the cell; don't set it back down
with sdc.position.controller() as pos:
pos.update_z(delta=abs(self.wetting_height))
if self.cleanup_pause > 0:
cleanup_drain_rate = -10.0
cleanup_loop_rate = -cleanup_drain_rate / 2
logger.debug("cleaning up...")
self.reglo.set_rates(
{Channel.DRAIN: cleanup_drain_rate, Channel.LOOP: cleanup_loop_rate}
)
if self.cleanup_pulse_duration > 0:
self.reglo.continuousFlow(pulse_flowrate, channel=Channel.DRAIN)
time.sleep(self.cleanup_pulse_duration)
self.reglo.continuousFlow(cleanup_drain_rate, channel=Channel.DRAIN)
# run the RINSE channel for only half the cleanup duration
# to allow the NEEDLE time to clean everything up
time.sleep(self.cleanup_pause / 2)
self.reglo.stop(Channel.RINSE)
time.sleep(self.cleanup_pause / 2)
self.reglo.stop((Channel.LOOP, Channel.DRAIN))
return
def establish_droplet(
self,
flow_instructions: Dict = {},
x_wafer: Optional[float] = None,
y_wafer: Optional[float] = None,
logfile: str = None,
):
"""Form a new droplet with composition specified by `flow_instructions`.
if both `x_wafer` and `y_wafer` are specified, the cell will move to these sample coordinates before forming a droplet
Arguments:
flow_instructions: specification of droplet composition and loop flow rates.
Example: `{"op": "set_flow", "pH": 10, "flow_rate": 1.25, "relative_rates": {"NaCl": 1.0}, "purge_time": 90}`
x_wafer: sample x coordinate to move to before forming a droplet
y_wafer: sample y coordinate to move to before forming a droplet
"""
def check_syringe_levels(
volume_needed: Dict[str, float],
levels: Dict[str, float],
headroom: float = 5,
) -> List[str]:
""" compare the volume needed for a push with the current pump levels """
surplus = {
key: levels[key] - volume_needed[key] for key in volume_needed.keys()
}
# trigger a refill if there won't be more than 5 mL (default) headroom after the push
to_refill = [key for key, value in surplus.items() if value < headroom]
return to_refill
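        # e.g. check_syringe_levels({"NaCl": 4.0}, {"NaCl": 7.5}) -> ["NaCl"],
        # since only 3.5 mL would remain after the push (below the default
        # 5 mL headroom).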
relative_rates = flow_instructions.get("relative_rates")
target_rate = float(flow_instructions.get("flow_rate", 1.0))
purge_time = float(flow_instructions.get("purge_time", 30))
        pH_target = flow_instructions.get("pH")
        if pH_target is not None:
            pH_target = float(pH_target)
# some hardcoded configuration
pulse_flowrate = -10.0
purge_rate = 11.0
purge_ratio = 0.95
purge_rates = self._scale_flow(relative_rates, nominal_rate=purge_rate)
# compute required volumes in mL
volume_needed = {
key: purge_time * rate / 60 for key, rate in purge_rates.items()
}
logger.info(f"solution push target: {volume_needed}")
levels = self.pump_array.levels()
logger.info(f"current solution levels: {levels}")
to_refill = check_syringe_levels(volume_needed, levels)
while len(to_refill) > 0:
pump_ids = [
f"{name} (pump {self.pump_array.get_pump_id(name)})"
for name in to_refill
]
pump_ids = ", ".join(pump_ids)
logger.warning(f"Refill and reset syringes: {pump_ids}")
input("refill and reset pumps to proceed")
levels = self.pump_array.levels()
to_refill = check_syringe_levels(volume_needed, levels)
# droplet workflow -- start at zero
logger.debug("starting droplet workflow")
# start surface flushing system
self.reglo.set_rates({Channel.RINSE: 5.0, Channel.NEEDLE: -10.0})
time.sleep(1)
with sdc.position.sync_z_step(height=self.wetting_height, speed=self.speed):
if self.cleanup_pause > 0:
cleanup_drain_rate = -10.0
cleanup_loop_rate = -cleanup_drain_rate / 2
logger.debug("cleaning up...")
self.reglo.set_rates(
{Channel.DRAIN: cleanup_drain_rate, Channel.LOOP: cleanup_loop_rate}
)
if self.cleanup_pulse_duration > 0:
self.reglo.continuousFlow(pulse_flowrate, channel=Channel.DRAIN)
time.sleep(self.cleanup_pulse_duration)
self.reglo.continuousFlow(cleanup_drain_rate, channel=Channel.DRAIN)
# run the RINSE channel for only half the cleanup duration
# to allow the NEEDLE time to clean everything up
time.sleep(self.cleanup_pause / 2)
self.reglo.stop(Channel.RINSE)
time.sleep(self.cleanup_pause / 2)
self.reglo.stop((Channel.LOOP, Channel.DRAIN))
height_difference = self.droplet_height - self.wetting_height
height_difference = max(0, height_difference)
with sdc.position.sync_z_step(
height=height_difference, speed=self.speed
) as stage:
if x_wafer is not None and y_wafer is not None:
self.move_stage(x_wafer, y_wafer, self.cell_frame)
logger.debug("starting rinse")
self.reglo.set_rates({Channel.RINSE: 5.0})
time.sleep(2)
# counterpump slower to fill the droplet
logger.debug("filling droplet")
cell_fill_rates = self._scale_flow(
relative_rates, nominal_rate=self.fill_rate
)
self.pump_array.set_rates(cell_fill_rates, start=True, fast=True)
self.reglo.set_rates(
{
# Channel.SOURCE: self.fill_rate,
Channel.LOOP: -self.fill_rate,
Channel.DRAIN: -self.fill_counter_ratio * self.fill_rate,
}
)
time.sleep(self.fill_time / 2)
self.reglo.stop(Channel.RINSE)
time.sleep(self.fill_time / 2)
# drop down to wetting height
# counterpump faster to shrink the droplet
logger.debug("differentially pumping to shrink the droplet")
shrink_flowrate = self.fill_rate * self.shrink_counter_ratio
self.reglo.continuousFlow(-shrink_flowrate, channel=Channel.DRAIN)
time.sleep(self.shrink_time)
logger.debug("equalizing differential pumping rate")
self.reglo.continuousFlow(-self.fill_rate, channel=Channel.DRAIN)
# drop down to contact...
time.sleep(3)
# purge... (and monitor pH)
if logfile is None:
logfile = os.path.join(self.data_dir, "purge.csv")
with self.phmeter.monitor(interval=1, logfile=logfile):
logger.debug("purging solution")
self.pump_array.set_rates(purge_rates, start=True, fast=True)
self.reglo.set_rates(
{
Channel.LOOP: -purge_ratio * purge_rate,
Channel.DRAIN: -purge_ratio * purge_rate,
}
)
time.sleep(purge_time)
# reverse the loop direction
self.reglo.continuousFlow(6.0, channel=Channel.LOOP)
time.sleep(3)
logger.debug(f"stepping flow rates to {target_rate}")
self.reglo.set_rates({Channel.LOOP: target_rate, Channel.NEEDLE: -2.0})
self.pump_array.stop_all(fast=True)
self.reglo.stop(Channel.DRAIN)
current_pH_reading = self.phmeter.pH[-1]
if pH_target is not None:
pH_error = abs(current_pH_reading - pH_target)
else:
pH_error = 0
if pH_error > 0.5:
logger.warning(
f"current pH reading of {current_pH_reading} does not match target of {pH_target}"
)
else:
logger.info(
f"current pH reading is {current_pH_reading} (target is {pH_target})"
)
return
def syringe_establish_droplet(
self, x_wafer: float, y_wafer: float, flow_instructions: Dict
):
""" align the stage with a sample point, form a droplet, and flush lines if needed """
rates = flow_instructions.get("rates")
cell_fill_rates = self._scale_flow(rates, nominal_rate=0.5)
line_flush_rates = self._scale_flow(rates, nominal_rate=1.0)
# if relative flow rates don't match, purge solution
line_flush_duration = flow_instructions.get("hold_time", 0)
line_flush_needed = relative_flow(rates) != relative_flow(
self.pump_array.flow_setpoint
)
# droplet workflow -- start at zero
logger.debug("starting droplet workflow")
with sdc.position.sync_z_step(
height=self.wetting_height, speed=self.speed
) as stage:
if self.cleanup_pause > 0:
logger.debug("cleaning up...")
self.pump_array.stop_all(counterbalance="full", fast=True)
time.sleep(self.cleanup_pause)
self.move_stage(x_wafer, y_wafer, self.cell_frame)
height_difference = self.droplet_height - self.wetting_height
height_difference = max(0, height_difference)
with sdc.position.sync_z_step(height=height_difference, speed=self.speed):
# counterpump slower to fill the droplet
logger.debug("differentially pumping to grow the droplet")
self.pump_array.set_rates(
cell_fill_rates,
                counterpump_ratio=self.fill_counter_ratio,  # assumed: self.fill_ratio is never set in __init__
start=True,
fast=True,
)
time.sleep(self.fill_time)
# drop down to wetting height
# counterpump faster to shrink the droplet
logger.debug("differentially pumping to shrink the droplet")
self.pump_array.set_rates(
cell_fill_rates,
                counterpump_ratio=self.shrink_counter_ratio,  # assumed: self.shrink_ratio is never set in __init__
start=True,
fast=True,
)
time.sleep(self.shrink_time)
logger.debug("equalizing differential pumping rate")
self.pump_array.set_rates(
line_flush_rates, counterpump_ratio=0.95, start=True, fast=True
)
# flush lines with cell in contact
if line_flush_needed:
logger.debug("performing line flush")
time.sleep(line_flush_duration)
time.sleep(3)
logger.debug(f"stepping flow rates to {rates}")
self.pump_array.set_rates(rates, counterpump_ratio=0.95, start=True, fast=True)
return
def quick_expt(
self,
instructions: Union[Dict, List[Dict]],
internal=False,
plot=True,
remeasure_ocp=False,
):
""" run a one-off e-chem sequence without touching the stages or pumps """
logger.info(f"running experiment {instructions}")
        if isinstance(instructions, dict):
instructions = [instructions]
if internal:
cell = "INTERNAL"
else:
cell = "EXTERNAL"
meta = {
"instructions": json.dumps(instructions),
"cell": cell,
"x_combi": None,
"y_combi": None,
"x_versa": self.v_position[0],
"y_versa": self.v_position[1],
"z_versa": self.v_position[2],
}
with self.db as tx:
location_id = tx["location"].insert(meta)
summary = "-".join(step["op"] for step in instructions)
message = f"location *{location_id}*: {summary}"
self.send_notification(message, block=self.confirm_experiment)
# run e-chem experiments and store results in external csv file
basename = f"asdc_data_{location_id:03d}"
# results, metadata = sdc.experiment.run(instructions, cell=cell, verbose=self.verbose, remeasure_ocp=remeasure_ocp)
with sdc.potentiostat.controller(start_idx=potentiostat_id) as pstat:
for sequence_id, instruction in enumerate(instructions):
experiment = sdc.experiment.from_command(instruction)
opname = instruction["op"]
metadata = {
"op": opname,
"location_id": location_id,
"datafile": f"{basename}_{sequence_id}_{opname}.csv",
}
results, m = pstat.run(experiment)
status = results.check_quality()
metadata.update(m)
with self.db as tx:
experiment_id = tx["experiment"].insert(metadata)
results.to_csv(os.path.join(self.data_dir, metadata["datafile"]))
if plot:
figpath = os.path.join(
self.figure_dir, f"{opname}_plot_{location_id}.png"
)
save_plot(
results,
figpath,
post_slack=True,
title=f"{opname} {location_id}",
)
logger.info("finished experiment")
def send_notification(self, message, block=False):
if block:
message = f"*confirm*: {message}"
logger.info(message)
if block:
input("press enter to allow running the experiment...")
def run_experiment(self, instructions: List[Dict], plot=True):
"""run an SDC experiment
args should contain a sequence of SDC experiments -- basically the "instructions"
segment of an autoprotocol protocol
that comply with the SDC experiment schema (TODO: finalize and enforce schema)
TODO: define heuristic checks (and hard validation) as part of the experimental protocol API
# heuristic check for experimental error signals?
"""
# reset OCP reference value
self.ocp_hold_value = None
# check for an instruction group name/intent
intent = instructions[0].get("intent")
if intent is not None:
header = instructions[0]
instructions = instructions[1:]
x_combi, y_combi = header.get("x", None), header.get("y", None)
flow_instructions = instructions[0]
self.establish_droplet(flow_instructions, x_combi, y_combi)
# hack? set wafer coords to zero if they are not specified
if x_combi is None and y_combi is None:
x_combi, y_combi = 0, 0
meta = {
"intent": intent,
"instructions": json.dumps(instructions),
"x_combi": float(x_combi),
"y_combi": float(y_combi),
"x_versa": self.v_position[0],
"y_versa": self.v_position[1],
"z_versa": self.v_position[2],
"flag": False,
}
with self.db as tx:
location_id = tx["location"].insert(meta)
summary = "-".join(step["op"] for step in instructions)
message = f"location *{location_id}*: {summary}"
self.send_notification(message, block=self.confirm_experiment)
# run e-chem experiments and store results in external csv file
basename = f"asdc_data_{location_id:03d}"
pH_logfile = os.path.join(self.data_dir, f"pH_log_run{location_id:03d}.csv")
with self.phmeter.monitor(interval=5, logfile=pH_logfile):
with sdc.potentiostat.controller(start_idx=potentiostat_id) as pstat:
for sequence_id, instruction in enumerate(instructions):
logger.debug(f"running {instruction}")
opname = instruction.get("op")
if opname is None:
continue
logger.info(f"running {opname}")
experiment = sdc.experiment.from_command(instruction)
if experiment is None:
continue
# set up data-dependent experiments?
# check if voltage reference is vs hold (good name for this?)
# load values from db/disk -- alternatively previous experiment sets it?
experiment.update_relative_scan(self.ocp_hold_value)
metadata = {
"op": opname,
"location_id": location_id,
"datafile": f"{basename}_{sequence_id}_{opname}.csv",
}
results, m = pstat.run(experiment)
try:
status = results.check_quality()
except Exception as err:
logger.error(f"data check: {err}")
if experiment.name == "OpenCircuit":
# record open circuit potential after hold for
# reference by downstream experiments
self.ocp_hold_value = results["potential"].iloc[-5:].mean()
metadata.update(m)
if self.pump_array:
metadata["flow_setpoint"] = json.dumps(
self.pump_array.flow_setpoint
)
with self.db as tx:
experiment_id = tx["experiment"].insert(metadata)
results.to_csv(
os.path.join(self.data_dir, metadata["datafile"])
)
if plot:
figpath = os.path.join(
self.figure_dir,
f"{opname}_plot_{location_id}_{sequence_id}.png",
)
save_plot(
results,
figpath,
post_slack=self.plot_slack,
title=f"{opname} {location_id}",
)
logger.info(f"finished experiment {location_id}: {summary}")
def clean_droplet(self):
""" just clean up without a rinse """
pulse_flowrate = -10.0
if self.cleanup_pause > 0:
cleanup_drain_rate = -10.0
cleanup_loop_rate = -cleanup_drain_rate / 2
logger.debug("cleaning up...")
self.reglo.set_rates(
{Channel.DRAIN: cleanup_drain_rate, Channel.LOOP: cleanup_loop_rate}
)
if self.cleanup_pulse_duration > 0:
self.reglo.continuousFlow(pulse_flowrate, channel=Channel.DRAIN)
time.sleep(self.cleanup_pulse_duration)
self.reglo.continuousFlow(cleanup_drain_rate, channel=Channel.DRAIN)
# run the RINSE channel for only half the cleanup duration
# to allow the NEEDLE time to clean everything up
time.sleep(self.cleanup_pause)
self.reglo.stop((Channel.LOOP, Channel.DRAIN))
def collect_image(self, instructions):
x_combi, y_combi = instructions.get("x", None), instructions.get("y", None)
sample = self.db["location"].find_one(x_combi=x_combi, y_combi=y_combi)
# check for an instruction group name/intent
intent = instructions.get("intent")
# run cleanup
self.pump_array.stop_all(counterbalance="full", fast=True)
time.sleep(0.25)
with sdc.position.sync_z_step(height=self.wetting_height, speed=self.speed):
if self.cleanup_pause > 0:
self.clean_droplet()
height_difference = self.characterization_height - self.wetting_height
height_difference = max(0, height_difference)
with sdc.position.sync_z_step(height=height_difference, speed=self.speed):
self.move_stage(x_combi, y_combi, self.camera_frame)
            self.capture_image_new(sample)
self.move_stage(x_combi, y_combi, self.cell_frame)
def run_characterization(self, args: str):
"""perform cell cleanup and characterization
the header instruction should contain a list of primary keys
corresponding to sample points that should be characterized.
run_characterization [
{"intent": "characterize", "experiment_id": 22},
{"op": "surface-cam"}
{"op": "laser-reflectance"}
{"op": "xrays"}
]
"""
# the header block should contain the `experiment_id`
# for the spots to be characterized
instructions = json.loads(args)
header = instructions[0]
instructions = instructions[1:]
# check for an instruction group name/intent
intent = header.get("intent")
experiment_id = header.get("experiment_id")
# get all relevant samples
samples = self.db["experiment"].find(
experiment_id=experiment_id, intent="deposition"
)
if instructions[0].get("op") == "set_flow":
flow_instructions = instructions[0]
for sample in samples:
x_combi = sample.get("x_combi")
y_combi = sample.get("y_combi")
primary_key = sample.get("id")
self.establish_droplet(flow_instructions, x_combi, y_combi)
# run cleanup and optical characterization
self.pump_array.stop_all(counterbalance="full", fast=True)
time.sleep(0.25)
characterization_ops = set(i.get("op") for i in instructions if "op" in i)
with sdc.position.sync_z_step(height=self.wetting_height, speed=self.speed):
if self.cleanup_pause > 0:
time.sleep(self.cleanup_pause)
height_difference = self.characterization_height - self.wetting_height
height_difference = max(0, height_difference)
with sdc.position.sync_z_step(height=height_difference, speed=self.speed):
# run laser and camera scans
samples = self.db["experiment"].find(
experiment_id=experiment_id, intent="deposition"
)
for idx, sample in enumerate(samples):
x_combi = sample.get("x_combi")
y_combi = sample.get("y_combi")
primary_key = sample.get("id")
if "surface-cam" in characterization_ops:
if self.notify:
web_client.chat_postMessage(
channel="#asdc",
text=f"inspecting deposit quality",
icon_emoji=":sciencebear:",
)
self.move_stage(x_combi, y_combi, self.camera_frame)
self.capture_image(primary_key=primary_key)
image_name = f"deposit_pic_{primary_key:03d}.png"
figpath = os.path.join(self.data_dir, image_name)
try:
_slack.post_image(
web_client, figpath, title=f"deposit {primary_key}"
)
                        except Exception as err:
                            logger.warning(f"slack image post failed: {err}")
if self.notify:
web_client.chat_postMessage(
channel="#asdc",
text=f"acquiring laser reflectance data",
icon_emoji=":sciencebear:",
)
with sdc.position.sync_z_step(
height=self.laser_scan_height, speed=self.speed
) as stage:
# laser scan
if "laser-reflectance" in characterization_ops:
self.move_stage(
x_combi, y_combi, self.laser_frame, stage=stage
)
self.reflectance(primary_key=primary_key, stage=stage)
# xray scan
if "xrays" in characterization_ops:
self.move_stage(x_combi, y_combi, self.xray_frame)
time.sleep(1)
prefix = f"sdc-26-{primary_key:04d}"
logger.info(f"starting x-rays for {prefix}")
epics.dispatch_xrays(
prefix, os.path.join(self.data_dir, "xray")
)
self.move_stage(x_combi, y_combi, self.cell_frame)
self.pump_array.counterpump.stop()
def xrays(self, args: str):
"""perform 06BM x-ray routine
        `@sdc xrays {"experiment_id": 1}`
the header instruction should contain a list of primary keys
corresponding to sample points that should be characterized.
"""
# the header block should contain
instructions = json.loads(args)
experiment_id = instructions.get("experiment_id")
# get all relevant samples
samples = self.db["experiment"].find(experiment_id=experiment_id)
with sdc.position.sync_z_step(height=self.xrays_height, speed=self.speed):
for sample in samples:
logger.debug("xrd")
web_client.chat_postMessage(
channel="#asdc",
text=f"x-ray ops go here...",
icon_emoji=":sciencebear:",
)
x_combi = sample.get("x_combi")
y_combi = sample.get("y_combi")
primary_key = sample.get("id")
self.move_stage(x_combi, y_combi, self.xray_frame)
time.sleep(1)
prefix = f"sdc-26-{primary_key:04d}"
logger.debug(f"starting x-rays for {prefix}")
epics.dispatch_xrays(prefix, os.path.join(self.data_dir, "xray"))
# move back to the cell frame for the second spot
self.move_stage(x_combi, y_combi, self.cell_frame)
def droplet(self, args: str):
"""slack bot command for prototyping droplet contact routine
#### json arguments
| Name | Type | Description | Default |
|------------------|-------|-----------------------------------------------------|---------|
        | `height`         | float | z setting to grow the droplet                       | 4mm     |
| `wetting_height` | float | z setting to wet the droplet to the surface | 1.1mm |
| `fill_rate` | float | counterpumping ratio during droplet growth | 0.75 |
| `fill_time` | float | droplet growth duration (s) | None |
| `shrink_rate` | float | counterpumping ratio during droplet wetting phase | 1.1 |
| `shrink_time` | float | droplet wetting duration (s) | None |
| `flow_rate` | float | total flow rate during droplet formation (mL/min) | 0.5 |
| `target_rate` | float | final flow rate after droplet formation (mL/min) | 0.05 |
| `cleanup` | float | duration of pre-droplet-formation cleanup siphoning | 0 |
| `stage_speed` | float | stage velocity during droplet formation op | 0.001 |
| `solutions` | List[str] | list of solutions to pump with | None |
"""
instructions = json.loads(args)
prep_height = max(0, instructions.get("height", 0.004))
wetting_height = max(0, instructions.get("wetting_height", 0.0011))
fill_ratio = instructions.get("fill_rate", 0.75)
fill_time = instructions.get("fill_time", None)
shrink_ratio = instructions.get("shrink_rate", 1.1)
shrink_time = instructions.get("shrink_time", None)
flow_rate = instructions.get("flow_rate", 0.5)
target_rate = instructions.get("target_rate", 0.05)
cleanup_duration = instructions.get("cleanup", 0)
stage_speed = instructions.get("stage_speed", self.speed)
solutions = instructions.get("solutions")
# stage speed is specified in m/s
stage_speed = min(stage_speed, 1e-3)
stage_speed = max(stage_speed, 1e-5)
# just pump from the first syringe pump
# solution = next(iter(self.solutions))
if solutions is None:
solution = self.solutions[0]
s = next(iter(solution))
_rates = {s: flow_rate}
elif type(solutions) is list:
_rates = {s: 1.0 for s in solutions}
elif type(solutions) is dict:
_rates = solutions
rates = self._scale_flow(_rates, nominal_rate=flow_rate)
target_rates = self._scale_flow(_rates, nominal_rate=target_rate)
logger.info(f"rates: {rates}")
logger.info(f"target_rates: {target_rates}")
# start at zero
with sdc.position.z_step(height=wetting_height, speed=stage_speed):
if cleanup_duration > 0:
logger.info("cleaning up...")
self.pump_array.stop_all(counterbalance="full", fast=True)
time.sleep(cleanup_duration)
height_difference = prep_height - wetting_height
height_difference = max(0, height_difference)
with sdc.position.z_step(height=height_difference, speed=stage_speed):
# counterpump slower to fill the droplet
logger.info("filling droplet")
self.pump_array.set_rates(
rates, counterpump_ratio=fill_ratio, start=True, fast=True
)
fill_start = time.time()
if fill_time is None:
input("*filling droplet*: press enter to continue...")
else:
time.sleep(fill_time)
fill_time = time.time() - fill_start
# drop down to wetting height
# counterpump faster to shrink the droplet
logger.info("shrinking droplet")
self.pump_array.set_rates(rates, counterpump_ratio=shrink_ratio, fast=True)
shrink_start = time.time()
if shrink_time is None:
input("*shrinking droplet*: press enter to continue...")
else:
time.sleep(shrink_time)
shrink_time = time.time() - shrink_start
logger.info("equalizing differential pumping rate")
self.pump_array.set_rates(rates, fast=True, start=True)
# drop down to contact height
instructions["fill_time"] = fill_time
instructions["shrink_time"] = shrink_time
time.sleep(3)
logger.info(f"stepping flow rates to {rates}")
self.pump_array.set_rates(
target_rates, counterpump_ratio=0.95, fast=True, start=True
)
message = f"contact routine with {json.dumps(instructions)}"
web_client.chat_postMessage(
channel="#asdc", text=message, icon_emoji=":sciencebear:"
)
return
def flag(self, primary_key: int):
""" mark a datapoint as bad """
with self.db as tx:
tx["experiment"].update({"id": primary_key, "flag": True}, ["id"])
def coverage(self, primary_key: int, coverage_estimate: float):
""" record deposition coverage on (0.0,1.0). """
if coverage_estimate < 0.0 or coverage_estimate > 1.0:
_slack.post_message(
f":terriblywrong: *error:* coverage estimate should be in the range (0.0, 1.0)"
)
else:
with self.db as tx:
tx["experiment"].update(
{"id": primary_key, "coverage": coverage_estimate}, ["id"]
)
def refl(self, primary_key: int, reflectance_readout: float):
""" record the reflectance of the deposit (0.0,inf). """
if reflectance_readout < 0.0:
_slack.post_message(
f":terriblywrong: *error:* reflectance readout should be positive"
)
else:
with self.db as tx:
tx["experiment"].update(
{"id": primary_key, "reflectance": reflectance_readout}, ["id"]
)
def reflectance_linescan(
self, stepsize: float = 0.00015, n_steps: int = 32, stage: Any = None
) -> Tuple[List[float], List[float]]:
"""perform a laser reflectance linescan
Arguments:
stepsize: distance between linescan measurements (meters)
n_steps: number of measurements in the scan
stage: stage controller
Returns:
mean: list of reflectance values forming the linescan
var: uncertainty for reflectances in the linescan
Warning:
`reflectance_linescan` translates the sample stage.
Ensure that the z-stage is such that the cell is not in contact
with the sample to avoid dragging, which could potentially damage
the sample or the cell.
"""
mean, var = [], []
if stage is None:
with sdc.position.controller(speed=self.speed) as stage:
for step in range(n_steps):
reflectance_data = self.reflectometer.collect(timeout=2)
mean.append(reflectance_data)
# mean.append(np.mean(reflectance_data))
# var.append(np.var(reflectance_data))
stage.update_y(-stepsize)
time.sleep(0.25)
else:
for step in range(n_steps):
reflectance_data = self.reflectometer.collect(timeout=2)
mean.append(reflectance_data)
stage.update_y(-stepsize)
time.sleep(0.25)
return mean, var
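    # NOTE: `var` is returned for interface compatibility but stays empty;
    # the commented np.mean/np.var lines above sketch how per-step statistics
    # could be restored if collect() returned raw sample arrays.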
def reflectance(self, primary_key=None, stage=None):
# get the stage position at the start of the linescan
with sdc.position.controller() as s:
metadata = {"reflectance_xv": s.x, "reflectance_yv": s.y}
mean, var = self.reflectance_linescan(stage=stage)
if primary_key is not None:
filename = f"deposit_reflectance_{primary_key:03d}.json"
metadata["id"] = primary_key
metadata["reflectance_file"] = filename
with self.db as tx:
tx["experiment"].update(metadata, ["id"])
with open(os.path.join(self.data_dir, filename), "w") as f:
data = {"reflectance": mean, "variance": var}
json.dump(data, f)
logger.info(f"reflectance: {mean}")
return mean
@contextmanager
def light_on(self):
""" context manager to toggle the light on and off for image acquisition """
self.light.set("on")
yield
self.light.set("off")
def capture_image(self, primary_key=None):
"""capture an image from the webcam.
pass an experiment index to serialize metadata to db
"""
with self.light_on():
camera = cv2.VideoCapture(self.camera_index)
# give the camera enough time to come online before reading data...
time.sleep(0.5)
status, frame = camera.read()
# BGR --> RGB format
frame = frame[..., ::-1].copy()
if primary_key is not None:
image_name = f"deposit_pic_{primary_key:03d}.png"
with sdc.position.controller() as stage:
metadata = {
"id": primary_key,
"image_xv": stage.x,
"image_yv": stage.y,
"image_name": image_name,
}
with self.db as tx:
tx["experiment"].update(metadata, ["id"])
else:
image_name = "test-image.png"
imageio.imsave(os.path.join(self.data_dir, image_name), frame)
camera.release()
return
def capture_image_new(self, sample):
"""capture an image from the webcam.
pass an experiment index to serialize metadata to db
"""
with self.light_on():
camera = cv2.VideoCapture(self.camera_index)
# give the camera enough time to come online before reading data...
time.sleep(0.5)
status, frame = camera.read()
# BGR --> RGB format
frame = frame[..., ::-1].copy()
if sample is not None:
image_name = f"deposit_pic_{sample.id:03d}.png"
with sdc.position.controller() as stage:
metadata = {
"op": "image",
"location_id": sample.id,
"image_xv": stage.x,
"image_yv": stage.y,
"datafile": image_name,
"image_name": image_name,
}
with self.db as tx:
tx["experiment"].insert(metadata)
else:
image_name = "test-image.png"
imageio.imsave(os.path.join(self.data_dir, image_name), frame)
camera.release()
return
def bubble(self, primary_key: int):
""" record a bubble in the deposit """
with self.db as tx:
tx["experiment"].update({"id": primary_key, "has_bubble": True}, ["id"])
def comment(self, primary_key: int, text: str):
""" add a comment """
row = self.experiment_table.find_one(id=primary_key)
if row["comment"]:
comment = row["comment"]
comment += "; "
comment += text
else:
comment = text
with self.db as tx:
tx["experiment"].update({"id": primary_key, "comment": comment}, ["id"])
def stop_pumps(self):
""" shut off the syringe and counterbalance pumps """
self.pump_array.stop_all(counterbalance="off")
def load_experiments(self, instructions_file=None):
root_dir = os.path.dirname(self.data_dir)
if instructions_file is None:
instructions_file = os.path.join(root_dir, "instructions.json")
with open(instructions_file, "r") as f:
instructions = json.load(f)
if self.resume:
location_idx = self.db["location"].count()
logger.info(f"resuming starting at sample location {location_idx}")
instructions = instructions[location_idx:]
return instructions
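    # Resume example: if the "location" table already holds 12 rows,
    # resume=True slices off the first 12 instruction chains and the batch
    # continues from instructions[12:].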
def batch_execute_experiments(self, instructions_file=None, capture_images=False):
instructions = self.load_experiments(instructions_file)
for instruction_chain in instructions:
logger.debug(json.dumps(instruction_chain))
self.run_experiment(instruction_chain)
if capture_images:
self.capture_image_new(instruction_chain[0])
return
def purge_cell(self):
return
def droplet_video(self):
flowrates = {"flow_rate": 1.0, "relative_rates": {"H2O": 1.0}, "purge_time": 15}
points = [[0, 15], [15, 15], [15, 0], [0, 0]]
for x, y in points:
logger.info(f"visiting {x}, {y}")
            self.establish_droplet(flowrates, x, y)
time.sleep(10)
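# A minimal sketch of the instruction-chain format consumed by
# run_experiment / batch_execute_experiments: a header block carrying the
# intent and wafer coordinates, followed by op blocks. "set_flow" and
# "post_flush" are handled in this codebase; any other op names are
# illustrative assumptions, not a definitive schema.
EXAMPLE_INSTRUCTION_CHAIN = [
    {"intent": "deposition", "experiment_id": 1, "x": 10.0, "y": 5.0},
    {"op": "set_flow", "rates": {"H2O": 0.5}, "hold_time": 120},
    {"op": "post_flush", "rates": {"H2O": 1.0}, "duration": 60},
]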
def sdc_client(config_file: str, resume: bool, zmq_pub: bool, verbose: bool):
""" set up scanning droplet cell client loading from CONFIG_FILE """
experiment_root, _ = os.path.split(config_file)
with open(config_file, "r") as f:
config = yaml.safe_load(f)
logging.basicConfig(level=logging.INFO)
if config.get("notify_slack", False):
sh = _slack.SlackHandler(client=web_client)
sh.setLevel(
logging.CRITICAL
) # only log CRITICAL events to slack until setup is finished
logger.addHandler(sh)
fh = logging.FileHandler(os.path.join(experiment_root, "isdc.log"))
formatter = logging.Formatter(
"%(asctime)s - %(name)s - %(levelname)s - %(message)s"
)
fh.setFormatter(formatter)
fh.setLevel(logging.DEBUG)
logger.addHandler(fh)
# specify target file relative to config file
    target_file = config.get("target_file")
    if target_file is not None:
        config["target_file"] = os.path.join(experiment_root, target_file)
data_dir = config.get("data_dir")
if data_dir is None:
config["data_dir"] = os.path.join(experiment_root, "data")
figure_dir = config.get("figure_dir")
if figure_dir is None:
config["figure_dir"] = os.path.join(experiment_root, "figures")
os.makedirs(config["data_dir"], exist_ok=True)
os.makedirs(config["figure_dir"], exist_ok=True)
# make sure step_height is positive!
if config["step_height"] is not None:
config["step_height"] = abs(config["step_height"])
logfile = config.get("command_logfile", "commands.log")
logfile = os.path.join(config["data_dir"], logfile)
logger.info("connecting to the SDC...")
sdc_interface = SDC(
verbose=verbose,
config=config,
logfile=logfile,
token=BOT_TOKEN,
resume=resume,
zmq_pub=zmq_pub,
)
if config.get("notify_slack", False):
sh.setLevel(logging.INFO)
logger.info("connected!")
return sdc_interface
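# Sketch of the YAML config consumed by sdc_client above; keys mirror the
# config.get(...) calls in this module and values are illustrative only:
#
#   target_file: targets.csv
#   data_dir: data
#   figure_dir: figures
#   step_height: 0.0011
#   notify_slack: false
#   command_logfile: commands.log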
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="SDC client")
parser.add_argument("configfile", type=str, help="config file")
parser.add_argument(
"--no-resume", action="store_true", help="ignore starting from checkpoint"
)
parser.add_argument(
"--dashboard", action="store_true", help="set up ZMQ publisher for dashboard"
)
parser.add_argument(
"--verbose", action="store_true", help="include extra debugging output"
)
args = parser.parse_args()
logger.debug(f"{args}")
resume = not args.no_resume
logger.debug(f"resume?: {resume}")
if args.dashboard:
dashboard_log = open(
os.path.join(os.path.split(args.configfile)[0], "dashboard.log"), "wb"
)
dashboard_proc = subprocess.Popen(
["panel", "serve", "asdc/dashboard.py"],
stdout=dashboard_log,
stderr=dashboard_log,
)
isdc = sdc_client(args.configfile, resume, args.dashboard, args.verbose)
| 67,334
| 35.634929
| 126
|
py
|
autoSDC
|
autoSDC-master/asdc/calibration.py
|
import os
import re
import datetime
import numpy as np
import pandas as pd
from scipy import signal
from scipy import interpolate
def composition(solution):
    m = re.match(r"\d+\.\d+", solution)
try:
return float(m.group())
except AttributeError:
return np.nan
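# Usage sketch: composition("0.2 M Ni") -> 0.2, while labels without a
# leading concentration (e.g. "boric acid background") return NaN.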
def experiment_start_time(header, verbose=False):
""" parse times from Jae's excel format """
# get the time
t = header.iloc[2, 1].strip()
dt = datetime.datetime.strptime(t, "%H:%M:%S %p %z")
if verbose:
print("time:", dt.time().isoformat())
# get the date
t = header.iloc[1, 1:4].values.astype(str)
t[-1] = int(float(t[-1]))
t = " ".join(map(str.strip, t))
_dt = datetime.datetime.strptime(t, "%A %B %d %Y").date()
if verbose:
print("date:", _dt.isoformat())
# combine date and time
return dt.replace(year=_dt.year, month=_dt.month, day=_dt.day)
def parse_calibration_sheet(df, name=None, verbose=False):
# pattern match our way through the header to set up extraction
h = df.iloc[0]
    concentration_pattern = r"\d+\.\d+ M \w+"
solution_ids = h.str.match(concentration_pattern, na=False)
bg_pattern = "boric acid background"
solution_ids[h == bg_pattern] = True
s = h[np.where(solution_ids)[0]].to_dict()
s = [(v, k) for k, v in s.items()]
# hardcode replicates...
solutions = []
replicate_offset = 5
for key, idx in s:
solutions.append((key, idx))
if "M" in key:
solutions.append((key, idx + replicate_offset))
# pull all the data out of the excel sheet...
data = []
for key, idx in solutions:
offset = idx + 1
if verbose:
print(key, offset)
row = {"solution": key}
# load experiment header data
h = df.iloc[:3, offset : offset + 4]
row["start_time"] = experiment_start_time(h).isoformat()
# load results
d = df.iloc[4:, offset : offset + 3]
d = d.dropna()
d.columns = ["time", "voltage", "current"]
row.update(
{col: np.array(d.values)[:, idx] for idx, col in enumerate(d.columns)}
)
data.append(row)
data = pd.DataFrame(data)
data["calibration"] = name
return data
def load_calibration_data(
calibration_file="../data/calibration/Ni-Co-calibration-curves-2019-04-23.xlsx",
):
# Ni and Co data live in different sheets
# for each, we have three solution concentrations each with two replicates
# then there's a boric acid background run
xl = pd.ExcelFile(calibration_file)
data = []
for sheet in xl.sheet_names:
name, *rest = sheet.split(None)
print(name)
df = xl.parse(sheet, header=None)
data.append(parse_calibration_sheet(df, name=name))
data = pd.concat(data, ignore_index=True)
data["composition"] = data.solution.apply(composition)
return data
def load_old_calibration(calibration_file="../data/Nickel and Boric Acid Data.xlsx"):
# load calibration data from two Excel sheets
df = pd.read_excel(calibration_file)
solution = {"0.2 M Ni": 0, "0.1 M Ni": 4, "0.025 M Ni": 8, "Ni": 12, "Co": 16}
data = []
for key, offset in solution.items():
row = {"solution": key}
row
d = df.iloc[3:, offset : offset + 3]
d = d.dropna()
d.columns = ["time", "voltage", "current"]
row.update(
{col: np.array(d.values)[:, idx] for idx, col in enumerate(d.columns)}
)
data.append(row)
data = pd.DataFrame(data)
data["calibration"] = "Nickel"
data["calibration"][data["solution"] == "Co"] = "Cobalt"
df = pd.read_excel("../data/Cobalt Series of Depositions.xlsx")
solution = {
"0.1 M Co": 2,
"0.05 M Co": 6,
"0.025 M Co": 10,
}
data2 = []
for key, offset in solution.items():
row = {"solution": key}
d = df.iloc[:, offset : offset + 3]
d = d.dropna()
d.columns = ["voltage", "current", "abscurrent"]
row.update(
{col: np.array(d.values)[:, idx] for idx, col in enumerate(d.columns)}
)
data2.append(row)
data2 = pd.DataFrame(data2)
data2["calibration"] = "Cobalt"
data = pd.concat([data, data2], ignore_index=True)
data["solution"][data["solution"].isin(("Ni", "Co"))] = "boric acid background"
data["composition"] = data.solution.apply(composition)
return data
| 4,474
| 27.870968
| 85
|
py
|
autoSDC
|
autoSDC-master/asdc/_slack.py
|
import os
import json
import time
import slack
import logging
import requests
try:
with open("slacktoken.txt", "r") as f:
        SLACK_TOKEN = f.read().strip()
except OSError:
SLACK_TOKEN = os.environ.get("ASDC_SLACK_TOKEN")
sc = slack.WebClient(SLACK_TOKEN)
class SlackHandler(logging.Handler):
# channel = "UC4U7SBV2" # brian
def __init__(self, client=sc, channel="#asdc"):
logging.Handler.__init__(self)
self.client = client
self.channel = channel
def emit(self, record):
self.client.chat_postMessage(
channel=self.channel, text=self.format(record), icon_emoji=":sciencebear:"
)
def post_message(message, sleep=1):
"""post text to #asdc
by default, sleep for 1s to respect slack's API limits
"""
sc.chat_postMessage(
channel="#asdc", text=message,
)
time.sleep(sleep)
def post_file_to_slack(text, file_name, file_bytes, file_type=None, title=None):
return requests.post(
"https://slack.com/api/files.upload",
{
"token": SLACK_TOKEN,
"filename": file_name,
"channels": "#asdc",
"filetype": file_type,
"initial_comment": text,
"title": title,
},
files={"file": file_bytes},
).json()
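# NOTE: this helper targets Slack's legacy files.upload endpoint; current
# Slack API versions replace it with the external upload flow
# (files.getUploadURLExternal / files.completeUploadExternal).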
def post_image(web_client, image_path, title="an image...", sleep=1):
"""post a figure to #asdc
by default, sleep for 1s to respect slack's API limits
"""
with open(image_path, "rb") as file_content:
status = post_file_to_slack(title, f"{title}.png", file_content)
time.sleep(sleep)
| 1,610
| 23.784615
| 86
|
py
|
autoSDC
|
autoSDC-master/asdc/ocp.py
|
import os
import glob
import json
import numpy as np
import pandas as pd
import gpflow
from asdc import analyze
from asdc import visualization
def load_dataset(data_dir):
files = glob.glob(os.path.join(data_dir, "*.json"))
df = None
for idx, datafile in enumerate(files):
with open(datafile, "r") as ff:
d = json.load(ff)
_df = pd.DataFrame.from_dict(d, orient="index").T
if df is None:
df = _df
else:
df = pd.concat((df, _df))
df = df.reset_index(drop=True)
return df
def gp_select(data_dir, plot_model=True, idx=0):
df = load_dataset(data_dir)
ocp = [
analyze.extract_open_circuit_potential(row.current, row.potential, row.segment)
for idx, row in df.iterrows()
]
x = df.position_combi.apply(lambda x: x[0])
y = df.position_combi.apply(lambda x: x[1])
# fit a GP regression model
X = np.vstack((x.values, y.values)).T
ocp = np.array(ocp)
with gpflow.defer_build():
m = gpflow.models.GPR(
X,
ocp[:, None],
kern=gpflow.kernels.RBF(2, ARD=True, variance=1.0)
+ gpflow.kernels.White(2),
mean_function=gpflow.mean_functions.Constant(),
)
m.kern.kernels[0].lengthscales.prior = gpflow.priors.LogNormal(
[np.log(30), np.log(30)], [1.0, 1.0]
)
m.compile()
opt = gpflow.train.ScipyOptimizer()
opt.minimize(m)
# make grid predictions
dx = 0.1
w = 40
xx, yy = np.meshgrid(np.arange(-w, w + dx, dx), np.arange(-w, w + dx, dx),)
h, w = xx.shape
    extent = (np.min(xx), np.max(xx), np.min(yy), np.max(yy))
gridpoints = np.c_[xx.ravel(), yy.ravel()]
mu_y, var_y = m.predict_y(gridpoints)
# no queries closer than 11mm to the edge of the wafer...
R_max = (76.2 / 2) - 11
sel = np.sqrt(np.square(xx) + np.square(yy)) > R_max
var_y[sel.flatten()] = 0
query_id = np.argmax(var_y)
query_position = gridpoints[query_id]
# plot the model...
figure_path = os.path.join(data_dir, "ocp_predictions_{}.png".format(idx))
visualization.plot_ocp_model(x, y, ocp, gridpoints, m, query_position, figure_path)
# convert query position to pd.Series format...
query_position = pd.Series(dict(x=query_position[0], y=query_position[1]))
return query_position
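# Usage sketch, assuming a directory of per-experiment JSON records with
# "current", "potential", "segment", and "position_combi" fields as loaded
# above:
#   query = gp_select("data/session-01", plot_model=True, idx=0)
#   next_x, next_y = query.x, query.y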
| 2,367
| 26.218391
| 87
|
py
|
autoSDC
|
autoSDC-master/asdc/client.py
|
import os
import sys
import json
import time
import click
import asyncio
import dataset
import functools
import websockets
import numpy as np
import pandas as pd
from ruamel import yaml
from datetime import datetime
from aioconsole import ainput, aprint
from contextlib import contextmanager, asynccontextmanager
from typing import Any, List, Dict, Optional, Tuple
import traceback
import cv2
import imageio
import sympy
from sympy import geometry
from sympy.vector import express
from sympy.vector import CoordSys3D, BodyOrienter, Point
sys.path.append(".")
from asdc import sdc
from asdc import epics
from asdc import _slack
from asdc import slackbot
from asdc import visualization
asdc_channel = "CDW5JFZAR"
try:
    with open("slacktoken.txt", "r") as f:
        BOT_TOKEN = f.read().strip()
except FileNotFoundError:
    BOT_TOKEN = None
try:
    with open("slack_bot_token.txt", "r") as f:
        CTL_TOKEN = f.read().strip()
except FileNotFoundError:
    CTL_TOKEN = None
def relative_flow(rates):
""" convert a dictionary of flow rates to ratios of each component """
total = sum(rates.values())
if total == 0.0:
return rates
return {key: rate / total for key, rate in rates.items()}
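# e.g. relative_flow({"H2O": 1.0, "KCl": 3.0}) -> {"H2O": 0.25, "KCl": 0.75};
# an all-zero rate dict is returned unchanged to avoid dividing by zero.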
def to_vec(x, frame):
""" convert python iterable coordinates to vector in specified reference frame """
return x[0] * frame.i + x[1] * frame.j
def to_coords(x, frame):
""" express coordinates in specified reference frame """
return frame.origin.locate_new("P", to_vec(x, frame))
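# Frame-helper sketch (pure sympy, no hardware required):
#   frame = CoordSys3D("wafer")
#   P = to_coords([1.0, 2.0], frame)
#   P.express_coordinates(frame)  # -> (1.0, 2.0, 0)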
class SDC(slackbot.SlackBot):
""" scanning droplet cell """
command = slackbot.CommandRegistry()
def __init__(
self,
config: Dict[str, Any] = None,
token: str = BOT_TOKEN,
resume: bool = False,
logfile: Optional[str] = None,
verbose: bool = False,
):
"""scanning droplet cell client
this is a slack client that controls all of the hardware and executes experiments.
Arguments:
config: configuration dictionary
token: slack bot token
resume: toggle auto-registration of stage and sample coordinates
logfile: file to log slackbot commands to
verbose: toggle additional debugging output
"""
super().__init__(name="sdc", token=token)
self.command.update(super().command)
self.msg_id = 0
self.verbose = verbose
self.logfile = logfile
with sdc.position.controller(ip="192.168.10.11") as pos:
initial_versastat_position = pos.current_position()
if self.verbose:
print(f"initial vs position: {initial_versastat_position}")
self.initial_versastat_position = initial_versastat_position
self.initial_combi_position = pd.Series(config["initial_combi_position"])
self.step_height = config.get("step_height", 0.0)
self.cleanup_pause = config.get("cleanup_pause", 0)
self.cell = config.get("cell", "INTERNAL")
self.speed = config.get("speed", 1e-3)
self.data_dir = config.get("data_dir", os.getcwd())
self.figure_dir = config.get("figure_dir", os.getcwd())
self.confirm = config.get("confirm", True)
self.confirm_experiment = config.get("confirm_experiment", True)
self.notify = config.get("notify_slack", True)
self.plot_cv = config.get("plot_cv", False)
self.plot_current = config.get("plot_current", False)
# define a positive height to perform characterization
h = float(config.get("characterization_height", 0.004))
h = max(0.0, h)
self.characterization_height = h
        # define a positive height for the laser reflectance scan
h = float(config.get("laser_scan_height", 0.015))
h = max(0.0, h)
self.laser_scan_height = h
self.xrays_height = self.laser_scan_height
self.camera_index = int(config.get("camera_index", 2))
# droplet workflow configuration
# TODO: document me
self.wetting_height = max(0, config.get("wetting_height", 0.0011))
self.droplet_height = max(0, config.get("droplet_height", 0.004))
self.fill_ratio = config.get("fill_rate", 0.7)
self.fill_time = config.get("fill_time", 19)
self.shrink_ratio = config.get("shrink_rate", 1.3)
self.shrink_time = config.get("shrink_time", 2)
self.test = config.get("test", False)
self.test_cell = config.get("test_cell", False)
self.solutions = config.get("solutions")
self.v_position = self.initial_versastat_position
self.c_position = self.initial_combi_position
self.initialize_z_position = config.get("initialize_z_position", False)
# which wafer direction is aligned with position controller +x direction?
self.frame_orientation = config.get("frame_orientation", "-y")
self.db_file = os.path.join(self.data_dir, config.get("db_file", "testb.db"))
self.db = dataset.connect(f"sqlite:///{self.db_file}")
self.experiment_table = self.db["experiment"]
self.current_threshold = 1e-5
self.resume = resume
# define reference frames
# load camera and laser offsets from configuration file
camera_offset = config.get("camera_offset", [38.3, -0.4])
laser_offset = config.get("laser_offset", [38, -0.3])
xray_offset = config.get("xray_offset", [44.74, -4.4035])
self.cell_frame = CoordSys3D("cell")
self.camera_frame = self.cell_frame.locate_new(
"camera",
camera_offset[0] * self.cell_frame.i + camera_offset[1] * self.cell_frame.j,
)
self.laser_frame = self.cell_frame.locate_new(
"laser",
laser_offset[0] * self.cell_frame.i + laser_offset[1] * self.cell_frame.j,
)
self.xray_frame = self.cell_frame.locate_new(
"xray",
xray_offset[0] * self.cell_frame.i + xray_offset[1] * self.cell_frame.j,
)
if self.resume:
self.stage_frame = self.sync_coordinate_systems(
orientation=self.frame_orientation,
register_initial=True,
resume=self.resume,
)
else:
self.stage_frame = self.sync_coordinate_systems(
orientation=self.frame_orientation, register_initial=False
)
adafruit_port = config.get("adafruit_port", "COM9")
pump_array_port = config.get("pump_array_port", "COM10")
self.backfill_duration = config.get("backfill_duration", 15)
try:
self.pump_array = sdc.pump.PumpArray(
self.solutions,
port=pump_array_port,
counterpump_port=adafruit_port,
timeout=1,
)
        except Exception:
print("could not connect to pump array")
self.pump_array = None
self.reflectometer = sdc.microcontroller.Reflectometer(port=adafruit_port)
self.light = sdc.microcontroller.Light(port=adafruit_port)
def get_last_known_position(self, x_versa, y_versa, resume=False):
# load last known combi position and update internal state accordingly
refs = pd.DataFrame(self.experiment_table.all())
        if not resume or refs.size == 0:
init = self.initial_combi_position
print(f"starting from {init}")
ref = pd.Series(
{
"x_versa": x_versa,
"y_versa": y_versa,
"x_combi": init.x,
"y_combi": init.y,
}
)
else:
# arbitrarily grab the first position
# TODO: verify that this record comes from the current session...
ref = refs.iloc[0].to_dict()
ref["x_versa"] *= 1e3
ref["y_versa"] *= 1e3
ref = pd.Series(ref)
print(f"resuming from {ref}")
return ref
def current_versa_xy(self):
""" get current stage coords in mm """
with sdc.position.controller() as pos:
x_versa = pos.x * 1e3
y_versa = pos.y * 1e3
return x_versa, y_versa
@command
async def locate_wafer_center(self, args: str, msgdata: Dict, web_client: Any):
"""align reference frames to wafer center
identify a circumcircle corresponding three points on the wafer edge
"""
wafer_edge_coords = []
print(
"identify coordinates of three points on the wafer edge. (start with the flat corners)"
)
for idx in range(3):
await ainput("press enter to register coordinates...", loop=self.loop)
wafer_edge_coords.append(self.current_versa_xy())
# unpack triangle coordinates
tri = geometry.Triangle(*wafer_edge_coords)
# center is the versascan coordinate such that the camera frame is on the wafer origin
center = np.array(tri.circumcenter, dtype=float)
print(wafer_edge_coords)
print(center)
# move the stage to focus the camera on the center of the wafer...
current = np.array(self.current_versa_xy())
delta = center - current
# convert to meters!
delta = delta * 1e-3
print(delta)
# specify updates in the stage frame...
async with sdc.position.acontroller(loop=self.loop, speed=self.speed) as stage:
await ainput("press enter to allow lateral cell motion...", loop=self.loop)
# move horizontally
f = functools.partial(stage.update, delta=delta)
await self.loop.run_in_executor(None, f)
# set up the stage reference frame
# relative to the last recorded positions
cam = self.camera_frame
if self.frame_orientation == "-y":
_stage = cam.orient_new(
"_stage", BodyOrienter(sympy.pi / 2, sympy.pi, 0, "ZYZ")
)
else:
raise NotImplementedError
# find the origin of the combi wafer in the coincident stage frame
v = 0.0 * cam.i + 0.0 * cam.j
combi_origin = v.to_matrix(_stage)
# truncate to 2D vector
combi_origin = np.array(combi_origin).squeeze()[:-1]
# now find the origin of the stage frame
# xv_init = np.array([ref['x_versa'], ref['y_versa']])
xv_init = np.array(center)
l = xv_init - combi_origin
v_origin = l[1] * cam.i + l[0] * cam.j
# construct the shifted stage frame
stage = _stage.locate_new("stage", v_origin)
self.stage_frame = stage
def sync_coordinate_systems(
self, orientation=None, register_initial=False, resume=False
):
with sdc.position.controller() as pos:
# map m -> mm
x_versa = pos.x * 1e3
y_versa = pos.y * 1e3
ref = self.get_last_known_position(x_versa, y_versa, resume=resume)
# set up the stage reference frame
# relative to the last recorded positions
cell = self.cell_frame
if orientation == "-y":
_stage = cell.orient_new(
"_stage", BodyOrienter(sympy.pi / 2, sympy.pi, 0, "ZYZ")
)
else:
raise NotImplementedError
# find the origin of the combi wafer in the coincident stage frame
v = ref["x_combi"] * cell.i + ref["y_combi"] * cell.j
combi_origin = v.to_matrix(_stage)
# truncate to 2D vector
combi_origin = np.array(combi_origin).squeeze()[:-1]
# now find the origin of the stage frame
xv_init = np.array([ref["x_versa"], ref["y_versa"]])
if resume:
offset = np.array([x_versa, y_versa]) - xv_init
print(offset)
# xv_init += offset
l = xv_init - combi_origin
v_origin = l[1] * cell.i + l[0] * cell.j
# construct the shifted stage frame
stage = _stage.locate_new("stage", v_origin)
return stage
def compute_position_update(self, x: float, y: float, frame: Any) -> np.ndarray:
"""compute frame update to map combi coordinate to the specified reference frame
Arguments:
x: wafer x coordinate (`mm`)
y: wafer y coordinate (`mm`)
frame: target reference frame (`cell`, `camera`, `laser`)
Returns:
stage frame update vector (in meters)
Important:
all reference frames are in `mm`; the position controller works with `meters`
"""
P = to_coords([x, y], frame)
target_coords = np.array(
            P.express_coordinates(self.stage_frame), dtype=float
)
print(target_coords)
with sdc.position.controller() as pos:
# map m -> mm
current_coords = np.array((pos.x, pos.y, 0.0)) * 1e3
delta = target_coords - current_coords
# convert from mm to m
delta = delta * 1e-3
return delta
async def move_stage(
self,
x: float,
y: float,
frame: Any,
stage: Any = None,
threshold: float = 0.0001,
):
"""specify target positions in combi reference frame
Arguments:
x: wafer x coordinate (`mm`)
y: wafer y coordinate (`mm`)
frame: target reference frame (`cell`, `camera`, `laser`)
stage: stage control interface
threshold: distance threshold in meters
Important:
If a `stage` interface is passed, [move_stage][asdc.client.SDC.move_stage] does not traverse the `z` axis at all!
"""
async def _execute_update(stage, delta, loop, confirm, verbose):
if confirm:
await ainput("press enter to allow lateral cell motion...", loop=loop)
# move horizontally
f = functools.partial(stage.update, delta=delta)
await loop.run_in_executor(None, f)
if self.verbose:
print(stage.current_position())
# map position update to position controller frame
delta = self.compute_position_update(x, y, frame)
if np.abs(delta).sum() > threshold:
if self.verbose:
print(f"position update: {delta} (mm)")
# if self.notify:
# slack.post_message(f'*confirm update*: (delta={delta})')
if stage is None:
async with sdc.position.acontroller(
loop=self.loop, z_step=self.step_height, speed=self.speed
) as stage:
await _execute_update(
stage, delta, self.loop, self.confirm, self.verbose
)
else:
await _execute_update(
stage, delta, self.loop, self.confirm, self.verbose
)
if self.initialize_z_position:
# TODO: define the lower z baseline after the first move
await ainput(
"*initialize z position*: press enter to continue...", loop=self.loop
)
self.initialize_z_position = False
# update internal tracking of stage position
if stage is None:
with sdc.position.controller() as stage:
self.v_position = stage.current_position()
else:
self.v_position = stage.current_position()
return
@command
async def move(self, args: str, msgdata: Dict, web_client: Any):
"""slack bot command to move the stage
A thin json wrapper for [move_stage][asdc.client.SDC.move_stage].
Arguments:
args: json string containing command arguments
msgdata: slack message metadata
            web_client: slack WebClient connection
Note:
json arguments:
- `x`: wafer x coordinate (`mm`)
- `y`: wafer y coordinate (`mm`)
- `reference_frame`: target reference frame (`cell`, `camera`, `laser`)
"""
args = json.loads(args)
if self.verbose:
print(args)
reference = args.get("reference_frame", "cell")
frame = {
"cell": self.cell_frame,
"laser": self.laser_frame,
"camera": self.camera_frame,
"xray": self.xray_frame,
}[reference]
await self.move_stage(args["x"], args["y"], frame)
# @ctl -- update the semaphore in the controller process
await self.dm_controller(web_client, "<@UHNHM7198> update position is set.")
def _scale_flow(self, rates: Dict, nominal_rate: float = 0.5) -> Dict:
""" high nominal flow_rate for running out to steady state """
total_rate = sum(rates.values())
if total_rate <= 0.0:
total_rate = 1.0
return {key: val * nominal_rate / total_rate for key, val in rates.items()}
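    # e.g. _scale_flow({"A": 1.0, "B": 1.0}, nominal_rate=0.5)
    # -> {"A": 0.25, "B": 0.25}: ratios are preserved and the rates sum to
    # nominal_rate.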
async def establish_droplet(
self, x_wafer: float, y_wafer: float, flow_instructions: Dict
):
""" align the stage with a sample point, form a droplet, and flush lines if needed """
rates = flow_instructions.get("rates")
cell_fill_rates = self._scale_flow(rates, nominal_rate=0.5)
line_flush_rates = self._scale_flow(rates, nominal_rate=1.0)
# if relative flow rates don't match, purge solution
line_flush_duration = flow_instructions.get("hold_time", 0)
line_flush_needed = relative_flow(rates) != relative_flow(
self.pump_array.flow_setpoint
)
# droplet workflow -- start at zero
print("starting droplet workflow")
async with sdc.position.z_step(
loop=self.loop, height=self.wetting_height, speed=self.speed
) as stage:
if self.cleanup_pause > 0:
print("cleaning up...")
self.pump_array.stop_all(counterbalance="full", fast=True)
time.sleep(self.cleanup_pause)
await self.move_stage(x_wafer, y_wafer, self.cell_frame)
height_difference = self.droplet_height - self.wetting_height
height_difference = max(0, height_difference)
async with sdc.position.z_step(
loop=self.loop, height=height_difference, speed=self.speed
):
# counterpump slower to fill the droplet
print("differentially pumping to grow the droplet")
self.pump_array.set_rates(
cell_fill_rates,
counterpump_ratio=self.fill_ratio,
start=True,
fast=True,
)
time.sleep(self.fill_time)
# drop down to wetting height
# counterpump faster to shrink the droplet
print("differentially pumping to shrink the droplet")
self.pump_array.set_rates(
cell_fill_rates,
counterpump_ratio=self.shrink_ratio,
start=True,
fast=True,
)
time.sleep(self.shrink_time)
print("equalizing differential pumping rate")
self.pump_array.set_rates(
line_flush_rates, counterpump_ratio=0.95, start=True, fast=True
)
# flush lines with cell in contact
if line_flush_needed:
print("performing line flush")
time.sleep(line_flush_duration)
time.sleep(3)
print(f"stepping flow rates to {rates}")
self.pump_array.set_rates(rates, counterpump_ratio=0.95, start=True, fast=True)
return
@command
async def run_experiment(self, args: str, msgdata: Dict, web_client: Any):
""" run an SDC experiment """
# args should contain a sequence of SDC experiments -- basically the "instructions"
# segment of an autoprotocol protocol
# that comply with the SDC experiment schema (TODO: finalize and enforce schema)
instructions = json.loads(args)
# check for an instruction group name/intent
intent = instructions[0].get("intent")
experiment_id = instructions[0].get("experiment_id")
if intent is not None:
header = instructions[0]
instructions = instructions[1:]
x_combi, y_combi = header.get("x"), header.get("y")
await self.establish_droplet(x_combi, y_combi, instructions[0])
meta = {
"intent": intent,
"experiment_id": experiment_id,
"instructions": json.dumps(instructions),
"x_combi": float(x_combi),
"y_combi": float(y_combi),
"x_versa": self.v_position[0],
"y_versa": self.v_position[1],
"z_versa": self.v_position[2],
"flag": False,
}
# wrap the whole experiment in a transaction
# this way, if the experiment is cancelled, it's not committed to the db
with self.db as tx:
stem = "asdc"
meta["id"] = tx["experiment"].insert(meta)
datafile = "{}_data_{:03d}.csv".format(stem, meta["id"])
summary = "-".join(step["op"] for step in instructions)
_msg = f"experiment *{meta['id']}*: {summary}"
if self.confirm_experiment:
if self.notify:
web_client.chat_postMessage(
channel="#asdc",
text=f"*confirm*: {_msg}",
icon_emoji=":sciencebear:",
)
else:
print(f"*confirm*: {_msg}")
await ainput(
"press enter to allow running the experiment...", loop=self.loop
)
elif self.notify:
web_client.chat_postMessage(
channel="#asdc", text=_msg, icon_emoji=":sciencebear:"
)
f = functools.partial(
sdc.experiment.run, instructions, cell=self.cell, verbose=self.verbose
)
if self.test_cell:
web_client.chat_postMessage(
channel="#asdc",
text=f"we would run the experiment here...",
icon_emoji=":sciencebear:",
)
await self.loop.run_in_executor(None, time.sleep, 10)
else:
results, metadata = await self.loop.run_in_executor(None, f)
metadata["parameters"] = json.dumps(metadata.get("parameters"))
if self.pump_array:
metadata["flow_setpoint"] = json.dumps(
self.pump_array.flow_setpoint
)
# TODO: define heuristic checks (and hard validation) as part of the experimental protocol API
# heuristic check for experimental error signals?
if np.median(np.abs(results["current"])) < self.current_threshold:
print(
f"WARNING: median current below {self.current_threshold} threshold"
)
if self.notify:
msg = f":terriblywrong: *something went wrong:* median current below {self.current_threshold} threshold"
web_client.chat_postMessage(
channel="#asdc", text=msg, icon_emoji=":sciencebear:"
)
meta.update(metadata)
meta["datafile"] = datafile
tx["experiment"].update(meta, ["id"])
# store SDC results in external csv file
results.to_csv(os.path.join(self.data_dir, datafile))
if self.plot_current:
figpath = os.path.join(
self.figure_dir, "current_plot_{}.png".format(meta["id"])
)
visualization.plot_i(
results["elapsed_time"], results["current"], figpath=figpath
)
if self.notify:
web_client.chat_postMessage(
channel="#asdc",
text=f"finished experiment {meta['id']}: {summary}",
icon_emoji=":sciencebear:",
)
_slack.post_image(
web_client, figpath, title=f"current vs time {meta['id']}"
)
if self.plot_cv:
figpath = os.path.join(
self.figure_dir, "cv_plot_{}.png".format(meta["id"])
)
visualization.plot_cv(
results["potential"],
results["current"],
segment=results["segment"],
figpath=figpath,
)
if self.notify:
try:
_slack.post_image(
web_client, figpath, title=f"CV {meta['id']}"
)
                        except Exception as err:
                            print(f"slack image post failed: {err}")
# if the last op is a post_flush, flush the lines with the set rates...
final_op = instructions[-1]
if final_op.get("op") == "post_flush":
rates = final_op.get("rates")
duration = final_op.get("duration")
print(f"flushing the lines with {rates} for {duration} s")
self.pump_array.set_rates(
rates, counterpump_ratio=0.95, start=True, fast=True
)
time.sleep(duration)
# # run cleanup
# self.pump_array.stop_all(counterbalance='full', fast=True)
# time.sleep(0.25)
# async with sdc.position.z_step(loop=self.loop, height=self.wetting_height, speed=self.speed):
# if self.cleanup_pause > 0:
# time.sleep(self.cleanup_pause)
# self.pump_array.counterpump.stop()
await self.dm_controller(web_client, "<@UHNHM7198> go")
@command
async def run_characterization(self, args: str, msgdata: Dict, web_client: Any):
"""perform cell cleanup and characterization
the header instruction should contain a list of primary keys
corresponding to sample points that should be characterized.
run_characterization [
{"intent": "characterize", "experiment_id": 22},
{"op": "surface-cam"}
{"op": "laser-reflectance"}
{"op": "xrays"}
]
"""
# the header block should contain the `experiment_id`
# for the spots to be characterized
instructions = json.loads(args)
header = instructions[0]
instructions = instructions[1:]
# check for an instruction group name/intent
intent = header.get("intent")
experiment_id = header.get("experiment_id")
# get all relevant samples
samples = self.db["experiment"].find(
experiment_id=experiment_id, intent="deposition"
)
if instructions[0].get("op") == "set_flow":
flow_instructions = instructions[0]
for sample in samples:
x_combi = sample.get("x_combi")
y_combi = sample.get("y_combi")
primary_key = sample.get("id")
await self.establish_droplet(x_combi, y_combi, flow_instructions)
# run cleanup and optical characterization
self.pump_array.stop_all(counterbalance="full", fast=True)
time.sleep(0.25)
characterization_ops = set(i.get("op") for i in instructions if "op" in i)
async with sdc.position.z_step(
loop=self.loop, height=self.wetting_height, speed=self.speed
):
if self.cleanup_pause > 0:
time.sleep(self.cleanup_pause)
height_difference = self.characterization_height - self.wetting_height
height_difference = max(0, height_difference)
async with sdc.position.z_step(
loop=self.loop, height=height_difference, speed=self.speed
):
# run laser and camera scans
samples = self.db["experiment"].find(
experiment_id=experiment_id, intent="deposition"
)
for idx, sample in enumerate(samples):
x_combi = sample.get("x_combi")
y_combi = sample.get("y_combi")
primary_key = sample.get("id")
if "surface-cam" in characterization_ops:
if self.notify:
web_client.chat_postMessage(
channel="#asdc",
text=f"inspecting deposit quality",
icon_emoji=":sciencebear:",
)
await self.move_stage(x_combi, y_combi, self.camera_frame)
await self._capture_image(primary_key=primary_key)
image_name = f"deposit_pic_{primary_key:03d}.png"
figpath = os.path.join(self.data_dir, image_name)
try:
_slack.post_image(
web_client, figpath, title=f"deposit {primary_key}"
)
                        except Exception as err:
                            print(f"slack image post failed: {err}")
if self.notify:
web_client.chat_postMessage(
channel="#asdc",
text=f"acquiring laser reflectance data",
icon_emoji=":sciencebear:",
)
async with sdc.position.z_step(
loop=self.loop, height=self.laser_scan_height, speed=self.speed
) as stage:
# laser scan
if "laser-reflectance" in characterization_ops:
await self.move_stage(
x_combi, y_combi, self.laser_frame, stage=stage
)
await self._reflectance(
primary_key=primary_key, stage=stage
)
# xray scan
if "xrays" in characterization_ops:
await self.move_stage(x_combi, y_combi, self.xray_frame)
time.sleep(1)
prefix = f"sdc-26-{primary_key:04d}"
print(f"starting x-rays for {prefix}")
epics.dispatch_xrays(
prefix, os.path.join(self.data_dir, "xray")
)
await self.move_stage(x_combi, y_combi, self.cell_frame)
self.pump_array.counterpump.stop()
await self.dm_controller(web_client, "<@UHNHM7198> go")
@command
async def xrays(self, args: str, msgdata: Dict, web_client: Any):
"""perform 06BM x-ray routine
        `@sdc xrays {"experiment_id": 1}`
the header instruction should contain a list of primary keys
corresponding to sample points that should be characterized.
"""
# the header block should contain
instructions = json.loads(args)
experiment_id = instructions.get("experiment_id")
# get all relevant samples
samples = self.db["experiment"].find(experiment_id=experiment_id)
async with sdc.position.z_step(
loop=self.loop, height=self.xrays_height, speed=self.speed
):
for sample in samples:
print("xrd")
web_client.chat_postMessage(
channel="#asdc",
text=f"x-ray ops go here...",
icon_emoji=":sciencebear:",
)
x_combi = sample.get("x_combi")
y_combi = sample.get("y_combi")
primary_key = sample.get("id")
await self.move_stage(x_combi, y_combi, self.xray_frame)
time.sleep(1)
prefix = f"sdc-26-{primary_key:04d}"
print(f"starting x-rays for {prefix}")
epics.dispatch_xrays(prefix, os.path.join(self.data_dir, "xray"))
# move back to the cell frame for the second spot
await self.move_stage(x_combi, y_combi, self.cell_frame)
# await self.dm_controller(web_client, '<@UHNHM7198> go')
@command
async def droplet(self, args: str, msgdata: Dict, web_client: Any):
"""slack bot command for prototyping droplet contact routine
#### json arguments
| Name | Type | Description | Default |
|------------------|-------|-----------------------------------------------------|---------|
        | `height`         | float | z setting to grow the droplet                       | 4mm     |
| `wetting_height` | float | z setting to wet the droplet to the surface | 1.1mm |
| `fill_rate` | float | counterpumping ratio during droplet growth | 0.75 |
| `fill_time` | float | droplet growth duration (s) | None |
| `shrink_rate` | float | counterpumping ratio during droplet wetting phase | 1.1 |
| `shrink_time` | float | droplet wetting duration (s) | None |
| `flow_rate` | float | total flow rate during droplet formation (mL/min) | 0.5 |
| `target_rate` | float | final flow rate after droplet formation (mL/min) | 0.05 |
| `cleanup` | float | duration of pre-droplet-formation cleanup siphoning | 0 |
| `stage_speed` | float | stage velocity during droplet formation op | 0.001 |
| `solutions` | List[str] | list of solutions to pump with | None |
"""
instructions = json.loads(args)
prep_height = max(0, instructions.get("height", 0.004))
wetting_height = max(0, instructions.get("wetting_height", 0.0011))
fill_ratio = instructions.get("fill_rate", 0.75)
fill_time = instructions.get("fill_time", None)
shrink_ratio = instructions.get("shrink_rate", 1.1)
shrink_time = instructions.get("shrink_time", None)
flow_rate = instructions.get("flow_rate", 0.5)
target_rate = instructions.get("target_rate", 0.05)
cleanup_duration = instructions.get("cleanup", 0)
stage_speed = instructions.get("stage_speed", self.speed)
solutions = instructions.get("solutions")
# stage speed is specified in m/s
stage_speed = min(stage_speed, 1e-3)
stage_speed = max(stage_speed, 1e-5)
# just pump from the first syringe pump
# solution = next(iter(self.solutions))
if solutions is None:
solution = self.solutions[0]
s = next(iter(solution))
_rates = {s: flow_rate}
elif type(solutions) is list:
_rates = {s: 1.0 for s in solutions}
elif type(solutions) is dict:
_rates = solutions
rates = self._scale_flow(_rates, nominal_rate=flow_rate)
target_rates = self._scale_flow(_rates, nominal_rate=target_rate)
print(f"rates: {rates}")
print(f"target_rates: {target_rates}")
# start at zero
async with sdc.position.z_step(
loop=self.loop, height=wetting_height, speed=stage_speed
):
if cleanup_duration > 0:
print("cleaning up...")
self.pump_array.stop_all(counterbalance="full", fast=True)
time.sleep(cleanup_duration)
height_difference = prep_height - wetting_height
height_difference = max(0, height_difference)
async with sdc.position.z_step(
loop=self.loop, height=height_difference, speed=stage_speed
):
# counterpump slower to fill the droplet
print("filling droplet")
self.pump_array.set_rates(
rates, counterpump_ratio=fill_ratio, start=True, fast=True
)
fill_start = time.time()
if fill_time is None:
await ainput(
"*filling droplet*: press enter to continue...", loop=self.loop
)
else:
time.sleep(fill_time)
fill_time = time.time() - fill_start
# drop down to wetting height
# counterpump faster to shrink the droplet
print("shrinking droplet")
self.pump_array.set_rates(rates, counterpump_ratio=shrink_ratio, fast=True)
shrink_start = time.time()
if shrink_time is None:
await ainput(
"*shrinking droplet*: press enter to continue...", loop=self.loop
)
else:
time.sleep(shrink_time)
shrink_time = time.time() - shrink_start
print("equalizing differential pumping rate")
self.pump_array.set_rates(rates, fast=True, start=True)
# drop down to contact height
instructions["fill_time"] = fill_time
instructions["shrink_time"] = shrink_time
time.sleep(3)
print(f"stepping flow rates to {rates}")
self.pump_array.set_rates(
target_rates, counterpump_ratio=0.95, fast=True, start=True
)
message = f"contact routine with {json.dumps(instructions)}"
web_client.chat_postMessage(
channel="#asdc", text=message, icon_emoji=":sciencebear:"
)
return
@command
async def checkpoint(self, args: str, msgdata: Dict, web_client: Any):
""" hold until user input is given to allow experiment to proceed """
if self.notify:
_slack.post_message("*checkpoint reached*")
await ainput("*checkpoint*: press enter to continue...", loop=self.loop)
return await self.dm_controller(web_client, "<@UHNHM7198> go")
@command
async def flag(self, args: str, msgdata: Dict, web_client: Any):
"""mark a datapoint as bad
TODO: format checking
"""
primary_key = int(args)
with self.db as tx:
tx["experiment"].update({"id": primary_key, "flag": True}, ["id"])
@command
async def coverage(self, args: str, msgdata: Dict, web_client: Any):
""" record deposition coverage on (0.0,1.0). """
primary_key, text = args.split(" ", 1) # need to do format checking...
primary_key = int(primary_key)
coverage_estimate = float(text)
if coverage_estimate < 0.0 or coverage_estimate > 1.0:
_slack.post_message(
f":terriblywrong: *error:* coverage estimate should be in the range (0.0, 1.0)"
)
else:
with self.db as tx:
tx["experiment"].update(
{"id": primary_key, "coverage": coverage_estimate}, ["id"]
)
@command
async def refl(self, args: str, msgdata: Dict, web_client: Any):
""" record the reflectance of the deposit (0.0,inf). """
primary_key, text = args.split(" ", 1) # need to do format checking...
primary_key = int(primary_key)
reflectance_readout = float(text)
if reflectance_readout < 0.0:
_slack.post_message(
f":terriblywrong: *error:* reflectance readout should be positive"
)
else:
with self.db as tx:
tx["experiment"].update(
{"id": primary_key, "reflectance": reflectance_readout}, ["id"]
)
async def reflectance_linescan(
self, stepsize: float = 0.00015, n_steps: int = 32, stage: Any = None
) -> Tuple[List[float], List[float]]:
"""perform a laser reflectance linescan
Arguments:
stepsize: distance between linescan measurements (meters)
n_steps: number of measurements in the scan
stage: stage controller
Returns:
mean: list of reflectance values forming the linescan
var: uncertainty for reflectances in the linescan
Warning:
`reflectance_linescan` translates the sample stage.
Ensure that the z-stage is such that the cell is not in contact
with the sample to avoid dragging, which could potentially damage
the sample or the cell.
"""
mean, var = [], []
if stage is None:
async with sdc.position.acontroller(
loop=self.loop, speed=self.speed
) as stage:
for step in range(n_steps):
reflectance_data = self.reflectometer.collect(timeout=2)
mean.append(reflectance_data)
# mean.append(np.mean(reflectance_data))
# var.append(np.var(reflectance_data))
stage.update_y(-stepsize)
time.sleep(0.25)
else:
for step in range(n_steps):
reflectance_data = self.reflectometer.collect(timeout=2)
mean.append(reflectance_data)
stage.update_y(-stepsize)
time.sleep(0.25)
return mean, var
async def _reflectance(self, primary_key=None, stage=None):
# get the stage position at the start of the linescan
with sdc.position.controller() as s:
metadata = {"reflectance_xv": s.x, "reflectance_yv": s.y}
mean, var = await self.reflectance_linescan(stage=stage)
if primary_key is not None:
filename = f"deposit_reflectance_{primary_key:03d}.json"
metadata["id"] = primary_key
metadata["reflectance_file"] = filename
with self.db as tx:
tx["experiment"].update(metadata, ["id"])
with open(os.path.join(self.data_dir, filename), "w") as f:
data = {"reflectance": mean, "variance": var}
json.dump(data, f)
return mean
@command
async def reflectance(self, args: str, msgdata: Dict, web_client: Any):
""" record the reflectance of the deposit (0.0,inf). """
if args is None:
primary_key = None
elif len(args) > 0:
primary_key = int(args)
mean_reflectance = await self._reflectance(primary_key=primary_key)
print("reflectance:", mean_reflectance)
@contextmanager
def light_on(self):
""" context manager to toggle the light on and off for image acquisition """
self.light.set("on")
yield
self.light.set("off")
async def _capture_image(self, primary_key=None):
"""capture an image from the webcam.
pass an experiment index to serialize metadata to db
"""
with self.light_on():
camera = cv2.VideoCapture(self.camera_index)
# give the camera enough time to come online before reading data...
time.sleep(0.5)
status, frame = camera.read()
# BGR --> RGB format
frame = frame[..., ::-1].copy()
if primary_key is not None:
image_name = f"deposit_pic_{primary_key:03d}.png"
with sdc.position.controller() as stage:
metadata = {
"id": primary_key,
"image_xv": stage.x,
"image_yv": stage.y,
"image_name": image_name,
}
with self.db as tx:
tx["experiment"].update(metadata, ["id"])
else:
image_name = "test-image.png"
imageio.imsave(os.path.join(self.data_dir, image_name), frame)
camera.release()
return
@command
async def imagecap(self, args: str, msgdata: Dict, web_client: Any):
"""capture an image from the webcam.
pass an experiment index to serialize metadata to db
"""
if args is not None and len(args) > 0:
primary_key = int(args)
else:
primary_key = None
await self._capture_image(primary_key=primary_key)
@command
async def bubble(self, args: str, msgdata: Dict, web_client: Any):
"""slack bot command to record a bubble in the deposit
trigger with `@sdc bubble ${primary_key: int}`.
this updates the corresponding record in the sqlite database
with `has_bubble=True`.
"""
primary_key = args # need to do format checking...
primary_key = int(primary_key)
with self.db as tx:
tx["experiment"].update({"id": primary_key, "has_bubble": True}, ["id"])
@command
async def comment(self, args: str, msgdata: Dict, web_client: Any):
""" add a comment """
primary_key, text = args.split(" ", 1) # need to do format checking...
primary_key = int(primary_key)
row = self.experiment_table.find_one(id=primary_key)
if row["comment"]:
comment = row["comment"]
comment += "; "
comment += text
else:
comment = text
with self.db as tx:
tx["experiment"].update({"id": primary_key, "comment": comment}, ["id"])
async def dm_controller(self, web_client, text, channel="#asdc"):
# , channel='DHNHM74TU'):
web_client.chat_postMessage(
channel=channel, text=text, icon_emoji=":sciencebear:"
)
@command
async def dm(self, args: str, msgdata: Dict, web_client: Any):
""" echo random string to DM channel """
dm_channel = "DHNHM74TU"
print("got a dm command: ", args)
        web_client.chat_postMessage(
            channel=dm_channel, text=args, icon_emoji=":sciencebear:"
        )
@command
async def stop_pumps(self, args: str, msgdata: Dict, web_client: Any):
""" shut off the syringe and counterbalance pumps """
self.pump_array.stop_all(counterbalance="off")
@command
async def _abort_running_handlers(self, args: str, msgdata: Dict, web_client: Any):
"""cancel all currently running task handlers...
Warning:
does not do any checks on the potentiostat -- don't call this while an experiment is running...
we could register the coroutine address when we start it up, and broadcast that so it's cancellable...?
"""
channel = "<@UC537488J>"
text = f"sdc: {msgdata['user']} said abort_running_handlers"
print(text)
# dm UC537488J (brian)
        web_client.chat_postMessage(
            channel=channel, text=text, icon_emoji=":sciencebear:"
        )
        # NOTE: this early return intentionally leaves the cancellation logic
        # below unreachable; drop it to re-enable task teardown.
        return
current_task = asyncio.current_task()
for task in asyncio.all_tasks():
if task._coro == current_task._coro:
continue
if task._coro.__name__ == "handle":
print(f"killing task {task._coro}")
task.cancel()
# ask the controller to cancel the caller task too...!
await self.dm_controller(web_client, "<@UHNHM7198> abort_running_handlers")
@click.command()
@click.argument("config-file", type=click.Path())
@click.option("--resume/--no-resume", default=False)
@click.option("--verbose/--no-verbose", default=False)
def sdc_client(config_file, resume, verbose):
with open(config_file, "r") as f:
config = yaml.safe_load(f)
experiment_root, _ = os.path.split(config_file)
# specify target file relative to config file
target_file = config.get("target_file")
config["target_file"] = os.path.join(experiment_root, target_file)
data_dir = config.get("data_dir")
if data_dir is None:
config["data_dir"] = os.path.join(experiment_root, "data")
figure_dir = config.get("figure_dir")
if figure_dir is None:
config["figure_dir"] = os.path.join(experiment_root, "figures")
os.makedirs(config["data_dir"], exist_ok=True)
os.makedirs(config["figure_dir"], exist_ok=True)
# make sure step_height is positive!
if config["step_height"] is not None:
config["step_height"] = abs(config["step_height"])
logfile = config.get("command_logfile", "commands.log")
logfile = os.path.join(config["data_dir"], logfile)
sdc_bot = SDC(
verbose=verbose, config=config, logfile=logfile, token=BOT_TOKEN, resume=resume
)
asyncio.run(sdc_bot.main())
if __name__ == "__main__":
sdc_client()
| 49,099
| 35.20944
| 129
|
py
|
autoSDC
|
autoSDC-master/asdc/controller.py
|
import os
import sys
import json
import time
import click
import asyncio
import dataset
import functools
import numpy as np
import pandas as pd
from ruamel import yaml
from aioconsole import ainput
sys.path.append("../scirc")
sys.path.append(".")
import scirc
from asdc import slack
BOT_TOKEN = open("slack_bot_token.txt", "r").read().strip()
SDC_TOKEN = open("slacktoken.txt", "r").read().strip()
def load_experiment_files(csv_files, dir="."):
dir, _ = os.path.split(dir)
experiments = pd.concat(
(
pd.read_csv(os.path.join(dir, csv_file), index_col=0)
for csv_file in csv_files
),
ignore_index=True,
)
return experiments
def load_experiment_json(experiment_files, dir="."):
""" an experiment file contains a json list of experiment definitions """
dir, _ = os.path.split(dir)
experiments = None
for experiment_file in experiment_files:
with open(os.path.join(dir, experiment_file), "r") as f:
if experiments is None:
experiments = json.load(f)
            else:
                # extend (not append) so multiple files merge into one flat list
                experiments.extend(json.load(f))
return experiments
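# Illustrative layout (hypothetical file contents): each experiment file
# holds a JSON list, so files containing [{"op": "cv"}] and
# [{"op": "deposit"}, {"op": "corrode"}] combine into one three-element list.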
class Controller(scirc.SlackClient):
""" autonomous scanning droplet cell client """
command = scirc.CommandRegistry()
def __init__(self, config=None, verbose=False, logfile=None, token=BOT_TOKEN):
super().__init__(verbose=verbose, logfile=logfile, token=token)
self.command.update(super().command)
self.msg_id = 0
self.update_event = asyncio.Event(loop=self.loop)
self.confirm = config.get("confirm", True)
self.notify = config.get("notify_slack", True)
self.data_dir = config.get("data_dir", os.getcwd())
self.figure_dir = config.get("figure_dir", os.getcwd())
self.db_file = os.path.join(self.data_dir, config.get("db_file", "test.db"))
self.db = dataset.connect(f"sqlite:///{self.db_file}")
self.experiment_table = self.db["experiment"]
        self.targets = pd.read_csv(
            config["target_file"], index_col=0, dtype={"x": float, "y": float}
        )
self.experiments = load_experiment_json(
config["experiment_file"], dir=self.data_dir
)
async def post(self, msg, ws, channel):
# TODO: move this to the base Client class...
response = {
"id": self.msg_id,
"type": "message",
"channel": channel,
"text": msg,
}
self.msg_id += 1
await ws.send_str(json.dumps(response))
async def dm_sdc(self, text, channel="DHY5REQ0H"):
response = await self.slack_api_call(
"chat.postMessage",
data={
"channel": channel,
"text": text,
"as_user": False,
"username": "ctl",
},
token=SDC_TOKEN,
)
def load_experiment_indices(self):
# indices start at 0...
# sqlite integer primary keys start at 1...
df = pd.DataFrame(self.experiment_table.all())
target_idx = self.experiment_table.count()
experiment_idx = self.experiment_table.count(flag=False)
return df, target_idx, experiment_idx
def get_next_experiment(self, experiment_idx):
if len(self.experiments) == 1:
experiment = self.experiments[0]
else:
experiment = self.experiments[experiment_idx]
return experiment
@command
async def go(self, ws, msgdata, args):
"""keep track of target positions and experiment list
target and experiment indices start at 0
sqlite integer primary keys start at 1...
"""
# need to be more subtle here: filter experiment conditions on 'ok' or 'flag'
# but also: filter everything on wafer_id, and maybe session_id?
# also: how to allow cancelling tasks and adding combi spots to a queue to redo?
target_idx = self.db["experiment"].count()
target = self.targets.iloc[target_idx]
print(target)
experiment_idx = self.db["experiment"].count(flag=False)
experiment = self.get_next_experiment(experiment_idx)
print(experiment)
# send the move command -- message @sdc
self.update_event.clear()
args = {"x": target.x, "y": target.y}
await self.dm_sdc(f"<@UHT11TM6F> move {json.dumps(args)}")
# wait for the ok
# @sdc will message us with @ctl update position ...
await self.update_event.wait()
# the move was successful and we've had our chance to check the previous spot
# reload the experiment in case flags have changed
experiment_idx = self.db["experiment"].count(flag=False)
experiment = self.get_next_experiment(experiment_idx)
print(experiment)
# send the experiment command
await self.dm_sdc(f"<@UHT11TM6F> run_experiment {json.dumps(experiment)}")
return
@command
async def update(self, ws, msgdata, args):
update_type, rest = args.split(" ", 1)
print(update_type)
self.update_event.set()
return
@command
async def dm(self, ws, msgdata, args):
""" echo random string to DM channel """
dm_channel = "DHY5REQ0H"
# dm_channel = 'DHNHM74TU'
response = await self.slack_api_call(
"chat.postMessage",
token=SDC_TOKEN,
data={
"channel": dm_channel,
"text": args,
"as_user": False,
"username": "ctl",
},
)
@command
async def abort_running_handlers(self, ws, msgdata, args):
"""cancel all currently running task handlers...
WARNING: does not do any checks on the potentiostat -- don't call this while an experiment is running...
we could register the coroutine address when we start it up, and broadcast that so it's cancellable...?
"""
current_task = asyncio.current_task()
for task in asyncio.all_tasks():
if task._coro == current_task._coro:
continue
if task._coro.__name__ == "handle":
print(f"killing task {task._coro}")
task.cancel()
@click.command()
@click.argument("config-file", type=click.Path())
@click.option("--verbose/--no-verbose", default=False)
def sdc_controller(config_file, verbose):
with open(config_file, "r") as f:
config = yaml.safe_load(f)
experiment_root, _ = os.path.split(config_file)
# specify target file relative to config file
target_file = config.get("target_file")
config["target_file"] = os.path.join(experiment_root, target_file)
data_dir = config.get("data_dir")
if data_dir is None:
config["data_dir"] = os.path.join(experiment_root, "data")
figure_dir = config.get("figure_dir")
if figure_dir is None:
config["figure_dir"] = os.path.join(experiment_root, "figures")
os.makedirs(config["data_dir"], exist_ok=True)
os.makedirs(config["figure_dir"], exist_ok=True)
if config["step_height"] is not None:
config["step_height"] = abs(config["step_height"])
# logfile = config.get('command_logfile', 'commands.log')
logfile = "controller.log"
logfile = os.path.join(config["data_dir"], logfile)
ctl = Controller(verbose=verbose, config=config, logfile=logfile)
ctl.run()
if __name__ == "__main__":
sdc_controller()
| 7,551
| 30.206612
| 112
|
py
|
autoSDC
|
autoSDC-master/asdc/characterization.py
|
import json
import dataset
import imageio
import pathlib
import numpy as np
import pandas as pd
from scipy import integrate
from sklearn import linear_model
def load_rates(expt):
instructions = json.loads(expt["instructions"])
return instructions[0].get("rates")
def deposition_potential(expt):
instructions = json.loads(expt["instructions"])
return instructions[1].get("potential")
def load_image(expt, data_dir="data"):
data_dir = pathlib.Path(data_dir)
return imageio.imread(data_dir / expt["image_name"])
def load_deposition_data(expt, data_dir="data"):
data_dir = pathlib.Path(data_dir)
return pd.read_csv(data_dir / expt["datafile"], index_col=0)
def load_corrosion_data(experiment_table, expt, data_dir="data"):
data_dir = pathlib.Path(data_dir)
first_rep = experiment_table.find_one(experiment_id=expt["experiment_id"])
if expt["id"] != first_rep["id"]:
return None
corr = experiment_table.find_one(
experiment_id=expt["experiment_id"], intent="corrosion"
)
if corr is not None:
return pd.read_csv(data_dir / corr["datafile"], index_col=0)
def polarization_resistance(expt, experiment_table, data_dir="data"):
""" compute polarization resistance (Ohms) """
data_dir = pathlib.Path(data_dir)
corr = load_corrosion_data(experiment_table, expt, data_dir=data_dir)
if corr is None:
return np.nan
pr = corr[corr["segment"] == 0]
log_I = np.log10(np.abs(pr["current"]))
idx = log_I.idxmin()
n_skip = int(pr.shape[0] * 0.3)
slc = slice(idx - n_skip, idx + n_skip)
lm = linear_model.HuberRegressor()
try:
lm.fit(pr["potential"][slc, None], pr["current"][slc])
return 1 / lm.coef_[0]
except ValueError:
return np.nan
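# Worked check (illustration only; synthetic ohmic data, not a real scan):
# polarization resistance is the inverse slope of the I-V response near open
# circuit, so for a purely ohmic trace I = V / R the fit should recover R.
def _check_polarization_resistance_on_ohmic_data(R=50.0):
    V = np.linspace(-0.01, 0.01, 200)
    lm = linear_model.HuberRegressor()
    lm.fit(V[:, None], V / R)
    return 1 / lm.coef_[0]  # approximately R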
def load_laser_data(expt, data_dir="data"):
data_dir = pathlib.Path(data_dir)
if expt["reflectance_file"] is None:
# print(expt['id'])
return [np.nan]
with open(data_dir / expt["reflectance_file"], "r") as f:
data = json.load(f)
return data["reflectance"]
def load_xrf_file(datafile, header_rows=32):
# load the column names
with open(datafile, "r") as f:
lines = f.readlines()
header = lines[header_rows]
names = header.strip()[2:].split()
return pd.read_csv(
datafile, skiprows=header_rows + 1, delim_whitespace=True, names=names
)
def load_xrf_data(expt, data_dir="data", scan="middle"):
id = expt["id"]
if scan not in ("down", "middle", "up", "slits"):
raise ValueError("scans should be one of {down, middle, up}")
data_dir = pathlib.Path(data_dir)
if scan == "slits":
        # slit scans live in a separate file, e.g. sdc-26-{id:04d}_slitscan.dat
datafile = data_dir / "xray" / f"sdc-26-{id:04d}_slitscan.dat"
else:
datafile = data_dir / "xray" / f"sdc-26-{id:04d}_linescan_{scan}.dat"
# print(datafile)
try:
return load_xrf_file(datafile)
except FileNotFoundError:
return None
def load_reference_xrf(reference_datafile="rawgold.dat", data_dir="data"):
data_dir = pathlib.Path(data_dir)
reference = load_xrf_file(data_dir / "xray" / reference_datafile, header_rows=17)
reference = reference / reference["I0"][:, None]
Au_counts = np.median(reference["DTC3_1"])
Ni_bg = Au_counts / np.median(reference["DTC1"])
Zn_bg = Au_counts / np.median(reference["DTC2_1"])
return {"Ni": Ni_bg, "Zn": Zn_bg}
def xrf_Ni_ratio(expt, midpoint=False, data_dir="data", scan="middle"):
"""
Dead-time-corrected counts:
Ni: DTC1 Zn: DTC2_1 Au: DTC3_1
Au vs elastic: DTC3_2
Ni: ROI1
Zn: ROI2_1
Au: ROI3_1
"""
data_dir = pathlib.Path(data_dir)
# print(data_dir)
xrf = load_xrf_data(expt, data_dir=data_dir, scan=scan)
# print(xrf)
if xrf is None:
return [np.nan]
xrf = xrf / xrf["I0"][:, None]
bg = load_reference_xrf(data_dir=data_dir)
Ni_counts = xrf["DTC1"] - bg["Ni"]
Zn_counts = xrf["DTC2_1"] - bg["Zn"]
NiZn_counts = Ni_counts + Zn_counts
Ni = Ni_counts / NiZn_counts
    if midpoint:
        midpoint_idx = Ni.size // 2
        return Ni[midpoint_idx]
return Ni
def integral_corrosion_current(expt, experiment_table, start_time=5, data_dir="data"):
data_dir = pathlib.Path(data_dir)
corr = load_corrosion_data(experiment_table, expt, data_dir=data_dir)
if corr is None:
return np.nan
if len(corr["segment"].unique()) > 1:
corr = corr[corr["segment"] == 1]
s = corr["elapsed_time"] > start_time
integral_current = integrate.trapz(corr["current"][s], corr["elapsed_time"][s])
return integral_current
def load_results(expt, experiment_table, data_dir, scan="middle"):
rates = load_rates(expt)
res = {
"refl": np.mean(load_laser_data(expt, data_dir=data_dir)),
# 'Ni_ratio': xrf_Ni_ratio(expt, midpoint=True),
"potential": deposition_potential(expt),
"Ni_ratio": np.median(xrf_Ni_ratio(expt, data_dir=data_dir, scan=scan)[7:14]),
"Ni_variance": np.var(xrf_Ni_ratio(expt, data_dir=data_dir, scan=scan)[3:18]),
"integral_current": integral_corrosion_current(
expt, experiment_table, data_dir=data_dir
),
"polarization_resistance": polarization_resistance(
expt, experiment_table, data_dir=data_dir
),
}
res.update(rates)
res["id"] = expt["experiment_id"]
return res
def load_characterization_results(dbfile, scan="slits"):
print(scan)
dbfile = pathlib.Path(dbfile)
data_dir = dbfile.parent
db = dataset.connect(f"sqlite:///{str(dbfile)}")
experiment_table = db["experiment"]
df = pd.DataFrame(
[
load_results(e, experiment_table, data_dir, scan=scan)
for e in experiment_table.find(intent="deposition")
]
)
return df
| 5,908
| 25.859091
| 86
|
py
|
autoSDC
|
autoSDC-master/asdc/__init__.py
| 0
| 0
| 0
|
py
|
|
autoSDC
|
autoSDC-master/asdc/analyze.py
|
import numpy as np
import pandas as pd
from scipy import stats
from scipy import signal
from sklearn import metrics
from sklearn import linear_model
from skimage import filters
import lmfit
from lmfit import models
def laplace(x, loc, scale, amplitude):
""" laplace peak shape for fitting open circuit potential on polarization curve """
return amplitude * stats.laplace.pdf(x, loc=loc, scale=scale)
def model_autorange_artifacts(
V, I, threshold=0.5, tau_increasing=10, tau_decreasing=3.8
):
"""autorange artifacts occur when the potentiostat switches current ranges
The effect is an apparent spike in measured current by about an order of magnitude...
This function attempts to detect and model these artifacts as step functions with exponential decay
"""
artifact_model = np.zeros_like(I)
# detect artifacts by thresholding the numerical derivative of
# the log absolute current...
dI = np.diff(np.log10(np.abs(I)))
(autorange_idx,) = np.where(np.abs(dI) > threshold)
# get the signed step height on the log current
steps = dI[autorange_idx]
# model each artifact as an exponentially decaying step function...
for idx, step_magnitude in zip(autorange_idx, steps):
# different relaxation times depending on current direction (and voltage ramp direction...)
if step_magnitude > 0:
tau = tau_increasing
elif step_magnitude < 0:
tau = tau_decreasing
        # offset the step index by 1 (due to numpy.diff using a forward difference)
pulse = signal.exponential(
artifact_model.size, center=idx + 1, tau=tau, sym=False
)
# signal.exponential generates a symmetric window... zero out the left half
pulse[: idx + 1] = 0
artifact_model += step_magnitude * pulse
return artifact_model
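# Self-contained sketch (added for illustration; the synthetic trace and the
# step location are assumptions, not real scan values): inject an apparent
# order-of-magnitude current jump and confirm the artifact model places a
# decaying step right where the range switch happened.
def _demo_autorange_model():
    V = np.linspace(-1.0, 1.0, 200)
    I = 1e-6 * np.exp(5 * V)  # smooth exponential current ramp
    I[120:] *= 10.0  # fake autorange jump at index 120
    artifact = model_autorange_artifacts(V, I, threshold=0.5)
    # the modeled artifact peaks at the injected step and decays after it
    assert np.argmax(np.abs(artifact)) == 120
    return artifact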
def guess_open_circuit_potential(V, log_I):
open_circuit = V[np.argmin(log_I)]
return open_circuit
def model_open_circuit_potential(V, log_I, bg_order=3):
""" extract open circuit potential by modeling log(I)-V curve an laplace peak with a polynomial background """
y = -log_I
peak = lmfit.Model(laplace, prefix="peak_")
bg = models.PolynomialModel(bg_order, prefix="bg_")
loc = np.argmax(y)
pars = bg.guess(y, x=V)
pars += peak.make_params(peak_loc=V[loc], peak_scale=0.01, peak_amplitude=0.1)
model = peak + bg
fitted_model = model.fit(y, x=V, params=pars, nan_policy="omit")
return fitted_model
def to_odd(x):
    """ round x down to the nearest odd integer """
    if x % 2:
        return x
    return x - 1
def piecewise_savgol(x, y, x_split=0, window_length=121, polyorder=5):
"""piecewise smoothing with a savgol filter
smooth y piecewise, splitting on x
"""
sel = x <= x_split
if sel.sum() < window_length:
wl = to_odd(sel.sum() - 1)
else:
wl = window_length
y[sel] = signal.savgol_filter(y[sel], wl, polyorder)
sel = x > x_split
y[sel] = signal.savgol_filter(y[sel], window_length, polyorder)
return y
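# Minimal usage sketch (synthetic trace; filter settings are the defaults
# above): smooth a noisy step signal without blurring across x = 0, which a
# single savgol pass over the whole trace would do.
def _demo_piecewise_savgol():
    rng = np.random.default_rng(0)
    x = np.linspace(-1.0, 1.0, 400)
    y = np.where(x <= 0, -1.0, 1.0) + 0.05 * rng.normal(size=x.size)
    return piecewise_savgol(x, y.copy(), x_split=0)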
def model_polarization_curve(
V,
log_I,
bg_order=3,
smooth=True,
smooth_window=121,
shoulder_percentile=0.96,
lm_method="huber",
):
"""extract features from polarization curve.
    open circuit potential by modeling the log(I)-V curve as a laplace peak with a polynomial background
extract passivation region by fitting a robust regression model (with the laplace peak as a hint for where to start)
"""
peak = model_open_circuit_potential(V, log_I, bg_order=bg_order)
V_oc = peak.best_values["peak_loc"]
# apply piecewise smoothing
# i.e. don't oversmooth the open circuit peak
if smooth:
log_I = piecewise_savgol(V, log_I, x_split=V_oc, window_length=smooth_window)
peak_shoulder_idx = np.argmax(
stats.laplace.cdf(V, loc=V_oc, scale=peak.best_values["peak_scale"])
> shoulder_percentile
)
# fit robust regression model to passivation region
# shift the peak shoulder to the origin and constrain
# the intercept to pass through that point...
vp = V[peak_shoulder_idx:]
ip = log_I[peak_shoulder_idx:]
_vp = vp - vp[0]
_ip = ip - ip[0]
if lm_method == "thiel-sen":
lm = linear_model.TheilSenRegressor(fit_intercept=False)
elif lm_method == "huber":
lm = linear_model.HuberRegressor(fit_intercept=False)
elif lm_method == "ransac":
lm = linear_model.RANSACRegressor(fit_intercept=False)
score = []
n_fit = np.arange(100, 800, 5)
for n in n_fit:
lm.fit(_vp[:n, None], _ip[:n])
score.append(metrics.mean_squared_error(_ip, lm.predict(_vp[:, None])))
    # refit with the best (lowest-MSE) model...
    n = n_fit[np.argmin(score)]
lm.fit(_vp[:n, None], _ip[:n])
deviation = _ip - lm.predict(_vp[:, None])
thresh = filters.threshold_triangle(deviation)
id_thresh = np.argmax(deviation > thresh)
V_transpassive = vp[id_thresh]
I_passive = np.median(ip[:id_thresh])
polarization_data = {
"V_oc": V_oc,
"V_tp": V_transpassive,
"I_p": I_passive,
}
fit_data = {"peak": peak, "lm": lm, "vref": vp[0], "iref": ip[0]}
return log_I, polarization_data, fit_data
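# Usage sketch (illustrative; V and log_I are arrays from a single CV
# segment, e.g. obtained via split_data below):
#
#     log_I_s, features, fits = model_polarization_curve(V, log_I)
#     # features["V_oc"]: open circuit potential (laplace peak location)
#     # features["V_tp"]: transpassive potential (deviation threshold)
#     # features["I_p"]:  median passive-region log-current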
def extract_open_circuit_potential(current, potential, segment, return_model=False):
# use the first CV cycle...
sel = np.array(segment) == 2
I = np.array(current)[sel]
V = np.array(potential)[sel]
# hack: use just the increasing ramp...
# this works for 75 mV/s scan from -1V to 1.2V...
I = I[:1000]
V = V[:1000]
# now correct for autorange artifacts
a = model_autorange_artifacts(V, I, tau_increasing=10)
model = model_open_circuit_potential(V, np.log10(np.abs(I)) - a)
if return_model:
return model
return model.best_values["peak_loc"]
def voltage_turning_points(V):
dV = np.diff(signal.savgol_filter(V, 11, 4))
# find zero-crossings on the derivative...
(turning_points,) = np.where(np.diff(np.sign(dV)))
return turning_points
def segment_IV(I, V, segment=1):
""" by default give the middle segment """
t = voltage_turning_points(V)
    if segment is not None:
if segment == 0:
I = I[: t[0]]
V = V[: t[0]]
elif segment == 1:
I = I[t[0] : t[1]]
V = V[t[0] : t[1]]
elif segment == 2:
I = I[t[1] :]
V = V[t[1] :]
return I, V
def split_data(data, segment=0, split=0):
""" data should be a versastat result dictionary """
V_applied = np.array(data["applied_potential"])
s = np.array(data["segment"])
# assume there is only one vertex per segment...
# look for the change point on the sign of the derivative of
# the applied potential. Should be about halfway for CV curves
sgn = np.sign(np.diff(V_applied[s == segment]))
vertex = np.abs(np.diff(sgn)).argmax()
if split == 0:
sel = slice(0, vertex)
elif split == 1:
sel = slice(vertex, -1)
elif split == -1:
sel = slice(0, -1)
res = {
key: np.array(value)[s == segment][sel]
for key, value in data.items()
if key in ("current", "potential", "elapsed_time")
}
return res
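# Illustrative call (keys follow the versastat result-dict convention used
# above; the file name is hypothetical and reading it needs `import json`):
#
#     data = json.load(open("cv_scan_000.json"))
#     forward = split_data(data, segment=2, split=0)  # rising ramp only
#     I, V = forward["current"], forward["potential"]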
def extract_cv_features(
data, return_raw_data=False, shoulder_percentile=0.99, autorange=True, eps=1e-12
):
I = data["current"]
V = data["potential"]
absval = np.abs(I)
log_I = np.log(np.clip(absval, absval[absval > 0].min(), np.inf))
if autorange:
# log_I = correct_autorange_artifacts(V, I)
a = model_autorange_artifacts(V, I)
log_I = log_I - a
_log_I, cv_features, fit_data = model_polarization_curve(
V,
log_I,
bg_order=5,
lm_method="huber",
smooth=True,
smooth_window=121,
shoulder_percentile=shoulder_percentile,
)
cv_features["slope"] = fit_data["lm"].coef_[0]
if return_raw_data:
meta = {"V": V, "log_I": _log_I, "fit_data": fit_data}
return cv_features, meta
return cv_features
| 8,145
| 27.784452
| 120
|
py
|
autoSDC
|
autoSDC-master/asdc/emulation.py
|
import gpflow
import dataset
import numpy as np
import pandas as pd
import tensorflow as tf
def simplex_grid(n=3, buffer=0.1):
""" construct a regular grid on the ternary simplex """
xx, yy = np.meshgrid(np.linspace(0.0, 1.0, n), np.linspace(0.0, 1.0, n))
s = np.c_[xx.flat, yy.flat]
sel = np.abs(s).sum(axis=1) <= 1.0
s = s[sel]
ss = 1 - s.sum(axis=1)
s = np.hstack((s, ss[:, None]))
scale = 1 - (3 * buffer)
s = buffer + s * scale
return s
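# Quick check (illustration): every row of the buffered grid is a valid
# ternary composition -- non-negative, at least `buffer` in each component,
# and summing to one.
def _check_simplex_grid(n=5, buffer=0.1):
    s = simplex_grid(n=n, buffer=buffer)
    assert np.allclose(s.sum(axis=1), 1.0)
    assert (s >= buffer - 1e-12).all()
    return s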
def model_ternary(
composition,
target,
reset_tf_graph=True,
drop_last=True,
optimize_noise_variance=True,
initial_noise_var=1e-4,
):
if drop_last:
X = composition[:, :-1] # ignore the last composition column
else:
X = composition
Y = target
# sel = np.isfinite(Y).sum(axis=1)
sel = np.isfinite(Y).flat
X, Y = X[sel], Y[sel]
N, D = X.shape
if reset_tf_graph:
gpflow.reset_default_graph_and_session()
with gpflow.defer_build():
m = gpflow.models.GPR(
X,
Y,
# kern=gpflow.kernels.Linear(D, ARD=True) + gpflow.kernels.RBF(D, ARD=True) + gpflow.kernels.Constant(D) + gpflow.kernels.White(D)
kern=gpflow.kernels.Matern52(D, ARD=True)
+ gpflow.kernels.Constant(D)
+ gpflow.kernels.White(D, variance=initial_noise_var) # \sigma_noise = 0.01
# kern=gpflow.kernels.RationalQuadratic(D, ARD=True) + gpflow.kernels.Constant(D) + gpflow.kernels.White(D, variance=initial_noise_var)
)
# set a weakly-informative lengthscale prior
# e.g. half-normal(0, dx/3) -> gamma(0.5, 2*dx/3)
# another choice might be to use an inverse gamma prior...
# m.kern.kernels[0].lengthscales.prior = gpflow.priors.Gamma(0.5, 2.0/3)
m.kern.kernels[0].lengthscales.prior = gpflow.priors.Gamma(0.5, 0.5)
# m.kern.kernels[0].variance.prior = gpflow.priors.Gamma(0.5, 4.)
# m.kern.kernels[1].variance.prior = gpflow.priors.Gamma(0.5, 4.)
# m.kern.kernels[2].variance.prior = gpflow.priors.Gamma(0.5, 2.)
m.kern.kernels[0].variance.prior = gpflow.priors.Gamma(2.0, 2.0)
m.kern.kernels[1].variance.prior = gpflow.priors.Gamma(2.0, 2.0)
# m.kern.kernels[2].variance.prior = gpflow.priors.Gamma(2.0, 2.0)
if not optimize_noise_variance:
m.kern.kernels[2].variance.trainable = False
m.likelihood.variance = 1e-6
m.compile()
return m
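# Usage sketch (synthetic inputs; only the shapes matter here):
#
#     X = simplex_grid(n=5)               # (N, 3) ternary compositions
#     y = X[:, :1] - X[:, 1:2]            # any finite (N, 1) target
#     m = model_ternary(X, y)
#     gpflow.train.ScipyOptimizer().minimize(m)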
def model_property(X, y, dx=1.0, optimize=False):
sel = np.isfinite(y).flat
X, y = X[sel], y[sel]
N, D = X.shape
with gpflow.defer_build():
model = gpflow.models.GPR(
X, y, kern=gpflow.kernels.RBF(D, ARD=True) + gpflow.kernels.Constant(D),
)
model.kern.kernels[0].variance.prior = gpflow.priors.Gamma(2, 1 / 2)
model.kern.kernels[0].lengthscales.prior = gpflow.priors.Gamma(2.0, 2 * dx / 3)
model.likelihood.variance = 0.01
model.compile()
if optimize:
opt = gpflow.train.ScipyOptimizer()
opt.minimize(model)
return model
def model_quality(X, y, dx=1.0, likelihood="beta", optimize=False):
sel = np.isfinite(y).flat
X, y = X[sel], y[sel]
N, D = X.shape
if likelihood == "beta":
# bounded regression
lik = gpflow.likelihoods.Beta()
elif likelihood == "bernoulli":
# classification
lik = gpflow.likelihoods.Bernoulli()
with gpflow.defer_build():
model = gpflow.models.VGP(
X, y, kern=gpflow.kernels.RBF(D, ARD=True), likelihood=lik
)
model.kern.variance.prior = gpflow.priors.Gamma(2, 2)
model.kern.lengthscales.prior = gpflow.priors.Gamma(1.0, 2 * dx / 3)
model.likelihood.variance = 0.1
model.compile()
if optimize:
opt = gpflow.train.ScipyOptimizer()
opt.minimize(model)
return model
class NiTiAlEmulator:
def __init__(
self,
composition,
df,
components=["Ni", "Al", "Ti"],
targets=["V_oc", "I_p", "V_tp", "slope", "fwhm"],
dx=1.0,
):
""" fit independent GP models for each target -- read compositions and targets from a csv file... """
self.composition = composition
self.components = list(composition.columns)
self.df = df
self.targets = targets
# self.composition = self.df.loc[:,self.components].values
self.dx = dx
self.models = {}
self.session = gpflow.get_default_session()
self.opt = gpflow.training.ScipyOptimizer()
self.fit()
def fit(self):
print(self.composition.shape)
with self.session.as_default():
for target in self.targets:
model = model_property(
self.composition.values[:, :-1],
self.df[target].values[:, None],
dx=self.dx,
)
self.opt.minimize(model)
self.models[target] = model
def likelihood_variance(self, target=None):
return self.models[target].likelihood.variance.value.item()
def __call__(
self,
composition,
target=None,
return_var=False,
sample_posterior=False,
n_samples=1,
seed=None,
):
""" evaluate GP models on compositions """
model = self.models[target]
with self.session.as_default():
if sample_posterior:
if seed is not None:
tf.set_random_seed(seed)
mu = model.predict_f_samples(composition[:, :-1], n_samples)
return mu.squeeze()
else:
mu, var = model.predict_y(composition[:, :-1])
if return_var:
return mu, var
else:
return mu.squeeze()
class ExperimentEmulator:
def __init__(
self,
db_file,
components=["Ni", "Al", "Ti"],
targets=["V_oc", "I_p", "V_tp", "slope", "fwhm"],
optimize_noise_variance=True,
):
""" fit independent GP models for each target -- read compositions and targets from a csv file... """
# load all the unflagged data from sqlite to pandas
# use sqlite id as pandas index
self.db = dataset.connect(f"sqlite:///{db_file}")
self.df = pd.DataFrame(self.db["experiment"].all(flag=False))
self.df.set_index("id", inplace=True)
# # drop the anomalous point 45 that has a negative jog in the passivation...
# self.df = self.df.drop(45)
self.components = components
self.targets = targets
self.optimize_noise_variance = optimize_noise_variance
self.models = {}
self.fit()
def fit(self):
self.composition = self.df.loc[:, self.components].values
self.opt = gpflow.training.ScipyOptimizer()
for target in self.targets:
model = model_ternary(
self.composition,
self.df[target].values[:, None],
optimize_noise_variance=self.optimize_noise_variance,
)
session = gpflow.get_default_session()
self.opt.minimize(model)
self.models[target] = (session, model)
def __call__(
self,
composition,
target=None,
return_var=False,
sample_posterior=False,
n_samples=1,
seed=None,
):
""" evaluate GP models on compositions """
session, model = self.models[target]
with session.as_default():
if sample_posterior:
if seed is not None:
tf.set_random_seed(seed)
mu = model.predict_f_samples(composition[:, :-1], n_samples)
return mu.squeeze()
else:
mu, var = model.predict_y(composition[:, :-1])
if return_var:
return mu, var
else:
return mu.squeeze()
| 8,029
| 29.301887
| 147
|
py
|
autoSDC
|
autoSDC-master/asdc/gp_controller.py
|
import os
import sys
import json
import time
import click
import asyncio
import dataset
import functools
import numpy as np
import pandas as pd
from ruamel import yaml
from aioconsole import ainput
import gpflow
from gpflowopt import acquisition
from scipy import stats
from scipy import spatial
from datetime import datetime
sys.path.append("../scirc")
sys.path.append(".")
import scirc
from asdc import slack
from asdc import analyze
from asdc import emulation
from asdc import visualization
BOT_TOKEN = open("slack_bot_token.txt", "r").read().strip()
SDC_TOKEN = open("slacktoken.txt", "r").read().strip()
def load_experiment_files(csv_files, dir="."):
dir, _ = os.path.split(dir)
experiments = pd.concat(
(
pd.read_csv(os.path.join(dir, csv_file), index_col=0)
for csv_file in csv_files
),
ignore_index=True,
)
return experiments
def load_experiment_json(experiment_files, dir="."):
""" an experiment file contains a json list of experiment definitions """
dir, _ = os.path.split(dir)
experiments = None
for experiment_file in experiment_files:
with open(os.path.join(dir, experiment_file), "r") as f:
if experiments is None:
experiments = json.load(f)
            else:
                # extend (not append) so multiple files merge into one flat list
                experiments.extend(json.load(f))
return experiments
class Controller(scirc.SlackClient):
""" autonomous scanning droplet cell client """
command = scirc.CommandRegistry()
def __init__(self, config=None, verbose=False, logfile=None, token=BOT_TOKEN):
super().__init__(verbose=verbose, logfile=logfile, token=token)
self.command.update(super().command)
self.msg_id = 0
self.update_event = asyncio.Event(loop=self.loop)
self.confirm = config.get("confirm", True)
self.notify = config.get("notify_slack", True)
self.data_dir = config.get("data_dir", os.getcwd())
self.figure_dir = config.get("figure_dir", os.getcwd())
self.db_file = os.path.join(self.data_dir, config.get("db_file", "test.db"))
self.db = dataset.connect(f"sqlite:///{self.db_file}")
self.experiment_table = self.db["experiment"]
self.targets = pd.read_csv(config["target_file"], index_col=0)
self.experiments = load_experiment_json(
config["experiment_file"], dir=self.data_dir
)
# gpflowopt minimizes objectives...
# UCB switches to maximizing objectives...
# swap signs for things we want to minimize (everything but V_tp)
self.objectives = ("I_p", "slope", "V_oc", "V_tp")
self.objective_alphas = [3, 3, 2, 1]
self.sgn = np.array([-1, -1, -1, 1])
async def post(self, msg, ws, channel):
# TODO: move this to the base Client class...
response = {
"id": self.msg_id,
"type": "message",
"channel": channel,
"text": msg,
}
self.msg_id += 1
await ws.send_str(json.dumps(response))
async def dm_sdc(self, text, channel="DHY5REQ0H"):
response = await self.slack_api_call(
"chat.postMessage",
data={
"channel": channel,
"text": text,
"as_user": False,
"username": "ctl",
},
token=SDC_TOKEN,
)
def load_experiment_indices(self):
# indices start at 0...
# sqlite integer primary keys start at 1...
df = pd.DataFrame(self.experiment_table.all())
target_idx = self.experiment_table.count()
experiment_idx = self.experiment_table.count(flag=False)
return df, target_idx, experiment_idx
def analyze_corrosion_features(self, segment=3):
rtab = self.db.get_table("result", primary_id=False)
for row in self.db["experiment"].all():
# extract features for any data that's missing
if rtab.find_one(id=row["id"]):
continue
d = {"id": row["id"]}
echem_data = pd.read_csv(
os.path.join(self.data_dir, row["datafile"]), index_col=0
)
autorange = (
echem_data["current_range"][echem_data["segment"] == segment]
.unique()
.size
> 1
)
data = analyze.split_data(
echem_data.to_dict(orient="list"), segment=segment
)
cv_features = analyze.extract_cv_features(
data, shoulder_percentile=0.999, autorange=autorange
)
d.update(cv_features)
d["ts"] = datetime.now()
rtab.upsert(d, ["id"])
return
def random_scalarization_cb(self, model_wrapper, candidates, cb_beta):
""" random scalarization upper confidence bound acquisition policy function """
objective = np.zeros(candidates.shape[0])
# sample one set of weights from a dirichlet distribution
# that specifies our general preference on the objective weightings
weights = stats.dirichlet.rvs(self.objective_alphas).squeeze()
if self.notify:
slack.post_message(f"sampled objective fn weights: {weights}")
for model, weight in zip(model_wrapper.models, weights):
mean, var = model.predict_y(candidates)
ucb = mean + cb_beta * np.sqrt(var)
objective += weight * ucb.squeeze()
return objective
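    # Added commentary: the policy above draws one weight vector
    # w ~ Dirichlet(objective_alphas) per acquisition round and scores each
    # candidate as sum_k w_k * (mu_k + cb_beta * sigma_k), i.e. a randomly
    # scalarized upper confidence bound. For example, with weights
    # (0.5, 0.3, 0.2) and per-model UCBs (1.0, 2.0, 3.0) a candidate scores
    # 0.5*1.0 + 0.3*2.0 + 0.2*3.0 = 1.7.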
def gp_acquisition(self):
if self.notify:
slack.post_message(f"analyzing CV features...")
# make sure all experiments are postprocessed and have values in the results table
self.analyze_corrosion_features()
# load positions, compositions, and measured values from db
df = pd.DataFrame(self.db["experiment"].all())
r = pd.DataFrame(self.db["result"].all())
X = df.loc[:, ("x_combi", "y_combi")].values
Y = r.loc[:, self.objectives].values
candidates = self.targets.values
# set confidence bound beta
t = X.shape[0]
cb_beta = 0.125 * np.log(2 * t + 1)
# set up scaling for X (if X is wafer coords...)
# scale targets to range (-0.5, 0.5)
scale_factor = 2 * self.targets.max().values
X = X / scale_factor
candidates = candidates / scale_factor
# remove previously measured candidates
mindist = spatial.distance.cdist(X, candidates).min(axis=0)
candidates = candidates[mindist > 1e-3]
# reset tf graph -- long-running program!
gpflow.reset_default_graph_and_session()
if self.notify:
slack.post_message(f"fitting GP models")
# set up models
# don't drop the last input dimension with wafer position inputs...
models = [
emulation.model_ternary(
X,
(self.sgn * Y)[:, idx][:, None],
reset_tf_graph=False,
drop_last=False,
)
for idx in range(Y.shape[1])
]
# set up multiobjective acquisition...
# use this as a convenient model wrapper for now...
criterion = acquisition.HVProbabilityOfImprovement(models)
# rescale model outputs to balance objectives...
for model in criterion.models:
model.normalize_output = True
criterion.root._needs_setup = True
criterion.optimize_restarts = 1
# fit the surrogate models
# gpflowopt objective will optimize the full model list...
criterion._optimize_models()
if self.notify:
slack.post_message(f"evaluating acquisition function")
# evaluate the acquisition function on a grid
# acq = criterion.evaluate(candidates)
acq = self.random_scalarization_cb(criterion, candidates, cb_beta)
# plot the acquisition function...
figpath = os.path.join(self.figure_dir, f"acquisition_plot_{t}.png")
visualization.scatter_wafer(
candidates * scale_factor, acq, label="acquisition", figpath=figpath
)
if self.notify:
slack.post_image(figpath, title=f"acquisition at t={t}")
query_idx = np.argmax(acq)
guess = candidates[query_idx]
target = guess * scale_factor
target = pd.Series({"x": target[0], "y": target[1]})
return target
@command
async def go(self, ws, msgdata, args):
"""keep track of target positions and experiment list
target and experiment indices start at 0
sqlite integer primary keys start at 1...
"""
# need to be more subtle here: filter experiment conditions on 'ok' or 'flag'
# but also: filter everything on wafer_id, and maybe session_id?
# also: how to allow cancelling tasks and adding combi spots to a queue to redo?
target = self.gp_acquisition()
# target_idx = self.db['experiment'].count()
# target = self.targets.iloc[target_idx]
print(target)
experiment_idx = self.db["experiment"].count(flag=False)
experiment = self.experiments[experiment_idx]
print(experiment)
# send the move command -- message @sdc
self.update_event.clear()
args = {"x": target.x, "y": target.y}
await self.dm_sdc(f"<@UHT11TM6F> move {json.dumps(args)}")
# wait for the ok
# @sdc will message us with @ctl update position ...
await self.update_event.wait()
# the move was successful and we've had our chance to check the previous spot
# reload the experiment in case flags have changed
experiment_idx = self.db["experiment"].count(flag=False)
experiment = self.experiments[experiment_idx]
print(experiment)
# send the experiment command
await self.dm_sdc(f"<@UHT11TM6F> run_experiment {json.dumps(experiment)}")
return
@command
async def update(self, ws, msgdata, args):
update_type, rest = args.split(" ", 1)
print(update_type)
self.update_event.set()
return
@command
async def dm(self, ws, msgdata, args):
""" echo random string to DM channel """
dm_channel = "DHY5REQ0H"
# dm_channel = 'DHNHM74TU'
response = await self.slack_api_call(
"chat.postMessage",
token=SDC_TOKEN,
data={
"channel": dm_channel,
"text": args,
"as_user": False,
"username": "ctl",
},
)
@command
async def abort_running_handlers(self, ws, msgdata, args):
"""cancel all currently running task handlers...
WARNING: does not do any checks on the potentiostat -- don't call this while an experiment is running...
we could register the coroutine address when we start it up, and broadcast that so it's cancellable...?
"""
current_task = asyncio.current_task()
for task in asyncio.all_tasks():
if task._coro == current_task._coro:
continue
if task._coro.__name__ == "handle":
print(f"killing task {task._coro}")
task.cancel()
@click.command()
@click.argument("config-file", type=click.Path())
@click.option("--verbose/--no-verbose", default=False)
def sdc_controller(config_file, verbose):
with open(config_file, "r") as f:
config = yaml.safe_load(f)
experiment_root, _ = os.path.split(config_file)
# specify target file relative to config file
target_file = config.get("target_file")
config["target_file"] = os.path.join(experiment_root, target_file)
data_dir = config.get("data_dir")
if data_dir is None:
config["data_dir"] = os.path.join(experiment_root, "data")
figure_dir = config.get("figure_dir")
if figure_dir is None:
config["figure_dir"] = os.path.join(experiment_root, "figures")
os.makedirs(config["data_dir"], exist_ok=True)
os.makedirs(config["figure_dir"], exist_ok=True)
if config["step_height"] is not None:
config["step_height"] = abs(config["step_height"])
# logfile = config.get('command_logfile', 'commands.log')
logfile = "controller.log"
logfile = os.path.join(config["data_dir"], logfile)
ctl = Controller(verbose=verbose, config=config, logfile=logfile)
ctl.run()
if __name__ == "__main__":
sdc_controller()
| 12,555
| 31.444444
| 112
|
py
|
autoSDC
|
autoSDC-master/asdc/scripts/electroplate.py
|
#!/usr/bin/env python
import os
import sys
import glob
import json
import time
from ruamel import yaml
import click
import numpy as np
import pandas as pd
from datetime import datetime
import matplotlib.pyplot as plt
from asdc import sdc
from asdc import slack
from asdc import visualization
@click.command()
@click.argument("config-file", type=click.Path())
@click.option("--verbose/--no-verbose", default=False)
def electroplate(config_file, verbose):
"""keep in mind that sample frame and versastat frame have x and y flipped:
x_combi is -y_versastat
y_combi is -x_versastat
Also, combi wafer frame is in mm, versastat frame is in meters.
Assume we start at the standard combi layout spot 1 (-9.04, -31.64)
"""
with open(config_file, "r") as f:
config = yaml.safe_load(f)
if config["data_dir"] is None:
config["data_dir"] = os.path.join(os.path.split(config_file)[0], "data")
if config["figure_dir"] is None:
config["figure_dir"] = os.path.join(os.path.split(config_file)[0], "figures")
if config["delta_z"] is not None:
config["delta_z"] = abs(config["delta_z"])
df = pd.read_csv(config["target_file"], index_col=0)
if config["initial_spot"] is not None:
current_spot = df.loc[config["initial_spot"]]
# drop any targets that we want to skip outright
# df = df.drop(config['skip_spots'])
if config["restart_in_place"]:
df = df[df.index >= config["initial_spot"]]
else:
df = df[df.index > config["initial_spot"]]
data_files = glob.glob(os.path.join(config["data_dir"], "*.json"))
for composition_file in config["composition_file"]:
stem, ext = os.path.splitext(composition_file)
lockfile = os.path.join(config["data_dir"], stem + ".lock")
if not os.path.isfile(lockfile):
break
print("running depositions from ", composition_file)
comp = pd.read_csv(os.path.join(config["data_dir"], composition_file), index_col=0)
# # add an initial dummy row for the CV...
# comp = pd.concat((comp.iloc[0:1], comp))
# comp.iloc[0] *= np.nan
if config["initial_spot"] is None:
# assume we start from combi spot one
if len(data_files) == 0:
current_spot = pd.Series(dict(x=-9.04, y=-31.64))
else:
# or from a previous spot...
current_spot = df.iloc[len(data_files) - 1]
df = df.iloc[len(data_files) :]
# check to see if we're starting in the middle of a composition...
current_solution_datafiles = [
data_file for data_file in data_files if stem in data_file
]
if len(current_solution_datafiles) > 0:
n_current = len(current_solution_datafiles)
comp = comp.iloc[n_current:]
run_cv = False
print("start: ", current_spot.x, current_spot.y)
with sdc.position.controller(ip="192.168.10.11", speed=config["speed"]) as pos:
initial_versastat_position = pos.current_position()
print("initial vs position: ", initial_versastat_position)
if config["confirm"]:
input("press enter to start the experiment")
for (idx, target), (_, C) in zip(df.iterrows(), comp.iterrows()):
# update position: convert from mm to m
# x_vs is -y_c, y_vs is x
dy = -(target.x - current_spot.x) * 1e-3
dx = -(target.y - current_spot.y) * 1e-3
delta = [dx, dy, 0.0]
current_spot = target
if verbose:
print(current_spot.x, current_spot.y)
print("position update:", dx, dy)
with sdc.position.controller(ip="192.168.10.11", speed=config["speed"]) as pos:
pos.update(
delta=delta,
step_height=config["delta_z"],
compress=config["compress_dz"],
)
current_v_position = pos.current_position()
# run CV scan
if run_cv:
print("CV", current_spot.x, current_spot.y)
if config["confirm"]:
input("press enter to run experiment")
if config["initial_delay"] > 0:
time.sleep(config["initial_delay"])
slack.post_message("Running a CV for {}.".format(config["target_file"]))
the_data = sdc.experiment.run_cv_scan(cell=config["cell"], verbose=verbose)
run_cv = False
figpath = os.path.join(config["figure_dir"], "CV_{}.png".format(idx))
visualization.plot_vi(
the_data["current"], the_data["potential"], figpath=figpath
)
slack.post_image(figpath, title="CV {}".format(idx))
else:
potential = C["V"]
duration = C["time"] # time in seconds
print("plate", C["f_Co"], "Co")
print(
"x={}, y={}, V={}, t={}".format(
current_spot.x, current_spot.y, potential, duration
)
)
print("make sure the flow rate is set to ", C["flow_rate"])
if config["confirm"]:
input("press enter to run experiment")
if config["initial_delay"] > 0:
time.sleep(config["initial_delay"])
slack.post_message(
"Running electrodeposition targeting {} Co. ({}V for {}s at {})".format(
C["f_Co"], potential, duration, C["flow_rate"]
)
)
the_data = sdc.experiment.run_potentiostatic(
potential, duration, cell=config["cell"], verbose=verbose
)
the_data.update(C.to_dict())
figpath = os.path.join(
config["figure_dir"], "current_plot_{}.png".format(idx)
)
visualization.plot_i(
the_data["elapsed_time"], the_data["current"], figpath=figpath
)
slack.post_image(figpath, title="current vs time {}".format(idx))
the_data["index_in_sequence"] = int(idx)
the_data["position_versa"] = current_v_position
_spot = current_spot.to_dict()
the_data["position_combi"] = [float(_spot["x"]), float(_spot["y"])]
# log data
logfile = "{}_data_{:03d}.json".format(stem, idx)
with open(os.path.join(config["data_dir"], logfile), "w") as f:
json.dump(the_data, f)
if config["confirm"]:
input(
"deposition finished; turn off the pump and press enter to move the stage..."
)
open(lockfile, "a").close()
if __name__ == "__main__":
electroplate()
| 6,590
| 33.150259
| 93
|
py
|
autoSDC
|
autoSDC-master/asdc/scripts/cli.py
|
#!/usr/bin/env python
import os
import sys
import glob
import json
import time
import click
import numpy as np
from datetime import datetime
import matplotlib.pyplot as plt
from asdc import sdc
from asdc import visualization
@click.group()
def cli():
pass
@cli.command()
@click.option("--verbose/--no-verbose", default=False)
def reset(verbose):
""" try to reset the potentiostat controller... """
with sdc.potentiostat.controller(start_idx=17109013) as pstat:
pstat.stop()
pstat.clear()
@cli.command()
@click.option(
"-d",
"--direction",
default="x",
type=click.Choice(["x", "y", "+x", "-x", "+y", "-y"]),
)
@click.option("--delta", default=5e-3, type=float, help="x step in meters")
@click.option("--delta-z", default=5e-5, type=float, help="z step in meters")
@click.option("--speed", default=1e-3, type=float, help="speed in meters/s")
@click.option(
"--lift/--no-lift",
default=False,
help="ease off vertically before horizontal motion.",
)
@click.option(
"--press/--no-press",
default=True,
help="press down below vertical setpoint to reseat probe after horizontal motion",
)
@click.option("--verbose/--no-verbose", default=False)
def step(direction, delta, delta_z, speed, lift, press, verbose):
"""1mm per second scan speed.
up. over. down. down. up
lift: up, over, down.
press: after horizontal step, press down and release by delta_z
"""
# constrain absolute delta_z to avoid crashing....
delta_z = np.clip(delta_z, -5e-5, 5e-5)
with sdc.position.controller(ip="192.168.10.11", speed=speed) as pos:
# vertical step
if lift:
pos.update_z(delta=delta_z, verbose=verbose)
# take the position step
if verbose:
pos.print_status()
if "x" in direction:
update_position = pos.update_x
elif "y" in direction:
update_position = pos.update_y
if "-" in direction:
delta *= -1
update_position(delta=delta, verbose=verbose)
if verbose:
pos.print_status()
if lift:
# vertical step back down:
pos.update_z(delta=-delta_z, verbose=verbose)
if press:
# compress, then release
pos.update_z(delta=-delta_z, verbose=verbose)
pos.update_z(delta=delta_z, verbose=verbose)
if verbose:
pos.print_status()
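# Hypothetical invocations (the console-script name `asdc` is an assumption;
# adjust to however this click group is installed):
#
#     asdc step -d y --delta 5e-3 --no-press --verbose
#     asdc step --direction=-x --delta 2e-3 --lift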
@cli.command()
@click.option("--data-dir", default="data", type=click.Path())
@click.option(
"-c", "--cell", default="INTERNAL", type=click.Choice(["INTERNAL", "EXTERNAL"])
)
@click.option("--verbose/--no-verbose", default=False)
def cv(data_dir, cell, verbose):
""" run a CV experiment """
# load previous datasets just to get current index...
datafiles = glob.glob(os.path.join(data_dir, "*.json"))
scan_idx = len(datafiles)
with sdc.position.controller(ip="192.168.10.11") as pos:
cv_data = sdc.experiment.run_cv_scan(cell=cell, verbose=verbose)
logfile = "cv_{:03d}.json".format(scan_idx)
with open(os.path.join(data_dir, logfile), "w") as f:
json.dump(cv_data, f)
print("plotting...")
visualization.plot_v(
cv_data["elapsed_time"], cv_data["potential"], scan_idx, data_dir=data_dir
)
print("first plot done")
visualization.plot_iv(
cv_data["current"], cv_data["potential"], scan_idx, data_dir
)
print("second plot done")
return
if __name__ == "__main__":
cli()
| 3,641
| 25.977778
| 86
|
py
|
autoSDC
|
autoSDC-master/asdc/scripts/auto.py
|
#!/usr/bin/env python
import os
import sys
import glob
import json
import time
import yaml
import click
import numpy as np
import pandas as pd
from datetime import datetime
import matplotlib.pyplot as plt
from asdc import sdc
from asdc import ocp
from asdc import slack
from asdc import analyze
from asdc import visualization
def update_position_combi(
target, current_spot, speed=1e-3, delta_z=5e-3, compress_dz=5e-5, verbose=False
):
# update position: convert from mm to m
# x_vs is -y_c, y_vs is x
dy = -(target.x - current_spot.x) * 1e-3
dx = -(target.y - current_spot.y) * 1e-3
delta = [dx, dy, 0.0]
if verbose:
print("position update:", dx, dy)
with sdc.position.controller(ip="192.168.10.11", speed=speed) as pos:
pos.update(delta=delta, step_height=delta_z, compress=compress_dz)
current_v_position = pos.current_position()
return current_v_position
# 5mm up, 50 microns down
@click.command()
@click.argument("config-file", type=click.Path())
@click.option("--verbose/--no-verbose", default=False)
def run_auto_scan(config_file, verbose):
"""collect three CV curves from predetermined spots, then use a Gaussian Process model to map out open circuit potential
keep in mind that sample frame and versastat frame have x and y flipped:
x_combi is -y_versastat
y_combi is -x_versastat
Also, combi wafer frame is in mm, versastat frame is in meters.
Assume we start at the standard combi layout spot 1 (-9.04, -31.64)
"""
with open(config_file, "r") as f:
        config = yaml.safe_load(f)
if config["data_dir"] is None:
config["data_dir"] = os.path.split(config_file)[0]
if config["delta_z"] is not None:
config["delta_z"] = np.clip(abs(config["delta_z"]), 0, 1e-1)
# assume we start at standard combi layout spot 1
current_spot = pd.Series(dict(x=-9.04, y=-31.64))
# get the corresponding versastat reference coordinates
with sdc.position.controller(ip="192.168.10.11", speed=config["speed"]) as pos:
initial_versastat_position = pos.current_position()
# kickstart with a few pre-determined scans...
df = pd.read_csv(config["target_file"], index_col=0)
n_initial, _ = df.shape
n_total = n_initial + config["n_acquisitions"]
pre_collected_data = glob.glob(os.path.join(config["data_dir"], "*.json"))
# start_idx = len(pre_collected_data)
    if pre_collected_data:
        most_recent_file = sorted(pre_collected_data)[-1]
        bn, _ = os.path.splitext(os.path.basename(most_recent_file))
        _, _, start_idx = bn.split("_")
        start_idx = int(start_idx)
    else:
        start_idx = 0
for idx in range(start_idx, n_total):
if idx < n_initial:
slack.post_message("acquiring predetermined spot {}".format(idx))
target = df.iloc[idx]
else:
slack.post_message("acquiring GP spot {}".format(idx))
target = ocp.gp_select(config["data_dir"], plot_model=True, idx=idx)
figpath = os.path.join(
config["data_dir"], "ocp_predictions_{}.png".format(idx)
)
slack.post_image(figpath, title="OCP map {}".format(idx))
# update position
# specify target position in combi sample coordinates
        current_v_position = update_position_combi(
            target,
            current_spot,
            delta_z=config["delta_z"],
            compress_dz=config["compress_dz"],
            speed=config["speed"],
        )
current_spot = target
# run CV scan
if config["initial_delay"]:
time.sleep(config["initial_delay"])
cv_data = sdc.experiment.run_cv_scan(cell=config["cell"], verbose=verbose)
cv_data["index_in_sequence"] = int(idx)
cv_data["position_versa"] = current_v_position
_spot = current_spot.to_dict()
cv_data["position_combi"] = [float(_spot["x"]), float(_spot["y"])]
# log data
logfile = "cv_scan_{:03d}.json".format(idx)
with open(os.path.join(config["data_dir"], logfile), "w") as f:
json.dump(cv_data, f)
figpath = os.path.join(config["data_dir"], "open_circuit_{}.png".format(idx))
visualization.plot_open_circuit(
cv_data["current"],
cv_data["potential"],
cv_data["segment"],
figpath=figpath,
)
slack.post_image(figpath, title="open circuit {}".format(idx))
# visualization.plot_iv(cv_data['current'], cv_data['potential'], idx, data_dir)
# visualization.plot_v(cv_data['elapsed_time'], cv_data['potential'], idx, data_dir=data_dir)
# re-fit the GP after the final measurement
slack.post_message("fitting final GP model.")
target = ocp.gp_select(config["data_dir"], plot_model=True)
slack.post_image(
os.path.join(config["data_dir"], "ocp_predictions_final.png"), title="OCP map"
)
# go back to the original position....
with sdc.position.controller(ip="192.168.10.11", speed=config["speed"]) as pos:
x_initial, y_initial, z_initial = initial_versastat_position
x_current, y_current, z_current = pos.current_position()
delta = [x_initial - x_current, y_initial - y_current, 0.0]
pos.update(
delta=delta, step_height=config["delta_z"], compress=config["compress_dz"]
)
if __name__ == "__main__":
run_auto_scan()
| 5,411
| 34.372549
| 124
|
py
|
autoSDC
|
autoSDC-master/asdc/scripts/__init__.py
| 0
| 0
| 0
|
py
|
|
autoSDC
|
autoSDC-master/asdc/scripts/find_circle.py
|
#!/usr/bin/env python
""" Perform sparse measurements to estimate the location of the interface between the gold dot sample and the epoxy """
import GPy
import json
import time
import numpy as np
from datetime import datetime
import matplotlib.pyplot as plt
import os
import sys
vs_path = os.path.expanduser(os.path.join("~", "a-sdc"))
print(vs_path)
sys.path.append(vs_path)
from asdc import sdc
# step size 80 microns
grid_step_size = 80 * 1e-6
grid_extent = 0.008 # 8 mm
INITIAL_Z_DELTA = 2.0e-4
n_measurements = 100
n_initial = 5
POTENTIAL_THRESHOLD = -0.22
CURRENT_THRESHOLD = -4.5 * 1e-9
KERNEL_LENGTHSCALE_CONSTRAINTS = (5e-4, 5e-3)
LOG_DIR = "logs"
FIG_DIR = "figures"
os.makedirs(LOG_DIR, exist_ok=True)
os.makedirs(FIG_DIR, exist_ok=True)
def acquire_cv_data(pstat, pos, idx, poll_interval=8):
# run a CV experiment
status, params = pstat.multi_cyclic_voltammetry(
initial_potential=0.0,
vertex_potential_1=-0.25,
vertex_potential_2=0.65,
final_potential=0.0,
scan_rate=0.1,
cell_to_use="EXTERNAL",
e_filter="1Hz",
i_filter="1Hz",
cycles=3,
)
pstat.start()
while pstat.sequence_running():
time.sleep(poll_interval)
# collect and log data
scan_data = {
"measurement": "multi_cyclic_voltammetry",
"parameters": params,
"index_in_sequence": idx,
"timestamp": datetime.now().isoformat(),
"current": pstat.current(),
"potential": pstat.potential(),
"position": pos.current_position(),
}
pstat.clear()
return scan_data
def update_position(pos, new_xy):
current_position = pos.current_position()
z = current_position[-1]
new_x, new_y = new_xy
target_position = np.array([new_x, new_y, z])
delta = target_position - current_position
# make really sure delta_z is zero
delta[-1] = 0
pos.update(delta, verbose=False)
def evaluate_CV_curves(
potential,
current,
potential_threshold=POTENTIAL_THRESHOLD,
current_threshold=CURRENT_THRESHOLD,
):
""" make a polymer/metal decision by thresholding the current at the low end of the potenial curve """
# global current_history
potential, current = np.array(potential), np.array(current)
avg_current = np.mean(current[potential < potential_threshold])
if avg_current < current_threshold:
return 1, avg_current
else:
return 0, avg_current
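# Tiny worked example (synthetic numbers, not instrument data): currents more
# negative than CURRENT_THRESHOLD at potentials below POTENTIAL_THRESHOLD get
# labeled metal (1), everything else polymer (0).
def _demo_evaluate_cv():
    potential = np.array([-0.3, -0.25, 0.0, 0.5])
    current = np.array([-6e-9, -5e-9, -1e-9, 1e-9])
    label, avg_current = evaluate_CV_curves(potential, current)
    return label, avg_current  # label == 1, avg_current == -5.5e-9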
def fit_gp(X, y, observed):
m = GPy.models.GPClassification(
X[observed],
y[observed, None],
kernel=GPy.kern.RBF(2, lengthscale=1.0) + GPy.kern.White(2, variance=0.05),
)
# NOTE: tune these lengthscales!
m.kern.rbf.lengthscale.constrain_bounded(*KERNEL_LENGTHSCALE_CONSTRAINTS)
m.kern.white.variance.constrain_bounded(1e-2, 0.1)
m.optimize("bfgs", max_iters=100)
print("gp opt ok.")
mu, var = m.predict_noiseless(X)
p = m.likelihood.gp_link.transf(mu)
v = p - np.square(p)
print("predictions ok.")
vv = v.copy()
vv[observed] = 0
query_id = np.argmax(vv)
return m, p, v, query_id
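# Note on the query rule above (added commentary): p - p**2 is the Bernoulli
# variance of the predicted class probability, maximal at p = 0.5, so taking
# argmax over unobserved points is uncertainty sampling -- the next
# measurement lands on the estimated metal/epoxy boundary.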
def plot_predictions(X, y, observed, p, var, query_id, idx):
fig, (ax1, ax2) = plt.subplots(ncols=2, figsize=(12, 6))
print("subplots created.")
n_points, dim = X.shape
s = int(np.sqrt(n_points))
print(s)
xx, yy = X[:, 0], X[:, 1]
xx, yy = xx.reshape((s, s)), yy.reshape((s, s))
extent = (np.min(xx), np.max(xx), np.min(yy), np.max(yy))
print("prepare to imshow.")
ax1.imshow(
p.reshape((s, s)),
origin="lower",
aspect="equal",
alpha=0.5,
extent=extent,
interpolation="bilinear",
cmap="Reds",
)
print("1")
ax2.imshow(
var.reshape((s, s)),
origin="lower",
aspect="equal",
alpha=0.5,
extent=extent,
interpolation="bilinear",
cmap="Reds",
)
print("2. now scatter:")
for ax in (ax1, ax2):
ax.scatter(
X[observed, 0], X[observed, 1], c=y[observed], edgecolors="k", cmap="Reds"
)
ax.scatter(X[query_id, 0], X[query_id, 1], c="b", edgecolors="k")
ax.contour(
xx,
yy,
p.reshape((s, s)),
levels=[0.5],
colors="k",
alpha=0.5,
linestyles="dashed",
)
ax.axis("off")
print("ok, saving")
plt.savefig(os.path.join(FIG_DIR, "gp_predictions_{:04d}.png".format(idx)))
print("ok.")
plt.clf()
plt.close()
def plot_CV(potential, current, idx):
plt.plot(potential, current, alpha=0.8)
plt.savefig(os.path.join(FIG_DIR, "CV_curve_{:04d}.png".format(idx)))
plt.clf()
plt.close()
def write_logfile(scan_data, idx):
""" serialize scan data to json """
logfile = os.path.join(LOG_DIR, "line_scan_{:04d}.json".format(idx))
with open(logfile, "w") as f:
json.dump(scan_data, f)
def find_circle(speed=1e-5, poll_interval=5):
"""perform a sparse set of CV experiments, recording position, current, potential, and parameters in json log files
Position units are METERS!
"""
delta = [1e-4, 1e-4, 0.0]
initial_delta = [0.0, 0.0, -INITIAL_Z_DELTA]
final_delta = [0.0, 0.0, INITIAL_Z_DELTA]
with sdc.position.controller(ip="192.168.10.11", speed=speed) as pos:
pos.print_status()
pos.update(delta=initial_delta, verbose=True)
pos.print_status()
# define a square measurement grid
start_position = pos.current_position()
xx, yy = np.meshgrid(
np.arange(0, grid_extent, grid_step_size),
np.arange(0, grid_extent, grid_step_size),
)
# add the grid offset positions to the start position
xx += start_position[0]
yy = start_position[1] - yy
plt.hist(xx.flat, bins=100, label="x")
plt.hist(yy.flat, bins=100, label="y")
plt.legend()
plt.savefig("posit_2.png")
plt.clf()
plt.close()
X = np.c_[xx.ravel(), yy.ravel()]
y = np.zeros(X.shape[0])
observed = np.zeros_like(y, dtype=bool)
with sdc.potentiostat.controller(start_idx=17109013) as pstat:
pstat.set_current_range("20nA")
pstat.stop()
pstat.clear()
# start in the lower left corner...
query_id = 0
for idx in range(n_measurements):
# train model, update position, scan, log,
print("collecting point {}".format(idx))
# run the experiment
scan_data = acquire_cv_data(pstat, pos, idx)
observed[query_id] = True
# make a decision
                label, avg_current = evaluate_CV_curves(
                    scan_data["potential"], scan_data["current"]
                )
scan_data["label"] = label
y[query_id] = label
# save metadata
write_logfile(scan_data, idx)
plot_CV(scan_data["potential"], scan_data["current"], idx)
# select the next point to measure
if idx < n_initial:
# randomly select a point to query
query_id = np.random.choice(X.shape[0])
else:
# actively acquire data
print("fitting gp")
model, p, var, query_id = fit_gp(X, y, observed)
print("ok, plotting...")
plot_predictions(X, y, observed, p, var, query_id, idx)
print("ok.")
new_xy = X[query_id]
# move the probe
update_position(pos, new_xy)
# bring the probe back up
pos.update(delta=final_delta, verbose=True)
pos.print_status()
if __name__ == "__main__":
find_circle()
| 8,091 | 26.245791 | 119 | py | autoSDC | autoSDC-master/asdc/scripts/combi_cv.py |
#!/usr/bin/env python
import os
import sys
import glob
import json
import time
import yaml
import click
import numpy as np
import pandas as pd
from datetime import datetime
import matplotlib.pyplot as plt
from asdc import sdc
from asdc import visualization
# 5mm up, 50 microns down
@click.command()
@click.argument("config-file", type=click.Path())
@click.option("--verbose/--no-verbose", default=False)
def run_combi_scan(config_file, verbose):
"""keep in mind that sample frame and versastat frame have x and y flipped:
x_combi is -y_versastat
y_combi is -x_versastat
Also, combi wafer frame is in mm, versastat frame is in meters.
Assume we start at the standard combi layout spot 1 (-9.04, -31.64)
"""
with open(config_file, "r") as f:
        config = yaml.safe_load(f)
if config["data_dir"] is None:
config["data_dir"] = os.path.split(config_file)[0]
if config["delta_z"] is not None:
config["delta_z"] = abs(config["delta_z"])
df = pd.read_csv(config["target_file"], index_col=0)
current_spot = pd.Series(dict(x=-9.04, y=-31.64))
with sdc.position.controller(ip="192.168.10.11", speed=config["speed"]) as pos:
initial_versastat_position = pos.current_position()
for idx, target in df.iterrows():
        # update position: convert from mm to m
        # x_vs is -y_c, y_vs is -x_c
dy = -(target.x - current_spot.x) * 1e-3
dx = -(target.y - current_spot.y) * 1e-3
delta = [dx, dy, 0.0]
current_spot = target
if verbose:
print("position update:", dx, dy)
with sdc.position.controller(ip="192.168.10.11", speed=config["speed"]) as pos:
pos.update(
delta=delta,
step_height=config["delta_z"],
compress=config["compress_dz"],
)
current_v_position = pos.current_position()
# run CV scan
if config["initial_delay"]:
time.sleep(config["initial_delay"])
cv_data = sdc.experiment.run_cv_scan(
cell=config["cell"], verbose=config["verbose"]
)
cv_data["index_in_sequence"] = int(idx)
cv_data["position_versa"] = current_v_position
_spot = current_spot.to_dict()
cv_data["position_combi"] = [float(_spot["x"]), float(_spot["y"])]
# log data
logfile = "grid_scan_{:03d}.json".format(idx)
with open(os.path.join(config["data_dir"], logfile), "w") as f:
json.dump(cv_data, f)
visualization.plot_iv(
cv_data["current"], cv_data["potential"], idx, config["data_dir"]
)
visualization.plot_v(
cv_data["elapsed_time"],
cv_data["potential"],
idx,
data_dir=config["data_dir"],
)
# go back to the original position....
with sdc.position.controller(ip="192.168.10.11", speed=config["speed"]) as pos:
x_initial, y_initial, z_initial = initial_versastat_position
x_current, y_current, z_current = pos.current_position()
delta = [x_initial - x_current, y_initial - y_current, 0.0]
pos.update(
delta=delta, step_height=config["delta_z"], compress=config["compress_dz"]
)
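def combi_to_versastat_delta(target, current):
    """Hedged helper sketch (not wired into run_combi_scan): convert a move in
    the combi wafer frame (mm) into a versastat-frame delta (m), using the
    axis-flip convention documented in run_combi_scan's docstring."""
    dy = -(target.x - current.x) * 1e-3
    dx = -(target.y - current.y) * 1e-3
    return [dx, dy, 0.0]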
if __name__ == "__main__":
run_combi_scan()
| 3,312 | 31.165049 | 87 | py | autoSDC | autoSDC-master/asdc/sdc/potentiostat.py |
""" asdc.control: pythonnet .NET interface to VersaSTAT/VersaSCAN libraries """
import os
import clr
import sys
import json
import time
import asyncio
import inspect
import logging
import streamz
import numpy as np
import pandas as pd
from datetime import datetime
from contextlib import contextmanager
from streamz.dataframe import DataFrame
logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)
# pythonnet checks PYTHONPATH for assemblies to load...
# so add the VersaSCAN libraries to sys.path
vdkpath = "C:/Program Files (x86)/Princeton Applied Research/VersaSTAT Development Kit"
sys.path.append(vdkpath)
# load instrument control library...
clr.AddReference("VersaSTATControl")
from VersaSTATControl import Instrument
class VersaStatError(Exception):
pass
@contextmanager
def controller(start_idx=17109013, initial_mode="potentiostat"):
""" context manager that wraps potentiostat controller class Control. """
ctl = Potentiostat(start_idx=start_idx, initial_mode=initial_mode)
try:
ctl.stop()
ctl.clear()
yield ctl
except Exception as exc:
print(exc)
print("Exception: unwind potentiostat controller...")
ctl.stop()
time.sleep(1)
ctl.clear()
time.sleep(1)
ctl.disconnect()
raise
finally:
print("disconnect from potentiostat controller.")
ctl.stop()
ctl.clear()
ctl.disconnect()
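# Hedged usage sketch for the context manager above:
#   with controller(start_idx=17109013) as pstat:
#       pstat.set_current_range("20nA")
#       results, metadata = pstat.run(experiment)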
class Potentiostat:
"""Interface to the VersaSTAT SDK library for instrument control
methods are broken out into `Immediate` (direct instrument control) and `Experiment`.
"""
def __init__(self, start_idx=0, initial_mode="potentiostat", poll_interval=1):
self.instrument = Instrument()
self.start_idx = start_idx
self.connect()
self.serial_number = self.instrument.GetSerialNumber()
self.model = self.instrument.GetModel()
self.options = self.instrument.GetOptions()
self.low_current_interface = self.instrument.GetIsLowCurrentInterfacePresent()
self.mode = initial_mode
self.poll_interval = poll_interval
self.current_range = None
return
def __call__(self, experiment):
return self.run(experiment)
def check_overload(self):
self.update_status()
overload_status = self.overload_status()
if overload_status != 0:
print("OVERLOAD:", overload_status)
return overload_status
def read_buffers(self, start=0):
num_points = self.points_available() - start
return pd.DataFrame(
{
"current": self.current(start, num_points),
"potential": self.potential(start, num_points),
"elapsed_time": self.elapsed_time(start, num_points),
"applied_potential": self.applied_potential(start, num_points),
"current_range": self.current_range_history(start, num_points),
"segment": self.segment(start, num_points),
}
)
def run(self, experiment, clear=True):
""" run an SDC experiment sequence -- busy wait until it's finished """
# this is a bit magical...
# `experiment` has an attribute `setup_func` that holds the name of the .NET function
# that should be invoked to add an experiment
# `experiment.setup` is responsible for looking it up and invoking it
# with e.g. `f = getattr(pstat.instrument.Experiment, experiment.setup_func)`
# this way, an `SDCSequence` can call all the individual `setup` methods
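        # e.g., for a CyclicVoltammetry experiment this dispatch is roughly
        # equivalent to (hedged sketch, names per the classes in experiment.py):
        #   f = getattr(self.instrument.Experiment, "AddMultiCyclicVoltammetry")
        #   f(experiment.getargs())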
argstring = experiment.setup(self.instrument.Experiment)
metadata = {"timestamp_start": datetime.now(), "parameters": argstring}
self.start()
error_codes = set()
source = streamz.Stream()
# build a list of pd.DataFrames
# to concat into the full measurement data
chunks = source.sink_to_list()
# # publish the pd.DataFrame chunk over zmq
# send_data = source.sink(lambda x: socket.send_pyobj(x))
# streaming dataframe for early stopping (and potential error checking) callbacks
example = pd.DataFrame(
{
"current": [],
"potential": [],
"elapsed_time": [],
"applied_potential": [],
"current_range": [],
"segment": [],
}
)
sdf = DataFrame(source, example=example)
early_stop = experiment.register_early_stopping(sdf)
data_cursor = 0
stop_flagged = False
while self.sequence_running():
time.sleep(self.poll_interval)
error_codes.add(self.check_overload())
data_chunk = self.read_buffers(start=data_cursor)
chunksize, _ = data_chunk.shape
data_cursor += chunksize
if chunksize > 0:
source.emit(data_chunk)
if experiment.stop_execution and not stop_flagged:
logger.debug("stopping experiment early")
self.skip()
stop_flagged = True
logger.debug("finished running experiment")
metadata["timestamp_end"] = datetime.now()
metadata["error_codes"] = json.dumps(list(map(int, error_codes)))
# results = self.read_buffers()
results = pd.concat(chunks, ignore_index=True)
# cast results into specific e-chem result type
# (which subclass pandas.DataFrame and have a validation and plotting interface)
results = experiment.marshal(results)
self.clear()
return results, metadata
def connect(self):
self.index = self.instrument.FindNext(self.start_idx)
self.connected = self.instrument.Connect(self.index)
return
def disconnect(self):
self.instrument.Close()
# Immediate methods -- direct instrument control
def set_cell(self, status="on"):
""" turn the cell on or off """
if status not in ("on", "off"):
            raise ValueError("specify valid cell status in {on, off}")
if status == "on":
self.instrument.Immediate.SetCellOn()
else:
self.instrument.Immediate.SetCellOff()
def choose_cell(self, choice="external"):
""" choose between the internal and external cells. """
if choice not in ("internal", "external"):
            raise ValueError("specify valid cell in {internal, external}")
if choice == "external":
self.instrument.Immediate.SetCellExternal()
elif choice == "internal":
self.instrument.Immediate.SetCellExternal()
def set_mode(self, mode):
""" choose between potentiostat and galvanostat modes. """
if mode not in ("potentiostat", "galvanostat"):
            raise ValueError("set mode = {potentiostat, galvanostat}")
if mode == "potentiostat":
self.instrument.Immediate.SetModePotentiostat()
elif mode == "galvanostat":
self.instrument.Immediate.SetModeGalvanostat()
def set_current_range(self, current_range):
valid_current_ranges = [
"2A",
"200mA",
"20mA",
"2mA",
"200uA",
"20uA",
"2uA",
"200nA",
"20nA",
"2nA",
]
if current_range not in valid_current_ranges:
            raise ValueError(
"specify valid current range ({})".format(valid_current_ranges)
)
self.current_range = current_range
# dispatch the right SetIRange_* function....
current = "SetIRange_{}".format(current_range)
set_current = getattr(self.instrument.Immediate, current)
set_current()
def set_dc_potential(self, potential):
""" Set the output DC potential (in Volts). This voltage must be within the instruments capability."""
self.instrument.Immediate.SetDCPotential(potential)
def set_dc_current(self, current):
"""Set the output DC current (in Amps). This current must be within the instruments capability.
Calling this method also changes to Galvanostat mode and sets the current range to the correct value.
WARNING: Once cell is enabled after setting the DC current, do not change to potentiostatic mode or change the current range.
These will affect the value being applied to the cell.
"""
self.instrument.Immediate.SetDCCurrent(current)
def set_ac_frequency(self, frequency):
""" Sets the output AC Frequency (in Hz). This frequency must be within the instruments capability."""
self.instrument.Immediate.SetACFrequency(frequency)
def set_ac_amplitude(self, amplitude):
""" Sets the output AC Amplitude (in RMS Volts). This amplitude must be within the instruments capabilities."""
self.instrument.Immediate.SetACAmplitude(amplitude)
def set_ac_waveform(self, mode="on"):
waveform_modes = ["on", "off"]
if mode not in waveform_modes:
            raise ValueError("specify valid AC waveform mode {on, off}.")
if mode == "on":
self.instrument.Immediate.SetACWaveformOn()
elif mode == "off":
self.instrument.Immediate.SetACWaveformOff()
def update_status(self):
"""Retrieve the status information from the instrument.
Also auto-ranges the current if an experiment sequence is not in progress.
Call this prior to calling the status methods below.
"""
self.instrument.Immediate.UpdateStatus()
def latest_potential(self):
""" get the latest stored E value. """
return self.instrument.Immediate.GetE()
def latest_current(self):
""" get the latest stored I value. """
return self.instrument.Immediate.GetI()
def overload_status(self, raise_exception=False):
"""check for overloading.
0 indicates no overload, 1 indicates I (current) Overload, 2
indicates E, Power Amp or Thermal Overload has occurred.
"""
overload_cause = {
1: "I (current) overload",
2: "E, Power Amp, or Thermal overload",
}
overload_code = self.instrument.Immediate.GetOverload()
if overload_code and raise_exception:
msg = "A " + overload_cause[overload_code] + " has occurred."
raise VersaStatError(msg)
return overload_code
def booster_enabled(self):
""" check status of the booster switch. """
return self.instrument.Immediate.GetBoosterEnabled()
def cell_enabled(self):
""" check status of the cell. """
return self.instrument.Immediate.GetCellEnabled()
def autorange_current(self, auto):
"""Enable or disable (default is enabled) automatic current ranging while an experiment is not running.
Disabling auto-ranging is useful when wanting to apply a DC current in immediate mode.
"""
if auto:
self.instrument.Immediate.SetAutoIRangeOn()
else:
self.instrument.Immediate.SetAutoIRangeOff()
# Experiment methods
# Experiment actions apparently can be run asynchronously
def actions(self):
""" get the current experiment action queue. """
# Returns a list of comma delimited action names that are supported by the instrument that is currently connected
action_list = self.instrument.Experiment.GetActionList()
return action_list.split(",")
def clear(self):
""" clear the experiment action queue. """
self.instrument.Experiment.Clear()
def start(self, max_wait_time=30, poll_interval=2):
"""Starts the sequence of actions in the instrument that is currently connected.
Wait until the instrument starts the action to return control flow."""
self.instrument.Experiment.Start()
# Note: ctl.start() can return before the sequence actually starts running,
# so it's possible to skip right past the data collection spin-waiting loop
# which writes a data-less log file and pushes the next experiment onto the queue
# while the instrument is still going on with the current one.
# it appears that this is not safe....
elapsed = 0
while not self.sequence_running():
time.sleep(poll_interval)
elapsed += poll_interval
            if elapsed > max_wait_time:
                raise VersaStatError("could not start experiment")
print("started experiment sequence successfully.")
return
def stop(self):
""" Stops the sequence of actions that is currently running in the instrument that is currently connected. """
self.instrument.Experiment.Stop()
def skip(self):
"""Skips the currently running action and immediately starts the next action.
        If there are no more actions to run, the sequence is simply stopped.
"""
self.instrument.Experiment.Skip()
def sequence_running(self):
""" Returns true if a sequence is currently running on the connected instrument, false if not. """
return self.instrument.Experiment.IsSequenceRunning()
def points_available(self):
"""Returns the number of points that have been stored by the instrument after a sequence of actions has begun.
Returns -1 when all data has been retrieved from the instrument.
"""
return self.instrument.Experiment.GetNumPointsAvailable()
def last_open_circuit(self):
"""Returns the last measured Open Circuit value.
        This value is stored at the beginning of the sequence (and updated anytime the "AddMeasureOpenCircuit" action is called)"""
return self.instrument.Experiment.GetLastMeasuredOC()
# The following Action Methods can be called in order to create a sequence of Actions.
# A single string argument encodes multiple parameters as comma-separated lists...
# For example, AddOpenCircuit( string ) could be called, then AddEISPotentiostatic( string ) called.
# This would create a sequence of two actions, when started, the open circuit experiment would run, then the impedance experiment.
# TODO: write a class interface for different experimental actions to streamline logging and serialization?
# TODO: code-generation for GetData* interface?
def potential(self, start=0, num_points=None, as_list=True):
if num_points is None:
num_points = self.points_available()
num_points = num_points - start
values = self.instrument.Experiment.GetDataPotential(start, num_points)
if as_list:
return [value for value in values]
return values
def current(self, start=0, num_points=None, as_list=True):
if num_points is None:
num_points = self.points_available()
num_points = num_points - start
values = self.instrument.Experiment.GetDataCurrent(start, num_points)
if as_list:
return [value for value in values]
return values
def elapsed_time(self, start=0, num_points=None, as_list=True):
if num_points is None:
num_points = self.points_available()
num_points = num_points - start
values = self.instrument.Experiment.GetDataElapsedTime(start, num_points)
if as_list:
return [value for value in values]
return values
def applied_potential(self, start=0, num_points=None, as_list=True):
if num_points is None:
num_points = self.points_available()
num_points = num_points - start
values = self.instrument.Experiment.GetDataAppliedPotential(start, num_points)
if as_list:
return [value for value in values]
return values
def segment(self, start=0, num_points=None, as_list=True):
if num_points is None:
num_points = self.points_available()
num_points = num_points - start
values = self.instrument.Experiment.GetDataSegment(start, num_points)
if as_list:
return [value for value in values]
return values
def current_range_history(self, start=0, num_points=None, as_list=True):
if num_points is None:
num_points = self.points_available()
num_points = num_points - start
values = self.instrument.Experiment.GetDataCurrentRange(start, num_points)
if as_list:
return [value for value in values]
return values
def hardcoded_open_circuit(self, params):
default_params = (
"1,10,NONE,<,0,NONE,<,0,2MA,AUTO,AUTO,AUTO,INTERNAL,AUTO,AUTO,AUTO"
)
status = self.instrument.Experiment.AddOpenCircuit(default_params)
return status, default_params
def measure_open_circuit(self):
status = self.instrument.Experiment.AddMeasureOpenCircuit()
return status, None
| 17,240 | 33.760081 | 134 | py | autoSDC | autoSDC-master/asdc/sdc/reglo.py |
""" interface for the Reglo peristaltic pump """
import time
import typing
from typing import Dict
from enum import IntEnum
from collections.abc import Iterable
import regloicclib
class Channel(IntEnum):
"""index organization for the pump channels
needle defaults counterclockwise (-)
drain (-)
loop (+)
rinse (+)
"""
ALL = 0
NEEDLE = 1
DRAIN = 2
LOOP = 3
RINSE = 4
# 12 mL/min
# order SOURCE, LOOP, DRAIN
CHANNEL_UPDATE_DELAY = 0.001
class Reglo(regloicclib.Pump):
"""thin wrapper around the pump interface from regloicc
TODO: rewrite the serial interface...
"""
def __init__(self, address=None, debug=False, tubing_inner_diameter=1.52):
super().__init__(address=address, debug=debug)
self.tubing_inner_diameter = tubing_inner_diameter
# TODO: this should maybe be a python property
# so that max flow rates can be automatically kept in sync
for channel in range(1, 5):
self.setTubingInnerDiameter(self.tubing_inner_diameter, channel=channel)
self.maxrates = {}
for channel in self.channels:
self.maxrates[channel] = float(self.hw.query("%d?" % channel).split(" ")[0])
time.sleep(CHANNEL_UPDATE_DELAY)
def set_rates(self, setpoints: Dict[Channel, float]):
for channel, rate in setpoints.items():
if rate == 0:
self.stop(channel=channel.value)
else:
self.continuousFlow(rate, channel=channel.value)
time.sleep(CHANNEL_UPDATE_DELAY)
return
def continuousFlow(self, rate, channel=None):
"""
Start continuous flow at rate (ml/min) on specified channel or
on all channels.
"""
if type(channel) is Channel:
channel = channel.value
if channel is None or channel == 0:
channel = 0
# this enables fairly synchronous start
maxrate = min(self.maxrates.values())
else:
maxrate = self.maxrates[channel]
assert channel in self.channels or channel == 0
# flow rate mode
self.hw.write("%dM" % channel)
self.hw.write(f"{channel}M")
# set flow direction
if rate < 0:
self.hw.write(f"{channel}K")
else:
self.hw.write(f"{channel}J")
# set flow rate
if abs(rate) > maxrate:
rate = rate / abs(rate) * maxrate
flowrate = self._volume2(rate)
self.hw.query(f"{channel}f{flowrate}")
# maintain internal running status in python client
self.hw.setRunningStatus(True, channel)
# start pumps command
self.hw.write(f"{channel}H")
def stop(self, channel=None):
if channel is None or type(channel) is int:
super().stop(channel=channel)
elif type(channel) is Channel:
super().stop(channel=channel.value)
elif isinstance(channel, Iterable):
for c in channel:
super().stop(channel=c.value)
return
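# Hedged usage sketch for the Reglo wrapper:
#   pump = Reglo(address="COM12")   # serial address here is an assumption
#   pump.set_rates({Channel.NEEDLE: -1.0, Channel.LOOP: 1.0})  # mL/min; sign sets direction
#   pump.stop([Channel.NEEDLE, Channel.LOOP])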
| 3,096 | 25.02521 | 88 | py | autoSDC | autoSDC-master/asdc/sdc/position.py |
""" asdc.position: pythonnet .NET interface to VersaSTAT motion controller """
CONTROLLER_ADDRESS = "192.168.10.11"
import os
import clr
import sys
import time
import asyncio
import functools
import numpy as np
from contextlib import contextmanager, asynccontextmanager
import traceback
# pythonnet checks PYTHONPATH for assemblies to load...
# so add the VeraScan libraries to sys.path
versascan_path = "C:/Program Files (x86)/Princeton Applied Research/VersaSCAN"
sys.path.append(versascan_path)
sys.path.append(os.path.join(versascan_path, "Devices"))
dlls = ["CommsLibrary", "DeviceInterface", "ScanDevices", "NanomotionXCD"]
for dll in dlls:
clr.AddReference(dll)
clr.AddReference("System")
clr.AddReference("System.Net")
from System.Net import IPAddress
from SolartronAnalytical.DeviceInterface.NanomotionXCD import XCD, XcdSettings
@contextmanager
def controller(ip=CONTROLLER_ADDRESS, speed=1e-4):
""" context manager that wraps position controller class Position. """
pos = Position(ip=ip, speed=speed)
try:
pos.controller.Connect()
yield pos
except Exception as exc:
print("unwinding position controller due to exception.")
traceback.print_exc()
pos.controller.Disconnect()
raise exc
finally:
pos.controller.Disconnect()
@contextmanager
def sync_z_step(ip=CONTROLLER_ADDRESS, height=None, speed=1e-4):
"""wrap position controller context manager
perform vertical steps before lateral cell motion with the ctx manager
so that the cell drops back down to baseline z level if the `move` task is completed
"""
with controller(ip=ip, speed=speed) as pos:
baseline_z = pos.z
try:
if height is not None:
if height <= 0:
raise ValueError("z_step should be positive")
pos.update_z(delta=height)
yield pos
finally:
if height is not None:
dz = baseline_z - pos.z
pos.update_z(delta=dz)
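# Hedged usage sketch for sync_z_step:
#   with sync_z_step(height=0.002) as pos:
#       pos.update(delta=[1e-3, 0.0, 0.0])   # lateral move with the cell raised
#   # on exit, the stage steps back down to the baseline z level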
@asynccontextmanager
async def acontroller(loop=None, z_step=None, ip=CONTROLLER_ADDRESS, speed=1e-4):
"""wrap position controller context manager
perform vertical steps before lateral cell motion with the ctx manager
so that the cell drops back down to baseline z level if the `move` task is completed
"""
with controller(ip=ip, speed=speed) as pos:
baseline_z = pos.z
try:
if z_step is not None:
if z_step <= 0:
raise ValueError("z_step should be positive")
f = functools.partial(pos.update_z, delta=z_step)
await loop.run_in_executor(None, f)
yield pos
finally:
if z_step is not None:
dz = baseline_z - pos.z
f = functools.partial(pos.update_z, delta=dz)
await loop.run_in_executor(None, f)
@asynccontextmanager
async def z_step(loop=None, height=0.002, ip=CONTROLLER_ADDRESS, speed=1e-4):
"""async controller context manager for z step
perform a vertical step with no horizontal movement
"""
if height <= 0:
raise ValueError("z_step should be positive")
try:
with controller(ip=ip, speed=speed) as pos:
baseline_z = pos.z
f = functools.partial(pos.update_z, delta=height)
await loop.run_in_executor(None, f)
yield
finally:
with controller(ip=ip, speed=speed) as pos:
dz = baseline_z - pos.z
f = functools.partial(pos.update_z, delta=dz)
await loop.run_in_executor(None, f)
class Position:
""" Interface to the VersaSTAT motion controller library """
def __init__(self, ip="192.168.10.11", speed=0.0001):
""" instantiate a Position controller context manager """
self._ip = ip
self._speed = speed
# Set up and connect to the position controller
self.controller = XCD()
self.settings = XcdSettings()
self.settings.Speed = self._speed
self.settings.IPAddress = IPAddress.Parse(self._ip)
self.controller.Connect()
@property
def speed(self):
return self._speed
@speed.setter
def speed(self, speed):
self._speed = speed
self.settings.Speed = self._speed
@property
def x(self):
""" the current stage x position """
return self.current_position()[0]
@property
def y(self):
""" the current stage y position """
return self.current_position()[1]
@property
def z(self):
""" the current stage z position """
return self.current_position()[2]
def current_position(self):
"""return the current coordinates as a list
axis.Values holds (position, speed, error)
"""
return [axis.Values[0] for axis in self.controller.Parameters]
    def home(self, block_interval=1):
        """execute the homing operation, blocking for `block_interval` seconds.
        Warning: this will cause the motion stage to return to its origin.
        This happens to be the maximum height for the stage...
        """
if not self.controller.IsHomingDone:
self.controller.DoHoming()
time.sleep(block_interval)
def print_status(self):
""" print motion controller status for each axis. """
for axis in self.controller.Parameters:
print(
"{} setpoint = {} {}".format(axis.Quantity, axis.SetPoint, axis.Units)
)
for idx in range(axis.ValueNames.Length):
print(axis.ValueNames[idx], axis.Values[idx], axis.Units)
print()
def at_setpoint(self, verbose=False):
""" check that each axis of the position controller is at its setpoint """
for ax in self.controller.Parameters:
if verbose:
print(ax.Values[0], ax.Units)
if not ax.IsAtSetPoint:
return False
return True
def update_single_axis(self, axis=0, delta=0.001, verbose=False, poll_interval=0.1):
"""update position setpoint and busy-wait until the motion controller has finished.
poll_interval: busy-waiting polling interval (seconds)
"""
# update the setpoint for the x axis
for idx, ax in enumerate(self.controller.Parameters):
if idx == axis:
if verbose:
print(ax.Quantity)
ax.SetPoint = ax.Values[0] + delta
break
# busy-wait while the motion controller moves the stage
while not self.at_setpoint(verbose=verbose):
time.sleep(poll_interval)
return
def update_x(self, delta=0.001, verbose=False, poll_interval=0.1):
return self.update_single_axis(
axis=0, delta=delta, verbose=verbose, poll_interval=poll_interval
)
def update_y(self, delta=0.001, verbose=False, poll_interval=0.1):
return self.update_single_axis(
axis=1, delta=delta, verbose=verbose, poll_interval=poll_interval
)
def update_z(self, delta=0.001, verbose=False, poll_interval=0.1):
return self.update_single_axis(
axis=2, delta=delta, verbose=verbose, poll_interval=poll_interval
)
def update(
self,
delta=[0.001, 0.001, 0.0],
step_height=None,
compress=None,
verbose=False,
poll_interval=0.1,
max_wait_time=25,
):
"""update position setpoint and busy-wait until the motion controller has finished.
delta: position update [dx, dy, dz]
step_height: ease off vertically before updating position
poll_interval: busy-waiting polling interval (seconds)
"""
if step_height is not None and step_height > 0:
step_height = abs(step_height)
self.update_z(delta=step_height, verbose=verbose)
for d, ax in zip(delta, self.controller.Parameters):
if verbose:
print(ax.Quantity)
if d != 0.0:
ax.SetPoint = ax.Values[0] + d
# busy-wait while the motion controller moves the stage
time_elapsed = 0
while not self.at_setpoint(verbose=verbose):
time.sleep(poll_interval)
time_elapsed += poll_interval
if time_elapsed > max_wait_time:
raise TimeoutError(
"Max position update time of {}s exceeded".format(max_wait_time)
)
if step_height is not None and step_height > 0:
self.update_z(delta=-step_height, verbose=verbose)
if compress is not None and abs(compress) > 0:
compress = np.clip(abs(compress), 0, 5e-5)
self.update_z(delta=-compress)
self.update_z(delta=compress)
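# Hedged usage sketch for Position.update with a vertical step:
#   with controller(speed=1e-4) as pos:
#       # lift 200 um, translate 0.5 mm in x, drop back down, then press 20 um
#       pos.update(delta=[5e-4, 0.0, 0.0], step_height=2e-4, compress=2e-5)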
| 9,539 | 28.263804 | 91 | py | autoSDC | autoSDC-master/asdc/sdc/experiment.py |
import sys
import json
import time
import numpy as np
import pandas as pd
from datetime import datetime
from typing import Optional, Dict, List, Sequence
from dataclasses import dataclass
import streamz
import streamz.dataframe
from asdc import _slack
from asdc import analysis
from .experiment_defaults import *
# if sys.platform == 'win32':
# from . import potentiostat
# else:
# # except ModuleNotFoundError:
# from .shims import potentiostat
MIN_SAMPLING_FREQUENCY = 1.0e-5  # despite the name, this floors the sampling period (s per point)
def from_command(instruction):
""" {"op": "lpr", "initial_potential": -0.5, "final_potential": 0.5, "step_height": 0.1, "step_time": 0.5} """
# don't mangle the original dictionary at all
instruction_data = instruction.copy()
opname = instruction_data.get("op")
Expt = potentiostat_ops.get(opname)
if Expt is None:
return None
del instruction_data["op"]
return Expt(**instruction_data)
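# Hedged usage sketch:
#   expt = from_command({"op": "cv", "initial_potential": 0.0,
#                        "vertex_potential_1": -1.0, "vertex_potential_2": 1.2,
#                        "final_potential": 0.0, "scan_rate": 0.075, "cycles": 2})
#   # returns a CyclicVoltammetry instance, or None for an unrecognized "op"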
@dataclass
class LPR(LPRArgs):
"""linear polarization resistance
Attributes:
initial_potential (float): starting potential (V)
final_potential (float): ending potential (V)
step_height (float): scan step size (V)
step_time (float): scan point duration (s)
Example:
```json
{"op": "lpr", "initial_potential": -0.5, "final_potential": 0.5, "step_height": 0.1, "step_time": 0.5}
```
"""
versus: str = "VS OC"
stop_execution: bool = False
setup_func: str = "AddLinearPolarizationResistance"
def register_early_stopping(self, sdf: streamz.dataframe.DataFrame):
return None
def getargs(self):
# override any default arguments...
args = self.__dict__
args["versus_initial"] = args["versus_final"] = args["versus"]
args = LPRArgs.from_dict(args)
return args.format()
def marshal(self, echem_data: Dict[str, Sequence[float]]):
return analysis.LPRData(echem_data)
@dataclass
class StaircaseLSV(StaircaseLSVArgs):
"""staircase linear scan voltammetry
Attributes:
initial_potential (float): starting potential (V)
final_potential (float): ending potential (V)
step_height (float): scan step size (V)
step_time (float): scan point duration (s)
Example:
```json
{"op": "staircase_lsv", "initial_potential": 0.0, "final_potential": 1.0, "step_height": 0.001, "step_time": 0.8}
```
"""
versus: str = "VS REF"
stop_execution: bool = False
setup_func: str = "AddStaircaseLinearScanVoltammetry"
filter: Optional[str] = None
def register_early_stopping(self, sdf: streamz.dataframe.DataFrame):
return None
def getargs(self):
# override any default arguments...
args = self.__dict__
args["versus_initial"] = args["versus_final"] = args["versus"]
if args["filter"] is not None:
args["e_filter"] = args["i_filter"] = args["filter"]
args = StaircaseLSVArgs.from_dict(args)
return args.format()
@dataclass
class Potentiostatic(PotentiostaticArgs):
"""potentiostatic: hold at constant potential
Attributes:
potential (float): (V)
duration (float) : (s)
Example:
```json
{"op": "potentiostatic", "potential": Number(volts), "duration": Time(seconds)}
```
"""
n_points: int = 3000
duration: int = 10
versus: str = "VS REF"
stop_execution: bool = False
setup_func: str = "AddPotentiostatic"
def register_early_stopping(self, sdf: streamz.dataframe.DataFrame):
return None
def getargs(self):
time_per_point = np.maximum(
self.duration / self.n_points, MIN_SAMPLING_FREQUENCY
)
# override any default arguments...
args = self.__dict__
args["time_per_point"] = time_per_point
args["versus_initial"] = args["versus"]
args = PotentiostaticArgs.from_dict(args)
return args.format()
def marshal(self, echem_data: Dict[str, Sequence[float]]):
return analysis.PotentiostaticData(echem_data)
@dataclass
class Potentiodynamic(PotentiodynamicArgs):
"""potentiodynamic
Attributes:
initial_potential (float): starting potential (V)
final_potential (float): ending potential (V)
step_height (float): scan step size (V)
step_time (float): scan point duration (s)
Example:
```json
{"op": "potentiodynamic", "initial_potential": 0.0, "final_potential": 1.0, "step_height": 0.001, "step_time": 0.8}
```
"""
n_points: int = 3000
duration: int = 10
versus: str = "VS REF"
stop_execution: bool = False
setup_func: str = "AddPotentiodynamic"
def register_early_stopping(self, sdf: streamz.dataframe.DataFrame):
return None
def getargs(self):
# override any default arguments...
args = self.__dict__
args["versus_initial"] = args["versus_final"] = args["versus"]
if args["filter"] is not None:
args["e_filter"] = args["i_filter"] = args["filter"]
args = PotentiodynamicArgs.from_dict(args)
return args.format()
def marshal(self, echem_data: Dict[str, Sequence[float]]):
return analysis.PotentiodynamicData(echem_data)
@dataclass
class LSV(LSVArgs):
"""linear scan voltammetry
Attributes:
initial_potential (float): starting potential (V)
final_potential (float): ending potential (V)
scan_rate (float): scan rate (V/s)
current_range (str): current range setting to use
Example:
```json
{"op": "lsv", "initial_potential": 0.0, "final_potential": 1.0, "scan_rate": 0.075}
```
"""
versus: str = "VS REF"
stop_execution: bool = False
setup_func: str = "AddLinearScanVoltammetry"
filter: Optional[str] = None
def register_early_stopping(self, sdf: streamz.dataframe.DataFrame):
return None
def getargs(self):
# override any default arguments...
args = self.__dict__
args["versus_initial"] = args["versus_final"] = args["versus"]
if args["filter"] is not None:
args["e_filter"] = args["i_filter"] = args["filter"]
args = LSVArgs.from_dict(args)
return args.format()
def marshal(self, echem_data: Dict[str, Sequence[float]]):
return analysis.LSVData(echem_data)
@dataclass
class Tafel(TafelArgs):
"""Tafel analysis
Attributes:
initial_potential (float): starting potential (V)
final_potential (float): ending potential (V)
step_height (float): scan step size (V)
step_time (float): scan point duration (s)
Example:
```json
{"op": "tafel", "initial_potential": V, "final_potential": V, "step_height": V, "step_time": s}
```
"""
versus: str = "VS OC"
stop_execution: bool = False
setup_func: str = "AddTafel"
def register_early_stopping(self, sdf: streamz.dataframe.DataFrame):
return None
def getargs(self):
# override any default arguments...
args = self.__dict__
args["versus_initial"] = args["versus_final"] = args["versus"]
args = TafelArgs.from_dict(args)
return args.format()
def marshal(self, echem_data: Dict[str, Sequence[float]]):
return analysis.TafelData(echem_data)
@dataclass
class OpenCircuit(OpenCircuitArgs):
"""Open circuit hold
If a `stabilization_window` is specified, allow the OCP hold to terminate early
if the OCP fluctuation is less than `stabilization_range` volts over the window.
    Attributes:
        duration (float): maximum OCP hold duration (s)
        time_per_point (float): voltage sampling period (s)
        stabilization_range (float): maximum allowed fluctuation for OCP stabilization (V)
        stabilization_window (float): OCP stabilization time period (s)
        smoothing_window (float): window for rolling mean applied to OCP before computing range
        minimum_duration (float): minimum OCP stabilization time period (s)
Example:
json:
```json
{"op": "open_circuit", "duration": 60, "time_per_point": 0.5}
```
python:
```python
expt = OpenCircuit(duration=60, time_per_point=0.5)
```
"""
stabilization_range: float = 0.01
stabilization_window: float = 0
smoothing_window: float = 10
minimum_duration: float = 0
stop_execution: bool = False
setup_func: str = "AddOpenCircuit"
def getargs(self):
# override any default arguments...
args = self.__dict__
args = OpenCircuitArgs.from_dict(args)
return args.format()
def marshal(self, echem_data: Dict[str, Sequence[float]]):
return analysis.OCPData(echem_data)
def signal_stop(self, value):
elapsed = datetime.now() - self.start_ts
if (
elapsed.total_seconds() > self.minimum_duration
and value < self.stabilization_range
):
self.stop_execution = True
def register_early_stopping(self, sdf: streamz.dataframe.DataFrame):
"""streaming dataframe -> early stopping criterion
the potentiostat interface will check `experiment.stop_execution`
"""
self.start_ts = datetime.now()
if self.stabilization_window <= 0:
# by default, do not register early stopping at all
# if the stabilization window is set to some positive time interval, proceed
return None
# set up streams to compute windowed potential range and trigger early stopping.
# is there a more composable way to do this?
# maybe the experiment object can have a function that accepts a streaming results dataframe
# and builds and returns additional streams?
# this might also be a decent way to register online error checkers
def _min(old, new):
chunk_min = min(new.values)
return min(old, chunk_min)
# compute rolling mean
smoothed_potential = sdf.rolling(self.smoothing_window).potential.mean()
# compute rolling window range on potential
potential_max = smoothed_potential.rolling(self.stabilization_window).max()
potential_min = smoothed_potential.rolling(self.stabilization_window).min()
# compute minimum rolling window range in each chunk
potential_range = (potential_max - potential_min).stream.accumulate(
_min, start=np.inf
)
return potential_range.sink(self.signal_stop)
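# Hedged usage sketch: an OCP hold that may terminate early once the smoothed
# potential range stays within 2 mV over a 30 s window
#   ocp = OpenCircuit(duration=600, time_per_point=0.5,
#                     stabilization_window=30, stabilization_range=0.002)
#   # the potentiostat polls `ocp.stop_execution` and skips the action once set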
@dataclass
class CorrosionOpenCircuit(CorrosionOpenCircuitArgs):
"""Corrosion open circuit hold
Attributes:
duration (float) : (s)
Example:
```json
{"op": "corrosion_oc", "duration": Time, "time_per_point": Time}
```
"""
stop_execution: bool = False
setup_func: str = "AddCorrosionOpenCircuit"
def register_early_stopping(self, sdf: streamz.dataframe.DataFrame):
return None
def getargs(self):
# override any default arguments...
args = self.__dict__
args = CorrosionOpenCircuitArgs.from_dict(args)
return args.format()
@dataclass
class CyclicVoltammetry(CyclicVoltammetryArgs):
"""set up a CV experiment
Attributes:
initial_potential (float): (V)
        vertex_potential_1 (float): (V)
        vertex_potential_2 (float): (V)
final_potential (float) : (V)
scan_rate (float): scan rate in (V/s)
cycles (int): number of cycles
Example:
```json
{
"op": "cv",
"initial_potential": 0.0,
"vertex_potential_1": -1.0,
"vertex_potential_2": 1.2,
"final_potential": 0.0,
"scan_rate": 0.075,
"cycles": 2
}
```
"""
versus: str = "VS REF"
stop_execution: bool = False
setup_func: str = "AddMultiCyclicVoltammetry"
def register_early_stopping(self, sdf: streamz.dataframe.DataFrame):
return None
def getargs(self):
# override any default arguments...
args = self.__dict__
for key in ("initial", "vertex_1", "vertex_2", "final"):
args[f"versus_{key}"] = args["versus"]
args = CyclicVoltammetryArgs.from_dict(args)
return args.format()
def marshal(self, echem_data: Dict[str, Sequence[float]]):
return analysis.CVData(echem_data)
potentiostat_ops = {
"cv": CyclicVoltammetry,
"lsv": LSV,
"lpr": LPR,
"tafel": Tafel,
"corrosion_oc": CorrosionOpenCircuit,
"open_circuit": OpenCircuit,
"potentiostatic": Potentiostatic,
"potentiodynamic": Potentiodynamic,
"staircase_lsv": StaircaseLSV,
}
| 12,820 | 27.302428 | 123 | py | autoSDC | autoSDC-master/asdc/sdc/pump.py |
from __future__ import annotations
import time
import chempy
import serial
import logging
import numpy as np
from scipy import optimize
from chempy import equilibria
from collections import defaultdict
from collections.abc import Iterable
from pprint import pprint
logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.DEBUG)
from asdc.sdc.utils import encode
from asdc.sdc.microcontroller import PeristalticPump
# placeholder config for development
SOLUTIONS = {0: {"H2SO4": 0.1}, 1: {"Na2SO4": 0.1}, 2: {"CuSO4": 0.1}}
def mix(solutions, fraction):
""" compute nominal compositions when mixing multiple solutions """
solution = defaultdict(float)
for sol, x in zip(solutions.values(), fraction):
for species, conc in sol.items():
solution[species] += x * conc
return solution
def sulfuric_eq_pH(solution, verbose=False):
eqsys = equilibria.EqSystem.from_string(
"""
HSO4- = H+ + SO4-2; 10**-2
H2SO4 = H+ + HSO4-; 2.4e6
H2O = H+ + OH-; 10**-14/55.4
"""
)
nominal_sulfates = solution["CuSO4"] + solution["Na2SO4"]
arr, info, sane = eqsys.root(
defaultdict(
float, {"H2O": 55.4, "H2SO4": solution["H2SO4"], "SO4-2": nominal_sulfates}
)
)
conc = dict(zip(eqsys.substances, arr))
pH = -np.log10(conc["H+"])
if verbose:
print("pH: %.2f" % pH)
print()
pprint(conc)
return -np.log10(conc["H+"])
def pH_error(target_pH, stock=SOLUTIONS):
def f(x):
""" perform linear mixing between just two solutions """
s = mix(stock, [x, 1 - x, 0])
pH = sulfuric_eq_pH(s, verbose=False)
return pH
return lambda x: f(x) - target_pH
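# Hedged worked example (not executed at import): solve for the mixing
# fraction x of stock 0 (H2SO4) vs stock 1 (Na2SO4) that hits a target pH.
def _demo_target_pH(target=3.0):
    x = optimize.brentq(pH_error(target), 0.0, 1.0)
    return x, mix(SOLUTIONS, [x, 1 - x, 0.0])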
class PumpArray:
""" KDS Legato pump array interface """
def __init__(
self,
solutions=SOLUTIONS,
diameter=29.5,
port="COM7",
baud=115200,
timeout=1,
output_buffer=100,
fast=False,
flow_rate=0.5,
flow_units="ml/min",
):
"""pump array.
What is needed? concentrations and flow rates.
Low level interface: set individual flow rates
High level interface: set total flow rate and composition
TODO: look into using serial.tools.list_ports.comports to identify the correct COM port to connect to...
the id string should be something like 'USB serial port for Syringe Pump (COM*)'
"""
self.solutions = solutions
self.syringe_diameter = diameter
# serial interface things
self.port = port
self.baud = baud
self.timeout = timeout
self.buffer_size = output_buffer
self.fast = fast
self.flow_rate = flow_rate
self.flow_units = flow_units
self.flow_setpoint = {pump_id: 0.0 for pump_id in self.solutions.keys()}
# pump initialization
# self.diameter(self.syringe_diameter)
def relative_rates(self):
total_rate = sum(self.flow_setpoint.values())
return {key: rate / total_rate for key, rate in self.flow_setpoint.items()}
def eval(self, command, pump_id=0, ser=None, check_response=False, fast=False):
"""evaluate a PumpChain command.
        commands can be batched together using connection `ser`
"""
if fast or self.fast:
command = "@{}".format(command)
# TODO: run a command for every configured pump if pump_id is None
# if isinstance(pump_id, Iterable):
# for id in pump_id:
# self.eval()
command = f"{pump_id} {command}"
if ser is not None:
ser.write(encode(command))
if check_response:
s = ser.read(self.buffer_size)
return s
else:
with serial.Serial(
port=self.port, baudrate=self.baud, timeout=self.timeout
) as ser:
ser.write(encode(command))
if check_response:
s = ser.read(self.buffer_size)
return s
def refresh_ui(self, pump_id=0):
""" for whatever reason, 'ver' refreshes the pump UI when other commands do not """
self.eval("ver", pump_id=pump_id)
def run(self, pump_id=0):
print(f"asking pump {pump_id} to run")
self.eval("run", pump_id=pump_id)
def run_all(self, fast=False):
with serial.Serial(
port=self.port, baudrate=self.baud, timeout=self.timeout
) as ser:
for pump_id in self.solutions.keys():
if self.flow_setpoint[pump_id] > 0:
self.eval("run", pump_id=pump_id, ser=ser, fast=fast)
time.sleep(0.25)
else:
self.eval("stop", pump_id=pump_id, ser=ser, fast=fast)
time.sleep(0.25)
def refresh_all(self):
with serial.Serial(
port=self.port, baudrate=self.baud, timeout=self.timeout
) as ser:
for pump_id in self.solutions.keys():
self.eval("ver", pump_id=pump_id, ser=ser)
time.sleep(0.05)
def stop(self, pump_id=0):
self.eval("stop", pump_id=pump_id)
def stop_all(self, fast=False):
with serial.Serial(
port=self.port, baudrate=self.baud, timeout=self.timeout
) as ser:
for pump_id in self.solutions.keys():
self.eval("stop", pump_id=pump_id, ser=ser, fast=fast)
time.sleep(0.25)
def diameter(self, setpoint=None, pump_id=None):
if setpoint is not None:
command = "diameter {}".format(setpoint)
else:
command = "diameter"
if pump_id is None:
with serial.Serial(
port=self.port, baudrate=self.baud, timeout=self.timeout
) as ser:
for id in self.solutions.keys():
self.eval(
command, pump_id=id, ser=ser, fast=True, check_response=True
)
else:
self.eval(command, pump_id=pump_id)
def infusion_rate(self, ser=None, pump_id=0, rate=None, units="ml/min", fast=False):
if rate is not None:
command = "irate {} {}".format(rate, units)
else:
command = "irate"
self.eval(command, pump_id=pump_id, ser=ser, fast=fast)
def set_pH(self, setpoint=3.0):
""" control pH -- limited to two pumps for now. """
if setpoint == 7.0:
print("forcing Na2SO4-only run")
x = 0.0
else:
x, r = optimize.brentq(
pH_error(setpoint, stock=self.solutions), 0, 1, full_output=True
)
print(x)
self.infusion_rate(pump_id=0, rate=x * self.flow_rate, units=self.flow_units)
self.infusion_rate(
pump_id=1, rate=(1 - x) * self.flow_rate, units=self.flow_units
)
self.flow_setpoint = {0: x * self.flow_rate, 1: (1 - x) * self.flow_rate}
def get_pump_id(self, q):
for key, value in self.solutions.items():
if q in value:
return key
def levels(self):
"""check syringe levels to ensure enough solution is available for a given push
for each pump, run IVOLUME and TVOLUME to compute the remaining solution level
Note: does not handle the case where the target setpoint is reached and the pump stops!
This function is meant to be used preemptively to avoid this happening in the first place.
"""
def decode_level(response):
"""parse *volume response and convert to mL
expect something like `b'\n00:7.01337 ul\r\n00:'`
the main response being {address:02d}:{volume:f} {unit}
"""
response = response.decode().strip().split("\r\n")
(
response,
*prompt,
) = response # unpack response and hope the real response is the first line
address, level = response.split(":")
# expect something like level == '7.01337 ul'
level, unit = level.split()
level = float(level)
# somewhat hacky unit conversions to mL
if unit == "ml":
pass
elif unit == "ul":
level *= 1e-3
elif unit == "nl":
level *= 1e-6
return level
volume_remaining = {}
with serial.Serial(
port=self.port, baudrate=self.baud, timeout=self.timeout
) as ser:
for pump_id, solution in self.solutions.items():
# TODO: solutions need better names
name = list(solution.keys())[0]
r = self.eval(
"tvolume", pump_id=pump_id, check_response=True, fast=True, ser=ser
)
print(r.decode())
logger.debug(f"tvolume: {r.decode()}")
target_volume = decode_level(r)
r = self.eval(
"ivolume", pump_id=pump_id, check_response=True, fast=True, ser=ser
)
print(r.decode())
logger.debug(f"ivolume: {r.decode()}")
infused_volume = decode_level(r)
# print(f'tvolume: {target_volume} mL')
# print(f'ivolume: {infused_volume} mL')
# print(f'remaining: {target_volume - infused_volume} mL')
volume_remaining[name] = target_volume - infused_volume
return volume_remaining
def set_rates(self, setpoints, units="ml/min", start=False, fast=False):
"""directly set absolute flow rates
flow_setpoint is a dict containing absolute flow rates for each syringe
TODO: incorporate peristaltic pump here and set rates appropriately? need to set rates separately sometimes.
"""
total_setpoint = sum(setpoints.values())
print(f"total_setpoint: {total_setpoint}")
# reset rates to 0
for pump_id in self.flow_setpoint.keys():
self.flow_setpoint[pump_id] = 0.0
# set flowrates for the syringe pump array
with serial.Serial(
port=self.port, baudrate=self.baud, timeout=self.timeout
) as ser:
print(setpoints)
time.sleep(0.05)
for species, setpoint in setpoints.items():
print(species, setpoint)
pump_id = self.get_pump_id(species)
print(pump_id)
if setpoint > 0:
self.flow_setpoint[pump_id] = setpoint
self.infusion_rate(
pump_id=pump_id, ser=ser, rate=setpoint, units=units, fast=fast
)
time.sleep(0.25)
time.sleep(0.25)
if start:
self.run_all()
print(self.flow_setpoint)
| 10,996 | 31.344118 | 116 | py | autoSDC | autoSDC-master/asdc/sdc/orion.py |
import os
import re
import time
import serial
import typing
import asyncio
import streamz
import argparse
import threading
import collections
import pandas as pd
from datetime import datetime
from contextlib import contextmanager
from typing import Dict, List
import zmq
import zmq.asyncio
MODEL_NUMBER = "A221"
# the A221 meter responds to GETMEAS with a comma-delimited string
# RESPONSE_COLUMNS contains field names for this string
# LOGFILE_COLUMNS specifies the columns that we explicitly want to save
RESPONSE_COLUMNS = (
"model",
"serialnum",
"software_ver",
"userid",
"timestamp",
"sample_id",
"channel",
"mode",
"pH",
"pH_unit",
"mV",
"mV_unit",
"temperature",
"temperature_unit",
"slope",
"slope_unit",
"methodnum",
"lognum",
)
LOGFILE_COLUMNS = (
"timestamp",
"pH",
"pH_unit",
"mV",
"mV_unit",
"temperature",
"temperature_unit",
"slope",
"slope_unit",
)
# set up and bind zmq publisher socket
DASHBOARD_PORT = 2345
DASHBOARD_ADDRESS = "127.0.0.1"
DASHBOARD_URI = f"tcp://{DASHBOARD_ADDRESS}:{DASHBOARD_PORT}"
def encode(message: str) -> bytes:
message = message + "\r"
return message.encode()
class PHMeter:
supported_modes = {"pH", "mV"}
def __init__(
self,
address,
baud=19200,
timeout=2,
mode="pH",
model_number=MODEL_NUMBER,
buffer_size=64,
zmq_pub=False,
):
        self.model_number = model_number
self.pH = collections.deque(maxlen=buffer_size)
self.temperature = collections.deque(maxlen=buffer_size)
self.ser = serial.Serial(
port=address,
baudrate=baud,
parity=serial.PARITY_NONE,
bytesize=serial.EIGHTBITS,
stopbits=serial.STOPBITS_ONE,
timeout=timeout,
)
self.mode = mode
self.timeout = timeout
self._blocking = False
self.blocking = False
if zmq_pub:
self.context = zmq.asyncio.Context.instance()
self.socket = self.context.socket(zmq.PUB)
self.socket.bind(DASHBOARD_URI)
else:
self.socket = None
@property
def mode(self) -> str:
return self._mode
@mode.setter
def mode(self, mode: str) -> bool:
if mode in self.supported_modes:
self._mode = mode
else:
raise ValueError(f"meter mode must be one of {self.supported_modes}")
self.write(f"SETMODE {self.mode}")
return self.check_response()
@property
def blocking(self) -> bool:
return self._blocking
@blocking.setter
def blocking(self, blocking: bool):
assert isinstance(blocking, bool)
if self._blocking == blocking:
pass
elif blocking:
self.ser.timeout = None
elif not blocking:
self.ser.timeout = self.timeout
self._blocking = blocking
@contextmanager
def sync(self):
try:
self.blocking = True
yield
finally:
self.blocking = False
def check_response(self) -> bool:
response = self.ser.read(size=2).decode()
print(f"response: {response}")
if response == "> ":
return True
else:
return False
def set_csv(self):
self.write("SETCSV")
return self.check_response()
def write(self, msg: str) -> None:
self.ser.write(encode(msg))
@staticmethod
def _to_dict(data: str) -> Dict[str, float]:
values = data.split(",")
d = dict(pH=float(values[1]), temperature=float(values[5]))
return d
def _process_pH(self, response: str, timestamp: datetime) -> str:
"""
Meter Model, Serial Number, Software Revision, User ID, Date & Time, Sample ID, Channel, Mode, pH Value, pH Unit, mV Value, mV Unit, Temperature Value, Temperature Unit, Slope Value, Slope Unit, Method #, Log #
b'GETMEAS \r\n\r\n\rA221 pH,K10231,3.04,ABCDE,08/26/20 00:20:59,---,CH-1,pH,6.92,pH,-3.8, mV,23.3,C,99.6,%,M100,#162\n\r\r>'
TODO: get timestamp, sample_id, channel, mode, pH_value, pH_unit, mV_value, mV_unit, temperature_value, temperature_unit, slope_value, slope_unit
"""
# index into the response by searching for the model number
model_match = re.search(self.model_number, response)
data_idx = model_match.start()
# remove any >, \r, and \n characters
        data = re.sub(r"[>\r\n]", "", response[data_idx:])
# strip any whitespace adjacent to the comma delimiters
# and reconstruct the CSV string
values = list(map(str.strip, data.split(",")))
# coerce into pandas dataframe to select columns
row = pd.DataFrame([values], columns=RESPONSE_COLUMNS)
v = row.loc[:, LOGFILE_COLUMNS]
v["timestamp"] = timestamp
data = v.to_csv(header=False, index=False).strip()
return data
def read(self, count: int = 1):
if count == 1:
self.write("GETMEAS")
response = self.ser.read_until(b">")
timestamp = datetime.now()
# print(response)
data = self._process_pH(response.decode(), timestamp)
return data
elif count > 1:
# FIXME: the multi-reading `GETMEAS` command will only return
# a single `>` at the end of all the response lines...
self.write(f"GETMEAS {count}")
            responses = [self.ser.read_until(b">") for _ in range(count)]
            data = [
                self._process_pH(response.decode(), datetime.now())
                for response in responses
            ]
return data
def readloop(self, stop_event=None, interval=30, logfile="pHmeter_test.csv"):
# start an asyncio event loop in the worker thread...
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
# clear the output buffer...
buf = self.ser.read(500)
start_ts = pd.Timestamp(datetime.now())
def update_buffers(values):
# push values into deques for external monitoring...
self.pH.append(values["pH"])
self.temperature.append(values["temperature"])
# skip writing header line if appending to existing logfile
write_header = True
if os.path.isfile(logfile):
write_header = False
with self.sync():
with open(logfile, "a") as f:
if write_header:
# write a CSV header if the file is new
print(",".join(LOGFILE_COLUMNS), file=f)
source = streamz.Source()
log = source.sink(lambda x: print(x, file=f))
values = source.map(self._to_dict)
buf = values.sink(update_buffers)
if self.socket is not None:
start = pd.Timestamp(datetime.now())
reltime = lambda: pd.Timestamp(datetime.now()) - start
df = values.map(
lambda x: pd.DataFrame(x, index=[reltime().total_seconds()])
)
df.sink(lambda x: self.socket.send_pyobj(x))
# main measurement loop to run at interval
while not stop_event.is_set():
target_ts = time.time() + interval
reading = self.read()
source.emit(reading)
# wait out the rest of the interval
# but continue immediately if this iteration took longer than the interval
# return immediately if signalled
stop_event.wait(timeout=max(0, target_ts - time.time()))
@contextmanager
def monitor(self, interval=30, logfile="pHmeter_test.csv"):
"""use this like
```
with meter.monitor(interval=10):
time.sleep(60)
```
"""
stop_event = threading.Event()
io_worker = threading.Thread(
target=self.readloop, args=(stop_event, interval, logfile)
)
try:
io_worker.start()
yield
finally:
stop_event.set()
io_worker.join()
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="pH meter client")
parser.add_argument("--port", default="COM21", help="COM port for the pH meter")
parser.add_argument(
"--verbose", action="store_true", help="include extra debugging output"
)
args = parser.parse_args()
meter = PHMeter(args.port)
with meter.monitor(interval=10):
time.sleep(240)
print(meter.pH)
| 8,767 | 27.842105 | 218 | py | autoSDC | autoSDC-master/asdc/sdc/utils.py |
def encode(message):
message = message + "\r\n"
return message.encode()
def decode(message):
""" bytes to str; strip carriage return """
if type(message) is list:
return [decode(msg) for msg in message]
return message.decode().strip()
def ismatec_to_flow(pct_rate):
""" calibration curve from ismatec output fraction to flow in mL/min (0-100%)"""
mL_per_min_rate = 0.0144 * pct_rate
return mL_per_min_rate
def flow_to_ismatec(mL_per_min_rate):
""" calibration curve from flow in mL/min to ismatec output fraction (0-100%)"""
pct_rate = mL_per_min_rate / 0.0144
return pct_rate
def proportion_to_flow(rate):
""" calibration curve from ismatec output proportion (0,1) to flow in mL/min """
mL_per_min_rate = 1.44 * rate
return mL_per_min_rate
def flow_to_proportion(mL_per_min_rate):
""" calibration curve from flow in mL/min to ismatec output proportion (0,1) """
pct_rate = mL_per_min_rate / 1.44
return pct_rate
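
# --- added illustrative check (not part of the original module) ---
# A quick sanity check that the calibration helpers above are inverses of
# each other; the tolerance below is arbitrary.
if __name__ == "__main__":
    assert abs(proportion_to_flow(flow_to_proportion(0.5)) - 0.5) < 1e-12
    assert abs(ismatec_to_flow(flow_to_ismatec(0.5)) - 0.5) < 1e-12
    print(decode(encode("GETMEAS").strip()))  # -> "GETMEAS"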
| 1,001 | 26.081081 | 84 | py |
autoSDC | autoSDC-master/asdc/sdc/microcontroller.py |
import json
import time
import serial
from typing import Optional, Dict
from asdc.sdc.utils import encode, decode
from asdc.sdc.utils import flow_to_proportion, proportion_to_flow
class MicrocontrollerInterface:
"""base interface for the equipment hooked up through a microcontroller board
This interface currently uses the [ndjson](https://github.com/ndjson/ndjson-spec) protocol to send commands to the board.
"""
def __init__(
self, port: str = "COM9", baudrate: int = 115200, timeout: float = 0.5
):
"""Microcontroller interface
Arguments:
port: serial port for the microcontroller board
baudrate: serial protocol timing
timeout: default serial interface timeout period (s)
"""
self.port = port
self.baudrate = baudrate
self.timeout = timeout
def eval(self, command: Dict, timeout: Optional[float] = None) -> str:
"""Microcontroller interface
open a serial connection to the microcontroller, send an `ndjson` command,
and fetch and return the result from the board.
Arguments:
command: serial port for the microcontroller board
timeout: override default serial interface timeout period (s)
Returns:
response from the board. currently the board does not strictly
send `ndjson` responses.
"""
if timeout is None:
timeout = self.timeout
with serial.Serial(
port=self.port, baudrate=self.baudrate, timeout=timeout
) as ser:
# block until the whole command is echoed
ser.write(encode(json.dumps(command, separators=(",", ":"))))
ack = ser.readline()
print(decode(ack))
# block until an ndjson response is received
response = ser.readline()
return decode(response)
def read(self):
with serial.Serial(
port=self.port, baudrate=self.baudrate, timeout=self.timeout
) as ser:
response = ser.readlines()
return decode(response)
class Reflectometer(MicrocontrollerInterface):
""" microcontroller interface for the [ThorLabs PDA36A2](https://www.thorlabs.com/thorproduct.cfm?partnumber=PDA36A2) """
def collect(self, timeout: Optional[float] = None) -> float:
"""collect reading from laser reflectance setup
Arguments:
timeout: override default serial interface timeout period (s)
Returns:
average voltage reading from the photodiode, corresponding to the sample reflectance.
the average is taken over 25 readings (with a 50 ms interval between readings)
"""
        if timeout is None:
            timeout = self.timeout
response = self.eval({"op": "laser"}, timeout=timeout)
# TODO: check response content / add response status info
# reflectance_data = json.loads(response[1])
reflectance = float(response)
return reflectance
class PeristalticPump(MicrocontrollerInterface):
""" microcontroller interface for the [ISMATEC peristaltic pump](http://www.ismatec.com/images/pdf/manuals/IP.pdf) """
def start(self):
""" start pumping """
return self.eval({"op": "start"})
    def stop(self):
        """ stop pumping """
        return self.eval({"op": "stop"})
def get_flow(self) -> float:
""" get voltage encoding flow rate from the board... """
return self.eval({"op": "get_flow"})
def set_flow(self, rate: float):
"""set pumping rate to counterbalance a nominal target flow rate in mL/min
This uses a rough calibration curve defined in [asdc.sdc.utils.flow_to_proportion][]
Arguments:
rate: nominal flow rate in mL/min
"""
ismatec_proportion = flow_to_proportion(rate)
print(f"ismatec_proportion: {ismatec_proportion}")
self.eval({"op": "set_flow", "rate": ismatec_proportion})
def set_flow_proportion(self, proportion: float):
"""set proportional flow rate
Arguments:
proportion: nominal flow rate as a fraction of the pump capacity in `(0, 1)`
"""
self.eval({"op": "set_flow", "rate": proportion})
class Light(MicrocontrollerInterface):
""" microcontroller interface for the light (for illumination during optical characterization """
def set(self, value):
""" set the light state to on/off """
if value in ("on", "off"):
self.eval({"op": "light", "value": value})
        else:
            raise ValueError("light state must be 'on' or 'off'")
def on(self):
""" turn the light on """
self.set("on")
def off(self):
""" turn the light off """
self.set("off")
| 4,816 | 30.48366 | 125 | py |
autoSDC | autoSDC-master/asdc/sdc/experiment_defaults.py |
import json
import inspect
import logging
from typing import Optional
from dataclasses import dataclass
from collections.abc import Iterable
logger = logging.getLogger(__name__)
class SDCArgs:
"""base class for default experiment arguments
relies on a bit of introspection magic to generate param strings for the Ametek backend
the backend wants a comma-delimited argument string
subclass SDCargs with a dataclass to specify argument defaults (and ordering)
use a thin subclass of these dataclasses to make the interface nicer
e.g. the interface subclass can take `versus` as a single argument
and pass that value to `versus_initial` and `versus_final`
"""
@property
def name(self):
return type(self).__name__
@classmethod
def from_dict(cls, args):
""" override default dataclass arguments, but skip any keys that aren't attributes of the dataclass """
return cls(
**{k: v for k, v in args.items() if k in inspect.signature(cls).parameters}
)
def format(self):
""" format a comma-delimited argument string in the order expected by Ametek backend """
paramstring = ",".join(
[str(arg).upper() for name, arg in self.__dict__.items()]
)
logger.debug(f"running {self.name}: {paramstring}")
return paramstring
def as_dict(self):
return self.__dict__
def setup(self, pstat_experiments):
""" a bit hacky -- needs a reference to the .NET library for potentiostat setup funcs """
args = self.getargs()
setup_func = getattr(pstat_experiments, self.setup_func)
status = setup_func(args)
return args
def update_relative_scan(self, open_circuit_potential):
"""Mutating update -- transform potentials 'VS HOLD' to 'VS REF'.
potentials currently have matching keys like
{key}_potential = value
versus_{key} = reference_type_string
this is a bit hacky. cleaner would be to have types for potentials:
class Potential(pydantic.BaseModel):
value: float = 0.0
reference: pydantic.Literal["VS REF", "VS OC", "VS PREV", "VS HOLD"]
then it should be safer to make `str(potential) -> "0.0,VS OC"`
"""
if open_circuit_potential is None:
return
# update any potential that is relative to "VS HOLD"
to_update = [key for key, value in self.as_dict().items() if value == "VS HOLD"]
for versus_key in to_update:
# key matching (brittle / depends on field names in experiment arguments)
key = versus_key.replace("versus_", "")
potential_key = f"{key}_potential"
# transform potential specified relative to measured OCP
# to the reference electrode frame
relative_potential = getattr(self, potential_key)
absolute_potential = open_circuit_potential + relative_potential
# write back data to both potential and reference fields
setattr(self, potential_key, absolute_potential)
setattr(self, versus_key, "VS REF")
return
@dataclass
class MeasureOCP(SDCArgs):
""" overload setup method -- this experiment takes no arguments """
setup_func: str = "AddMeasureOpenCircuit"
def setup(self, pstat_experiments):
setup_func = getattr(pstat_experiments, self.setup_func)
status = setup_func()
return None
class SDCChain:
""" wrapper class to set up sequenced experiments """
def __init__(self, *experiments, remeasure_ocp=False):
self.remeasure_ocp = remeasure_ocp
if len(experiments) == 1 and isinstance(experiments[0], Iterable):
self.experiments = experiments[0]
else:
self.experiments = experiments
def setup(self, pstat_experiments):
args = []
for e in self.experiments:
_args = e.setup(pstat_experiments)
args.append(_args)
if self.remeasure_ocp:
MeasureOCP().setup(pstat_experiments)
@dataclass
class LPRArgs(SDCArgs):
initial_potential: float = 0.0
versus_initial: str = "VS REF"
final_potential: float = 1.0
versus_final: str = "VS REF"
step_height: float = 0.1
step_time: float = 0.1
limit_1_type: Optional[str] = None
limit_1_direction: str = "<"
limit_1_value: float = 0
limit_2_type: Optional[str] = None
limit_2_direction: str = "<"
limit_2_value: float = 0
current_range: str = "AUTO"
acquisition_mode: str = "AUTO"
electrometer: str = "AUTO"
e_filter: str = "AUTO"
i_filter: str = "AUTO"
leave_cell_on: str = "YES"
cell: str = "EXTERNAL"
enable_ir_compensation: str = "DISABLED"
bandwidth: str = "AUTO"
low_current_interface_bandwidth: str = "AUTO"
@dataclass
class PotentiostaticArgs(SDCArgs):
initial_potential: float = 0.0
versus_initial: str = "VS REF"
time_per_point: float = 0.00001
duration: float = 10
limit_1_type: Optional[str] = None
limit_1_direction: str = "<"
limit_1_value: float = 0
limit_2_type: Optional[str] = None
limit_2_direction: str = "<"
limit_2_value: float = 0
current_range: str = "AUTO"
acquisition_mode: str = "AUTO"
electrometer: str = "AUTO"
e_filter: str = "AUTO"
i_filter: str = "AUTO"
leave_cell_on: str = "YES"
cell: str = "EXTERNAL"
enable_ir_compensation: str = "DISABLED"
bandwidth: str = "AUTO"
low_current_interface_bandwidth: str = "AUTO"
@dataclass
class PotentiodynamicArgs(SDCArgs):
initial_potential: float = 0.0
versus_initial: str = "VS REF"
final_potential: float = 0.0
versus_final: str = "VS REF"
step_height: float = 0.1
step_time: float = 1.0
limit_1_type: Optional[str] = None
limit_1_direction: str = "<"
limit_1_value: float = 0
limit_2_type: Optional[str] = None
limit_2_direction: str = "<"
limit_2_value: float = 0
current_range: str = "AUTO"
acquisition_mode: str = "AUTO"
electrometer: str = "AUTO"
e_filter: str = "AUTO"
i_filter: str = "AUTO"
leave_cell_on: str = "YES"
cell: str = "EXTERNAL"
enable_ir_compensation: str = "DISABLED"
bandwidth: str = "AUTO"
low_current_interface_bandwidth: str = "AUTO"
@dataclass
class LSVArgs(SDCArgs):
initial_potential: float = 0.0
versus_initial: str = "VS REF"
final_potential: float = 0.65
versus_final: str = "VS REF"
scan_rate: float = 1.0
limit_1_type: Optional[str] = None
limit_1_direction: str = "<"
limit_1_value: float = 0
limit_2_type: Optional[str] = None
limit_2_direction: str = "<"
limit_2_value: float = 0
current_range: str = "AUTO"
electrometer: str = "AUTO"
e_filter: str = "AUTO"
i_filter: str = "AUTO"
leave_cell_on: str = "YES"
cell: str = "EXTERNAL"
enable_ir_compensation: str = "DISABLED"
user_defined_the_amount_of_ir_comp: float = 1
use_previously_determined_ir_comp: str = "YES"
bandwidth: str = "AUTO"
low_current_interface_bandwidth: str = "AUTO"
@dataclass
class StaircaseLSVArgs(SDCArgs):
initial_potential: float = 0.0
versus_initial: str = "VS REF"
final_potential: float = 0.65
versus_final: str = "VS REF"
step_height: float = 0.1
step_time: float = 1.0
limit_1_type: Optional[str] = None
limit_1_direction: str = "<"
limit_1_value: float = 0
limit_2_type: Optional[str] = None
limit_2_direction: str = "<"
limit_2_value: float = 0
current_range: str = "AUTO"
acquisition_mode: str = "AUTO"
electrometer: str = "AUTO"
e_filter: str = "AUTO"
i_filter: str = "AUTO"
leave_cell_on: str = "NO"
cell_to_use: str = "INTERNAL"
enable_ir_compensation: str = "DISABLED"
    user_defined_the_amount_of_ir_comp: float = 1
use_previously_determined_ir_comp: str = "YES"
bandwidth: str = "AUTO"
low_current_interface_bandwidth: str = "AUTO"
@dataclass
class TafelArgs(SDCArgs):
initial_potential: float = -0.25
versus_initial: str = "VS OC"
final_potential: float = 0.25
versus_final: str = "VS OC"
step_height: float = 0.001
step_time: float = 0.5
limit_1_type: Optional[str] = None
limit_1_direction: str = "<"
limit_1_value: float = 0
limit_2_type: Optional[str] = None
limit_2_direction: str = "<"
limit_2_value: float = 0
current_range: str = "AUTO"
acquisition_mode: str = "AUTO"
electrometer: str = "AUTO"
e_filter: str = "AUTO"
i_filter: str = "AUTO"
leave_cell_on: str = "YES"
cell: str = "EXTERNAL"
enable_ir_compensation: str = "DISABLED"
bandwidth: str = "AUTO"
low_current_interface_bandwidth: str = "AUTO"
@dataclass
class OpenCircuitArgs(SDCArgs):
time_per_point: float = 1
duration: float = 10
limit_1_type: Optional[str] = None
limit_1_direction: str = "<"
limit_1_value: float = 0
limit_2_type: Optional[str] = None
limit_2_direction: str = "<"
limit_2_value: float = 0
current_range: str = "2MA"
electrometer: str = "AUTO"
e_filter: str = "AUTO"
i_filter: str = "AUTO"
cell: str = "EXTERNAL"
bandwidth: str = "AUTO"
low_current_interface_bandwidth: str = "AUTO"
e_resolution: str = "AUTO"
@dataclass
class CorrosionOpenCircuitArgs(SDCArgs):
time_per_point: float = 1
duration: float = 10
limit_1_type: Optional[str] = None
limit_1_direction: str = "<"
limit_1_value: float = 0
limit_2_type: Optional[str] = None
limit_2_direction: str = "<"
limit_2_value: float = 0
current_range: str = "2MA"
electrometer: str = "AUTO"
e_filter: str = "AUTO"
i_filter: str = "AUTO"
cell: str = "EXTERNAL"
bandwidth: str = "AUTO"
low_current_interface_bandwidth: str = "AUTO"
e_resolution: str = "AUTO"
@dataclass
class CyclicVoltammetryArgs(SDCArgs):
initial_potential: float = 0.0
versus_initial: str = "VS REF"
vertex_1_potential: float = 1.0
versus_vertex_1: str = "VS REF"
vertex_1_hold: float = 0
acquire_data_during_vertex_hold_1: str = "NO"
vertex_2_potential: float = -1.0
versus_vertex_2: str = "VS REF"
vertex_2_hold: float = 0
acquire_data_during_vertex_hold_2: str = "NO"
scan_rate: float = 0.1
cycles: int = 3
limit_1_type: Optional[str] = None
limit_1_direction: str = "<"
limit_1_value: float = 0
limit_2_type: Optional[str] = None
limit_2_direction: str = "<"
limit_2_value: float = 0
current_range: str = "AUTO"
electrometer: str = "AUTO"
e_filter: str = "AUTO"
i_filter: str = "AUTO"
leave_cell_on: str = "YES"
cell: str = "EXTERNAL"
enable_ir_compensation: str = "DISABLED"
user_defined_the_amount_of_ir_comp: float = 1
use_previously_determined_ir_comp: str = "YES"
bandwidth: str = "AUTO"
final_potential: float = 0.0
versus_final: str = "VS REF"
low_current_interface_bandwidth: str = "AUTO"
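
# --- added usage sketch (not part of the original module) ---
# Shows how a "VS HOLD" potential is resolved against a measured OCP and how
# the comma-delimited Ametek parameter string is rendered; values are made up.
if __name__ == "__main__":
    args = PotentiostaticArgs(initial_potential=0.25, versus_initial="VS HOLD")
    args.update_relative_scan(open_circuit_potential=-0.1)
    print(args.initial_potential, args.versus_initial)  # ~0.15 VS REF
    print(args.format())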
| 11,072 | 30.191549 | 111 | py |
autoSDC | autoSDC-master/asdc/sdc/__init__.py |
""" SDC interface """
from __future__ import absolute_import
import sys
# try:
# raise ModuleNotFoundError
# except NameError:
# ModuleNotFoundError = ImportError
# if clr module (pythonnet) is not available, load the SDC shims
from . import pump
from . import orion
from . import reglo
from . import experiment
from . import microcontroller
try:
from . import position
from . import potentiostat
except ImportError:
# from .shims import pump
from .shims import position
from .shims import potentiostat
| 538 | 18.25 | 64 | py |
autoSDC | autoSDC-master/asdc/sdc/shims/potentiostat.py |
""" asdc.control: pythonnet .NET interface to VersaSTAT/VersaSCAN libraries """
import os
import sys
import json
import time
import asyncio
import inspect
import logging
import streamz
import numpy as np
import pandas as pd
from datetime import datetime
from contextlib import contextmanager
from streamz.dataframe import DataFrame
import zmq
import zmq.asyncio
# set up and bind zmq publisher socket
DASHBOARD_PORT = 2345
DASHBOARD_ADDRESS = "127.0.0.1"
DASHBOARD_URI = f"tcp://{DASHBOARD_ADDRESS}:{DASHBOARD_PORT}"
context = zmq.asyncio.Context.instance()
socket = context.socket(zmq.PUB)
# socket.bind(DASHBOARD_URI)
shim_data = os.path.join(os.path.split(__file__)[0], "data")
# df = pd.read_csv(os.path.join(shim_data, 'test_data.csv'), index_col=0)
df = pd.read_csv(os.path.join(shim_data, "test_open_circuit.csv"), index_col=0)
class VersaStatError(Exception):
pass
@contextmanager
def controller(start_idx=17109013, initial_mode="potentiostat"):
""" context manager that wraps potentiostat controller class Control. """
ctl = Potentiostat(start_idx=start_idx, initial_mode=initial_mode)
try:
ctl.stop()
ctl.clear()
yield ctl
except Exception as exc:
print(exc)
print("Exception: unwind potentiostat controller...")
ctl.stop()
ctl.clear()
ctl.disconnect()
raise
finally:
print("disconnect from potentiostat controller.")
ctl.stop()
ctl.clear()
ctl.disconnect()
class Potentiostat:
"""Interface to the VersaSTAT SDK library for instrument control
methods are broken out into `Immediate` (direct instrument control) and `Experiment`.
"""
def __init__(self, start_idx=0, initial_mode="potentiostat", poll_interval=1):
self.instrument = None
self.start_idx = start_idx
self.connect()
self.serial_number = None
self.model = None
self.options = None
self.low_current_interface = None
self.mode = initial_mode
self.poll_interval = poll_interval
self.current_range = None
# action buffer for shim
self.action_queue = []
self._points_available = 0
# load data from a file...
self._data = df
def __call__(self, experiment):
return self.run(experiment)
def check_overload(self):
self.update_status()
overload_status = self.overload_status()
if overload_status != 0:
print("OVERLOAD:", overload_status)
return overload_status
def read_buffers(self):
return {
"current": self.current(),
"potential": self.potential(),
"elapsed_time": self.elapsed_time(),
"applied_potential": self.applied_potential(),
"current_range": self.current_range_history(),
"segment": self.segment(),
}
def run(self, experiment):
""" run an SDC experiment sequence -- busy wait until it's finished """
# this is a bit magical...
# `experiment` has an attribute `setup_func` that holds the name of the .NET function
# that should be invoked to add an experiment
# `experiment.setup` is responsible for looking it up and invoking it
# with e.g. `f = getattr(pstat.instrument.Experiment, experiment.setup_func)`
# this way, an `SDCSequence` can call all the individual `setup` methods
# no need to run any setup...
# argstring = experiment.setup(self.instrument.Experiment)
argstring = str(experiment)
metadata = {"timestamp_start": datetime.now(), "parameters": argstring}
self.start()
error_codes = set()
# while self.sequence_running():
        n = self._data.shape[0]
        n_iters = 40
        chunk_size = n // n_iters
for idx in range(n_iters):
self.points_available = idx * chunk_size
time.sleep(self.poll_interval)
error_codes.add(self.check_overload())
print(f"points: {self.points_available}")
metadata["timestamp_end"] = datetime.now()
metadata["error_codes"] = json.dumps(list(map(int, error_codes)))
results = self.read_buffers()
# cast results into specific e-chem result type
# (which subclass pandas.DataFrame and have a validation and plotting interface)
results = experiment.marshal(results)
return results, metadata
def read_chunk(self, start):
return pd.DataFrame(
{
"current": self.current(start=start),
"potential": self.potential(start=start),
"elapsed_time": self.elapsed_time(start=start),
}
)
def stream(self, experiment):
""" stream the data from the potentiostat... """
source = streamz.Stream()
metadata = {"timestamp_start": datetime.now(), "parameters": str(experiment)}
self.start()
# build a list of pd.DataFrames
# to concat into the full measurement data
chunks = source.sink_to_list()
# publish the pd.DataFrame chunk over zmq
send_data = source.sink(lambda x: socket.send_pyobj(x))
# monitor convergence
# hacky -- rely on measurement interval being ~1s
example = pd.DataFrame({"current": [], "potential": [], "elapsed_time": []})
sdf = DataFrame(source, example=example)
early_stop = experiment.register_early_stopping(sdf)
        n = self._data.shape[0]
        n_iters = 40
        chunk_size = n // n_iters
cursor = 0
# while self.sequence_running():
for idx in range(1, n_iters + 1):
self.points_available += chunk_size
source.emit(self.read_chunk(cursor))
cursor += chunk_size
if experiment.stop_execution:
print("stopping early")
break
time.sleep(self.poll_interval)
metadata["timestamp_end"] = datetime.now()
results = self.read_buffers()
# cast results into specific e-chem result type
# (which subclass pandas.DataFrame and have a validation and plotting interface)
results = experiment.marshal(results)
return results, metadata, chunks
def connect(self):
self.index = self.start_idx
self.connected = True
def disconnect(self):
self.connected = False
# Immediate methods -- direct instrument control
def set_cell(self, status="on"):
""" turn the cell on or off """
if status not in ("on", "off"):
raise ArgumentError("specify valid cell status in {on, off}")
def choose_cell(self, choice="external"):
""" choose between the internal and external cells. """
if choice not in ("internal", "external"):
raise ArgumentError("specify valid cell in {internal, external}")
def set_mode(self, mode):
""" choose between potentiostat and galvanostat modes. """
if mode not in ("potentiostat", "galvanostat"):
raise ArgumentError("set mode = {potentiostat, galvanostat}")
def set_current_range(self, current_range):
valid_current_ranges = [
"2A",
"200mA",
"20mA",
"2mA",
"200uA",
"20uA",
"2uA",
"200nA",
"20nA",
"2nA",
]
if current_range not in valid_current_ranges:
            raise ValueError(
                "specify valid current range ({})".format(valid_current_ranges)
            )
self.current_range = current_range
    def set_dc_potential(self, potential):
        """ Set the output DC potential (in Volts). This voltage must be within the instrument's capability."""
        pass
    def set_dc_current(self, current):
        """Set the output DC current (in Amps). This current must be within the instrument's capability.
        Calling this method also changes to Galvanostat mode and sets the current range to the correct value.
        WARNING: Once the cell is enabled after setting the DC current, do not change to potentiostatic mode or change the current range.
        These will affect the value being applied to the cell.
        """
        pass
    def set_ac_frequency(self, frequency):
        """ Sets the output AC frequency (in Hz). This frequency must be within the instrument's capability."""
        pass
    def set_ac_amplitude(self, amplitude):
        """ Sets the output AC amplitude (in RMS Volts). This amplitude must be within the instrument's capabilities."""
        pass
def set_ac_waveform(self, mode="on"):
waveform_modes = ["on", "off"]
if mode not in waveform_modes:
raise ArgumentError("specify valid AC waveform mode {on, off}.")
def update_status(self):
"""Retrieve the status information from the instrument.
Also auto-ranges the current if an experiment sequence is not in progress.
Call this prior to calling the status methods below.
"""
pass
def latest_potential(self):
""" get the latest stored E value. """
return None
def latest_current(self):
""" get the latest stored I value. """
return None
def overload_status(self, raise_exception=False):
"""check for overloading.
0 indicates no overload, 1 indicates I (current) Overload, 2
indicates E, Power Amp or Thermal Overload has occurred.
"""
overload_cause = {
1: "I (current) overload",
2: "E, Power Amp, or Thermal overload",
}
# overload_code = self.instrument.Immediate.GetOverload()
overload_code = 0
if overload_code and raise_exception:
msg = "A " + overload_cause[overload_code] + " has occurred."
raise VersaStatError(msg)
return overload_code
def booster_enabled(self):
""" check status of the booster switch. """
return None
def cell_enabled(self):
""" check status of the cell. """
return None
def autorange_current(self, auto):
"""Enable or disable (default is enabled) automatic current ranging while an experiment is not running.
Disabling auto-ranging is useful when wanting to apply a DC current in immediate mode.
"""
pass
# Experiment methods
# Experiment actions apparently can be run asynchronously
def actions(self):
""" get the current experiment action queue. """
# Returns a list of comma delimited action names that are supported by the instrument that is currently connected
return None
def clear(self):
""" clear the experiment action queue. """
self.action_queue = []
def start(self, max_wait_time=30, poll_interval=2):
"""Starts the sequence of actions in the instrument that is currently connected.
Wait until the instrument starts the action to return control flow."""
print("started experiment sequence successfully.")
return
def stop(self):
""" Stops the sequence of actions that is currently running in the instrument that is currently connected. """
pass
def skip(self):
"""Skips the currently running action and immediately starts the next action.
If there is no more actions to run, the sequence is simply stopped.
"""
self.action_queue.pop(0)
def sequence_running(self):
""" Returns true if a sequence is currently running on the connected instrument, false if not. """
pass
@property
def points_available(self):
"""Returns the number of points that have been stored by the instrument after a sequence of actions has begun.
Returns -1 when all data has been retrieved from the instrument.
"""
return self._points_available
@points_available.setter
def points_available(self, value):
self._points_available = value
def last_open_circuit(self):
"""Returns the last measured Open Circuit value.
This value is stored at the beginning of the sequence (and updated anytime the “AddMeasureOpenCircuit” action is called)"""
return None
# The following Action Methods can be called in order to create a sequence of Actions.
# A single string argument encodes multiple parameters as comma-separated lists...
# For example, AddOpenCircuit( string ) could be called, then AddEISPotentiostatic( string ) called.
# This would create a sequence of two actions, when started, the open circuit experiment would run, then the impedance experiment.
# TODO: write a class interface for different experimental actions to streamline logging and serialization?
# TODO: code-generation for GetData* interface?
def potential(self, start=0, num_points=None, as_list=True):
if num_points is None:
num_points = self.points_available
return self._data["potential"].values[start:num_points]
def current(self, start=0, num_points=None, as_list=True):
if num_points is None:
num_points = self.points_available
return self._data["current"].values[start:num_points]
def elapsed_time(self, start=0, num_points=None, as_list=True):
if num_points is None:
num_points = self.points_available
return self._data["elapsed_time"].values[start:num_points]
def applied_potential(self, start=0, num_points=None, as_list=True):
if num_points is None:
num_points = self.points_available
return self._data["applied_potential"].values[start:num_points]
def segment(self, start=0, num_points=None, as_list=True):
if num_points is None:
num_points = self.points_available
return self._data["segment"].values[start:num_points]
def current_range_history(self, start=0, num_points=None, as_list=True):
if num_points is None:
num_points = self.points_available
return self._data["current_range"].values[start:num_points]
    def hardcoded_open_circuit(self, params):
        default_params = (
            "1,10,NONE,<,0,NONE,<,0,2MA,AUTO,AUTO,AUTO,INTERNAL,AUTO,AUTO,AUTO"
        )
        print(default_params)
        status = 0  # shim: always report success
        return status, default_params
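
# --- added smoke test (not part of the original module) ---
# Exercises the shim's chunked-buffer interface without hardware; assumes the
# bundled shims/data/test_open_circuit.csv loaded at import time is present.
if __name__ == "__main__":
    with controller() as pstat:
        pstat.points_available = 5
        print("potential:", pstat.potential())
        print("current:", pstat.current())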
| 14,457 | 32.009132 | 134 | py |
autoSDC | autoSDC-master/asdc/sdc/shims/position.py |
""" asdc.position: pythonnet .NET interface to VersaSTAT motion controller """
import os
import sys
import time
import numpy as np
from contextlib import contextmanager
ax = [0, 0, 0]
@contextmanager
def controller(ip="192.168.10.11", speed=1e-4):
""" context manager that wraps position controller class Position. """
pos = Position(ip=ip, speed=speed)
try:
yield pos
except Exception as exc:
print("unwinding position controller due to exception.")
raise exc
finally:
pass
class Controller:
def __init__(self):
self.Parameters = [1, 2, 3]
class Position:
""" Interface to the VersaSTAT motion controller library """
def __init__(self, ip="192.168.10.11", speed=0.0001):
""" instantiate a Position controller context manager """
self._ip = ip
self._speed = speed
# Set up and connect to the position controller
self.controller = Controller()
self.settings = None
# use this global variable to keep track of state
# across different instantiations of the position controller shim
global ax
self.axis = ax
self._x = 0
self._y = 0
self._z = 0
@property
def x(self):
return self._x
@property
def y(self):
return self._y
@property
def z(self):
return self._z
@property
def speed(self):
return self._speed
@speed.setter
def speed(self, speed):
self._speed = speed
    def home(self, block_interval=1):
        """execute the homing operation, blocking for `block_interval` seconds.
        Warning: this will cause the motion stage to return to its origin.
        This happens to be the maximum height for the stage...
        """
        time.sleep(block_interval)
def print_status(self):
""" print motion controller status for each axis. """
print("ok")
def current_position(self):
"""return the current coordinates as a list
axis.Values holds (position, speed, error)
"""
return self.axis
def at_setpoint(self, verbose=False):
""" check that each axis of the position controller is at its setpoint """
return True
def update_single_axis(self, axis=0, delta=0.001, verbose=False, poll_interval=0.1):
"""update position setpoint and busy-wait until the motion controller has finished.
poll_interval: busy-waiting polling interval (seconds)
"""
self.axis[axis] += delta
def update_x(self, delta=0.001, verbose=False, poll_interval=0.1):
return self.update_single_axis(
axis=0, delta=delta, verbose=verbose, poll_interval=poll_interval
)
def update_y(self, delta=0.001, verbose=False, poll_interval=0.1):
return self.update_single_axis(
axis=1, delta=delta, verbose=verbose, poll_interval=poll_interval
)
def update_z(self, delta=0.001, verbose=False, poll_interval=0.1):
return self.update_single_axis(
axis=2, delta=delta, verbose=verbose, poll_interval=poll_interval
)
def update(
self,
delta=[0.001, 0.001, 0.0],
step_height=None,
compress=None,
verbose=False,
poll_interval=0.1,
max_wait_time=25,
):
"""update position setpoint and busy-wait until the motion controller has finished.
delta: position update [dx, dy, dz]
step_height: ease off vertically before updating position
poll_interval: busy-waiting polling interval (seconds)
"""
for idx, d in enumerate(delta):
self.axis[idx] += d
time.sleep(1)
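
# --- added smoke test (not part of the original module) ---
# Drives the position shim through a few relative moves; coordinates are
# tracked in the module-level `ax` state, so no motion hardware is involved.
if __name__ == "__main__":
    with controller() as pos:
        pos.update(delta=[0.001, 0.001, 0.0])
        pos.update_z(delta=-0.0005)
        print(pos.current_position())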
| 3,713 | 26.308824 | 91 | py |
autoSDC | autoSDC-master/asdc/sdc/shims/pump.py | | 0 | 0 | 0 | py |
autoSDC | autoSDC-master/asdc/sdc/shims/__init__.py | | 0 | 0 | 0 | py |
autoSDC | autoSDC-master/asdc/analysis/cv.py |
import logging
import numpy as np
import pandas as pd
from scipy import stats
import matplotlib.pyplot as plt
from asdc.analysis.echem_data import EchemData
logger = logging.getLogger(__name__)
class CVData(EchemData):
@property
def _constructor(self):
return CVData
@property
def name(self):
return "CV"
def check_quality(self):
""" CV heuristics: not implemented. """
return True
def plot(self, fit=False):
""" plot CV: current vs potential """
# # super().plot('current', 'potential')
plt.plot(self["potential"], self["current"])
plt.xlabel("potential (V)")
plt.ylabel("current (A)")
plt.tight_layout()
| 718 | 20.787879 | 52 | py |
autoSDC | autoSDC-master/asdc/analysis/butler_volmer.py |
import lmfit
import numpy as np
from asdc.analysis.echem_data import EchemData
def butler_volmer(x, E_oc, j0, alpha_c, alpha_a):
overpotential = x - E_oc
current = j0 * (np.exp(alpha_a * overpotential) - np.exp(-alpha_c * overpotential))
return current
def log_butler_volmer(x, E_oc, i_corr, alpha_c, alpha_a):
abscurrent = np.abs(butler_volmer(x, E_oc, i_corr, alpha_c, alpha_a))
# clip absolute current values so that the lmfit model
# does not produce NaN values when evaluating the log current
# at the exact open circuit potential
return np.log10(np.clip(abscurrent, 1e-9, np.inf))
class ButlerVolmerModel(lmfit.Model):
"""model current under butler-volmer model
Example:
```
bv = butler_volmer.ButlerVolmerModel()
pars = bv.guess(tafel)
E, logI = bv.slice(tafel, pars['E_oc'], w=0.1)
bv_fit = bv.fit(logI, x=E, params=pars)
```
"""
def __init__(self, independent_vars=["x"], prefix="", nan_policy="omit", **kwargs):
kwargs.update(
{
"prefix": prefix,
"nan_policy": nan_policy,
"independent_vars": independent_vars,
}
)
super().__init__(butler_volmer, **kwargs)
def _set_paramhints_prefix(self):
self.set_param_hint("j0", min=0)
self.set_param_hint("alpha_c")
self.set_param_hint("alpha_a")
def _guess(self, data, x=None, **kwargs):
# guess open circuit potential: minimum log current
id_oc = np.argmin(data)
E_oc_guess = x[id_oc]
# unlog the data to guess corrosion current
i_corr = np.max(10 ** data)
pars = self.make_params(E_oc=E_oc_guess, j0=i_corr, alpha_c=5, alpha_a=5)
return lmfit.models.update_param_vals(pars, self.prefix, **kwargs)
def guess(self, data: EchemData, **kwargs):
self._set_paramhints_prefix()
E = data.potential.values
I = data.current.values.copy()
mask = np.isnan(I)
# guess open circuit potential: minimum log current
I[mask] = np.inf
id_oc = np.argmin(np.abs(I))
E_oc_guess = E[id_oc]
I[mask] = np.nan
E, I = self.slice(data, E_oc_guess)
# guess corrosion current
i_corr = np.max(I[np.isfinite(I)])
pars = self.make_params(E_oc=E_oc_guess, j0=i_corr, alpha_c=10, alpha_a=10)
return lmfit.models.update_param_vals(pars, self.prefix, **kwargs)
def slice(self, data: EchemData, E_oc: float, w: float = 0.15):
E = data.potential.values
I = data.current.values
slc = (E > E_oc - w) & (E < E_oc + w)
E, I = E[slc], I[slc]
mask = np.isfinite(I)
return E[mask], I[mask]
class ButlerVolmerLogModel(lmfit.Model):
"""model log current under butler-volmer model
Example:
```
bv = butler_volmer.ButlerVolmerModel()
pars = bv.guess(tafel)
E, logI = bv.slice(tafel, pars['E_oc'], w=0.1)
bv_fit = bv.fit(logI, x=E, params=pars)
```
"""
def __init__(self, independent_vars=["x"], prefix="", nan_policy="omit", **kwargs):
kwargs.update(
{
"prefix": prefix,
"nan_policy": nan_policy,
"independent_vars": independent_vars,
}
)
super().__init__(log_butler_volmer, **kwargs)
self._set_paramhints_prefix()
def _set_paramhints_prefix(self):
self.set_param_hint("i_corr", min=0)
self.set_param_hint("alpha_c", min=0.1)
self.set_param_hint("alpha_a", min=0.1)
def _guess(self, data, x=None, **kwargs):
# guess open circuit potential: minimum log current
id_oc = np.argmin(data)
E_oc_guess = x[id_oc]
# unlog the data to guess corrosion current
i_corr = np.max(10 ** data)
pars = self.make_params(
E_oc=E_oc_guess, i_corr=i_corr, alpha_c=0.5, alpha_a=0.5
)
return lmfit.models.update_param_vals(pars, self.prefix, **kwargs)
def guess(self, data: EchemData, **kwargs):
# don't overwrite data
E = data.potential.values.copy()
I = data.current.values.copy()
mask = np.isnan(I)
# guess open circuit potential: minimum log current
I[mask] = np.inf
id_oc = np.argmin(np.abs(I))
E_oc_guess = E[id_oc]
I[mask] = np.nan
E, logI = self.slice(data, E_oc_guess)
# guess corrosion current
i_corr = np.max(10 ** logI[np.isfinite(logI)])
pars = self.make_params(
E_oc=E_oc_guess, i_corr=i_corr, alpha_c=0.5, alpha_a=0.5
)
return lmfit.models.update_param_vals(pars, self.prefix, **kwargs)
def slice(self, data: EchemData, E_oc: float, w: float = 0.15):
E = data.potential.values
I = data.current.values
slc = (E > E_oc - w) & (E < E_oc + w)
E, logI = E[slc], np.log10(np.abs(I[slc]))
mask = np.isfinite(logI)
return E[mask], logI[mask]
def butler_volmer_nuc(x, E_oc, i_corr, alpha_c, alpha_a, E_nuc, A, p, i_pass):
overpotential = x - E_oc
driving_force = x - E_nuc
# driving_force = np.clip(driving_force, 0, np.inf)
# nucleation model
S = np.exp(-A * driving_force ** p)
# S[np.isnan(S)] = 1
S[driving_force <= 0] = 1
# see eq 5 in Bellezze et al (10.1016/j.corsci.2017.10.012)
current = (
i_corr
* (S * np.exp(alpha_a * overpotential) - np.exp(-alpha_c * overpotential))
+ (1 - S) * i_pass
)
return np.log10(np.clip(np.abs(current), 1e-9, np.inf))
class ButlerVolmerNucleationModel(lmfit.Model):
"""model current under butler-volmer model with a nucleation and growth active/passive effect
Example:
```
bv = butler_volmer.ButlerVolmerModel()
pars = bv.guess(tafel)
E, logI = bv.slice(tafel, pars['E_oc'], w=0.1)
bv_fit = bv.fit(logI, x=E, params=pars)
```
"""
def __init__(self, independent_vars=["x"], prefix="", nan_policy="omit", **kwargs):
kwargs.update(
{
"prefix": prefix,
"nan_policy": nan_policy,
"independent_vars": independent_vars,
}
)
super().__init__(butler_volmer_nuc, **kwargs)
self._set_paramhints_prefix()
def _set_paramhints_prefix(self):
""" E_oc, i_corr, alpha_c, alpha_a, E_nuc, A, p, i_pass """
self.set_param_hint("i_corr", min=0)
self.set_param_hint("alpha_c", min=0)
self.set_param_hint("alpha_a", min=0)
# self.set_param_hint('A', min=1e-4, max=1e-3)
self.set_param_hint("A", min=0)
self.set_param_hint("p", min=2, max=3)
self.set_param_hint("i_pass", min=0, max=1, value=0.1)
def guess(self, data: EchemData, **kwargs):
""" E_oc, i_corr, alpha_c, alpha_a, E_nuc, A, p, i_pass """
E = data.potential.values
I = data.current.values
# guess open circuit potential: minimum log current
id_oc = np.argmin(np.abs(I))
E_oc_guess = E[id_oc]
E, I = self.slice(data, E_oc_guess)
# guess corrosion current
# i_corr = np.max(I)
i_corr = np.max(10 ** I[np.isfinite(I)])
self.set_param_hint("E_nuc", min=E_oc_guess, max=E_oc_guess + 0.5)
pars = self.make_params(
E_oc=E_oc_guess,
i_corr=i_corr,
alpha_c=0.5,
alpha_a=0.5,
E_nuc=E_oc_guess + 0.2,
A=5e-4,
i_pass=0.1,
p=2,
)
return lmfit.models.update_param_vals(pars, self.prefix, **kwargs)
def slice(self, data: EchemData, E_oc: float, w: float = 0.15):
E = data.potential.values
I = data.current.values
slc = (E > E_oc - w) & (E < E_oc + w)
return E[slc], np.log10(np.abs(I[slc]))
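
# --- added synthetic sanity check (not part of the original module) ---
# Generates noiseless Butler-Volmer data and recovers the parameters with the
# log-current model; all numerical values below are made up for illustration.
if __name__ == "__main__":
    import pandas as pd

    E = np.linspace(-0.2, 0.2, 401)
    I = butler_volmer(E, E_oc=0.0, j0=1e-6, alpha_c=12.0, alpha_a=12.0)
    data = pd.DataFrame({"potential": E, "current": I})
    bv = ButlerVolmerLogModel()
    pars = bv.guess(data)
    x, logI = bv.slice(data, pars["E_oc"].value, w=0.15)
    fit = bv.fit(logI, x=x, params=pars)
    print(fit.best_values)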
| 7,962 | 30.227451 | 97 | py |
autoSDC | autoSDC-master/asdc/analysis/lsv.py |
import logging
import numpy as np
import pandas as pd
from scipy import stats
import matplotlib.pyplot as plt
from asdc.analysis.echem_data import EchemData
logger = logging.getLogger(__name__)
class LSVData(EchemData):
@property
def _constructor(self):
return LSVData
@property
def name(self):
return "LSV"
def check_quality(self):
""" LSV heuristics: not implemented. """
return True
def plot(self, fit=False):
""" plot LSV: current vs potential """
# # super().plot('current', 'potential')
plt.plot(self["potential"], self["current"])
plt.xlabel("potential (V)")
plt.ylabel("current (A)")
plt.tight_layout()
| 723 | 20.939394 | 52 | py |
autoSDC | autoSDC-master/asdc/analysis/potentiodynamic.py |
import logging
import numpy as np
import pandas as pd
from scipy import stats
import matplotlib.pyplot as plt
from asdc.analysis.echem_data import EchemData
logger = logging.getLogger(__name__)
class PotentiodynamicData(EchemData):
@property
def _constructor(self):
return PotentiodynamicData
@property
def name(self):
return "Potentiodynamic"
def check_quality(self):
""" Potentiodynamic heuristics: not implemented. """
return True
def plot(self, fit=False):
""" plot Potentiodynamic: current vs potential """
# # super().plot('current', 'potential')
plt.plot(self["potential"], self["current"])
plt.xlabel("potential (V)")
plt.ylabel("current (A)")
plt.tight_layout()
| 783 | 22.757576 | 60 | py |
autoSDC | autoSDC-master/asdc/analysis/tafel.py |
import logging
import numpy as np
import pandas as pd
from scipy import stats
import matplotlib.pyplot as plt
from asdc.analysis.echem_data import EchemData
from asdc.analysis import butler_volmer
logger = logging.getLogger(__name__)
def current_crosses_zero(df):
""" verify that a valid Tafel scan should has a current trace that crosses zero """
current = df["current"]
success = current.min() < 0 and current.max() > 0
if not success:
logger.warning("Tafel current does not cross zero!")
logger.debug("Tafel check")
return success
def fit_bv(df, w=0.2):
bv = butler_volmer.ButlerVolmerLogModel()
pars = bv.guess(df)
E, I = bv.slice(df, pars["E_oc"], w=w)
bv_fit = bv.fit(I, x=E, params=pars)
return bv_fit
class TafelData(EchemData):
@property
def _constructor(self):
return TafelData
@property
def name(self):
return "Tafel"
def check_quality(self):
model = fit_bv(self)
i_corr = model.best_values["i_corr"]
ocp = model.best_values["E_oc"]
print(f"i_corr: {i_corr}")
logger.info(f"Tafel: OCP: {ocp}, i_corr: {i_corr}")
return current_crosses_zero(self)
def fit(self, w=0.2):
""" fit a butler volmer model to Tafel data """
self.model = fit_bv(self, w=w)
# convenience attributes:
# just store optimized model params in class attributes for now
self.i_corr = self.model.best_values["i_corr"]
self.ocp = self.model.best_values["E_oc"]
self.alpha_c = self.model.best_values["alpha_c"]
self.alpha_a = self.model.best_values["alpha_a"]
return self.model
def evaluate_model(self, V_mod=None):
""" evaluate butler-volmer model on regular grid """
if V_mod is None:
V_mod = np.linspace(
self.potential.min() - 0.5, self.potential.max() + 0.5, 200
)
I_mod = self.model.eval(self.model.params, x=V_mod)
return V_mod, I_mod
def plot(self, fit=False):
""" Tafel plot: log current against the potential """
# # super().plot('current', 'potential')
plt.plot(self["potential"], np.log10(np.abs(self["current"])))
plt.xlabel("potential (V)")
plt.ylabel("log current (A)")
ylim = plt.ylim()
xlim = plt.xlim()
if fit:
ylim = plt.ylim()
model = self.fit()
# evaluate and plot model
V, I_mod = self.evaluate_model()
overpotential = V - self.ocp
bc = self.alpha_c / np.log(10)
ba = self.alpha_a / np.log(10)
log_i_corr = np.log10(self.i_corr)
plt.plot(V, I_mod, linestyle="--", color="k", alpha=0.5)
plt.axhline(log_i_corr, color="k", alpha=0.5, linewidth=0.5)
cpt_style = dict(color="k", alpha=0.5, linewidth=0.5)
# cathodic branch
plt.plot(V, -overpotential * bc + log_i_corr, **cpt_style)
# anodic branch
plt.plot(V, overpotential * ba + log_i_corr, **cpt_style)
plt.ylim(ylim)
plt.xlim(xlim)
plt.tight_layout()
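
# --- added synthetic sketch (not part of the original module) ---
# Noiseless Butler-Volmer data should pass the zero-crossing check and yield
# a finite corrosion current; the coefficients below are arbitrary.
if __name__ == "__main__":
    E = np.linspace(-0.2, 0.2, 401)
    I = 1e-6 * (np.exp(10 * E) - np.exp(-10 * E))
    scan = TafelData({"potential": E, "current": I})
    print("crosses zero:", current_crosses_zero(scan))
    model = scan.fit(w=0.15)
    print(model.best_values)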
| 3,195 | 27.792793 | 87 | py |
autoSDC | autoSDC-master/asdc/analysis/ocp.py |
import logging
import numpy as np
import pandas as pd
from csaps import csaps
from pandas import DataFrame
import matplotlib.pyplot as plt
from asdc.analysis.echem_data import EchemData
logger = logging.getLogger(__name__)
def ocp_stop(x, y, time=90, tstart=300, thresh=0.00003):
t = tstart
deriv = np.inf
while (np.abs(deriv) > thresh) and (t < 900):
tstop = t + time
deriv = np.mean(y[(x > t) & (x < tstop)])
t = tstop
return t
def ocp_convergence(ocp, smooth=0.001, tr=100):
"""model an open circuit potential trace to check that it converges to a constant value
computes the average slope at the end of the potential trace
the RMS error of a cubic spline model
the maximum potential jump over a single measurement interval
and the hold stop time
"""
t, potential = ocp["elapsed_time"], ocp["potential"]
# estimate the derivative using a cubic spline model
model = csaps(t, potential, smooth=smooth)
dVdt = model.spline.derivative()(t)
tstop = ocp_stop(t, dVdt)
# average the smoothed derivative over the last time chunk
checktime = t.max() - tr
avslope = np.mean(dVdt[t > checktime])
# compute the largest spike in the finite difference derivative
maxdiff = np.max(np.abs(np.diff(potential)))
# compute the root-mean-square error of the spline model
rms = np.sqrt(np.mean((model.spline(t) - potential) ** 2))
results = {
"average_slope": avslope,
"rms": rms,
"spike": maxdiff,
"stop time": tstop,
}
return results
class OCPData(EchemData):
@property
def _constructor(self):
return OCPData
@property
def name(self):
return "OCP"
def check_quality(self):
""" OCP convergence criteria """
convergence_stats = ocp_convergence(self)
if convergence_stats["spike"] > 0.1:
logger.warning("OCP potential trace failed smoothness heuristic.")
logger.info(f"OCP check: {convergence_stats}")
return convergence_stats
def plot(self):
""" plot open circuit potential vs elapsed time """
# super().plot('elapsed_time', 'potential')
plt.plot(self.elapsed_time, self.potential)
plt.xlabel("elapsed time (s)")
plt.ylabel("potential (V)")
plt.tight_layout()
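
# --- added synthetic check (not part of the original module) ---
# An exponentially settling potential trace should report a small terminal
# slope; the time constant and offsets are made up. Requires csaps.
if __name__ == "__main__":
    t = np.linspace(0, 1000, 2000)
    V = 0.1 * np.exp(-t / 120) + 0.05
    trace = pd.DataFrame({"elapsed_time": t, "potential": V})
    print(ocp_convergence(trace))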
| 2,360 | 26.453488 | 91 | py |
autoSDC | autoSDC-master/asdc/analysis/report.py |
import os
import json
import dataset
import pandas as pd
import numpy as np
from asdc import analysis
def process_pH(pH_file, dir="data"):
data = {}
try:
df = pd.read_csv(os.path.join(dir, pH_file))
data["pH_initial"] = df.pH.iloc[0]
data["pH_final"] = df.pH.iloc[-1]
data["pH_avg"] = df.pH.mean()
data["pH_med"] = df.pH.median()
data["T_initial"] = df.temperature.iloc[0]
data["T_avg"] = df.temperature.mean()
    except Exception:
        # tolerate missing or malformed pH logs
        pass
return data
def process_ocp(experiments, dir="data"):
data = {}
# there should be only one!
experiment = experiments[0]
ocp = analysis.OCPData(pd.read_csv(os.path.join(dir, experiment["datafile"])))
data["E_oc_initial"] = ocp.potential.iloc[0]
data["E_oc_stable"] = ocp.potential.iloc[-20:].mean()
data["E_oc_stable_std"] = ocp.potential.iloc[-20:].std()
return data
def process_lpr(experiments, dir="data", r2_thresh=0.95):
data = []
for experiment in experiments:
datafile = os.path.join(dir, experiment["datafile"])
lpr = pd.read_csv(datafile)
lpr = analysis.LPRData(lpr)
lpr.check_quality(r2_thresh=r2_thresh)
pr, ocp, r2 = lpr.fit()
if r2 < r2_thresh:
pr = np.nan
ocp = np.nan
r2 = np.nan
data.append({"polarization_resistance": pr, "E_oc": ocp, "lpr_r2": r2})
df = pd.DataFrame(data)
data = {}
data["polarization_resistance"] = np.nanmean(df["polarization_resistance"])
data["polarization_std"] = np.nanstd(df["polarization_resistance"])
data["lpr_r2"] = np.nanmean(df["lpr_r2"])
data["lpr_r2_std"] = np.nanstd(df["lpr_r2"])
data["lpr_E_oc"] = np.nanmean(df["E_oc"])
data["lpr_E_oc_std"] = np.nanstd(df["E_oc"])
return data
def process_tafel(experiment, dir="data"):
tafel = analysis.TafelData(pd.read_csv(os.path.join(dir, experiment["datafile"])))
tafel.clip_current_to_range()
model = tafel.fit()
return {"tafel_E_oc": tafel.ocp, "i_corr": tafel.i_corr}
def process_row(row, db, dir="data"):
loc_id = row["id"]
instructions = json.loads(row["instructions"])
# unpack flowrate stuff
flow_instructions = instructions[0]
row["flow_rate"] = flow_instructions["flow_rate"]
row["relative_rates"] = flow_instructions["relative_rates"]
row["purge_time"] = flow_instructions["purge_time"]
# read and process pH logs
# oops -- forgot to save pH logfile in the db...
pH_file = f"pH_log_run{loc_id:03d}.csv"
pH_metadata = process_pH(pH_file, dir=dir)
row.update(pH_metadata)
# analyze OCP trace
ocp_experiments = list(db["experiment"].find(location_id=loc_id, op="open_circuit"))
if len(ocp_experiments) > 0:
ocp_metadata = process_ocp(ocp_experiments, dir=dir)
row.update(ocp_metadata)
# analyze LPR data
lpr_experiments = list(db["experiment"].find(location_id=loc_id, op="lpr"))
if len(lpr_experiments) > 0:
lpr_metadata = process_lpr(lpr_experiments, dir=dir)
row.update(lpr_metadata)
# analyze Tafel data
tafel_experiment = db["experiment"].find_one(location_id=loc_id, op="tafel")
if tafel_experiment:
tafel_metadata = process_tafel(tafel_experiment, dir=dir)
row.update(tafel_metadata)
# analyze CV data
return row
def load_session(db_path: str, verbose: bool = False) -> pd.DataFrame:
"""load metadata and analyze results
scan number (scan numbers by type (this will be useful if we begin more dynamic scan orders))
location
flowrates
pH (median?)
beginning and final ocp from hold
outputs from LPR fit
outputs from Tafel fit
start time
"""
dir, _ = os.path.split(db_path)
records = []
with dataset.connect(f"sqlite:///{db_path}") as db:
for location in db["location"]:
if verbose:
print(location)
records.append(process_row(location, db, dir=dir))
return pd.DataFrame(records)
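
# --- added hypothetical invocation (not part of the original module) ---
# The database path below is a placeholder; load_session expects the sqlite
# file produced during an SDC session, with the data files alongside it.
if __name__ == "__main__":
    summary = load_session("data/sdc_session.db", verbose=True)
    print(summary.head())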
| 4,047 | 27.914286 | 97 | py |
autoSDC | autoSDC-master/asdc/analysis/lpr.py |
from __future__ import annotations
import lmfit
import logging
import numpy as np
import pandas as pd
from scipy import stats
import matplotlib.pyplot as plt
from asdc.analysis.echem_data import EchemData, Status
logger = logging.getLogger(__name__)
def current_crosses_zero(df: pd.DataFrame) -> bool:
""" verify that a valid LPR scan has a current trace that crosses zero """
current = df["current"]
logger.debug("LPR check")
return current.min() < 0 and current.max() > 0
def _scan_range(df, potential_window=0.005) -> tuple[float, float]:
current, potential = df["current"].values, df["potential"].values
# find rough open circuit potential -- find zero crossing of current trace
# if the LPR fit is any good, then the intercept should give
# a more precise estimate of the open circuit potential
zcross = np.argmin(np.abs(current))
ocp = potential[zcross]
# select a window around OCP to fit
lb, ub = ocp - potential_window, ocp + potential_window
return lb, ub
def valid_scan_range(df: EchemData, potential_window: float = 0.005) -> bool:
""" check that an LPR scan has sufficient coverage around the open circuit potential """
current, potential = df["current"], df["potential"]
lb, ub = _scan_range(df, potential_window=potential_window)
return potential.min() <= lb and potential.max() >= ub
def best_lpr_fit(df: EchemData, potential_window, r2_thresh=0.95):
# straightforward linear fit
slope, intercept, r2 = polarization_resistance(df, potential_window)
current, potential, time = (
df["current"].values,
df["potential"].values,
df["elapsed_time"].values,
)
fit_current = current
if r2 < r2_thresh:
ps_list = [0, 0.33, 0.66]
best_chisq = np.inf
result = None
for ps in ps_list:
chisq, dc_current = sin_fit(time, current, phase_shift=ps * np.pi * 2)
if chisq < best_chisq:
result = dc_current
best_chisq = chisq
dc_current = result
corrected = pd.DataFrame({"current": dc_current, "potential": potential})
slope2, intercept2, r22 = polarization_resistance(corrected, potential_window)
if r22 > r2:
slope = slope2
intercept = intercept2
r2 = r22
fit_current = dc_current
return slope, intercept, r2, fit_current
def sinfun(x, amp, afreq, bfreq, phaseshift):
return amp * np.sin(x * (afreq * x + bfreq) + phaseshift)
def sin_fit(time, current, phase_shift=0):
# aliases to make lmfit code more idiomatic...
x, y = time, current
mod = lmfit.models.PolynomialModel(5, prefix="bkgd_")
pars = mod.guess(y, x=x)
sinmodel = lmfit.Model(sinfun, prefix="sin_")
mod += sinmodel
# sinpars=sinmodel.make_params(amp=(np.max(y)-np.min(y))/4,freq=1/20*2*np.pi,phaseshift=0)
sinpars = sinmodel.make_params(
amp=(np.max(y) - np.min(y) - pars["bkgd_c0"] * np.max(x)) / 2,
bfreq=1 / 20 * 2 * np.pi,
phaseshift=phase_shift,
afreq=0,
)
sinpars["sin_phaseshift"].min = 0
sinpars["sin_phaseshift"].max = 2 * np.pi
sinpars["sin_bfreq"].min = 0
# sinpars['sin_bfreq'].max=1
sinpars["sin_amp"].min = 0
sinpars["sin_amp"].max = (np.max(y) - np.min(y)) / 2
pars += sinpars
out = mod.fit(y, pars, x=x, method="nelder")
comps = out.eval_components(x=x)
y_real = y - comps["sin_"]
dc_current = comps["bkgd_"]
    chisq = out.chisqr
    return chisq, dc_current
def polarization_resistance(
df: EchemData, potential_window: float = 0.005
) -> tuple[float, float, float]:
"""extract polarization resistance: fit a linear model relating measured current to potential
Arguments:
df: polarization resistance scan data
potential_window: symmetric potential range around open circuit potential to fit polarization resistance model
"""
current, potential = df["current"].values, df["potential"].values
lb, ub = _scan_range(df, potential_window=potential_window)
fit_p = (potential >= lb) & (potential <= ub)
# quick linear regression
slope, intercept, r_value, p_value, std_err = stats.linregress(
current[fit_p], potential[fit_p]
)
r2 = r_value ** 2
return slope, intercept, r2
class LPRData(EchemData):
@property
def _constructor(self):
return LPRData
@property
def name(self):
return "LPR"
def check_quality(df, r2_thresh=0.95, w=5):
""" log results of quality checks and return a status code for control flow in the caller """
status = Status.OK
if not current_crosses_zero(df):
logger.warning("LPR current does not cross zero!")
status = max(status, Status.WARN)
if not valid_scan_range(df, potential_window=w * 1e-3):
logger.warning(f"scan range does not span +/- {w} mV")
status = max(status, Status.WARN)
slope, intercept, r2 = polarization_resistance(df)
if r2 < r2_thresh:
logger.warning("R^2 threshold not met")
status = max(status, Status.WARN)
logger.info(f"LPR slope: {slope} (R2={r2}), OCP: {intercept}")
return status
def fit(self):
""" fit a polarization resistance model (linear model in +/- 5mV of OCP) """
slope, intercept, r2, fit_current = best_lpr_fit(self, 0.005)
self.polarization_resistance = slope
self.open_circuit_potential = intercept
self.r_value = r2
self.fit_current = fit_current
return slope, intercept, r2
def evaluate_model(self, x):
""" evaluate the fitted linear model """
return self.open_circuit_potential + self.polarization_resistance * x
def plot(self, fit=False):
"""LPR plot: plot current vs potential
Optional: plot a regression line computing the polarization resistance
"""
# # super().plot('current', 'potential')
plt.plot(self["current"], self["potential"], ".")
plt.axvline(0, color="k", alpha=0.5, linewidth=0.5)
plt.xlabel("current (A)")
plt.ylabel("potential (V)")
if fit:
self.fit()
ylim = plt.ylim()
x = np.linspace(self.current.min(), self.current.max(), 100)
plt.plot(x, self.evaluate_model(x), linestyle="--", color="k", alpha=0.5)
plt.plot(self.fit_current, self["potential"])
plt.ylim(ylim)
plt.tight_layout()
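
# --- added synthetic check (not part of the original module) ---
# A perfectly linear I/V trace crossing zero current should recover the slope
# as the polarization resistance with r2 ~ 1; the 5 kOhm value is arbitrary.
if __name__ == "__main__":
    I = np.linspace(-1e-6, 1e-6, 101)
    V = 0.1 + 5000.0 * I
    scan = LPRData(
        {"current": I, "potential": V, "elapsed_time": np.linspace(0, 10, 101)}
    )
    print(polarization_resistance(scan))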
| 6,577 | 31.245098 | 118 | py |
autoSDC | autoSDC-master/asdc/analysis/__init__.py |
from asdc.analysis.echem_data import EchemData
from asdc.analysis.cv import CVData
from asdc.analysis.lpr import LPRData
from asdc.analysis.lsv import LSVData
from asdc.analysis.ocp import OCPData
from asdc.analysis.tafel import TafelData
from asdc.analysis.potentiodynamic import PotentiodynamicData
from asdc.analysis.potentiostatic import PotentiostaticData
from asdc.analysis import lpr
| 392 | 34.727273 | 61 | py |
autoSDC | autoSDC-master/asdc/analysis/potentiostatic.py |
import logging
import numpy as np
import pandas as pd
from scipy import stats
import matplotlib.pyplot as plt
from asdc.analysis.echem_data import EchemData
logger = logging.getLogger(__name__)
class PotentiostaticData(EchemData):
@property
def _constructor(self):
return PotentiostaticData
@property
def name(self):
return "Potentiostatic"
def check_quality(self):
""" Potentiostatic heuristics: not implemented. """
return True
def plot(self, fit=False):
""" plot Potentiostatic: current vs potential """
# # super().plot('current', 'potential')
plt.plot(self["elapsed_time"], self["current"])
plt.xlabel("elapsed time (s)")
plt.ylabel("current (A)")
plt.tight_layout()
| 784 | 22.787879 | 59 | py |
autoSDC | autoSDC-master/asdc/analysis/echem_data.py |
import numpy as np
import pandas as pd
from enum import IntEnum
class Status(IntEnum):
OK = 0
WARN = 1
RETRY = 2
FAIL = 3
class EchemData(pd.DataFrame):
def check_quality(self):
"""run diagnostics, return status that explains what to do and why.
maybe status could be like
OK -- no errors, proceed
Warn -- errors, nothing to be done, so proceed?
Retry -- error, redo the measurement to try to get a replicate
Fail -- error, no point in performing subsequent work
"""
raise NotImplementedError
def clip_current_to_range(self):
""" discard current values where current exceeds hard current limit """
self.current[self.current.abs() > self.current_range] = np.nan
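
# --- added illustration (not part of the original module) ---
# Status is an IntEnum, so severity can be escalated with max(), which is how
# the analysis classes combine the outcomes of several quality heuristics.
if __name__ == "__main__":
    status = max(Status.OK, Status.WARN)
    assert status is Status.WARN
    print(status)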
| 771 | 26.571429 | 79 | py |
pmb-nll | pmb-nll-main/src/single_image_inference.py |
"""
Probabilistic Detectron Single Image Inference Script
"""
import json
import os
import sys
import cv2
import torch
import tqdm
import core
# This is very ugly. Essential for now but should be fixed.
sys.path.append(os.path.join(core.top_dir(), "src", "detr"))
from detectron2.data import MetadataCatalog
from detectron2.data.transforms import ResizeShortestEdge
# Detectron imports
from detectron2.engine import launch
# Project imports
from core.evaluation_tools.evaluation_utils import (
get_train_contiguous_id_to_test_thing_dataset_id_dict,
)
from core.setup import setup_arg_parser, setup_config
from probabilistic_inference.inference_utils import build_predictor, instances_to_json
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
def main(args):
# Setup config
cfg = setup_config(args, random_seed=args.random_seed, is_testing=True)
# Make sure only 1 data point is processed at a time. This simulates
# deployment.
cfg.defrost()
cfg.DATALOADER.NUM_WORKERS = 32
cfg.SOLVER.IMS_PER_BATCH = 1
cfg.MODEL.DEVICE = device.type
# Set up number of cpu threads#
torch.set_num_threads(cfg.DATALOADER.NUM_WORKERS)
# Create inference output directory
inference_output_dir = os.path.expanduser(args.output_dir)
os.makedirs(inference_output_dir, exist_ok=True)
# Get category mapping dictionary. Mapping here is from coco-->coco
train_thing_dataset_id_to_contiguous_id = MetadataCatalog.get(
cfg.DATASETS.TRAIN[0]
).thing_dataset_id_to_contiguous_id
test_thing_dataset_id_to_contiguous_id = MetadataCatalog.get(
cfg.DATASETS.TRAIN[0]
).thing_dataset_id_to_contiguous_id
# If both dicts are equal or if we are performing out of distribution
# detection, just flip the test dict.
cat_mapping_dict = get_train_contiguous_id_to_test_thing_dataset_id_dict(
cfg,
args,
train_thing_dataset_id_to_contiguous_id,
test_thing_dataset_id_to_contiguous_id,
)
# Build predictor
cfg.MODEL.WEIGHTS = os.path.expanduser(args.model_ckpt)
predictor = build_predictor(cfg)
# List images in image folder
image_folder = os.path.expanduser(args.image_dir)
image_list = os.listdir(image_folder)
# Construct image resizer
resizer = ResizeShortestEdge(
cfg.INPUT.MIN_SIZE_TEST, max_size=cfg.INPUT.MAX_SIZE_TEST
)
final_output_list = []
with torch.no_grad():
with tqdm.tqdm(total=len(image_list)) as pbar:
for idx, input_file_name in enumerate(image_list):
# Read image, apply shortest edge resize, and change to channel first position
cv2_image = cv2.imread(os.path.join(image_folder, input_file_name))
                # cv2.imread returns None on failure, so guard before .size
                if cv2_image is not None and cv2_image.size != 0:
shape = cv2_image.shape
height = shape[0]
width = shape[1]
output_transform = resizer.get_transform(cv2_image)
cv2_image = output_transform.apply_image(cv2_image)
                    input_im_tensor = torch.tensor(cv2_image).to(device).permute(2, 0, 1)
input_im = [
dict(
{
"filename": input_file_name,
"image_id": input_file_name,
"height": height,
"width": width,
"image": input_im_tensor,
}
)
]
# Perform inference
outputs = predictor(input_im)
# predictor.visualize_inference(input_im, outputs)
final_output_list.extend(
instances_to_json(
outputs, input_im[0]["image_id"], cat_mapping_dict
)
)
pbar.update(1)
else:
print("Failed to read image {}".format(input_file_name))
with open(os.path.join(inference_output_dir, "results.json"), "w") as fp:
json.dump(final_output_list, fp, indent=4, separators=(",", ": "))
if __name__ == "__main__":
# Create arg parser
arg_parser = setup_arg_parser()
args = arg_parser.parse_args()
# Support single gpu inference only.
args.num_gpus = 1
print("Command Line Args:", args)
launch(
main,
args.num_gpus,
num_machines=args.num_machines,
machine_rank=args.machine_rank,
dist_url=args.dist_url,
args=(args,),
)
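# Example invocation (sketch; all paths and config names below are
# hypothetical, but every flag is defined either in core.setup.setup_arg_parser
# or in detectron2's default argument parser):
#   python src/single_image_inference.py \
#       --config-file COCO-Detection/my_model/my_model.yaml \
#       --inference-config Inference/my_inference.yaml \
#       --model-ckpt ~/checkpoints/model_final.pth \
#       --image-dir ~/images \
#       --output-dir ~/results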
| 4,694
| 32.297872
| 94
|
py
|
pmb-nll
|
pmb-nll-main/src/apply_net.py
|
"""
Probabilistic Detectron Inference Script
"""
import json
import os
import sys
from shutil import copyfile
import torch
import tqdm
import core
# This is very ugly. Essential for now but should be fixed.
sys.path.append(os.path.join(core.top_dir(), "src", "detr"))
from detectron2.data import MetadataCatalog, build_detection_test_loader
# Detectron imports
from detectron2.engine import launch
# Project imports
from core.evaluation_tools.evaluation_utils import (
get_train_contiguous_id_to_test_thing_dataset_id_dict,
)
from core.setup import setup_arg_parser, setup_config
from offline_evaluation import (
compute_average_precision,
compute_calibration_errors,
compute_ood_probabilistic_metrics,
compute_probabilistic_metrics,
)
from probabilistic_inference.inference_utils import (
build_predictor,
get_inference_output_dir,
instances_to_json,
)
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
def main(args):
# Setup config
cfg = setup_config(args, random_seed=args.random_seed, is_testing=True)
# Make sure only 1 data point is processed at a time. This simulates
# deployment.
cfg.defrost()
cfg.DATALOADER.NUM_WORKERS = 32
cfg.SOLVER.IMS_PER_BATCH = 1
cfg.MODEL.DEVICE = device.type
    # Set up number of CPU threads
torch.set_num_threads(cfg.DATALOADER.NUM_WORKERS)
# Create inference output directory and copy inference config file to keep
# track of experimental settings
if args.inference_dir == "":
inference_output_dir = get_inference_output_dir(
cfg["OUTPUT_DIR"],
args.test_dataset,
args.inference_config,
args.image_corruption_level,
)
else:
inference_output_dir = args.inference_dir
if not os.path.isdir(inference_output_dir):
os.makedirs(inference_output_dir, exist_ok=True)
os.makedirs(inference_output_dir, exist_ok=True)
copyfile(
args.inference_config,
os.path.join(inference_output_dir, os.path.split(args.inference_config)[-1]),
)
# Get category mapping dictionary:
train_thing_dataset_id_to_contiguous_id = MetadataCatalog.get(
cfg.DATASETS.TRAIN[0]
).thing_dataset_id_to_contiguous_id
test_thing_dataset_id_to_contiguous_id = MetadataCatalog.get(
args.test_dataset
).thing_dataset_id_to_contiguous_id
# If both dicts are equal or if we are performing out of distribution
# detection, just flip the test dict.
cat_mapping_dict = get_train_contiguous_id_to_test_thing_dataset_id_dict(
cfg,
args,
train_thing_dataset_id_to_contiguous_id,
test_thing_dataset_id_to_contiguous_id,
)
# Build predictor
predictor = build_predictor(cfg)
test_data_loader = build_detection_test_loader(cfg, dataset_name=args.test_dataset)
final_output_list = []
if not args.eval_only:
with torch.no_grad():
with tqdm.tqdm(total=len(test_data_loader)) as pbar:
for idx, input_im in enumerate(test_data_loader):
                    # Perform inference (image corruption, if any, was applied
                    # when the dataset was built, not here)
outputs = predictor(input_im)
# print(f'Image id {input_im[0]["image_id"]}')
# predictor.visualize_inference(input_im, outputs)
final_output_list.extend(
instances_to_json(
outputs, input_im[0]["image_id"], cat_mapping_dict
)
)
pbar.update(1)
with open(
os.path.join(inference_output_dir, "coco_instances_results.json"), "w"
) as fp:
json.dump(final_output_list, fp, indent=4, separators=(",", ": "))
if "ood" in args.test_dataset:
compute_ood_probabilistic_metrics.main(args, cfg)
else:
compute_average_precision.main(args, cfg, inference_output_dir)
compute_probabilistic_metrics.main(
args, cfg, inference_output_dir=inference_output_dir, min_allowed_score=args.min_allowed_score
)
compute_calibration_errors.main(
args, cfg, inference_output_dir=inference_output_dir
)
if __name__ == "__main__":
# Create arg parser
arg_parser = setup_arg_parser()
args = arg_parser.parse_args()
# Support single gpu inference only.
args.num_gpus = 1
print("Command Line Args:", args)
launch(
main,
args.num_gpus,
num_machines=args.num_machines,
machine_rank=args.machine_rank,
dist_url=args.dist_url,
args=(args,),
)
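# Example invocation (sketch; config names are hypothetical, the dataset name
# is one of those documented for --test-dataset in core.setup):
#   python src/apply_net.py \
#       --config-file COCO-Detection/my_model/my_model.yaml \
#       --inference-config Inference/my_inference.yaml \
#       --test-dataset coco_2017_custom_val \
#       --image-corruption-level 0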
| 4,645
| 30.391892
| 106
|
py
|
pmb-nll
|
pmb-nll-main/src/single_image_inference_eval.py
|
"""
Probabilistic Detectron Single Image Inference Script
Runs inference and evaluation on specified images, rather than on entire dataset.
"""
import json
import os
import sys
from shutil import copyfile, rmtree
import torch
import tqdm
import core
# This is very ugly. Essential for now but should be fixed.
sys.path.append(os.path.join(core.top_dir(), "src", "detr"))
from detectron2.data import MetadataCatalog, build_detection_test_loader
# Detectron imports
from detectron2.engine import launch
from core.evaluation_tools import evaluation_utils
# Project imports
from core.evaluation_tools.evaluation_utils import \
get_train_contiguous_id_to_test_thing_dataset_id_dict
from core.setup import setup_arg_parser, setup_config
from offline_evaluation import (compute_average_precision,
compute_calibration_errors,
compute_ood_probabilistic_metrics,
compute_probabilistic_metrics)
from probabilistic_inference.inference_utils import (build_predictor,
get_inference_output_dir,
instances_to_json)
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
def main(args):
# Setup config
cfg = setup_config(args, random_seed=args.random_seed, is_testing=True)
# Make sure only 1 data point is processed at a time. This simulates
# deployment.
cfg.defrost()
cfg.DATALOADER.NUM_WORKERS = 32
cfg.SOLVER.IMS_PER_BATCH = 1
cfg.MODEL.DEVICE = device.type
    # Set up number of CPU threads
torch.set_num_threads(cfg.DATALOADER.NUM_WORKERS)
# Create inference output directory and copy inference config file to keep
# track of experimental settings
if args.inference_dir == "":
inference_output_dir = get_inference_output_dir(
cfg["OUTPUT_DIR"],
args.test_dataset,
args.inference_config,
args.image_corruption_level,
)
else:
inference_output_dir = args.inference_dir
if not os.path.isdir(inference_output_dir):
os.makedirs(inference_output_dir, exist_ok=True)
os.makedirs(inference_output_dir, exist_ok=True)
copyfile(
args.inference_config,
os.path.join(inference_output_dir, os.path.split(args.inference_config)[-1]),
)
# Get category mapping dictionary:
train_thing_dataset_id_to_contiguous_id = MetadataCatalog.get(
cfg.DATASETS.TRAIN[0]
).thing_dataset_id_to_contiguous_id
test_thing_dataset_id_to_contiguous_id = MetadataCatalog.get(
args.test_dataset
).thing_dataset_id_to_contiguous_id
# If both dicts are equal or if we are performing out of distribution
# detection, just flip the test dict.
cat_mapping_dict = get_train_contiguous_id_to_test_thing_dataset_id_dict(
cfg,
args,
train_thing_dataset_id_to_contiguous_id,
test_thing_dataset_id_to_contiguous_id,
)
# Build predictor
predictor = build_predictor(cfg)
test_data_loader = build_detection_test_loader(cfg, dataset_name=args.test_dataset)
# Prepare GT annos
cfg.defrost()
cfg.ACTUAL_TEST_DATASET = args.test_dataset
preprocessed_gt_instances = (
evaluation_utils.get_per_frame_preprocessed_gt_instances(
cfg, inference_output_dir
)
)
final_output_list = []
    # Example image_ids to visualize; set to an empty list to process every
    # image in the dataset.
    image_ids = [2153, 2261, 6894, 10764, 17905, 23272]
with torch.no_grad():
with tqdm.tqdm(total=len(test_data_loader)) as pbar:
for idx, input_im in enumerate(test_data_loader):
image_id = input_im[0]["image_id"]
if len(image_ids) and image_id not in image_ids:
pbar.update(1)
continue
if not args.eval_only:
                    # Perform inference
outputs = predictor(input_im)
json_instances = instances_to_json(
outputs, image_id, cat_mapping_dict
)
final_output_list.extend(json_instances)
# Save instances for this prediction only to temporary dir
tmp_inference_dir = os.path.join(inference_output_dir, "tmp")
rmtree(tmp_inference_dir, ignore_errors=True)
os.makedirs(tmp_inference_dir, exist_ok=True)
with open(
os.path.join(tmp_inference_dir, "coco_instances_results.json"),
"w",
) as fp:
json.dump(json_instances, fp, indent=4, separators=(",", ": "))
# Load in standard evaluation format
preprocessed_predicted_instances = (
evaluation_utils.eval_predictions_preprocess(json_instances)
)
else:
tmp_inference_dir = inference_output_dir
outputs = (
evaluation_utils.get_per_frame_preprocessed_pred_instances(
cfg, tmp_inference_dir, image_id, 0.0
)
)
preprocessed_gt_instance = {}
for k, v in preprocessed_gt_instances.items():
for img_id, t in v.items():
if img_id == image_id:
preprocessed_gt_instance[k] = t
if len(preprocessed_gt_instance) == 0:
preprocessed_gt_instance = None
class_map = MetadataCatalog[cfg.ACTUAL_TEST_DATASET].get(
"thing_classes"
)
gt_class_map = MetadataCatalog[cfg.ACTUAL_TEST_DATASET].thing_dataset_id_to_contiguous_id
predictor.visualize_inference(
input_im,
outputs,
preprocessed_gt_instance,
min_allowed_score=0.1,
class_map=class_map,
gt_class_map=gt_class_map,
num_samples=0,
)
# Compute metrics for this prediction only
compute_average_precision.main(args, cfg, tmp_inference_dir, [image_id])
compute_probabilistic_metrics.main(
args,
cfg,
inference_output_dir=tmp_inference_dir,
image_ids=[image_id],
min_allowed_score=0.0,
)
pbar.update(1)
with open(
os.path.join(inference_output_dir, "coco_instances_results.json"), "w"
) as fp:
json.dump(final_output_list, fp, indent=4, separators=(",", ": "))
if "ood" in args.test_dataset:
compute_ood_probabilistic_metrics.main(args, cfg)
else:
compute_average_precision.main(args, cfg, inference_output_dir, image_ids)
compute_probabilistic_metrics.main(
args, cfg, inference_output_dir=inference_output_dir, image_ids=image_ids
)
compute_calibration_errors.main(
args, cfg, inference_output_dir=inference_output_dir
)
if __name__ == "__main__":
# Create arg parser
arg_parser = setup_arg_parser()
args = arg_parser.parse_args()
# Support single gpu inference only.
args.num_gpus = 1
print("Command Line Args:", args)
launch(
main,
args.num_gpus,
num_machines=args.num_machines,
machine_rank=args.machine_rank,
dist_url=args.dist_url,
args=(args,),
)
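# Example invocation (sketch; config names are hypothetical). Edit the
# image_ids list in main() above to choose which frames are visualized and
# scored:
#   python src/single_image_inference_eval.py \
#       --config-file COCO-Detection/my_model/my_model.yaml \
#       --inference-config Inference/my_inference.yaml \
#       --test-dataset coco_2017_custom_val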
| 7,829
| 36.644231
| 105
|
py
|
pmb-nll
|
pmb-nll-main/src/__init__.py
| 0
| 0
| 0
|
py
|
|
pmb-nll
|
pmb-nll-main/src/train_net.py
|
"""
Probabilistic Detectron Training Script following Detectron2 training script found at detectron2/tools.
"""
import logging
import os
import random
import sys
import core
# This is very ugly. Essential for now but should be fixed.
sys.path.append(os.path.join(core.top_dir(), "src", "detr"))
# Detectron imports
import detectron2.utils.comm as comm
# DETR imports
from d2.train_net import Trainer as Detr_Trainer
from detectron2.checkpoint import DetectionCheckpointer
from detectron2.data import build_detection_test_loader, build_detection_train_loader
from detectron2.engine import DefaultTrainer, launch
from detectron2.evaluation import COCOEvaluator, DatasetEvaluators, verify_results
from detectron2.modeling import build_model
# Project imports
from core.setup import setup_arg_parser, setup_config
from probabilistic_modeling.modeling_utils import freeze_non_probabilistic_weights
class Trainer(DefaultTrainer):
@classmethod
def build_evaluator(cls, cfg, dataset_name):
"""
Builds evaluators for post-training mAP report.
Args:
cfg(CfgNode): a detectron2 CfgNode
dataset_name(str): registered dataset name
Returns:
detectron2 DatasetEvaluators object
"""
output_folder = os.path.join(cfg.OUTPUT_DIR, "inference")
evaluators = [COCOEvaluator(dataset_name, cfg, True, output_folder)]
return DatasetEvaluators(evaluators)
@classmethod
def build_test_loader(cls, cfg, dataset_name):
"""
Builds DataLoader for test set.
Args:
cfg(CfgNode): a detectron2 CfgNode
dataset_name(str): registered dataset name
Returns:
detectron2 DataLoader object specific to the test set.
"""
return build_detection_test_loader(cfg, dataset_name)
@classmethod
def build_train_loader(cls, cfg):
"""
Builds DataLoader for train set.
Args:
cfg(CfgNode): a detectron2 CfgNode
Returns:
detectron2 DataLoader object specific to the train set.
"""
return build_detection_train_loader(cfg)
@classmethod
def build_model(cls, cfg):
"""
Returns:
torch.nn.Module:
It now calls :func:`detectron2.modeling.build_model`.
Overwrite it if you'd like a different model.
"""
model = build_model(cfg)
logger = logging.getLogger(__name__)
logger.info("Model:\n{}".format(model))
freeze_non_probabilistic_weights(cfg, model)
return model
class Custom_Detr_Trainer(Detr_Trainer):
@classmethod
def build_model(cls, cfg):
"""
Returns:
torch.nn.Module:
It now calls :func:`detectron2.modeling.build_model`.
Overwrite it if you'd like a different model.
"""
model = build_model(cfg)
logger = logging.getLogger(__name__)
logger.info("Model:\n{}".format(model))
freeze_non_probabilistic_weights(cfg, model)
return model
def main(args):
# Setup config node
cfg = setup_config(args, random_seed=args.random_seed)
    # For debugging only:
    # cfg.defrost()
    # cfg.DATALOADER.NUM_WORKERS = 0
    # cfg.SOLVER.IMS_PER_BATCH = 1
    # Build Trainer from config node, then either run eval-only mode to
    # produce mAP results or begin training.
if cfg.MODEL.META_ARCHITECTURE == "ProbabilisticDetr":
trainer = Custom_Detr_Trainer(cfg)
else:
trainer = Trainer(cfg)
if args.eval_only:
model = trainer.build_model(cfg)
DetectionCheckpointer(model, save_dir=cfg.OUTPUT_DIR).resume_or_load(
cfg.MODEL.WEIGHTS, resume=args.resume
)
res = trainer.test(cfg, model)
if comm.is_main_process():
verify_results(cfg, res)
return res
trainer.resume_or_load(resume=args.resume)
return trainer.train()
if __name__ == "__main__":
# Create arg parser
arg_parser = setup_arg_parser()
args = arg_parser.parse_args()
print("Command Line Args:", args)
if args.random_port:
port = random.randint(1024, 65535)
args.dist_url = "tcp://127.0.0.1:{}".format(port)
launch(
main,
args.num_gpus,
num_machines=args.num_machines,
machine_rank=args.machine_rank,
dist_url=args.dist_url,
args=(args,),
)
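# Example invocation (sketch; the config name is hypothetical). --num-gpus and
# --eval-only come from detectron2's default argument parser; --random-port is
# added in core.setup.setup_arg_parser:
#   python src/train_net.py --config-file COCO-Detection/my_model/my_model.yaml \
#       --num-gpus 1 --random-port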
| 4,421
| 28.284768
| 103
|
py
|
pmb-nll
|
pmb-nll-main/src/core/setup.py
|
import os
import random
import time
from shutil import copyfile
# Detectron imports
import detectron2.utils.comm as comm
import numpy as np
import torch
# Detr imports
from d2.detr.config import add_detr_config
from detectron2.config import CfgNode as CN
from detectron2.config import get_cfg
from detectron2.engine import default_argument_parser, default_setup
from detectron2.utils.logger import setup_logger
from probabilistic_modeling.probabilistic_detr import ProbabilisticDetr
from probabilistic_modeling.probabilistic_generalized_rcnn import (
DropoutFastRCNNConvFCHead,
ProbabilisticGeneralizedRCNN,
ProbabilisticROIHeads,
)
from probabilistic_modeling.probabilistic_retinanet import ProbabilisticRetinaNet
# Project imports
import core
from core.datasets.setup_datasets import setup_all_datasets
def setup_arg_parser():
"""
Sets up argument parser for python scripts.
Returns:
arg_parser (ArgumentParser): Argument parser updated with probabilistic detectron args.
"""
arg_parser = default_argument_parser()
arg_parser.add_argument(
"--dataset-dir", type=str, default="", help="path to dataset directory."
)
arg_parser.add_argument(
"--random-seed",
type=int,
default=0,
help="random seed to be used for all scientific computing libraries",
)
# Inference arguments, will not be used during training.
arg_parser.add_argument(
"--inference-config",
type=str,
default="",
help="Inference parameter: Path to the inference config, which is different from training config. Check readme for more information.",
)
arg_parser.add_argument(
"--test-dataset",
type=str,
default="",
help="Inference parameter: Dataset used for testing. Can be one of the following: 'coco_2017_custom_val', 'openimages_val', 'openimages_ood_val' ",
)
arg_parser.add_argument(
"--image-corruption-level",
type=int,
default=0,
help="Inference parameter: Image corruption level between 0-5. Default is no corruption, level 0.",
)
# Evaluation arguments, will not be used during training.
arg_parser.add_argument(
"--iou-min",
type=float,
default=0.1,
help="Evaluation parameter: IOU threshold bellow which a detection is considered a false positive.",
)
arg_parser.add_argument(
"--iou-correct",
type=float,
default=0.5,
help="Evaluation parameter: IOU threshold above which a detection is considered a true positive.",
)
arg_parser.add_argument(
"--min-allowed-score",
type=float,
default=0.0,
help="Evaluation parameter:Minimum classification score for which a detection is considered in the evaluation. Set to -1 for optimal F1-score.",
)
arg_parser.add_argument(
"--inference-dir",
type=str,
default="",
help="Directory where inference files will be stored.",
)
# Single image inference parameters
arg_parser.add_argument(
"--model-ckpt",
type=str,
default="",
help="Single image inference parameter: path to model checkpoint used for inference.",
)
arg_parser.add_argument(
"--image-dir",
type=str,
default="",
help="Single image inference parameter: path to image directory",
)
arg_parser.add_argument(
"--output-dir", type=str, default="", help="Path to where to save outputs"
)
arg_parser.add_argument(
"--random-port",
action="store_true",
help="Use a randomized port number to avoid issues with multiple multi-GPU jobs on same machine.",
)
return arg_parser
def add_probabilistic_config(cfg):
"""
Add configuration elements specific to probabilistic detectron.
Args:
cfg (CfgNode): detectron2 configuration node.
"""
_C = cfg
# Probabilistic Modeling Setup
_C.MODEL.PROBABILISTIC_MODELING = CN()
_C.MODEL.PROBABILISTIC_MODELING.MC_DROPOUT = CN()
_C.MODEL.PROBABILISTIC_MODELING.CLS_VAR_LOSS = CN()
_C.MODEL.PROBABILISTIC_MODELING.BBOX_COV_LOSS = CN()
_C.MODEL.PROBABILISTIC_MODELING.NLL_MAX_NUM_SOLUTIONS = 25
_C.MODEL.PROBABILISTIC_MODELING.MATCHING_DISTANCE = "log_prob"
_C.MODEL.PROBABILISTIC_MODELING.PPP = CN()
# PPP stuff
_C.MODEL.PROBABILISTIC_MODELING.PPP.INTENSITY_TYPE = "uniform"
_C.MODEL.PROBABILISTIC_MODELING.PPP.UNIFORM_INTENSITY = -np.inf
_C.MODEL.PROBABILISTIC_MODELING.PPP.NUM_GAUSS_MIXTURES = 10
_C.MODEL.PROBABILISTIC_MODELING.PPP.COV_TYPE = "diagonal"
_C.MODEL.PROBABILISTIC_MODELING.PPP.USE_PREDICTION_MIXTURE = False
_C.MODEL.TRAIN_ONLY_UNCERTAINTY_PREDS = False
_C.MODEL.TRAIN_PPP = True
_C.MODEL.TRAIN_ONLY_PPP = False
# Annealing step for losses that require some form of annealing
_C.MODEL.PROBABILISTIC_MODELING.ANNEALING_STEP = 0
# Monte-Carlo Dropout Settings
_C.MODEL.PROBABILISTIC_MODELING.DROPOUT_RATE = 0.0
# Loss configs
_C.MODEL.PROBABILISTIC_MODELING.CLS_VAR_LOSS.NAME = "none"
_C.MODEL.PROBABILISTIC_MODELING.CLS_VAR_LOSS.NUM_SAMPLES = 3
_C.MODEL.PROBABILISTIC_MODELING.BBOX_COV_LOSS.NAME = "none"
_C.MODEL.PROBABILISTIC_MODELING.BBOX_COV_LOSS.COVARIANCE_TYPE = "diagonal"
_C.MODEL.PROBABILISTIC_MODELING.BBOX_COV_LOSS.NUM_SAMPLES = 1000
_C.MODEL.PROBABILISTIC_MODELING.BBOX_COV_LOSS.DISTRIBUTION_TYPE = "gaussian"
# Probabilistic Inference Setup
_C.PROBABILISTIC_INFERENCE = CN()
_C.PROBABILISTIC_INFERENCE.MC_DROPOUT = CN()
_C.PROBABILISTIC_INFERENCE.BAYES_OD = CN()
_C.PROBABILISTIC_INFERENCE.ENSEMBLES_DROPOUT = CN()
_C.PROBABILISTIC_INFERENCE.ENSEMBLES = CN()
# General Inference Configs
_C.PROBABILISTIC_INFERENCE.INFERENCE_MODE = "standard_nms"
_C.PROBABILISTIC_INFERENCE.MC_DROPOUT.ENABLE = False
_C.PROBABILISTIC_INFERENCE.MC_DROPOUT.NUM_RUNS = 1
_C.PROBABILISTIC_INFERENCE.AFFINITY_THRESHOLD = 0.7
_C.PROBABILISTIC_INFERENCE.USE_MC_SAMPLING = True
_C.PROBABILISTIC_INFERENCE.TREAT_AS_MB = False
_C.PROBABILISTIC_INFERENCE.PPP_CONFIDENCE_THRES = 0.0
_C.PROBABILISTIC_INFERENCE.LOAD_PPP_FROM_MODEL = False
# Bayes OD Configs
_C.PROBABILISTIC_INFERENCE.BAYES_OD.BOX_MERGE_MODE = "bayesian_inference"
_C.PROBABILISTIC_INFERENCE.BAYES_OD.CLS_MERGE_MODE = "bayesian_inference"
_C.PROBABILISTIC_INFERENCE.BAYES_OD.DIRCH_PRIOR = "uniform"
# Ensembles Configs
_C.PROBABILISTIC_INFERENCE.ENSEMBLES.BOX_MERGE_MODE = "pre_nms"
_C.PROBABILISTIC_INFERENCE.ENSEMBLES.RANDOM_SEED_NUMS = [0, 1000, 2000, 3000, 4000]
# 'mixture_of_gaussian' or 'bayesian_inference'
_C.PROBABILISTIC_INFERENCE.ENSEMBLES.BOX_FUSION_MODE = "mixture_of_gaussians"
def setup_config(args, random_seed=None, is_testing=False):
"""
Sets up config node with probabilistic detectron elements. Also sets up a fixed random seed for all scientific
computing libraries, and sets up all supported datasets as instances of coco.
Args:
args (Namespace): args from argument parser
random_seed (int): set a fixed random seed throughout torch, numpy, and python
        is_testing (bool): set to true for inference. The check that errored when the checkpoint
            directory did not already exist is currently disabled.
Returns:
(CfgNode) detectron2 config object
"""
# Get default detectron config file
cfg = get_cfg()
add_detr_config(cfg)
add_probabilistic_config(cfg)
# Update default config file with custom config file
configs_dir = core.configs_dir()
args.config_file = os.path.join(configs_dir, args.config_file)
cfg.merge_from_file(args.config_file)
# Add dropout rate for faster RCNN box head
cfg.MODEL.ROI_BOX_HEAD.DROPOUT_RATE = cfg.MODEL.PROBABILISTIC_MODELING.DROPOUT_RATE
# Update config with inference configurations. Only applicable for when in
# probabilistic inference mode.
if args.inference_config != "":
args.inference_config = os.path.join(configs_dir, args.inference_config)
cfg.merge_from_file(args.inference_config)
# Create output directory
model_name = os.path.split(os.path.split(args.config_file)[0])[-1]
dataset_name = os.path.split(os.path.split(os.path.split(args.config_file)[0])[0])[
-1
]
if args.output_dir == "":
cfg["OUTPUT_DIR"] = os.path.join(
core.data_dir(),
dataset_name,
model_name,
os.path.split(args.config_file)[-1][:-5],
"random_seed_" + str(random_seed),
time.strftime("%Y%m%d-%H%M%S"),
)
else:
cfg["OUTPUT_DIR"] = args.output_dir
    if is_testing and not os.path.isdir(cfg["OUTPUT_DIR"]):
        # The hard failure is disabled for now; previously this raised
        # NotADirectoryError("Checkpoint directory {} does not exist.")
        pass
os.makedirs(cfg["OUTPUT_DIR"], exist_ok=True)
# copy config file to output directory
copyfile(
args.config_file,
os.path.join(cfg["OUTPUT_DIR"], os.path.split(args.config_file)[-1]),
)
    # Record the random seed in the config (frozen below)
    cfg["SEED"] = random_seed
# Set device automatically
if not torch.cuda.is_available():
print("[NLLOD]: CUDA not available, using device=cpu")
cfg.MODEL.DEVICE = "cpu"
cfg.freeze()
# Initiate default setup
default_setup(cfg, args)
# Setup logger for probabilistic detectron module
setup_logger(
output=cfg.OUTPUT_DIR,
distributed_rank=comm.get_rank(),
name="Probabilistic Detectron",
)
# Set a fixed random seed for all numerical libraries
if random_seed is not None:
torch.manual_seed(random_seed)
np.random.seed(random_seed)
random.seed(random_seed)
# Setup datasets
if args.image_corruption_level != 0:
image_root_corruption_prefix = "_" + str(args.image_corruption_level)
else:
image_root_corruption_prefix = None
dataset_dir = os.path.expanduser(args.dataset_dir)
    # Handle the case where this function has been called multiple times:
    # re-registering the datasets raises an AssertionError, which is swallowed.
    # TODO: this is very bad practice and should be fixed.
try:
setup_all_datasets(
dataset_dir, image_root_corruption_prefix=image_root_corruption_prefix
)
return cfg
except AssertionError:
return cfg
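# Usage sketch: this module is not run directly. Entry points such as
# train_net.py wire it up roughly like this (the config path is hypothetical):
#
#   arg_parser = setup_arg_parser()
#   args = arg_parser.parse_args(["--config-file", "COCO-Detection/my_model.yaml"])
#   cfg = setup_config(args, random_seed=args.random_seed)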
| 10,529
| 32.858521
| 155
|
py
|
pmb-nll
|
pmb-nll-main/src/core/__init__.py
|
import os
def top_dir():
"""
returns project top most directory
:return: (str) Project directory
"""
    return os.sep.join(
        os.path.dirname(os.path.realpath(__file__)).split(os.sep)[:-2]
    )
def data_dir():
"""
Returns data directory. Data directory should never be in src, especially when using IDEs.
    :return: (str) data directory
    """
    return os.sep.join([top_dir(), 'data'])
def configs_dir():
"""
Returns configs directory
:return: (str) Configs directory
"""
return os.sep.join([top_dir(), 'src', 'configs'])
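if __name__ == "__main__":
    # Quick sanity check of the path helpers; safe to run from anywhere.
    print("top dir:    ", top_dir())
    print("data dir:   ", data_dir())
    print("configs dir:", configs_dir())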
| 609
| 19.333333
| 94
|
py
|
pmb-nll
|
pmb-nll-main/src/core/fastmurty/mhtdaClink.py
|
# -*- coding: utf-8 -*-
"""
Michael Motro github.com/motrom/fastmurty 4/2/19
"""
import numpy as np
from ctypes import c_int, Structure, POINTER,\
RTLD_GLOBAL, CDLL, c_double, byref, c_char_p, c_bool
import os
file_path = os.path.realpath(__file__)
mhtda_path = os.path.join(os.path.split(file_path)[0], "mhtda.so")
lib = CDLL(mhtda_path, RTLD_GLOBAL)
sparse = True
""" c structures """
class Solution(Structure):
_fields_ = [("x", POINTER(c_int)),
("y", POINTER(c_int)),
("v", POINTER(c_double))]
class Subproblem(Structure):
_fields_ = [("buffer", c_char_p),
("m", c_int),
("n", c_int),
("rows2use", POINTER(c_int)),
("cols2use", POINTER(c_int)),
("eliminateels", POINTER(c_bool)),
("eliminatemiss", c_bool),
("solution", Solution)]
class QueueEntry(Structure):
_fields_ = [("key", c_double), ("val", POINTER(Subproblem))]
class cs_di_sparse(Structure):
_fields_ = [("nzmax", c_int),
("m", c_int),
("n", c_int),
("p", POINTER(c_int)),
("i", POINTER(c_int)),
("x", POINTER(c_double)),
("nz", c_int)]
if sparse:
class PathTypessp(Structure):
_fields_ = [("val", c_double),
("i", c_int),
("j", c_int)]
class WVssp(Structure):
_fields_ = [("Q", POINTER(PathTypessp)),
("pathback", POINTER(c_int)),
("m", c_int),
("n", c_int)]
class WVsplit(Structure):
_fields_ = [("row_cost_estimates", POINTER(c_double)),
("row_best_columns", POINTER(c_int)),
("col_used", POINTER(c_bool)),
("m", c_int),
("n", c_int),
("m_start", c_int),
("n_start", c_int)]
input_argtype = cs_di_sparse
else:
class WVssp(Structure):
_fields_ = [("distances", POINTER(c_double)),
("pathback", POINTER(c_int)),
("n", c_int)]
class WVsplit(Structure):
_fields_ = [("row_cost_estimates", POINTER(c_double)),
("row_best_columns", POINTER(c_int)),
("m", c_int),
("n", c_int),
("m_start", c_int),
("n_start", c_int)]
input_argtype = POINTER(c_double)
class WVda(Structure):
_fields_ = [("buffer", c_char_p),
("m", c_int),
("n", c_int),
("nsols", c_int),
("solutionsize", c_int),
("subproblemsize", c_int),
("currentproblem", POINTER(Subproblem)),
("Q", POINTER(QueueEntry)),
("sspvars", WVssp),
("splitvars", WVsplit)]
""" c functions """
lib.da.argtypes = [input_argtype, c_int, POINTER(c_bool), POINTER(c_double),
c_int, POINTER(c_bool), POINTER(c_double),
c_int, POINTER(c_int), POINTER(c_double), POINTER(WVda)]
lib.da.restype = c_int
allocateWorkvarsforDA = lib.allocateWorkvarsforDA
allocateWorkvarsforDA.argtypes = [c_int, c_int, c_int]
allocateWorkvarsforDA.restype = WVda
deallocateWorkvarsforDA = lib.deallocateWorkvarsforDA
deallocateWorkvarsforDA.argtypes = [WVda]
lib.SSP.argtypes = [input_argtype, POINTER(Subproblem), POINTER(WVssp)]
lib.SSP.restype = c_double
allocateWorkvarsforSSP = lib.allocateWorkvarsforSSP
lib.allocateWorkvarsforSSP.argtypes = [c_int, c_int]
lib.allocateWorkvarsforSSP.restype = WVssp
lib.createSubproblem.argtypes = [c_int, c_int]
lib.createSubproblem.restype = Subproblem
lib.deallocateSubproblem.argtypes = [POINTER(Subproblem)]
""" handler functions """
def mhtda(c, row_sets, row_set_weights, col_sets, col_set_weights,
out_assocs, out_costs, workvars):
"""
feeds numpy array / sparse matrix input and output to mhtda C library
"""
if sparse:
c_c = c[0]
else:
c_c = c.ctypes.data_as(POINTER(c_double))
row_sets_c = row_sets.ctypes.data_as(POINTER(c_bool))
row_set_weights_c = row_set_weights.ctypes.data_as(POINTER(c_double))
col_sets_c = col_sets.ctypes.data_as(POINTER(c_bool))
col_set_weights_c = col_set_weights.ctypes.data_as(POINTER(c_double))
out_assocs_c = out_assocs.ctypes.data_as(POINTER(c_int))
out_costs_c = out_costs.ctypes.data_as(POINTER(c_double))
nrowpriors = c_int(row_sets.shape[0])
ncolpriors = c_int(col_sets.shape[0])
nsols = c_int(out_assocs.shape[0])
err = lib.da(c_c, nrowpriors, row_sets_c, row_set_weights_c,
ncolpriors, col_sets_c, col_set_weights_c,
nsols, out_assocs_c, out_costs_c, byref(workvars))
assert err == 0, "not enough valid solutions"
def SSP(c, workvars):
"""
runs single best data association on numpy array or sparse matrix data
"""
if sparse:
c_c = c[0]
m = c_c.m
n = c_c.n
assert m <= workvars.m
assert n <= workvars.n
else:
m,n = c.shape
assert n <= workvars.n
c = np.pad(c, ((0,0),(0,workvars.n-n)), 'constant', constant_values = 0)
c_c = c.ctypes.data_as(POINTER(c_double))
    x = np.zeros(m, dtype=np.int32) + 33  # filler values; lib.SSP writes the assignment here
y = np.zeros(n, dtype=np.int32)
v = np.zeros(n)
rows2use = np.arange(m, dtype=np.int32)
cols2use = np.arange(n, dtype=np.int32)
sol = Solution(x.ctypes.data_as(POINTER(c_int)),
y.ctypes.data_as(POINTER(c_int)),
v.ctypes.data_as(POINTER(c_double)))
prb = Subproblem()
prb.solution = sol
prb.m = m
prb.n = n
prb.rows2use = rows2use.ctypes.data_as(POINTER(c_int))
prb.cols2use = cols2use.ctypes.data_as(POINTER(c_int))
# prb = lib.createSubproblem(m, n)
lib.SSP(c_c, byref(prb), byref(workvars))
# x = [prb.solution.x[i] for i in xrange(m)]
# y = [prb.solution.y[j] for j in xrange(n)]
return x, y
""" additional useful functions """
def sparsifyByRow(c, nvalsperrow):
"""
creates a row-ordered sparse matrix with a fixed number of elements per row
the lowest-valued elements are kept, still arranged in order of column value
"""
m,n = c.shape
nvalsperrow = min(n, nvalsperrow)
nvals = m*nvalsperrow
cp = np.arange(0, nvals+1, nvalsperrow, dtype=np.int32)
ci = np.empty(nvals, dtype=np.int32)
cx = np.empty(nvals, dtype=np.float64)
for i, crow in enumerate(c):
if nvalsperrow < n:
colsbyvalue = np.argpartition(crow, nvalsperrow)
else:
colsbyvalue = np.arange(nvalsperrow)
colsinorder = np.sort(colsbyvalue[:nvalsperrow])
ci[i*nvalsperrow:(i+1)*nvalsperrow] = colsinorder
cx[i*nvalsperrow:(i+1)*nvalsperrow] = crow[colsinorder]
cstruct = cs_di_sparse(c_int(nvals), c_int(m), c_int(n),
cp.ctypes.data_as(POINTER(c_int)),
ci.ctypes.data_as(POINTER(c_int)),
cx.ctypes.data_as(POINTER(c_double)), c_int(nvals))
# have to return numpy arrays too, or they might get recycled
return (cstruct, cp, ci, cx)
def sparsifyByElement(c, nvals):
"""
creates a row-ordered sparse matrix with a fixed number of elements
the lowest-valued elements are kept, in increasing order of value
"""
m,n = c.shape
nvals = min(m*n, nvals)
c = c.flatten()
elsbyvalue = np.argpartition(c, nvals)
elsinorder = np.sort(elsbyvalue[:nvals])
cp = np.searchsorted(elsinorder // n, np.arange(m+1)).astype(np.int32)
ci = (elsinorder % n).astype(np.int32)
cx = c[elsinorder].astype(np.float64)
    # pass raw data pointers (byref on a numpy array is a TypeError)
    cstruct = cs_di_sparse(c_int(nvals), c_int(m), c_int(n),
                           cp.ctypes.data_as(POINTER(c_int)),
                           ci.ctypes.data_as(POINTER(c_int)),
                           cx.ctypes.data_as(POINTER(c_double)), c_int(nvals))
# have to return numpy arrays too, or they might get recycled
return (cstruct, cp, ci, cx)
import numba as nb
@nb.njit(nb.i8(nb.i8[:,:], nb.b1[:,:], nb.i4[:,:,:], nb.i8[:,:], nb.i8))
def processOutput(matches, hypotheses, out_assocs, backward_index, n_matches):
"""
Transforms the pairs found by the data association algorithm to a more usable
format for tracking: a vector of matches and a binary matrix of associations.
Usually it is also necessary to only keep a fixed number of matches.
This version removes matches that are found after the limit has been hit,
without considering the relative probabilities of existence.
A serious tracker will probably want a better one - i.e. summing hypothesis
scores for each match to estimate total probabilities of existence.
"""
nm = 0
nsols = out_assocs.shape[0]
matches[:] = -1
backward_index[:] = -1
hypotheses[:] = False
for k in range(nsols):
hypothesis = hypotheses[k]
for rr in range(out_assocs.shape[1]):
i,j = out_assocs[k,rr]
#for i,j in out_assocs[k]:
if i == -2: break
backidx = backward_index[i,j]
if backidx == -1:
if n_matches == nm:
continue
backward_index[i,j] = nm
matches[nm] = (i,j)
backidx = nm
nm += 1
hypothesis[backidx] = True
return nm
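if __name__ == "__main__":
    # Smoke test (sketch): single best assignment on a tiny random matrix.
    # Assumes mhtda.so was compiled in the mode matching the `sparse` flag
    # above; see example_simplest.py for a full mhtda() walkthrough.
    np.random.seed(0)
    demo_cost = np.random.rand(4, 4) - 0.5
    demo_workvars = allocateWorkvarsforSSP(4, 4)
    demo_c = sparsifyByRow(demo_cost, 4) if sparse else demo_cost
    demo_x, demo_y = SSP(demo_c, demo_workvars)
    print("row assignments:   ", demo_x)
    print("column assignments:", demo_y)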
| 9,416
| 36.369048
| 81
|
py
|
pmb-nll
|
pmb-nll-main/src/core/fastmurty/example_simplest.py
|
# -*- coding: utf-8 -*-
"""
Michael Motro github.com/motrom/fastmurty last modified 5/17/19
Runs single-input K-best associations algorithm on a simple matrix.
Intended to demonstrate usage.
"""
import numpy as np
from otherimplementations.slowmurty import mhtda as mhtdaSlow
from mhtdaClink import sparse, mhtda, allocateWorkvarsforDA
from mhtdaClink import sparsifyByRow as sparsify
cost_matrix = np.array([[-10, -9, -1],
[ -1, -6, 3],
[ -9, -5, -6]], dtype=np.float64)
nrows, ncolumns = cost_matrix.shape
nsolutions = 6 # find the 6 lowest-cost associations
# sparse cost matrices only include a certain number of elements
# the rest are implicitly infinity
# in this case, the sparse matrix includes all elements (3 columns per row)
cost_matrix_sparse = sparsify(cost_matrix, 3)
# The sparse and dense versions are compiled differently (see the Makefile).
# The variable "sparse" in mhtdaClink needs to match the version compiled
cost_matrix_to_use = cost_matrix_sparse if sparse else cost_matrix
# mhtda is set up to potentially take multiple input hypotheses for both rows and columns
# input hypotheses specify a subset of rows or columns.
# In this case, we just want to use the whole matrix.
row_priors = np.ones((1, nrows), dtype=np.bool8)
col_priors = np.ones((1, ncolumns), dtype=np.bool8)
# Each hypothesis has a relative weight too.
# These values don't matter if there is only one hypothesis...
row_prior_weights = np.zeros(1)
col_prior_weights = np.zeros(1)
# The mhtda function modifies preallocated outputs rather than
# allocating new ones. This is slightly more efficient for repeated use
# within a tracker.
# The cost of each returned association:
out_costs = np.zeros(nsolutions)
# The row-column pairs in each association:
# Generally there will be less than nrows+ncolumns pairs in an association.
# The unused pairs are currently set to (-2, -2)
out_associations = np.zeros((nsolutions, nrows+ncolumns, 2), dtype=np.int32)
# variables needed within the algorithm (a C function sets this up):
workvars = allocateWorkvarsforDA(nrows, ncolumns, nsolutions)
# run!
mhtda(cost_matrix_to_use,
row_priors, row_prior_weights, col_priors, col_prior_weights,
out_associations, out_costs, workvars)
# print each association
for solution in range(nsolutions):
print("solution {:d}, cost {:.0f}".format(solution, out_costs[solution]))
# display row-column matches, not row misses or column misses
association = out_associations[solution]
association_matches = association[(association[:,0]>=0) & (association[:,1]>=0)]
mask = np.zeros((nrows, ncolumns), dtype=np.bool8)
mask[association_matches[:,0], association_matches[:,1]] = True
matrixstr = []
for row in range(nrows):
rowstr = []
for column in range(ncolumns):
elestr = ('{:3.0f}'.format(cost_matrix[row,column])
if mask[row,column] else ' * ')
rowstr.append(elestr)
matrixstr.append(' '.join(rowstr))
print('\n'.join(matrixstr))
# compare to the simple(ish) Python implementation in slowmurty.py.
# There are a bunch of equivalent solutions, so the associations might not be
# in the same order.
out_costs2 = out_costs.copy()
mhtdaSlow(cost_matrix,
row_priors, row_prior_weights, col_priors, col_prior_weights,
out_associations, out_costs2, workvars)
assert np.allclose(out_costs, out_costs2)
| 3,470
| 41.329268
| 89
|
py
|
pmb-nll
|
pmb-nll-main/src/core/fastmurty/example_multiview.py
|
# -*- coding: utf-8 -*-
"""
Michael Motro github.com/motrom/fastmurty last modified 4/2/19
This is a simulation of 3-d point targets detected with 3 sensors, each of which
only sees two axes.
Two data association steps are used, between sensors 1 and 2 and between the
resulting estimates and sensor 3.
The test parameters and the number of hypotheses (associations) can be altered to
determine how accuracy and runtime scale with problem difficulty and hypothesis count.
"""
import numpy as np
from time import time
from mhtdaClink import sparse, mhtda, allocateWorkvarsforDA, processOutput
from mhtdaClink import allocateWorkvarsforSSP, SSP # used for evaluation
from mhtdaClink import sparsifyByRow as sparsify
""" settings """
ntests = 100
entryrate = 100 # poisson rate of object entry
fpratio = .005 # poisson rate of fp msmts, wrt entry rate
detect_rate = .995 # detection probability at each time
std = .001 # standard deviation of msmt noise
miss_distance_cutoffs = np.arange(.1,1.01,.1) # for scoring object detection performance
np.random.seed(0)
# tracker settings
max_ns = 600
max_nhyp = 1000
s = 10 # assumed sparsity - part of estimate, not enforced in actual simulation
fprate = fpratio*entryrate
def likelihood1(c, msmts1, msmts2):
# constant term in NLL of normal distribution
var_constant = 4*std**2 # normalizing out inv(Sigma)
constant_term = .5*np.log(np.pi*var_constant)
constant_term += np.log(fpratio/detect_rate+1-detect_rate)*2 + np.log(entryrate)
# get nll of all pairs from first two measurement sets
for i,msmti in enumerate(msmts1):
for j,msmtj in enumerate(msmts2):
c[i,j] = np.square(msmti[0]-msmtj[0])/var_constant + constant_term
pair_miss_exist_prob = (1-detect_rate)*detect_rate/(fpratio+(1-detect_rate)*detect_rate)
def update1(update_matches, msmts1, msmts2, samples, weights):
match_var = std**2 / 2
miss_var = std**2
for sidx, update_match in enumerate(update_matches):
i,j = update_match
if i!=-1 and j!=-1:
samples[sidx] = ((msmts1[i,0]+msmts2[j,0])*.5, msmts1[i,1], msmts2[j,1],
match_var, miss_var, miss_var)
weights[sidx] = 1.
elif i!=-1:
samples[sidx] = (msmts1[i,0], msmts1[i,1], -1,
miss_var, miss_var, -1)
weights[sidx] = pair_miss_exist_prob
elif j!=-1:
samples[sidx] = (msmts2[j,0], -1, msmts2[j,1],
miss_var, -1, miss_var)
weights[sidx] = pair_miss_exist_prob
else:
# all the null updates should be at the end of the array
return sidx
return update_matches.shape[0]
third_miss_loglik = np.log(entryrate) + np.log((1-detect_rate)**2*detect_rate + fpratio)
def likelihood2(c, samples, weights, ns, msmts3):
twopiterm = np.log(2*np.pi)*.5
msmt_var = std**2 * 2
nm = len(msmts3)
for i in range(ns):
sample = samples[i]
if sample[5] == -1: # only msmt1, so only match on 2nd dimension
constant_term_i = third_miss_loglik
constant_term_i += np.log(1./pair_miss_exist_prob/detect_rate-1)
constant_term_i += np.log(msmt_var)*.5
constant_term_i += twopiterm
c[i,:nm] = np.square(sample[1]-msmts3[:,0])/msmt_var + constant_term_i
elif sample[4] == -1: # only msmt2, so only match on 3rd dimension
constant_term_i = third_miss_loglik
constant_term_i += np.log(1./pair_miss_exist_prob/detect_rate-1)
constant_term_i += np.log(msmt_var)*.5
constant_term_i += twopiterm
c[i,:nm] = np.square(sample[2]-msmts3[:,1])/msmt_var + constant_term_i
else: # both
constant_term_i = third_miss_loglik
constant_term_i += np.log(1./detect_rate-1)
constant_term_i += np.log(msmt_var)
constant_term_i += twopiterm*2
c[i,:nm] = np.square(sample[1]-msmts3[:,0])
c[i,:nm] += np.square(sample[2]-msmts3[:,1])
c[i,:nm] /= msmt_var
c[i,:nm] += constant_term_i
# probability of msmt from third set, with no matches, being real and not fp
third_exist_prob = (1-detect_rate)**2*detect_rate
third_exist_prob = third_exist_prob / (third_exist_prob + fpratio)
def update2(update_matches2, update_matches, new_samples, new_weights, msmts1, msmts2, msmts3):
for sidx, update_match2 in enumerate(update_matches2):
new_sample = new_samples[sidx]
id12, id3 = update_match2
if id12 == -1:
if id3 == -1:
return sidx
else:
new_weights[sidx] = third_exist_prob
new_sample[0] = .5
new_sample[1:3] = msmts3[id3, :2]
else:
id1, id2 = update_matches[id12]
if sum((id1==-1, id2==-1, id3==-1)):
new_weights[sidx] = third_exist_prob
else:
new_weights[sidx] = 1.
if id1 == -1:
if id3 == -1:
new_sample[0] = msmts2[id2, 0]
new_sample[1] = .5
new_sample[2] = msmts2[id2, 1]
else:
new_sample[0] = msmts2[id2, 0]
new_sample[1] = msmts3[id3, 0]
new_sample[2] = msmts2[id2,1] + msmts3[id3,1]
elif id2 == -1:
if id3 == -1:
new_sample[0] = msmts1[id1, 0]
new_sample[1] = msmts1[id1, 1]
new_sample[2] = .5
else:
new_sample[0] = msmts1[id1,0]
new_sample[1] = msmts1[id1,1] + msmts3[id3,0]
new_sample[2] = msmts3[id3,1]
elif id3 == -1:
new_sample[0] = msmts1[id1,0] + msmts2[id2,0]
new_sample[1] = msmts1[id1,1]
new_sample[2] = msmts2[id2,1]
else:
new_sample[0] = msmts1[id1,0] + msmts2[id2,0]
new_sample[1] = msmts1[id1,1] + msmts3[id3,0]
new_sample[2] = msmts2[id2,1] + msmts3[id3,1]
workvarsforscoring = allocateWorkvarsforSSP(entryrate, entryrate + int(fprate*6) + 3)
def scoreObj(tru, est):
c2 = [[sum(np.square(sample[:3]-truobj)) for sample in est] for truobj in tru]
c2 = np.sqrt(c2)
m,n = c2.shape
scores = []
for miss_cutoff in miss_distance_cutoffs:
c = c2 - miss_cutoff
if sparse:
c = sparsify(c, s)
x, y = SSP(c, workvarsforscoring)
nFN = sum(np.array(x)==-1)
nFP = sum(np.array(y)==-1)
scores.append((nFN,nFP,m,n))
return np.array(scores)
def scoreTrack(tru_tracks, tru_m, update_matches, update_matches2):
track_found = np.zeros(tru_tracks.shape[0], dtype=bool)
fpcount = 0
pcount = 0
for id12, id3 in update_matches2:
if id12 == -1:
id1 = -1
id2 = -1
else:
id1, id2 = update_matches[id12]
        # skip all-miss entries (the original `all(...) == 3` was always False)
        if all((id1 == -1, id2 == -1, id3 == -1)):
continue
in_tru_tracks = np.all(tru_tracks == (id1,id2,id3), axis=1)
if any(in_tru_tracks):
in_tru_tracks = np.where(in_tru_tracks)[0][0]
track_found[in_tru_tracks] = True
else:
fpcount += 1
pcount += 1
fncount = tru_m - np.sum(track_found[:tru_m])
return fncount, fpcount, tru_m, pcount
max_nm = entryrate + int(fprate*6) + 3 # poisson cdf @ 6 = .99992
timed_total_all = 0.
timed_update_all = 0.
obj_scores_all = np.zeros((miss_distance_cutoffs.shape[0],4), dtype=int)
track_scores_all = np.zeros(4, dtype=int)
samples = np.zeros((max_ns, 6))
weights = np.zeros((max_ns,))
hypotheses = np.zeros((max_nhyp, max_ns), dtype=bool)
out_assocs = np.zeros((max_nhyp, max_ns+max_nm, 2), dtype=np.int32)
out_y = np.zeros(max_nm, dtype=bool)
hypothesis_weights = np.zeros((max_nhyp,))
ids = np.zeros((max_ns,), dtype=np.uint16)
ns = 0
new_samples = samples.copy()
new_weights = weights.copy()
new_hypotheses = hypotheses.copy()
new_hypothesis_weights = hypothesis_weights.copy()
new_ids = ids.copy()
new_ns = 0
c1 = np.zeros((max_ns, max_nm))
c2 = c1.copy()
update_matches = np.zeros((max_ns, 2), dtype=int)
update_matches2 = np.zeros((max_ns, 2), dtype=int)
workvars = allocateWorkvarsforDA(max_ns, max_nm, max_nhyp)
backidx1 = np.zeros((max_ns, max_nm), dtype=int)
backidx2 = backidx1.copy()
row_sets = np.zeros((1,max_ns), dtype=np.bool8)
col_sets = np.zeros((1,max_nm), dtype=np.bool8)
includerowsorcols_dummy = np.zeros(1)
for test in range(ntests):
print("test {:d}".format(test))
# generate real objects
    tru_m = entryrate  # np.random.poisson(entryrate)
tru = np.random.rand(tru_m, 3)
tru_tracks = np.zeros((tru_m, 3), dtype=int) - 1
# generate three sets of measurements
detected = np.random.rand(tru_m) < detect_rate
nreal = sum(detected)
nfalse = np.random.poisson(fprate)
msmts1 = tru[detected][:,[0,1]]+np.random.normal(size=(nreal,2))*std
msmts1 = np.append(msmts1, np.random.rand(nfalse, 2), axis=0)
tru_tracks[:tru_m][detected,0] = np.arange(nreal)
tru_tracks_false = np.zeros((nfalse, 3), dtype=int)-1
tru_tracks_false[:,0] = np.arange(nreal, nreal+nfalse)
tru_tracks = np.append(tru_tracks, tru_tracks_false, axis=0)
nm1 = nreal+nfalse
detected = np.random.rand(tru_m) < detect_rate
nreal = sum(detected)
nfalse = np.random.poisson(fprate)
msmts2 = tru[detected][:,[0,2]]+np.random.normal(size=(nreal,2))*std
msmts2 = np.append(msmts2, np.random.rand(nfalse, 2), axis=0)
tru_tracks[:tru_m][detected,1] = np.arange(nreal)
tru_tracks_false = np.zeros((nfalse, 3), dtype=int)-1
tru_tracks_false[:,1] = np.arange(nreal, nreal+nfalse)
tru_tracks = np.append(tru_tracks, tru_tracks_false, axis=0)
nm2 = nreal+nfalse
detected = np.random.rand(tru_m) < detect_rate
nreal = sum(detected)
nfalse = np.random.poisson(fprate)
msmts3 = tru[detected][:,[1,2]]+np.random.normal(size=(nreal,2))*std
msmts3 = np.append(msmts3, np.random.rand(nfalse, 2), axis=0)
tru_tracks[:tru_m][detected,2] = np.arange(nreal)
tru_tracks_false = np.zeros((nfalse, 3), dtype=int)-1
tru_tracks_false[:,2] = np.arange(nreal, nreal+nfalse)
tru_tracks = np.append(tru_tracks, tru_tracks_false, axis=0)
nm3 = nreal+nfalse
# first update
timed_total = time()
likelihood1(c1, msmts1, msmts2)
c = sparsify(c1, s) if sparse else c1
row_sets[0,:nm1] = True
row_sets[0,nm1:] = False
col_sets[0,:nm2] = True
col_sets[0,nm2:] = False
timed_start = time()
out_assocs[:] = -2
mhtda(c, row_sets, includerowsorcols_dummy, col_sets, includerowsorcols_dummy,
out_assocs, hypothesis_weights, workvars)
ns = processOutput(update_matches, hypotheses, out_assocs, backidx1, max_ns)
timed_update = time() - timed_start
ns = update1(update_matches, msmts1, msmts2, samples, weights)
# find likelihood between updated objects and third set of measurements
likelihood2(c2, samples, weights, ns, msmts3)
c = sparsify(c2, s) if sparse else c2
# account for the fact that each row miss is normalized
missliks = np.log(1-weights*detect_rate)
missliks_hyp = np.dot(hypotheses, missliks)
hypothesis_weights -= missliks_hyp
col_sets[0,:nm3] = True
col_sets[0,nm3:] = False
# second update
timed_start = time()
out_assocs[:] = -2
mhtda(c, hypotheses, hypothesis_weights, col_sets, includerowsorcols_dummy,
out_assocs, new_hypothesis_weights, workvars)
processOutput(update_matches2, new_hypotheses, out_assocs, backidx2,
max_ns)
timed_update += time() - timed_start
new_ns = update2(update_matches2, update_matches, new_samples, new_weights,
msmts1, msmts2, msmts3)
timed_total = time() - timed_total
## analysis of how hypotheses match truth, for debugging purposes
tru_matches_1_valid = (tru_tracks[:,0] >= 0) | (tru_tracks[:,1] >= 0)
tru_matches_1 = backidx1[tru_tracks[tru_matches_1_valid,0],
tru_tracks[tru_matches_1_valid,1]]
tru_matches_not_here = sum(tru_matches_1 == -1)
if tru_matches_not_here == 0:
tru_hypothesis = np.zeros(hypotheses.shape[1], dtype=bool)
tru_hypothesis[tru_matches_1] = True
matching_hypotheses = np.where(np.all(hypotheses==tru_hypothesis,axis=1))[0]
assert len(matching_hypotheses) <= 1
if len(matching_hypotheses) == 1:
matching_hypothesis = matching_hypotheses[0]
tru_matches_2_score = tru_matches_1_valid & (tru_tracks[:,2] >= 0)
tru_matches_2in = tru_matches_1[tru_tracks[tru_matches_1_valid,2] >= 0]
total_prob = -sum(missliks[tru_matches_1])
total_prob += sum(c2[tru_matches_2in,
tru_tracks[tru_matches_2_score,2]])
tru_matches_1_score = (tru_tracks[:,0] >= 0) & (tru_tracks[:,1] >= 0)
total_prob += sum(c1[tru_tracks[tru_matches_1_valid,0],
tru_tracks[tru_matches_1_valid,1]])
if total_prob + 1e-4 < new_hypothesis_weights[0]:
print("probable error")
else:
tru_assignment_rank = np.searchsorted(new_hypothesis_weights, total_prob)
# score
timed_update_all += timed_update
timed_total_all += timed_total
include_samples = new_hypotheses[0] & (new_weights > .5)
track_scores_all += scoreTrack(tru_tracks, tru_m, update_matches,
update_matches2[new_hypotheses[0]])
obj_scores_all += scoreObj(tru, new_samples[include_samples])
timed_update_all *= 1000./ntests
timed_total_all *= 1000./ntests
obj_score_rates = obj_scores_all[:,:2].astype(float)/obj_scores_all[:,2:]
track_score_rates = track_scores_all[:2].astype(float)/track_scores_all[2:]
#score_rates = track_score_rates
score_rates = np.append(track_score_rates[None,:], obj_score_rates, axis=0)
print("{:.1f} ms update, {:.1f} ms total".format(timed_update_all, timed_total_all))
print(score_rates)
| 14,286
| 41.020588
| 95
|
py
|
pmb-nll
|
pmb-nll-main/src/core/fastmurty/example_3frame.py
|
# -*- coding: utf-8 -*-
"""
Michael Motro github.com/motrom/fastmurty last modified 5/16/19
Runs single-input K-best associations algorithm on a square random matrix,
then treats the result as an input hypothesis for a multiple-input K-best
associations algorithm on a rectangular random matrix.
"""
import numpy as np
from time import time
from mhtdaClink import sparse, mhtda, allocateWorkvarsforDA
from mhtdaClink import sparsifyByRow as sparsify
np.random.seed(0)
numtests = 100
nsols = 200
sizes = np.arange(10, 301, 10)
sparsity = 20 # elements per row
my_results = []
for size in sizes:
print("running size {:d}".format(size))
#max_val = 0. # misses will occur (but are unlikely for large matrices)
max_val = -float(size+1) # to ensure that misses are never picked
timed_total = 0.
relative_cost = 0.
this_sparsity = min(sparsity, size)
size2 = size*(this_sparsity+2)
workvars = allocateWorkvarsforDA(size, size, nsols)
workvars2 = allocateWorkvarsforDA(size2, size, nsols)
out_costs = np.zeros(nsols)
out_costs2 = np.zeros(nsols)
out_associations = np.zeros((nsols, size*2, 2), dtype=np.int32)
out_associations2 = np.zeros((nsols, size2+size, 2), dtype=np.int32)
input_hypothesis = np.ones((1, size), dtype=np.bool8)
input_score = np.zeros(1)
backidx = np.zeros((size+1,size+1), dtype=int)
second_hypotheses = np.zeros((nsols, size2), dtype=np.bool8)
for test in range(numtests):
cd = np.random.rand(size, size) + max_val
c1 = sparsify(cd, this_sparsity) if sparse else cd
cd = np.random.rand(size2, size) + max_val
c2 = sparsify(cd, this_sparsity) if sparse else cd
out_associations[:] = -2
mhtda(c1, input_hypothesis, input_score, input_hypothesis, input_score,
out_associations, out_costs, workvars)
backidx[:] = -1
matches = np.unique(out_associations.reshape((nsols*size*2, 2)), axis=0)
matches = matches[matches[:,0] > -2]
backidx[matches[:,0],matches[:,1]] = np.arange(matches.shape[0])
second_hypotheses[:] = False
for solution in range(nsols):
association = out_associations[solution]
association = association[association[:,0] > -2]
association = backidx[association[:,0], association[:,1]]
assert np.all(association >= 0) and np.all(association < size2) and association.shape[0]==size
second_hypotheses[solution, association] = True
timed_start = time()
mhtda(c2, second_hypotheses, out_costs, input_hypothesis, input_score,
out_associations2, out_costs2, workvars2)
timed_end = time()
timed_total += (timed_end-timed_start)
my_results.append(timed_total*1000)
my_results = np.array(my_results) / numtests
print(my_results)
| 2,894
| 39.208333
| 106
|
py
|
pmb-nll
|
pmb-nll-main/src/core/fastmurty/example_2frame.py
|
# -*- coding: utf-8 -*-
"""
Michael Motro github.com/motrom/fastmurty last modified 4/2/19
Runs single-input K-best associations algorithm on square random matrices.
This test is meant to be directly comparable to the test code included with
Miller+Stone+Cox's implementation of data association.
"""
import numpy as np
from time import time
from mhtdaClink import sparse, mhtda, allocateWorkvarsforDA
from mhtdaClink import sparsifyByRow as sparsify
np.random.seed(0)
numtests = 100
nsols = 200
sizes = np.arange(10, 301, 10)
sparsity = 20 # elements per row
my_results = []
for size in sizes:
print("running size {:d}".format(size))
#max_val = -.1 # misses will occur (but are unlikely for large matrices)
max_val = -float(size+1) # to ensure that misses are never picked
timed_total = 0.
relative_cost = 0.
    this_sparsity = min(sparsity, size)
workvars = allocateWorkvarsforDA(size, size, nsols)
out_costs = np.zeros(nsols)
out_associations = np.zeros((nsols, size*2, 2), dtype=np.int32)
input_hypothesis = np.ones((1, size), dtype=np.bool8)
input_score = np.zeros(1)
for test in range(numtests):
cd = np.random.rand(size, size) + max_val
c = sparsify(cd, this_sparsity) if sparse else cd
timed_start = time()
mhtda(c, input_hypothesis, input_score, input_hypothesis, input_score,
out_associations, out_costs, workvars)
timed_end = time()
timed_total += (timed_end-timed_start)
relative_cost += sum(np.exp(-out_costs+out_costs[0]))
my_results.append((timed_total*1000, relative_cost))
my_results = np.array(my_results) / numtests
print(my_results)
| 1,695
| 33.612245
| 78
|
py
|
pmb-nll
|
pmb-nll-main/src/core/fastmurty/otherimplementations/daGibbs.py
|
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
github.com/motrom/fastmurty last mod 5/25/19
This is based on the pseudocode given in "Efficient Implementation..." by
B.N. Vo and B.T. Vo
This code is not necessarily a great implementation, it is primarily intended for
the comparison of Gibbs and deterministic algorithms in the paper.
Notably, the step of removing duplicate associations was not coded optimally.
That step is therefore not included in the paper's timing analysis.
That step could be performed in several ways... a hashtable by likelihood is
probably the fastest way but is difficult to create in Numba.
"""
import numpy as np
import numba as nb
from random import random
sparsedtype = np.dtype([('x', np.float64), ('idx', np.int64)])
nbsparsedtype = nb.from_dtype(sparsedtype)
def sparsify(c, s): # keep s lowest elements for each row
c2 = np.zeros((c.shape[0],s), dtype=sparsedtype)
for i, ci in enumerate(c):
colsinorder = np.argsort(ci)
c2[i]['idx'] = colsinorder[:s]
c2[i]['x'] = ci[colsinorder[:s]]
return c2
@nb.njit(nb.void(nbsparsedtype[:,:], nb.i8[:], nb.i8[:,:], nb.f8[:], nb.i8,
nb.f8[:], nb.b1[:]))
def gibbs(c, x, out, out_costs, niter, costs, occupied):
""" x is the best solution, obtained by sspSparse or some other method """
m = c.shape[0]
costs[:] = 1
occupied[:] = False
for i,j in enumerate(x):
for cij in c[i]:
if cij['idx'] == j:
costs[i] = cij['x']
occupied[j] = True
out[0] = x
out_costs[0] = np.prod(costs)
missprob = 1.
for t in range(1, niter):
for i in range(m):
j1 = x[i]
if j1 != -1:
occupied[j1] = False
costs[i] = 1
rowi = c[i]
probsum = missprob
for cij in rowi:
if not occupied[cij['idx']]:
probsum += cij['x']
choice = random() * probsum
if choice < missprob:
x[i] = -1
else:
probsum = missprob
for cij in rowi:
j = cij['idx']
if not occupied[j]:
probsum += cij['x']
if choice < probsum:
x[i] = j
occupied[j] = True
costs[i] = cij['x']
out[t] = x
out_costs[t] = np.prod(costs)
@nb.njit(nb.f8(nb.i8[:,:], nb.f8[:], nb.i8[:]))
def getTotalCost(out, out_costs, order):
totalcost = out_costs[order[-1]]
lastx = out[order[-1]]
for idx in order[-2::-1]:
x = out[idx]
if not np.all(x == lastx): # check for duplicate associations
totalcost += out_costs[idx]
lastx = x
return totalcost
if __name__ == '__main__':
"""
Determines the runtime and quality of this data association method.
Note that this method really can't handle cases with very unlikely misses.
The input matrices are set to be mostly positive for this reason.
"""
from time import time
from scipy.optimize import linear_sum_assignment
np.random.seed(23)
numtests = 100
m = 100
n = 100
sparsity = 10
niters = np.logspace(1, 4, 4, base=10, dtype=int)
my_results = []
runtimes = np.zeros(len(niters))
costs = np.zeros(len(niters))
relcosts = np.zeros(len(niters))
x = np.zeros(m, dtype=np.int64)
out = np.zeros((100000, m), dtype=int)
out_costs = np.zeros(100000)
costs_struct = np.ones(m, dtype=np.float64)
occupied_struct = np.zeros(n, dtype=np.bool8)
for test in range(numtests):
# random matrix
clog = np.random.rand(m,n) - .05
# # 'geometric' matrix - detecting 1d points with white noise
# pts = np.sort(np.random.rand(m))
# ptnoise = np.random.normal(size=m)*.001
# clog = np.square(pts[:,None] - pts[None,:] - ptnoise[None,:]) - .02
clogs = sparsify(clog, sparsity)
c = clogs.copy()
c[:]['x'] = np.exp(-clogs[:]['x'])
for k, niter in enumerate(niters):
out[:] = 0
solrow, solcol = linear_sum_assignment(clog)
x[:] = -1
matches = clog[solrow,solcol] < 0
x[solrow[matches]] = solcol[matches]
timed_start = time()
gibbs(c, x, out, out_costs, niter, costs_struct, occupied_struct)
timed_end = time()
runtimes[k] += (timed_end-timed_start)
outorder = np.argsort(out_costs[:niter])
cost = getTotalCost(out, out_costs, outorder)
costs[k] += np.log(cost)
relcosts[k] += cost / out_costs[0]
assert outorder[-1] == 0
runtimes *= 1000 / numtests
costs /= numtests
relcosts /= numtests
print(list(zip(niters, relcosts, runtimes)))
| 4,944
| 33.340278
| 81
|
py
|
pmb-nll
|
pmb-nll-main/src/core/fastmurty/otherimplementations/slowmurty.py
|
# -*- coding: utf-8 -*-
"""
github.com/motrom/fastmurty last modified 5/17/19
a simple but inefficient implementation of HOMHT data association
used for testing the validity of the main code
very slow - don't use on anything bigger than 50x50!
Also, this code doesn't correctly handle the corner cases of empty input matrices
or all-miss associations.
"""
from scipy.optimize import linear_sum_assignment
from heapq import heappush, heappop
from itertools import chain
inf = 1e8
def da(c):
miss = c>=0
c = c.copy()
c[miss] = 0
solrow, solcol = linear_sum_assignment(c)
matches = miss[solrow, solcol] == False
solrow = solrow[matches]
solcol = solcol[matches]
cost = sum(c[solrow, solcol])
assocs = chain(zip(solrow, solcol),
((row,-1) for row in range(c.shape[0]) if row not in solrow),
((-1,col) for col in range(c.shape[1]) if col not in solcol))
return cost, assocs
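# Usage sketch for da (hedged; the caller supplies a numpy array, which this
# module itself does not import). Negative entries are treated as matches:
#   c = np.array([[-1., 0.5], [0.3, -2.]])
#   cost, assocs = da(c)
#   # cost == -3.0; assocs yields (0, 0) and (1, 1), with no misses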
def mhtda(c, row_priors, row_prior_weights, col_priors, col_prior_weights,
out_assocs, out_costs, workvars=None):
orig_c = c
Q = []
out_assocs[:] = -2
out_costs[:] = inf
for row_set, row_set_weight in zip(row_priors, row_prior_weights):
for col_set, col_set_weight in zip(col_priors, col_prior_weights):
row_set = [row for row in range(orig_c.shape[0]) if row_set[row]]
col_set = [col for col in range(orig_c.shape[1]) if col_set[col]]
priorcost = row_set_weight + col_set_weight
c = orig_c[row_set,:][:,col_set].copy()
cost, assocs = da(c)
assocs = tuple((row_set[row] if row>=0 else -1,
col_set[col] if col>=0 else -1) for row,col in assocs)
cost += priorcost
heappush(Q, (cost, priorcost, (), assocs, row_set, col_set, []))
for solution in range(out_assocs.shape[0]):
cost, priorcost, fixed_assocs, orig_assocs, row_set, col_set,\
eliminate = heappop(Q)
solution_assocs = sorted(fixed_assocs + orig_assocs)
out_assocs[solution, :len(solution_assocs)] = solution_assocs
out_costs[solution] = cost
# murty's algorithm
for thisrow, thiscol in orig_assocs:
###if thisrow == -1: continue
# create altered version of the assignment problem
c = orig_c.copy()
thispriorcost = priorcost
eliminate.append((thisrow,thiscol))
for eliminaterow, eliminatecol in eliminate:
if eliminaterow == -1:
c[:,eliminatecol] -= inf
thispriorcost += inf
elif eliminatecol == -1:
c[eliminaterow,:] -= inf
thispriorcost += inf
else:
c[eliminaterow,eliminatecol] += inf
c = c[row_set,:][:,col_set]
# solve altered version
cost, assocs = da(c)
assocs = tuple((row_set[row] if row>=0 else -1,
col_set[col] if col>=0 else -1) for row,col in assocs)
cost += thispriorcost
heappush(Q, (cost, thispriorcost, fixed_assocs, assocs,
row_set, col_set, eliminate))
# fix this row and column for succeeding assignment problems
col_set = list(col_set)
row_set = list(row_set)
fixed_assocs = fixed_assocs + ((thisrow, thiscol),)
if thisrow == -1:
col_set.remove(thiscol)
eliminate = [(row,col) for row,col in eliminate if col!=thiscol]
elif thiscol == -1:
row_set.remove(thisrow)
eliminate = [(row,col) for row,col in eliminate if row!=thisrow]
else:
priorcost += orig_c[thisrow, thiscol]
row_set.remove(thisrow)
col_set.remove(thiscol)
eliminate = [(row,col) for row,col in eliminate if
row!=thisrow and col!=thiscol]
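# Usage sketch for mhtda (hedged; the shapes below are assumptions read off the
# code above, with each association holding at most m+n (row, col) pairs):
#   c = np.array([[-1., 0.5], [0.3, -2.]])
#   row_priors = np.ones((1, 2), dtype=bool); row_prior_weights = [0.]
#   col_priors = np.ones((1, 2), dtype=bool); col_prior_weights = [0.]
#   out_assocs = np.full((3, 4, 2), -2)  # (n solutions, m+n matches, pair)
#   out_costs = np.zeros(3)
#   mhtda(c, row_priors, row_prior_weights, col_priors, col_prior_weights,
#         out_assocs, out_costs)
#   # out_costs[0] == -3.0 is the best association's cost; costs are
#   # nondecreasing across solutions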
| 4,082
| 41.53125
| 81
|
py
|
pmb-nll
|
pmb-nll-main/src/core/fastmurty/otherimplementations/__init__.py
|
# -*- coding: utf-8 -*-
"""
github.com/motrom/fastmurty last modified 5/17/19
"""
| 83
| 13
| 49
|
py
|
pmb-nll
|
pmb-nll-main/src/core/fastmurty/previous python implementation/sspSparse.py
|
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
The Jonker-Volgenant algorithm for finding the maximum assignment.
Michael Motro, University of Texas at Austin
last modified 3/5/2019
the __main__ code at the bottom tests this implementation, comparing it to
Scipy's linear_sum_assignment function. You'll need to have scipy in your
distribution to run this file on its own, but not to import it in other files.
"""
import numpy as np
import numba as nb
from sparsity import nbsparsedtype
inf = 1e9  # inf is a suitably large number
nbheapouttype = nb.typeof((0., 0, 0)) # float, int, int
heapdtype = np.dtype([('key', np.float64), ('i',np.int64), ('j',np.int64)])
nbheapdtype = nb.from_dtype(heapdtype)
@nb.jit(nb.void(nbheapdtype[:], nb.i8, nb.f8, nb.i8, nb.i8), nopython=True)
def heappush(heap, pos, newkey, newi, newj):
while pos > 0:
parentpos = (pos - 1) >> 1
if newkey > heap[parentpos]['key']: break
heap[pos] = heap[parentpos]
pos = parentpos
inputel = heap[pos]
inputel['key'] = newkey
inputel['i'] = newi
inputel['j'] = newj
@nb.jit(nbheapouttype(nbheapdtype[:], nb.i8), nopython=True)
def heappop(heap, heapsize):
minele = heap[0]
minkey = minele['key']
mini = minele['i']
minj = minele['j']
heapsize -= 1
newele = heap[heapsize]
newkey = newele['key']
# Bubble up the smaller child until hitting a leaf.
pos = 0
childpos = 1 # leftmost child position
while childpos < heapsize:
# Set childpos to index of smaller child.
rightpos = childpos + 1
if rightpos < heapsize:
if heap[childpos]['key'] > heap[rightpos]['key']:
childpos = rightpos
if heap[childpos]['key'] > newkey:
break
# Move the smaller child up.
heap[pos] = heap[childpos]
pos = childpos
childpos = (pos<<1) + 1
heap[pos] = newele
return minkey, mini, minj
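# Mini-example (hedged) of the (key, i, j) heap above:
#   d = np.zeros(4, dtype=heapdtype)
#   heappush(d, 0, 2.0, 1, 3)
#   heappush(d, 1, 0.5, 0, 2)
#   heappop(d, 2)   # -> (0.5, 0, 2), the smallest key with its (i, j) payload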
@nb.jit(nb.f8(nbsparsedtype[:,:], nb.i8[:], nb.i8[:], nb.f8[:],
nb.i8[:], nb.i8, nb.i8[:], nb.i8, nbheapdtype[:], nb.i8[:]),
nopython=True)
def SSP(c, x, y, v, rows2use, nrows2use, cols2use, ncols2use, d, pred):
""" solves full 2D assignment problem
c: matrix, sparse
x: column indices that match to row, or -1 if row is missing
y: match indices for column
v: column reductions
rows2use, nrows2use: rows in rows2use[:nrows2use] are considered part of the problem
    cols2use, ncols2use: likewise, the columns considered part of the problem
d: workspace var, heap structure to store potential matches (pathcost, i, j)
pred: workspace var, stores whether each column was used and path backward
returns cost of assignment
"""
n = y.shape[0]
C = 0.
y[:] = -1
v[:] = 0.
pred[:] = 0
# initial step
pred[cols2use[:ncols2use]] = -2
freerows = 0
for ri in xrange(nrows2use):
i = rows2use[ri]
minj = -1
minval = 0.
for cij in c[i]:
j = cij['idx']
if pred[j] == -2:
val = cij['x']
if val < minval:
minval = val
minj = j
if minj == -1:
x[i] = -1
rows2use[ri] = rows2use[freerows]
rows2use[freerows] = i
freerows += 1
elif y[minj] == -1:
x[i] = minj
y[minj] = i
C += minval
rows2use[ri] = rows2use[freerows]
rows2use[freerows] = i
freerows += 1
for i1 in rows2use[freerows:nrows2use]:
dsize = 0 # restart heap
# pred doubles as a check for previously visited columns, and a path backwards
pred[cols2use[:ncols2use]] = -2
rowi = c[i1]
for cij in rowi:
j = cij['idx']
dj = cij['x'] - v[j]
if pred[j] == -2:
heappush(d, dsize, dj, i1, j)
dsize += 1
heappush(d, dsize, 0., i1, -1)
dsize += 1
while True:
predj = 0
while predj != -2: # already found and matched this col
assert dsize > 0
minval, i, j = heappop(d, dsize)
dsize -= 1
if j == -1:
break # hit unmatched row
predj = pred[j]
if j == -1: break
pred[j] = i
v[j] += minval # first half of score augmentation
i = y[j]
if i == -1:
break # hit unmatched column
# update distances to other columns
rowi = c[i]
# find this row's reduction
# have to look up the right column
for cij in rowi:
if cij['idx']==j:
u1 = cij['x'] - v[j]
heappush(d, dsize, -u1, i, -1)
dsize += 1
for cij in rowi:
j = cij['idx']
if pred[j] == -2:
dj = cij['x'] - v[j] - u1
heappush(d, dsize, dj, i, j)
dsize += 1
# augment
# travel back through shortest path to find matches
if j==-1:
j = x[i]
x[i] = -1
while i != i1:
i = pred[j]
y[j] = i
k = j
j = x[i]
x[i] = k
# updating of column prices, part 2
for j in xrange(n):
if pred[j] != -2:
v[j] -= minval
# updating total cost
C += minval
return C
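# Descriptive note: on return, SSP has found a min-cost matching together with
# feasible dual prices, i.e. c[i,j] - u[i] - v[j] >= 0 on all kept entries,
# with equality on matched pairs; the __main__ block below asserts exactly this
# slack condition against a dense reference solution.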
@nb.jit(nb.f8(nbsparsedtype[:,:], nb.i8[:], nb.i8[:], nb.f8[:], nb.i8[:],
nb.i8, nb.i8[:], nb.i8, nbheapdtype[:], nb.i8[:], nb.i8, nb.i8,
nb.b1[:], nb.b1, nb.f8), nopython=True)
def spStep(c, x, y, v, rows2use, nrows2use, cols2use, ncols2use, d, pred,
i1, j1, eliminate_els, eliminate_miss, cost_bound):
""" solves Murty subproblem given solution to originating problem
same inputs as SSP and also:
i1, j1 = row and column that are now unassigned
eliminate_els = boolean array, whether matching a column with i1 is prohibited
eliminate_miss = whether i1 is prohibited to miss
cost_bound = function will stop early and return inf if the solution is known
to be above this bound
returns cost of shortest path, a.k.a. this solution's cost minus original solution's
"""
pred[:] = 0
pred[cols2use[:ncols2use]] = -2 # set these as available
dsize = 0
rowi = c[i1]
ui = 0.
for cij in rowi:
if cij['idx'] == j1:
ui = cij['x'] - v[j1]
for cij in rowi:
j = cij['idx']
if pred[j] == -2 and not eliminate_els[j]:
dj = cij['x'] - v[j] - ui
if dj <= cost_bound:
heappush(d, dsize, dj, i1, j)
dsize += 1
if not eliminate_miss:
dj = -ui
if dj <= cost_bound:
heappush(d, dsize, dj, i1, -1)
dsize += 1
minmissi = 0
minmissj = 0
miss_unused = True
missing_from_row = False
missing_cost = 0. # this is a dual cost on auxiliary columns
while True:
missing = False
predj = 0
while predj != -2: # already found and matched this col
if dsize == 0:
return inf
minval, i, j = heappop(d, dsize)
dsize -= 1
if j == -1:
if miss_unused:
minmissi = i
missing = True
missing_from_row = True
break # hit unmatched row
else:
predj = pred[j]
if not missing:
pred[j] = i
v[j] += minval # first half of score augmentation
if j==j1: break
i = y[j]
if i==-1:
# entry to missing zone: col was missing but is now matched
if miss_unused:
minmissj = j
missing = True
missing_from_row = False
else:
# already covered the missing zone, this is a dead end
continue
if missing:
if j1 == -1:
j=-1
break
miss_unused = False
missing_cost = minval
u1 = -minval
# exit from missing zone: row that was missing is matched
for i in rows2use[:nrows2use]:
if x[i]==-1:
rowi = c[i]
for cij in rowi:
j = cij['idx']
if pred[j] == -2:
dj = cij['x']-v[j]-u1
if dj <= cost_bound:
heappush(d, dsize, dj, i, j)
dsize += 1
# exit from missing zone: col that was matched is missing
for j in cols2use[:ncols2use]:
if y[j] >= 0 and pred[j] == -2:
dj = -v[j]-u1
if dj <= cost_bound:
heappush(d, dsize, dj, -1, j)
dsize += 1
else:
rowi = c[i]
for cij in rowi:
# first need to find this row's price
# meaning we have to look up the right column
if cij['idx']==j:
ui = cij['x'] - v[j]
if miss_unused:
dj = -ui
if dj <= cost_bound:
heappush(d, dsize, dj, i, -1)
dsize += 1
for cij in rowi:
j = cij['idx']
if pred[j] == -2:
dj = cij['x'] - v[j] - ui
if dj <= cost_bound:
heappush(d, dsize, dj, i, j)
dsize += 1
# augment
# travel back through shortest path to find matches
i = i1+1 # any number that isn't i1
while i != i1:
if j == -1:
# exit from missing zone: row was missing but is now matched
i = -1
else:
i = pred[j]
y[j] = i
if i == -1:
# exit from missing zone: column j was matched but is now missing
if missing_from_row:
# entry to missing zone: row was matched but is now missing
i = minmissi
j = x[i]
x[i] = -1
else:
# entry to missing zone: col was missing but is now matched
j = minmissj
else:
k = j
j = x[i]
x[i] = k
# updating of column prices
for j in cols2use[:ncols2use]:
if pred[j]!=-2:
v[j] -= minval
if not miss_unused:
v[cols2use[:ncols2use]] += minval - missing_cost
v[y==-1] = 0.
return minval
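# Note on early stopping above: candidate edges whose reduced cost exceeds
# cost_bound are never pushed onto the heap, so an empty heap (dsize == 0)
# proves the shortest augmenting path lies above the bound and spStep returns
# inf; callers can then discard the subproblem without solving it fully.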
if __name__ == '__main__':
"""
create a random matrix
try assignment, check for equality
"""
from scipy.optimize import linear_sum_assignment
from sparsity import sparsedtype
m = 10
n = 20
s = 10
# P = np.random.exponential(size=(n,m))
# mX = np.random.exponential(size=(n,))
# mY = np.random.exponential(size=(m,))
P = np.random.rand(m,n)*5
mX = np.random.rand(m)
mY = np.random.rand(n)
# make sparse version
# take s lowest columns from each row
c2 = P - mX[:,None] - mY[None,:]
csp = np.zeros((m,s), dtype=sparsedtype)
for i in xrange(m):
colsinorder = np.argsort(c2[i])
csp[i]['idx'] = colsinorder[:s]
csp[i]['x'] = c2[i, colsinorder[:s]]
# make it sparse on the real matrices too
P[i,colsinorder[s:]] = 100
c2[i,colsinorder[s:]] = .01
# make full square version, use standard code
c1 = np.zeros((m+n,m+n))
c1[:m,:n] = P
c1[:m,n:] = 1e4
c1[range(m),range(n,m+n)] = mX
c1[m:,:n] = 1e4
c1[range(m,m+n), range(n)] = mY
sol = linear_sum_assignment(c1)
x1 = np.array(sol[1][:m])
x1[x1>=n] = -1
y1 = np.arange(n)
    for k, j in enumerate(sol[1]):
if j < n:
if k < m:
y1[j] = k
else:
y1[j] = -1
print(x1)
print(y1)
y = np.zeros(n, dtype=int) - 1
x = np.zeros(m, dtype=int) - 1
v = np.zeros(n)
rows2use = np.arange(m)
cols2use = np.arange(n)
d = np.zeros(m*s+m, dtype=heapdtype)
pred = np.zeros(n, dtype=int)
    C = SSP(csp, x, y, v, rows2use, m, cols2use, n, d, pred)
print(x)
print(y)
v += mY
u = mX.copy()
xmatch = x>=0
xmis = xmatch==False
ymis = y==-1
u[xmatch] = P[xmatch,x[xmatch]] - v[x[xmatch]]
u2 = np.append(u, np.zeros(n))
v2 = np.append(v, np.zeros(m))
x2 = np.append(x, y+n)
x2[np.where(x==-1)[0]] = np.where(x==-1)[0]+n
x2[np.where(y==-1)[0]+m] = np.where(y==-1)[0]
slack = c1 - u2[:,None] - v2
assert np.min(slack) > -1e-8
assert all(slack[range(m+n), x2] < 1e-8)
assert np.min(v[ymis]) >= -1e-8 if any(ymis) else True
C2 = sum(c2[i,j] for i,j in enumerate(x) if j>=0)
assert np.isclose(C, C2)
| 13,230
| 31.5086
| 88
|
py
|
pmb-nll
|
pmb-nll-main/src/core/fastmurty/previous python implementation/murtysplitLookaheadDense.py
|
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
last mod 3/14/19
These functions reorder rows and columns before creating subproblems.
The goal is to set it up so the first subproblem fixes everything
but the first non-missing row.
One additional row and column are unfixed (with their match or miss
eliminated) in each new subproblem.
"""
import numpy as np
import numba as nb
nbpairtype = nb.typeof((0,0))
# reorder rows so that misses are first
# last row should always remain last, so previous eliminations are kept
# reorder columns so that they are eliminated in order along with the rows
@nb.jit(nbpairtype(nb.f8[:,:], nb.i8[:], nb.i8[:], nb.f8[:], nb.i8[:],
nb.i8, nb.i8[:], nb.i8), nopython=True)
def partitionDefault(c, x, y, v, rows2use, m2, cols2use, n2):
m3 = 0 # number of missing rows
for ri in xrange(m2-1):
i = rows2use[ri]
j = x[i]
if j == -1: # missing row
rows2use[ri] = rows2use[m3]
rows2use[m3] = i
m3 += 1
if x[rows2use[m2-1]] == -1:
m2 -= 1
n3 = 0 # number of missing columns
for cj in xrange(n2):
j = cols2use[cj]
if y[j] == -1:
cols2use[cj] = cols2use[n3]
cols2use[n3] = j
n3 += 1
assert n2-n3==m2-m3 # number of reported matches is the same
cols2use[n3:n2] = x[rows2use[m3:m2]]
# if there are missing columns, must eliminate on all rows
# if no missing columns, can eliminate only matched rows
return (0, n3) if n3 > 0 else (m3, 0)
@nb.jit(nbpairtype(nb.f8[:,:], nb.i8[:], nb.i8[:], nb.f8[:], nb.i8[:],
nb.i8, nb.i8[:], nb.i8, nb.f8[:], nb.i8[:]), nopython=True)
def murtySplit(c, x, y, v, rows2use, m2, cols2use, n2,
row_cost_estimates, row_best_columns):
if m2 <= 2 or n2 <= 1:
return partitionDefault(c, x, y, v, rows2use, m2, cols2use, n2)
# order missing columns at beginning, they will not be removed no matter
# the partition order
n3 = 0 # number of missing columns
for cj in xrange(n2):
j = cols2use[cj]
if y[j] == -1:
cols2use[cj] = cols2use[n3]
cols2use[n3] = j
n3 += 1
n_missing_cols = n3
# set aside row m2-1 and its column
last_column = x[rows2use[m2-1]]
if last_column != -1:
for cj in xrange(n2-1):
j = cols2use[cj]
if j == last_column:
cols2use[cj] = cols2use[n2-1]
cols2use[n2-1] = j
n2 -= 1 # don't use this column in lookahead
m2 -= 1
# determine if all rows will be eliminated or not
n_not_eliminated_rows = 0
if n_missing_cols == 0:
# in this case, you can keep missing rows at the beginning and not fix them
m3 = 0 # number of missing rows
for ri in xrange(m2):
i = rows2use[ri]
j = x[i]
if j == -1: # missing row
rows2use[ri] = rows2use[m3]
rows2use[m3] = i
m3 += 1
assert m3 == m2 - n2
n_not_eliminated_rows = m3
# find estimated cost for row, min(slack[i,j]) for j!=x[i]
# row_cost_estimates = np.zeros(m2-1)
# row_best_columns = np.zeros(m2-1, dtype=np.int64)
for ri in xrange(n_not_eliminated_rows, m2):
i = rows2use[ri]
j = x[i]
ui = 0. if j==-1 else c[i,j] - v[j]
minval = 1e3 if j==-1 else 0. # value of missing
mincj = -1
for cj in xrange(n2):
j2 = cols2use[cj]
if j2 != j:
dj = c[i,j2] - v[j2]
if dj < minval:
minval = dj
mincj = cj
row_cost_estimates[ri] = minval - ui
row_best_columns[ri] = mincj
n3 = n2
for m3 in xrange(m2-1, n_not_eliminated_rows-1, -1):
# choose the *worst* current row and partition on this *last*
# meaning that partition has the fewest fixed rows & the most freedom
worst_ri = np.argmax(row_cost_estimates[n_not_eliminated_rows:m3+1])
worst_ri += n_not_eliminated_rows
worst_i = rows2use[worst_ri]
rows2use[worst_ri] = rows2use[m3]
rows2use[m3] = worst_i
# don't want to pick this row again, can just overwrite it
row_cost_estimates[worst_ri] = row_cost_estimates[m3]
row_best_columns[worst_ri] = row_best_columns[m3]
deadj = x[worst_i]
if deadj != -1:
# swap columns so this particular column matches that row
for cj in xrange(n3):
j = cols2use[cj]
if j == deadj:
deadcol = cj
cols2use[cj] = cols2use[n3-1]
cols2use[n3-1] = deadj
break
n3 -= 1
# update other cost estimates that had picked the same column
for ri in xrange(n_not_eliminated_rows, m3):
if row_best_columns[ri] == deadcol:
# recalculate without deadj
i = rows2use[ri]
j = x[i]
ui = 0. if j==-1 else c[i,j] - v[j]
minval = 1e3 if j==-1 else 0. # value of missing
mincj = -1
# check everything except already used columns
for cj in xrange(n3):
j2 = cols2use[cj]
if j2 != j:
dj = c[i,j2] - v[j2]
if dj < minval:
minval = dj
mincj = cj
row_cost_estimates[ri] = minval - ui
row_best_columns[ri] = mincj
return n_not_eliminated_rows, n_missing_cols
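# Descriptive note on the return value: (n_not_eliminated_rows, n_missing_cols)
# tells the caller how many leading rows stay free of eliminations and how many
# leading columns are misses. Rows are ordered so that the row with the worst
# (largest) single-row cost estimate is partitioned in the subproblem with the
# fewest fixed rows, giving that subproblem the most freedom.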
| 5,791
| 37.105263
| 83
|
py
|
pmb-nll
|
pmb-nll-main/src/core/fastmurty/previous python implementation/sparsity.py
|
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
"""
import numpy as np
import numba as nb
inf = 1e9
sparsedtype = np.dtype([('x', np.float64), ('idx', np.int64)])
nbsparsedtype = nb.from_dtype(sparsedtype)
def sparsify(c, s): # keep s lowest elements for each row
c2 = np.zeros((c.shape[0],s), dtype=sparsedtype)
for i, ci in enumerate(c):
colsinorder = np.argsort(ci)
c2[i]['idx'] = colsinorder[:s]
c2[i]['x'] = ci[colsinorder[:s]]
return c2
def unSparsify(c, n):
m = c.shape[0]
c2 = np.zeros((m,n)) + inf
for i in xrange(m):
c2[i, c[i]['idx']] = c[i]['x']
return c2
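# Round-trip sketch (hedged): entries dropped by sparsify come back as
# inf (= 1e9) after unSparsify:
#   c = np.array([[3., 1., 2.]])
#   unSparsify(sparsify(c, 2), 3)   # -> [[1e9, 1., 2.]]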
| 643
| 22.851852
| 62
|
py
|
pmb-nll
|
pmb-nll-main/src/core/fastmurty/previous python implementation/daSparse.py
|
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
"""
import numpy as np
import numba as nb
from sparsity import nbsparsedtype
from sspSparse import SSP, spStep
from sspSparse import heapdtype as sparseheapdtype
from heap import iheappopmin, iheapreplacemax, iheapgetmax, heapdtype
from murtysplitLookaheadSparse import murtySplit
from outputSimple import processOutput
def allocateWorkVarsforDA(m, n, nsols):
""" sols_ store subproblems created by Murty's alg and their solutions """
sols_rows2use = np.empty((nsols+1, m+1), dtype=int)
sols_rows2use[:] = np.arange(m+1)
sols_cols2use = np.empty((nsols+1, n+1), dtype=int)
sols_cols2use[:] = np.arange(n+1)
sols_elim = np.zeros((nsols+1, n+1), dtype=bool)
sols_x = np.zeros((nsols+1,m), dtype=int)
sols_v = np.zeros((nsols+1,n))
# backward_index maintains an index for each observed object-measurement pair
backward_index = np.full((m+1,n+1), -1, dtype=int)
return sols_rows2use, sols_cols2use, sols_elim, sols_x, sols_v, backward_index
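# Layout note (descriptive): for each solution slot k, sols_rows2use[k, :m2]
# holds the row ids of the reduced problem and sols_rows2use[k, m] holds the
# count m2 itself; sols_cols2use mirrors this for columns, and sols_elim[k, n]
# stores the eliminate-miss flag alongside the per-column elimination mask.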
@nb.jit(nb.void(nbsparsedtype[:,:], nb.b1[:,:], nb.f8[:], nb.b1[:,:], nb.f8[:],
nb.i8[:,:], nb.b1[:,:], nb.f8[:],
nb.i8[:,:], nb.i8[:,:], nb.b1[:,:], nb.i8[:,:], nb.f8[:,:], nb.i8[:,:]),
nopython=True)
def da(c, row_sets, row_set_weights, col_sets, col_set_weights,
out_matches, out_assocs, out_costs,
sols_rows2use, sols_cols2use, sols_elim, sols_x, sols_v, backward_index):
"""
c: input matrix
row_sets: nR*M binary matrix, specifies multiple input cases (for instance hypotheses)
row_set_weights: nR float array, the cost of each row set (hypothesis weight)
col_sets, col_set_weights: same for columns
solving data association across different measurement subsets is uncommon
but possible, see "Handling of Multiple Measurement Hypotheses..."
by Kellner & Aeberhard, 2018
out_matches = Nout*2 array of row-column matches that were used in associations
(i,-1) is a "missing" row, (-1,j) is a missing column
out_assocs = K*Nout binary matrix, each row is an association and includes
certain row-column matches
out_costs = K float array, the cost of each output association
sols_ = workspace variables
"""
# ### DEBUG
# ccc = np.zeros((c.shape[0],sols_v.shape[1])) + 1e9
# for i, ci in enumerate(c):
# ccc[i, ci['idx']] = ci['x']
inf = 1e9 # sufficiently high float that will not cause NaNs in arithmetic
m,s = c.shape
n = sols_v.shape[1]
nsols = out_assocs.shape[0]
if m == 0 or nsols == 0 or len(row_sets)==0:
out_costs[:] = inf
return
if n == 0 or len(col_sets)==0:
out_assocs[0,:] = -1
out_costs[0] = 1
out_costs[1:] = inf
return
# reset output
backward_index[:] = -1
out_matches[:] = -1
out_assocs[:] = False
out_matches_n = 0
# create some smaller temporary variables, for SSP and partitioning
y = np.full(n, -1, dtype=np.int64)
orig_y = y.copy()
pred = np.zeros(n, dtype=np.int64)
x = np.full(m, -1, dtype=np.int64)
v = np.zeros(n)
rows2use = np.arange(m)
cols2use = np.arange(n)
eliminate_els = np.zeros(n, dtype=np.bool8)
partition_row_cost = np.zeros(m)
partition_row_col = np.zeros(m, dtype=np.int64)
d = np.zeros(m*s+m, dtype=sparseheapdtype)
# priority queue
Q = np.zeros(nsols, dtype=heapdtype)
for jj in range(nsols):
Q[jj]['key'] = inf
Q[jj]['val'] = jj
Qsize = nsols
# find best solutions for each input hypothesis
cost_bound, solidx = iheapgetmax(Q, Qsize)
for row_set_idx in xrange(len(row_sets)):
row_set = row_sets[row_set_idx]
m2 = 0 # partition sort so included set is first
for i, in_set in enumerate(row_set):
if in_set:
rows2use[m2] = i
m2 += 1
m3=m2
for i, in_set in enumerate(row_set):
if not in_set:
rows2use[m3] = i
m3 += 1
for col_set_idx in xrange(len(col_sets)):
cols2use[:] = np.arange(n)
col_set = col_sets[col_set_idx]
n2 = 0 # partition sort so included set is first
for i, in_set in enumerate(col_set):
if in_set:
cols2use[n2] = i
n2 += 1
n3 = n2
for i, in_set in enumerate(col_set):
if not in_set:
cols2use[n3] = i
n3 += 1
x[rows2use[:m2]] = -1
x[rows2use[m2:]] = -2
y[cols2use[:n2]] = -1
y[cols2use[n2:]] = -2
v[:] = 0.
C = SSP(c, x, y, v, rows2use, m2, cols2use, n2, d, pred)
C += row_set_weights[row_set_idx]
C += col_set_weights[col_set_idx]
if C < cost_bound:
sols_rows2use[solidx,:m2] = rows2use[:m2]
sols_rows2use[solidx,m] = m2
sols_cols2use[solidx,:n2] = cols2use[:n2]
sols_cols2use[solidx,n] = n2
sols_elim[solidx,:] = False
sols_x[solidx] = x
sols_v[solidx] = v
iheapreplacemax(Q, Qsize, C, solidx)
cost_bound, solidx = iheapgetmax(Q, Qsize)
# ###DEBUG
# eps = 1e-8
# assert n2 > 0
# xmatches = np.where(x>=0)[0]
# assert all(y[x[xmatches]] == xmatches)
# # check slack
# u = np.zeros(m)
# u[xmatches] = ccc[xmatches,x[xmatches]]-v[x[xmatches]]
# slack = ccc-u[:,None]-v
# slack = slack[rows2use[:m2],:][:,cols2use[:n2]].copy()
# assert np.all(slack>=-eps)
# assert all(u[rows2use[:m2]] < eps)
# assert all(v[cols2use[:n2]] < eps)
for k in xrange(nsols):
Qsize = nsols-k-1 # current length of queue
# get best solution from queue
C, solidx = iheappopmin(Q, Qsize+1)
if C >= inf: break
orig_x = sols_x[solidx]
n2 = sols_cols2use[solidx,n]
cols2use[:n2] = sols_cols2use[solidx,:n2]
# reconstruct y from x
orig_y[:] = -2
orig_y[cols2use[:n2]] = -1
for i,j in enumerate(orig_x):
if j >= 0:
orig_y[j] = i
# add to output
out_costs[k] = C
out_matches_n = processOutput(out_matches, out_assocs[k], orig_x, orig_y,
backward_index, out_matches_n)
if k == nsols-1:
break
# prep for creating subproblems
m2 = sols_rows2use[solidx,m]
rows2use[:m2] = sols_rows2use[solidx,:m2]
orig_eliminate_els = sols_elim[solidx,:n]
orig_eliminate_miss = sols_elim[solidx,n]
orig_v = sols_v[solidx]
# reorder rows2use and cols2use so that subproblem creation is simple
m3_start, n3 = murtySplit(c, orig_x, orig_y, orig_v,
rows2use, m2, cols2use, n2,
partition_row_cost, partition_row_col, pred)
cost_bound, solidx = iheapgetmax(Q, Qsize)
cost_bound = cost_bound - C
for m3 in xrange(m3_start+1, m2+1):
# subproblem
# reset previous solution
x[:] = orig_x
v[:] = orig_v
y[:] = orig_y
# eliminate the selected match
eliminate_i = rows2use[m3-1]
eliminate_j = x[eliminate_i]
if m3 == m2:
# last elimination, mind eliminated cols from original
eliminate_els[:] = orig_eliminate_els
eliminate_miss = orig_eliminate_miss
else:
# undo elimination from previous problem
eliminate_els[:] = False # maybe can just reset cols2use[n3-1]?
eliminate_miss = False
if eliminate_j >= 0:
n3 += 1 # column n3 is no longer fixed
eliminate_els[eliminate_j] = True
else:
eliminate_miss = True
# solve new problem
# to remove early stopping, replace cost_bound with inf
Cnew = spStep(c, x, y, v, rows2use, m3, cols2use, n3, d, pred,
eliminate_i, eliminate_j, eliminate_els, eliminate_miss,
cost_bound)
#inf)
if Cnew < cost_bound:
# add solution
sols_rows2use[solidx,:m3] = rows2use[:m3]
sols_rows2use[solidx,m] = m3
sols_cols2use[solidx,:n3] = cols2use[:n3]
sols_cols2use[solidx,n] = n3
sols_elim[solidx,:n] = eliminate_els
sols_elim[solidx,n] = eliminate_miss
sols_x[solidx] = x
sols_v[solidx] = v
iheapreplacemax(Q, Qsize, C+Cnew, solidx)
cost_bound, solidx = iheapgetmax(Q, Qsize)
cost_bound = cost_bound - C
# ###DEBUG
# eps = 1e-8
# assert n3 > 0
# xmatches = np.where(x>=0)[0]
# assert all(y[x[xmatches]] == xmatches)
# # check that previous solution was better
# assert Cnew > -eps
# # check slack
# u = np.zeros(m)
# u[xmatches] = ccc[xmatches,x[xmatches]]-v[x[xmatches]]
# slack = ccc-u[:,None]-v
# slack[eliminate_i, eliminate_els] = 0
# slack = slack[rows2use[:m3],:][:,cols2use[:n3]].copy()
# assert np.all(slack>=-eps)
# us_to_debug = rows2use[:m3].copy()
# if eliminate_miss:
# us_to_debug = us_to_debug[us_to_debug!=eliminate_i]
# assert all(u[us_to_debug] < eps)
# assert all(v[cols2use[:n3]] < eps)
if __name__ == '__main__':
from time import time
from sparsity import sparsify
np.random.seed(42)
m = 20
n = 20
s = 20
nout = 100
nsols = 100
n_repeats = 10
totaltime = 0.
row_sets = np.ones((2,m), dtype=bool)
row_sets[1,0] = False
row_set_weights = np.array([2.,0.])
col_sets = np.ones((1,n), dtype=bool)
col_set_weights = np.array([0.])
workvars = allocateWorkVarsforDA(m, n, nsols)
sols_rows2use, sols_cols2use, sols_elim, sols_x, sols_v, backward_index = workvars
out_matches = np.zeros((nout, 2), dtype=int)
out_assocs = np.zeros((nsols, nout), dtype=bool)
out_costs = np.zeros(nsols)
for repeat in range(n_repeats):
# uniform
c = np.random.rand(m,n)*5 - 3
c = sparsify(c, s)
# consider missing rows and columns
costs = []
if repeat == 0:
# run once to compile numba
da(c, row_sets, row_set_weights, col_sets, col_set_weights,
out_matches, out_assocs, out_costs,
sols_rows2use, sols_cols2use, sols_elim, sols_x, sols_v, backward_index)
totaltime -= time()
# actual operation here!
da(c, row_sets, row_set_weights, col_sets, col_set_weights,
out_matches, out_assocs, out_costs,
sols_rows2use, sols_cols2use, sols_elim, sols_x, sols_v, backward_index)
totaltime += time()
#np.save('msc{:2d}.npy'.format(repeat), costs)
print(totaltime * (1000. / n_repeats)) # average runtime in milliseconds
| 11,809
| 37.594771
| 90
|
py
|
pmb-nll
|
pmb-nll-main/src/core/fastmurty/previous python implementation/outputSimple.py
|
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
last mod 3/7/19
"""
import numba as nb
@nb.jit(nb.i8(nb.i8[:,:], nb.b1[:], nb.i8[:], nb.i8[:], nb.i8[:,:], nb.i8))
def processOutput(matches, hypothesis, x, y, backward_index, n_matches):
"""
This one removes matches that are found after the limit has been hit,
without considering the relative importance of each
keeps all hypotheses
"""
for i,j in enumerate(x):
if j == -2: continue
backidx = backward_index[i,j]
if backidx == -1:
if n_matches == matches.shape[0]:
continue
backward_index[i,j] = n_matches
matches[n_matches] = (i,j)
backidx = n_matches
n_matches += 1
hypothesis[backidx] = True
for j,i in enumerate(y):
if i==-1:
backidx = backward_index[-1,j]
if backidx == -1:
if n_matches == matches.shape[0]:
continue
backward_index[-1,j] = n_matches
matches[n_matches] = (i,j)
backidx = n_matches
n_matches += 1
hypothesis[backidx] = True
return n_matches
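# Descriptive note: backward_index[i, j] caches the slot in `matches` assigned
# to pair (i, j); missing columns are stored under row index -1 via
# backward_index[-1, j]. Once `matches` is full, later pairs are silently
# dropped from the remaining hypotheses rather than evicting earlier ones.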
| 1,199
| 30.578947
| 75
|
py
|
pmb-nll
|
pmb-nll-main/src/core/fastmurty/previous python implementation/heap.py
|
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
last mod 3/2/19
inspired by
"A Comparative Analysis of Three Different Priority Deques", Skov and Olsen
and the python 2.7 heapq
"""
import numpy as np
import numba as nb
heapdtype = np.dtype([('key', np.float64), ('val', np.int64)], align=False)
nbheapdtype = nb.from_dtype(heapdtype)
nbheapouttype = nb.typeof((0., 0)) # float, int
heapdtype_doubleidx = np.dtype([('key', np.float64),
('val', [('a',np.int64),('b',np.int64)])])
@nb.jit(nb.void(nbheapdtype[:], nb.i8, nb.f8, nb.i8), nopython=True)
def heappush(heap, pos, newkey, newval):
while pos > 0:
parentpos = (pos - 1) >> 1
if newkey > heap[parentpos]['key']: break
heap[pos] = heap[parentpos]
pos = parentpos
heap[pos]['key'] = newkey
heap[pos]['val'] = newval
@nb.jit(nbheapouttype(nbheapdtype[:], nb.i8), nopython=True)
def heappop(heap, heapsize):
minele = heap[0]
minkey = minele['key']
minval = minele['val']
heapsize -= 1
newele = heap[heapsize]
newkey = newele['key']
# Bubble up the smaller child until hitting a leaf.
pos = 0
childpos = 1 # leftmost child position
while childpos < heapsize:
# Set childpos to index of smaller child.
rightpos = childpos + 1
if rightpos < heapsize:
if heap[childpos]['key'] > heap[rightpos]['key']:
childpos = rightpos
if heap[childpos]['key'] > newkey:
break
# Move the smaller child up.
heap[pos] = heap[childpos]
pos = childpos
childpos = (pos<<1) + 1
heap[pos] = newele
return minkey, minval
def heapIsValid(heap, heapsize):
for pos in xrange(heapsize >> 1):
leftpos = (pos<<1) + 1
rightpos = leftpos + 1
if heap[leftpos]['key'] < heap[pos]['key']:
return False
if rightpos < heapsize and heap[rightpos]['key'] < heap[pos]['key']:
return False
return True
@nb.jit(nb.void(nbheapdtype[:], nb.i8, nb.f8, nb.i8), nopython=True)
def iheappush(heap, pos, newkey, newval):
if (pos & 1): # new count will be even, new element part of low-high pair
lo = heap[pos-1]
if newkey < lo['key']:
# switch this pair
heap[pos] = lo
pos -= 1
# move new pair up tree
while pos > 1:
parentpos = (pos - 1) >> 1 & -2 # lo index of parent of lo
if newkey > heap[parentpos]['key']: break
heap[pos] = heap[parentpos]
pos = parentpos
else:
# new element is high part of pair
# move new pair up tree
while pos > 1:
parentpos = (pos - 2) >> 1 | 1 # hi index of parent of hi
if newkey < heap[parentpos]['key']: break
heap[pos] = heap[parentpos]
pos = parentpos
else:
# new count will be odd, this object is alone in new leaf
parentlo = (pos - 1) >> 1 & -2
if newkey < heap[parentlo]['key']:
# new element belongs in the min heap
while pos > 1:
parentpos = (pos - 1) >> 1 & -2
if newkey > heap[parentpos]['key']: break
heap[pos] = heap[parentpos]
pos = parentpos
else:
# might belong in max heap, or either
while pos > 1:
parentpos = (pos - 2) >> 1 | 1
if newkey < heap[parentpos]['key']: break
heap[pos] = heap[parentpos]
pos = parentpos
heap[pos]['key'] = newkey
heap[pos]['val'] = newval
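# Interval-heap layout (descriptive): elements live in lo/hi pairs, with even
# index 2k holding the pair's min and odd index 2k+1 its max. The even indices
# form a min-heap (overall min at heap[0]) and the odd indices a max-heap
# (overall max at heap[1]), which is what iheappopmin, iheapreplacemax and
# iheapgetmax below rely on.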
@nb.jit(nbheapouttype(nbheapdtype[:], nb.i8), nopython=True)
def iheappopmin(heap, heapsize):
minele = heap[0]
minkey = minele['key']
minval = minele['val']
heapsize -= 1
newele = heap[heapsize]
newkey = newele['key']
newval = newele['val']
pos = 0
childpos = 2 # leftmost child position, lo index
while childpos < heapsize:
childkey = heap[childpos]['key']
# Set childpos to index of smaller child.
rightpos = childpos + 2
if rightpos < heapsize:
rightkey = heap[rightpos]['key']
if rightkey < childkey:
# branch to the right instead of the left
childpos = rightpos
childkey = rightkey
if newkey < childkey:
break # the new element is correctly positioned at pos
# Move the smaller child up.
heap[pos] = heap[childpos]
# swap low and high if needed
hichild = heap[childpos+1]
if newkey > hichild['key']:
newkey2 = hichild['key']
newval2 = hichild['val']
hichild['key'] = newkey
hichild['val'] = newval
# shifting the low index
newkey = newkey2
newval = newval2
pos = childpos
childpos = (pos<<1) + 2
heap[pos]['key'] = newkey
heap[pos]['val'] = newval
return minkey, minval
@nb.jit(nb.void(nbheapdtype[:], nb.i8, nb.f8, nb.i8), nopython=True)
def iheapreplacemax(heap, heapsize, newkey, newval):
pos = 1
childpos = 3 # leftmost child position, lo index
lochild = heap[0]
if newkey < lochild['key']: # new element is smallest in heap
newkey2 = lochild['key']
newval2 = lochild['val']
lochild['key'] = newkey
lochild['val'] = newval
# still shifting the high index
newkey = newkey2
newval = newval2
size_limit = (heapsize-2)&-4
while childpos < size_limit:
childkey = heap[childpos]['key']
# Set childpos to index of larger child.
rightpos = childpos + 2
rightkey = heap[rightpos]['key']
if rightkey > childkey:
# branch to the right instead of the left
childpos = rightpos
childkey = rightkey
if newkey > childkey:
break # the new element is correctly positioned at pos
# Move the smaller child up.
heap[pos] = heap[childpos]
# swap low and high if needed
lochild = heap[childpos-1]
if newkey < lochild['key']:
newkey2 = lochild['key']
newval2 = lochild['val']
lochild['key'] = newkey
lochild['val'] = newval
# still shifting the high index
newkey = newkey2
newval = newval2
pos = childpos
childpos = (pos<<1) + 1
# address corner case at end of heap
cornerstate = heapsize-childpos
if cornerstate >= 0:
# if cornerstate == 0, only lo index of left child is present
leftpos = childpos-1 if cornerstate==0 else childpos
if cornerstate == 2: # lo index of right child present
rightpos = childpos+1
if heap[rightpos]['key'] > heap[leftpos]['key']:
childpos = rightpos
else:
childpos = leftpos
else:
childpos = leftpos
if newkey < heap[childpos]['key']: # move down one more
heap[pos] = heap[childpos]
pos = childpos
# check for swap
if pos&1: # picked high index of left child
lochild = heap[pos-1]
if newkey < lochild['key']:
newkey2 = lochild['key']
newval2 = lochild['val']
lochild['key'] = newkey
lochild['val'] = newval
newkey = newkey2
newval = newval2
heap[pos]['key'] = newkey
heap[pos]['val'] = newval
@nb.jit(nbheapouttype(nbheapdtype[:], nb.i8), nopython=True)
def iheapgetmax(heap, heapsize):
max_index = 0 if heapsize==1 else 1
return heap[max_index]['key'], heap[max_index]['val']
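# Hedged sketch of the bounded best-K pattern these functions support (as used
# by daDense.py/daSparse.py): keep K slots keyed by cost, initialised to inf,
# and admit a candidate only if it beats the current worst:
#   cost_bound, slot = iheapgetmax(Q, K)
#   if C < cost_bound:
#       iheapreplacemax(Q, K, C, slot)  # recycle the evicted slot's storage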
def iheapIsValid(heap, heapsize):
for pos in xrange(0, heapsize-1, 2):
if heap[pos+1]['key'] < heap[pos]['key']:
return False
for pos in xrange(0, (heapsize-1) >> 1, 2):
leftpos = (pos<<1) + 2
rightpos = leftpos + 2
if heap[leftpos]['key'] < heap[pos]['key']:
return False
if rightpos < heapsize and heap[rightpos]['key'] < heap[pos]['key']:
return False
for pos in xrange(1, heapsize>>1, 2):
leftpos = (pos<<1) + 1
rightpos = leftpos + 2
if heap[leftpos]['key'] > heap[pos]['key']:
return False
if rightpos < heapsize and heap[rightpos]['key'] > heap[pos]['key']:
return False
if heapsize&1 and heapsize>1: # heap size is odd, one unpaired lo index
if heap[(heapsize-2)>>1|1]['key'] < heap[heapsize-1]['key']:
return False
return True
if __name__ == '__main__':
"""
Test using random pushes and pulls
"""
import heapq
np.random.seed(435)
testsize = 40
infloats = np.random.rand(testsize)
inints = np.random.choice(range(testsize), size=testsize, replace=False)
pushorpull = np.random.rand(testsize) > .25
# test normal heap
heap1 = []
heap2 = np.zeros(testsize, dtype=heapdtype)
heap2size = 0
for test in range(testsize):
if pushorpull[test] or len(heap1) == 0:
heapq.heappush(heap1, (infloats[test], inints[test]))
heappush(heap2, heap2size, infloats[test], inints[test])
heap2size += 1
else:
out1 = heapq.heappop(heap1)
out2 = heappop(heap2, heap2size)
heap2size -= 1
assert out1[0] == out2[0]
assert out1[1] == out2[1]
for j in xrange(len(heap1)):
assert heap1[j][0] == heap2[j]['key']
assert heap1[j][1] == heap2[j]['val']
assert heapIsValid(heap2, heap2size)
# # test interval heap
# topk = 9
# heap1 = []
# heap2 = np.zeros(testsize, dtype=heapdtype)
# heap2size = 0
# for test in range(testsize):
# if pushorpull[test]:
# # push
# if len(heap1) == topk:
# topkval = max(heap1)
# if topkval[0] > infloats[test]:
# h1 = heap1.index(topkval)
# heap1[h1] = (infloats[test], inints[test])
# heapq.heapify(heap1)
# else:
# heapq.heappush(heap1, (infloats[test], inints[test]))
# if heap2size == topk:
# topkval = heap2[1]['key']
# if topkval > infloats[test]:
# iheapreplacemax(heap2, heap2size, infloats[test], inints[test])
# else:
# iheappush(heap2, heap2size, infloats[test], inints[test])
# heap2size += 1
# else:
# if len(heap1) == 0:
# continue
# out1 = heapq.heappop(heap1)
# out2 = iheappopmin(heap2, heap2size)
# heap2size -= 1
# assert out1[0] == out2[0]
# assert out1[1] == out2[1]
# assert iheapIsValid(heap2, heap2size)
# assert len(heap1) == heap2size
# if len(heap1) > 0:
# match1 = np.array(sorted(heap1))
# match2 = np.sort(heap2[:heap2size])
# assert np.all(match1[:,0]==match2[:]['key'])
# assert np.all(match1[:,1]==match2[:]['val'])
# assert heap1[0][0] == heap2[0][0]
# assert heap1[0][1] == heap2[0][1]
# if len(heap1) > 1:
# topkval = max(heap1)
# assert topkval[0] == heap2[1][0]
# assert topkval[1] == heap2[1][1]
| 11,629
| 34.895062
| 84
|
py
|
pmb-nll
|
pmb-nll-main/src/core/fastmurty/previous python implementation/daDense.py
|
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
"""
import numpy as np
import numba as nb
from sspDense import SSP, spStep
from heap import iheappopmin, iheapreplacemax, iheapgetmax, heapdtype
from murtysplitLookaheadDense import murtySplit
from outputSimple import processOutput
def allocateWorkVarsforDA(m, n, nsols):
# following specify reduced problems, input to JV functions
# keep a fixed bank of nsols problems, will replace unused problems as needed
sols_rows2use = np.empty((nsols+1, m+1), dtype=int)
sols_rows2use[:] = np.arange(m+1)
sols_cols2use = np.empty((nsols+1, n+1), dtype=int)
sols_cols2use[:] = np.arange(n+1)
sols_elim = np.zeros((nsols+1, n+1), dtype=bool)
sols_x = np.zeros((nsols+1,m), dtype=int)
sols_v = np.zeros((nsols+1,n))
backward_index = np.full((m+1,n+1), -1, dtype=int)
return sols_rows2use, sols_cols2use, sols_elim, sols_x, sols_v, backward_index
@nb.jit(nb.void(nb.f8[:,:], nb.b1[:,:], nb.f8[:], nb.b1[:,:], nb.f8[:],
nb.i8[:,:], nb.b1[:,:], nb.f8[:],
nb.i8[:,:], nb.i8[:,:], nb.b1[:,:], nb.i8[:,:], nb.f8[:,:], nb.i8[:,:]),
nopython=True)
def da(c, row_sets, row_set_weights, col_sets, col_set_weights,
out_matches, out_assocs, out_costs,
sols_rows2use, sols_cols2use, sols_elim, sols_x, sols_v, backward_index):
"""
c: input matrix
row_sets: nR*M binary matrix, specifies multiple input cases (for instance hypotheses)
row_set_weights: nR float array, the cost of each row set (hypothesis weight)
col_sets, col_set_weights: same for columns
solving data association across different measurement subsets is uncommon
but possible, see "Handling of Multiple Measurement Hypotheses..."
by Kellner & Aeberhard, 2018
out_matches = Nout*2 array of row-column matches that were used in associations
(i,-1) is a "missing" row, (-1,j) is a missing column
out_assocs = K*Nout binary matrix, each row is an association and includes
certain row-column matches
out_costs = K float array, the cost of each output association
sols_ = workspace variables
"""
inf = 1e9 # sufficiently high float that will not cause NaNs in arithmetic
m,n = c.shape
nsols = out_assocs.shape[0]
if m == 0 or nsols == 0 or len(row_sets)==0:
out_costs[:] = inf
return
if n == 0 or len(col_sets)==0:
out_assocs[0,:] = -1
out_costs[0] = 1
out_costs[1:] = inf
return
# reset output
backward_index[:] = -1
out_matches[:] = -1
out_assocs[:] = False
out_matches_n = 0
# create some smaller temporary variables, for SSP and partitioning
y = np.full(n, -1, dtype=np.int64)
orig_y = y.copy()
d = np.zeros(n)
pred = np.zeros(n, dtype=np.int64)
x = np.full(m, -1, dtype=np.int64)
v = np.zeros(n)
rows2use = np.arange(m)
cols2use = np.arange(n)
eliminate_els = np.zeros(n, dtype=np.bool8)
partition_row_cost = np.zeros(m)
partition_row_col = np.zeros(m, dtype=np.int64)
# priority queue
Q = np.zeros(nsols, dtype=heapdtype)
for jj in range(nsols):
Q[jj]['key'] = inf
Q[jj]['val'] = jj
Qsize = nsols
# find best solutions for each input hypothesis
cost_bound, solidx = iheapgetmax(Q, Qsize)
for row_set_idx in xrange(len(row_sets)):
row_set = row_sets[row_set_idx]
m2 = 0 # partition so included set is first
for i, in_set in enumerate(row_set):
if in_set:
rows2use[m2] = i
m2 += 1
m3=m2
for i, in_set in enumerate(row_set):
if not in_set:
rows2use[m3] = i
m3 += 1
for col_set_idx in xrange(len(col_sets)):
cols2use[:] = np.arange(n)
col_set = col_sets[col_set_idx]
n2 = 0 # partition so included set is first
for i, in_set in enumerate(col_set):
if in_set:
cols2use[n2] = i
n2 += 1
n3 = n2
for i, in_set in enumerate(col_set):
if not in_set:
cols2use[n3] = i
n3 += 1
x[rows2use[:m2]] = -1
x[rows2use[m2:]] = -2
y[cols2use[:n2]] = -1
y[cols2use[n2:]] = -2
v[:] = 0.
C = SSP(c, x, y, v, rows2use, m2, cols2use, n2, d, pred)
C += row_set_weights[row_set_idx]
C += col_set_weights[col_set_idx]
if C < cost_bound:
sols_rows2use[solidx,:m2] = rows2use[:m2]
sols_rows2use[solidx,m] = m2
sols_cols2use[solidx,:n2] = cols2use[:n2]
sols_cols2use[solidx,n] = n2
sols_elim[solidx,:] = False
sols_x[solidx] = x
sols_v[solidx] = v
iheapreplacemax(Q, Qsize, C, solidx)
cost_bound, solidx = iheapgetmax(Q, Qsize)
for k in xrange(nsols):
Qsize = nsols-k-1 # current length of queue
# get best solution from queue
C, solidx = iheappopmin(Q, Qsize+1)
if C >= inf: break
orig_x = sols_x[solidx]
n2 = sols_cols2use[solidx,n]
cols2use[:n2] = sols_cols2use[solidx,:n2]
# reconstruct y from x
orig_y[:] = -2
orig_y[cols2use[:n2]] = -1
for i,j in enumerate(orig_x):
if j >= 0:
orig_y[j] = i
# add to output
out_costs[k] = C
out_matches_n = processOutput(out_matches, out_assocs[k], orig_x, orig_y,
backward_index, out_matches_n)
if k == nsols-1:
break
# prep for creating subproblems
m2 = sols_rows2use[solidx,m]
rows2use[:m2] = sols_rows2use[solidx,:m2]
orig_eliminate_els = sols_elim[solidx,:n]
orig_eliminate_miss = sols_elim[solidx,n]
orig_v = sols_v[solidx]
# reorder rows2use and cols2use so that subproblem creation is simple
m3_start, n3 = murtySplit(c, orig_x, orig_y, orig_v,
rows2use, m2, cols2use, n2,
partition_row_cost, partition_row_col)
cost_bound, solidx = iheapgetmax(Q, Qsize)
cost_bound = cost_bound - C
for m3 in xrange(m3_start+1, m2+1):
# subproblem
# reset previous solution
x[:] = orig_x
v[:] = orig_v
y[:] = orig_y
# eliminate the selected match
eliminate_i = rows2use[m3-1]
eliminate_j = x[eliminate_i]
if m3 == m2:
# last elimination, mind eliminated cols from original
eliminate_els[:] = orig_eliminate_els
eliminate_miss = orig_eliminate_miss
else:
# undo elimination from previous problem
eliminate_els[:] = False # should just be able to reset cols2use[n3]
eliminate_miss = False
if eliminate_j >= 0:
n3 += 1 # column n3 is no longer fixed
eliminate_els[eliminate_j] = True
else:
eliminate_miss = True
# solve new problem
# to remove early stopping, replace cost_bound with inf
Cnew = spStep(c, x, y, v, rows2use, m3, cols2use, n3, d, pred,
eliminate_i, eliminate_j, eliminate_els, eliminate_miss,
cost_bound)
#inf)
if Cnew < cost_bound:
# add solution
sols_rows2use[solidx,:m3] = rows2use[:m3]
sols_rows2use[solidx,m] = m3
sols_cols2use[solidx,:n3] = cols2use[:n3]
sols_cols2use[solidx,n] = n3
sols_elim[solidx,:n] = eliminate_els
sols_elim[solidx,n] = eliminate_miss
sols_x[solidx] = x
sols_v[solidx] = v
iheapreplacemax(Q, Qsize, C+Cnew, solidx)
cost_bound, solidx = iheapgetmax(Q, Qsize)
cost_bound = cost_bound - C
if __name__ == '__main__':
from time import time
np.random.seed(42)
m = 20
n = 20
nout = 100
nsols = 100
n_repeats = 30
totaltime = 0.
row_sets = np.ones((2,m), dtype=bool)
row_sets[1,0] = False
row_set_weights = np.array([2.,0.])
col_sets = np.ones((1,n), dtype=bool)
col_set_weights = np.array([0.])
workvars = allocateWorkVarsforDA(m, n, nsols)
sols_rows2use, sols_cols2use, sols_elim, sols_x, sols_v, backward_index = workvars
out_matches = np.zeros((nout, 2), dtype=int)
out_assocs = np.zeros((nsols, nout), dtype=bool)
out_costs = np.zeros(nsols)
for repeat in range(n_repeats):
# uniform
c = np.random.rand(m,n)*5 - 3
# consider missing rows and columns
costs = []
if repeat == 0:
# run once to compile numba
da(c, row_sets, row_set_weights, col_sets, col_set_weights,
out_matches, out_assocs, out_costs,
sols_rows2use, sols_cols2use, sols_elim, sols_x, sols_v, backward_index)
totaltime -= time()
# actual operation here!
da(c, row_sets, row_set_weights, col_sets, col_set_weights,
out_matches, out_assocs, out_costs,
sols_rows2use, sols_cols2use, sols_elim, sols_x, sols_v, backward_index)
totaltime += time()
print(totaltime * (1000. / n_repeats)) # average runtime in milliseconds
| 9,810
| 37.324219
| 90
|
py
|