+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ * Redistributions in binary form must reproduce the above copyright notice,
+ this list of conditions and the following disclaimer in the documentation
+ and/or other materials provided with the distribution.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/pyasn1-0.4.8.dist-info/RECORD b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/pyasn1-0.4.8.dist-info/RECORD
new file mode 100644
index 0000000000000000000000000000000000000000..8b63431c4557b8888bff47448a9dbdffd67be46c
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/pyasn1-0.4.8.dist-info/RECORD
@@ -0,0 +1,80 @@
+pyasn1-0.4.8.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
+pyasn1-0.4.8.dist-info/LICENSE.rst,sha256=IsXMaSKrXWn7oy2MXuTN0UmBUIy1OvwOvYVZOEf9laU,1334
+pyasn1-0.4.8.dist-info/METADATA,sha256=Mx_DbLo2GA_t9nOIsqu-18vjHdTjMR1LtUzdcfLzE0Y,1521
+pyasn1-0.4.8.dist-info/RECORD,,
+pyasn1-0.4.8.dist-info/REQUESTED,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+pyasn1-0.4.8.dist-info/WHEEL,sha256=8zNYZbwQSXoB9IfXOjPfeNwvAsALAjffgk27FqvCWbo,110
+pyasn1-0.4.8.dist-info/top_level.txt,sha256=dnNEQt3nIDIO5mSCCOB5obQHrjDOUsRycdBujc2vrWE,7
+pyasn1-0.4.8.dist-info/zip-safe,sha256=AbpHGcgLb-kRsJGnwFEktk7uzpZOCcBY74-YBdrKVGs,1
+pyasn1/__init__.py,sha256=1Rn8wrJioqfDz7ORFwMehoT15xHOVeiiQD5pZW37D8s,175
+pyasn1/__pycache__/__init__.cpython-38.pyc,,
+pyasn1/__pycache__/debug.cpython-38.pyc,,
+pyasn1/__pycache__/error.cpython-38.pyc,,
+pyasn1/codec/__init__.py,sha256=EEDlJYS172EH39GUidN_8FbkNcWY9OVV8e30AV58pn0,59
+pyasn1/codec/__pycache__/__init__.cpython-38.pyc,,
+pyasn1/codec/ber/__init__.py,sha256=EEDlJYS172EH39GUidN_8FbkNcWY9OVV8e30AV58pn0,59
+pyasn1/codec/ber/__pycache__/__init__.cpython-38.pyc,,
+pyasn1/codec/ber/__pycache__/decoder.cpython-38.pyc,,
+pyasn1/codec/ber/__pycache__/encoder.cpython-38.pyc,,
+pyasn1/codec/ber/__pycache__/eoo.cpython-38.pyc,,
+pyasn1/codec/ber/decoder.py,sha256=7-WINr38zVEa3KUkmshh8FjK6QnFaA8Y7j7XaTgYfRk,59708
+pyasn1/codec/ber/encoder.py,sha256=xHl01PCIAiHZXev4x01sjbCgAUKcsTT6SzaLI3nt-9E,27741
+pyasn1/codec/ber/eoo.py,sha256=eZ6lEyHdayMcMmNqtceDIyzf7u5lOeZoRK-WEUxVThI,626
+pyasn1/codec/cer/__init__.py,sha256=EEDlJYS172EH39GUidN_8FbkNcWY9OVV8e30AV58pn0,59
+pyasn1/codec/cer/__pycache__/__init__.cpython-38.pyc,,
+pyasn1/codec/cer/__pycache__/decoder.cpython-38.pyc,,
+pyasn1/codec/cer/__pycache__/encoder.cpython-38.pyc,,
+pyasn1/codec/cer/decoder.py,sha256=ZYBqtDGNiYmKDpKDvioMDf-TYVWoJeZY3I8TEAKuk5s,3745
+pyasn1/codec/cer/encoder.py,sha256=PGtzcIelIHj5d5Yqc5FATMEIWCJybQYFlCaK1gy-NIA,9409
+pyasn1/codec/der/__init__.py,sha256=EEDlJYS172EH39GUidN_8FbkNcWY9OVV8e30AV58pn0,59
+pyasn1/codec/der/__pycache__/__init__.cpython-38.pyc,,
+pyasn1/codec/der/__pycache__/decoder.cpython-38.pyc,,
+pyasn1/codec/der/__pycache__/encoder.cpython-38.pyc,,
+pyasn1/codec/der/decoder.py,sha256=kinXcogMDPGlR3f7hmAxRv2YbQyeP-UhuKM0r8gkbeA,2722
+pyasn1/codec/der/encoder.py,sha256=ZfRRxSCefQyLg0DLNb4zllaYf5_AWGIv3SPzB83Ln2I,3073
+pyasn1/codec/native/__init__.py,sha256=EEDlJYS172EH39GUidN_8FbkNcWY9OVV8e30AV58pn0,59
+pyasn1/codec/native/__pycache__/__init__.cpython-38.pyc,,
+pyasn1/codec/native/__pycache__/decoder.cpython-38.pyc,,
+pyasn1/codec/native/__pycache__/encoder.cpython-38.pyc,,
+pyasn1/codec/native/decoder.py,sha256=4Q29tdKyytK3Oz-m94MSWxxPi_GhcBKvUfvPNKQcL0Y,7671
+pyasn1/codec/native/encoder.py,sha256=0eMLWR49dwMA1X4si0XswR1kX1aDAWyCeUNTpEbChag,8002
+pyasn1/compat/__init__.py,sha256=EEDlJYS172EH39GUidN_8FbkNcWY9OVV8e30AV58pn0,59
+pyasn1/compat/__pycache__/__init__.cpython-38.pyc,,
+pyasn1/compat/__pycache__/binary.cpython-38.pyc,,
+pyasn1/compat/__pycache__/calling.cpython-38.pyc,,
+pyasn1/compat/__pycache__/dateandtime.cpython-38.pyc,,
+pyasn1/compat/__pycache__/integer.cpython-38.pyc,,
+pyasn1/compat/__pycache__/octets.cpython-38.pyc,,
+pyasn1/compat/__pycache__/string.cpython-38.pyc,,
+pyasn1/compat/binary.py,sha256=mgWqHmr_SMEdB2WVVr6jyYMnodSbPP6IByE5qKccWLM,698
+pyasn1/compat/calling.py,sha256=uTk3nJtGrElqJi8t34SoO8-eWFBG0gwNhXrlo1YmFEE,379
+pyasn1/compat/dateandtime.py,sha256=zHvXXBp4t3XJ6teg_tz6qgNDevzd93qnrLoEbNxZQ_E,482
+pyasn1/compat/integer.py,sha256=k6tqyxXMC0zJoU-Rz4oUPPoUpTmWXE6Prnzu0tkmmks,2988
+pyasn1/compat/octets.py,sha256=ICe-DVLBIOHmNSz-sp3ioMh--smodJ4VW3Ju0ogJMWA,1359
+pyasn1/compat/string.py,sha256=exqXJmPM6vYj4MjzsjciQdpUcJprRdgrLma8I4UcYHA,505
+pyasn1/debug.py,sha256=HWGbLlEPLoCNyHqBd1Vd_KK91TppEn3CA4YgUxktT2k,3726
+pyasn1/error.py,sha256=DIn2FWY3ACYNbk_42b3ny2bevkehpK2lOqfAsfdkvBE,2257
+pyasn1/type/__init__.py,sha256=EEDlJYS172EH39GUidN_8FbkNcWY9OVV8e30AV58pn0,59
+pyasn1/type/__pycache__/__init__.cpython-38.pyc,,
+pyasn1/type/__pycache__/base.cpython-38.pyc,,
+pyasn1/type/__pycache__/char.cpython-38.pyc,,
+pyasn1/type/__pycache__/constraint.cpython-38.pyc,,
+pyasn1/type/__pycache__/error.cpython-38.pyc,,
+pyasn1/type/__pycache__/namedtype.cpython-38.pyc,,
+pyasn1/type/__pycache__/namedval.cpython-38.pyc,,
+pyasn1/type/__pycache__/opentype.cpython-38.pyc,,
+pyasn1/type/__pycache__/tag.cpython-38.pyc,,
+pyasn1/type/__pycache__/tagmap.cpython-38.pyc,,
+pyasn1/type/__pycache__/univ.cpython-38.pyc,,
+pyasn1/type/__pycache__/useful.cpython-38.pyc,,
+pyasn1/type/base.py,sha256=TX7qdOX3EPiY7-11MY4fwK2Hy6nQsrdQ_M41aUcApno,22386
+pyasn1/type/char.py,sha256=5HH8r1IqZMDCsfDlQHVCRphLlFuZ93bE2NW78CgeUTI,11397
+pyasn1/type/constraint.py,sha256=0Qsth_0JctnDMvOSe5R-vd9IosgjqkKZT_X9lBRXtuI,22132
+pyasn1/type/error.py,sha256=4_BHdjX-AL5WMTpU-tX1Nfo_P88c2z1sDvqPU-S9Bns,246
+pyasn1/type/namedtype.py,sha256=VIL3H3oPgA0zNrDSeAhKmi4CZGTb69uDBVNJzzRk3wM,16368
+pyasn1/type/namedval.py,sha256=dXYWiVTihvBy4RiebGY3AlIXsJvW78mJ1L7JSw-H7Qw,4886
+pyasn1/type/opentype.py,sha256=pUpnPqv8o4AFeIsmGHDTFfuxXAq7FvG3hrTEnoAgBO8,2848
+pyasn1/type/tag.py,sha256=nAK54C0_F_DL4_IaWRthIfIYBOTuXZoVVcbcbqgZiVA,9486
+pyasn1/type/tagmap.py,sha256=2bwm0hqxG2gvXYheOI_iasfl2Z_B93qU7y39EHteUvs,2998
+pyasn1/type/univ.py,sha256=FXc_VOStZfC-xIVTznpFO0qTq1aO4XyJFU0ayQWgPMY,108921
+pyasn1/type/useful.py,sha256=r_K6UhgcrJ0ej658X-s9522I9T7oYVdmEKcbXTkZMds,5368
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/pyasn1-0.4.8.dist-info/REQUESTED b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/pyasn1-0.4.8.dist-info/REQUESTED
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/pyasn1-0.4.8.dist-info/WHEEL b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/pyasn1-0.4.8.dist-info/WHEEL
new file mode 100644
index 0000000000000000000000000000000000000000..8b701e93c23159bc1f4145f779049ce0a6a6cf77
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/pyasn1-0.4.8.dist-info/WHEEL
@@ -0,0 +1,6 @@
+Wheel-Version: 1.0
+Generator: bdist_wheel (0.33.6)
+Root-Is-Purelib: true
+Tag: py2-none-any
+Tag: py3-none-any
+
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/pyasn1-0.4.8.dist-info/top_level.txt b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/pyasn1-0.4.8.dist-info/top_level.txt
new file mode 100644
index 0000000000000000000000000000000000000000..38fe4145754bf81c4dea2535da2bd438975e7da5
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/pyasn1-0.4.8.dist-info/top_level.txt
@@ -0,0 +1 @@
+pyasn1
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/pyasn1-0.4.8.dist-info/zip-safe b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/pyasn1-0.4.8.dist-info/zip-safe
new file mode 100644
index 0000000000000000000000000000000000000000..8b137891791fe96927ad78e64b0aad7bded08bdc
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/pyasn1-0.4.8.dist-info/zip-safe
@@ -0,0 +1 @@
+
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/pytorch_lightning/__about__.py b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/pytorch_lightning/__about__.py
new file mode 100644
index 0000000000000000000000000000000000000000..fdaa499392c7eb51b7dd914ea08657d2deded05f
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/pytorch_lightning/__about__.py
@@ -0,0 +1,38 @@
+import time
+
+_this_year = time.strftime("%Y")
+__version__ = "1.6.4"
+__author__ = "William Falcon et al."
+__author_email__ = "waf2107@columbia.edu"
+__license__ = "Apache-2.0"
+__copyright__ = f"Copyright (c) 2018-{_this_year}, {__author__}."
+__homepage__ = "https://github.com/PyTorchLightning/pytorch-lightning"
+__docs_url__ = "https://pytorch-lightning.readthedocs.io/en/stable/"
+# this has to be simple string, see: https://github.com/pypa/twine/issues/522
+__docs__ = (
+ "PyTorch Lightning is the lightweight PyTorch wrapper for ML researchers."
+ " Scale your models. Write less boilerplate."
+)
+__long_docs__ = """
+Lightning is a way to organize your PyTorch code to decouple the science code from the engineering.
+ It's more of a style-guide than a framework.
+
+In Lightning, you organize your code into 3 distinct categories:
+
+1. Research code (goes in the LightningModule).
+2. Engineering code (you delete, and is handled by the Trainer).
+3. Non-essential research code (logging, etc. this goes in Callbacks).
+
+Although your research/production project might start simple, once you add things like GPU AND TPU training,
+ 16-bit precision, etc, you end up spending more time engineering than researching.
+ Lightning automates AND rigorously tests those parts for you.
+
+Overall, Lightning guarantees rigorously tested, correct, modern best practices for the automated parts.
+
+Documentation
+-------------
+- https://pytorch-lightning.readthedocs.io/en/latest
+- https://pytorch-lightning.readthedocs.io/en/stable
+"""
+
+__all__ = ["__author__", "__author_email__", "__copyright__", "__docs__", "__homepage__", "__license__", "__version__"]
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/pytorch_lightning/__init__.py b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/pytorch_lightning/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..adc63486d2f8b6bff7a6965d17cd75f31aa7c45a
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/pytorch_lightning/__init__.py
@@ -0,0 +1,38 @@
+"""Root package info."""
+
+import logging
+from typing import Any
+
+from pytorch_lightning.__about__ import * # noqa: F401, F403
+
+_DETAIL = 15 # between logging.INFO and logging.DEBUG, used for logging in production use cases
+
+
+def _detail(self: Any, message: str, *args: Any, **kwargs: Any) -> None:
+ if self.isEnabledFor(_DETAIL):
+ # logger takes its '*args' as 'args'
+ self._log(_DETAIL, message, args, **kwargs)
+
+
+logging.addLevelName(_DETAIL, "DETAIL")
+logging.detail = _detail
+logging.Logger.detail = _detail
+
+_root_logger = logging.getLogger()
+_logger = logging.getLogger(__name__)
+_logger.setLevel(logging.INFO)
+
+# if root logger has handlers, propagate messages up and let root logger process them
+if not _root_logger.hasHandlers():
+ _logger.addHandler(logging.StreamHandler())
+ _logger.propagate = False
+
+from pytorch_lightning.callbacks import Callback # noqa: E402
+from pytorch_lightning.core import LightningDataModule, LightningModule # noqa: E402
+from pytorch_lightning.trainer import Trainer # noqa: E402
+from pytorch_lightning.utilities.seed import seed_everything # noqa: E402
+
+__all__ = ["Trainer", "LightningDataModule", "LightningModule", "Callback", "seed_everything"]
+
+# for compatibility with namespace packages
+__import__("pkg_resources").declare_namespace(__name__)
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/pytorch_lightning/py.typed b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/pytorch_lightning/py.typed
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/pytorch_lightning/setup_tools.py b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/pytorch_lightning/setup_tools.py
new file mode 100644
index 0000000000000000000000000000000000000000..2df3c7946b4d9cb4e417da289274a9e06113173d
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/pytorch_lightning/setup_tools.py
@@ -0,0 +1,88 @@
+#!/usr/bin/env python
+# Copyright The PyTorch Lightning team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+import os
+import re
+from typing import List
+
+_PROJECT_ROOT = os.path.dirname(os.path.dirname(__file__))
+
+
+def _load_requirements(
+ path_dir: str, file_name: str = "base.txt", comment_char: str = "#", unfreeze: bool = True
+) -> List[str]:
+ """Load requirements from a file.
+
+ >>> _load_requirements(os.path.join(_PROJECT_ROOT, "requirements")) # doctest: +ELLIPSIS +NORMALIZE_WHITESPACE
+ ['numpy...', 'torch...', ...]
+ """
+ with open(os.path.join(path_dir, file_name)) as file:
+ lines = [ln.strip() for ln in file.readlines()]
+ reqs = []
+ for ln in lines:
+ # filer all comments
+ comment = ""
+ if comment_char in ln:
+ comment = ln[ln.index(comment_char) :]
+ ln = ln[: ln.index(comment_char)]
+ req = ln.strip()
+ # skip directly installed dependencies
+ if not req or req.startswith("http") or "@http" in req:
+ continue
+ # remove version restrictions unless they are strict
+ if unfreeze and "<" in req and "strict" not in comment:
+ req = re.sub(r",? *<=? *[\d\.\*]+", "", req).strip()
+ reqs.append(req)
+ return reqs
+
+
+def _load_readme_description(path_dir: str, homepage: str, version: str) -> str:
+ """Load readme as decribtion.
+
+ >>> _load_readme_description(_PROJECT_ROOT, "", "") # doctest: +ELLIPSIS +NORMALIZE_WHITESPACE
+ '...'
+ """
+ path_readme = os.path.join(path_dir, "README.md")
+ text = open(path_readme, encoding="utf-8").read()
+
+ # drop images from readme
+ text = text.replace("", "")
+
+ # https://github.com/PyTorchLightning/pytorch-lightning/raw/master/docs/source/_static/images/lightning_module/pt_to_pl.png
+ github_source_url = os.path.join(homepage, "raw", version)
+ # replace relative repository path to absolute link to the release
+ # do not replace all "docs" as in the readme we reger some other sources with particular path to docs
+ text = text.replace("docs/source/_static/", f"{os.path.join(github_source_url, 'docs/source/_static/')}")
+
+ # readthedocs badge
+ text = text.replace("badge/?version=stable", f"badge/?version={version}")
+ text = text.replace("pytorch-lightning.readthedocs.io/en/stable/", f"pytorch-lightning.readthedocs.io/en/{version}")
+ # codecov badge
+ text = text.replace("/branch/master/graph/badge.svg", f"/release/{version}/graph/badge.svg")
+ # replace github badges for release ones
+ text = text.replace("badge.svg?branch=master&event=push", f"badge.svg?tag={version}")
+ # Azure...
+ text = text.replace("?branchName=master", f"?branchName=refs%2Ftags%2F{version}")
+ text = re.sub(r"\?definitionId=\d+&branchName=master", f"?definitionId=2&branchName=refs%2Ftags%2F{version}", text)
+
+ skip_begin = r""
+ skip_end = r""
+ # todo: wrap content as commented description
+ text = re.sub(rf"{skip_begin}.+?{skip_end}", "", text, flags=re.IGNORECASE + re.DOTALL)
+
+ # # https://github.com/Borda/pytorch-lightning/releases/download/1.1.0a6/codecov_badge.png
+ # github_release_url = os.path.join(homepage, "releases", "download", version)
+ # # download badge and replace url with local file
+ # text = _parse_for_badge(text, github_release_url)
+ return text
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/pytz/reference.py b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/pytz/reference.py
new file mode 100644
index 0000000000000000000000000000000000000000..f765ca0af0b24e66dc3b7d51b9bf97e71b2b67aa
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/pytz/reference.py
@@ -0,0 +1,140 @@
+'''
+Reference tzinfo implementations from the Python docs.
+Used for testing against as they are only correct for the years
+1987 to 2006. Do not use these for real code.
+'''
+
+from datetime import tzinfo, timedelta, datetime
+from pytz import HOUR, ZERO, UTC
+
+__all__ = [
+ 'FixedOffset',
+ 'LocalTimezone',
+ 'USTimeZone',
+ 'Eastern',
+ 'Central',
+ 'Mountain',
+ 'Pacific',
+ 'UTC'
+]
+
+
+# A class building tzinfo objects for fixed-offset time zones.
+# Note that FixedOffset(0, "UTC") is a different way to build a
+# UTC tzinfo object.
+class FixedOffset(tzinfo):
+ """Fixed offset in minutes east from UTC."""
+
+ def __init__(self, offset, name):
+ self.__offset = timedelta(minutes=offset)
+ self.__name = name
+
+ def utcoffset(self, dt):
+ return self.__offset
+
+ def tzname(self, dt):
+ return self.__name
+
+ def dst(self, dt):
+ return ZERO
+
+
+import time as _time
+
+STDOFFSET = timedelta(seconds=-_time.timezone)
+if _time.daylight:
+ DSTOFFSET = timedelta(seconds=-_time.altzone)
+else:
+ DSTOFFSET = STDOFFSET
+
+DSTDIFF = DSTOFFSET - STDOFFSET
+
+
+# A class capturing the platform's idea of local time.
+class LocalTimezone(tzinfo):
+
+ def utcoffset(self, dt):
+ if self._isdst(dt):
+ return DSTOFFSET
+ else:
+ return STDOFFSET
+
+ def dst(self, dt):
+ if self._isdst(dt):
+ return DSTDIFF
+ else:
+ return ZERO
+
+ def tzname(self, dt):
+ return _time.tzname[self._isdst(dt)]
+
+ def _isdst(self, dt):
+ tt = (dt.year, dt.month, dt.day,
+ dt.hour, dt.minute, dt.second,
+ dt.weekday(), 0, -1)
+ stamp = _time.mktime(tt)
+ tt = _time.localtime(stamp)
+ return tt.tm_isdst > 0
+
+Local = LocalTimezone()
+
+
+def first_sunday_on_or_after(dt):
+ days_to_go = 6 - dt.weekday()
+ if days_to_go:
+ dt += timedelta(days_to_go)
+ return dt
+
+
+# In the US, DST starts at 2am (standard time) on the first Sunday in April.
+DSTSTART = datetime(1, 4, 1, 2)
+# and ends at 2am (DST time; 1am standard time) on the last Sunday of Oct.
+# which is the first Sunday on or after Oct 25.
+DSTEND = datetime(1, 10, 25, 1)
+
+
+# A complete implementation of current DST rules for major US time zones.
+class USTimeZone(tzinfo):
+
+ def __init__(self, hours, reprname, stdname, dstname):
+ self.stdoffset = timedelta(hours=hours)
+ self.reprname = reprname
+ self.stdname = stdname
+ self.dstname = dstname
+
+ def __repr__(self):
+ return self.reprname
+
+ def tzname(self, dt):
+ if self.dst(dt):
+ return self.dstname
+ else:
+ return self.stdname
+
+ def utcoffset(self, dt):
+ return self.stdoffset + self.dst(dt)
+
+ def dst(self, dt):
+ if dt is None or dt.tzinfo is None:
+ # An exception may be sensible here, in one or both cases.
+ # It depends on how you want to treat them. The default
+ # fromutc() implementation (called by the default astimezone()
+ # implementation) passes a datetime with dt.tzinfo is self.
+ return ZERO
+ assert dt.tzinfo is self
+
+ # Find first Sunday in April & the last in October.
+ start = first_sunday_on_or_after(DSTSTART.replace(year=dt.year))
+ end = first_sunday_on_or_after(DSTEND.replace(year=dt.year))
+
+ # Can't compare naive to aware objects, so strip the timezone from
+ # dt first.
+ if start <= dt.replace(tzinfo=None) < end:
+ return HOUR
+ else:
+ return ZERO
+
+Eastern = USTimeZone(-5, "Eastern", "EST", "EDT")
+Central = USTimeZone(-6, "Central", "CST", "CDT")
+Mountain = USTimeZone(-7, "Mountain", "MST", "MDT")
+Pacific = USTimeZone(-8, "Pacific", "PST", "PDT")
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/threadpoolctl-3.1.0.dist-info/INSTALLER b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/threadpoolctl-3.1.0.dist-info/INSTALLER
new file mode 100644
index 0000000000000000000000000000000000000000..a1b589e38a32041e49332e5e81c2d363dc418d68
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/threadpoolctl-3.1.0.dist-info/INSTALLER
@@ -0,0 +1 @@
+pip
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/threadpoolctl-3.1.0.dist-info/LICENSE b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/threadpoolctl-3.1.0.dist-info/LICENSE
new file mode 100644
index 0000000000000000000000000000000000000000..f2927f5f8147f137783bb5072794999e04655cfd
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/threadpoolctl-3.1.0.dist-info/LICENSE
@@ -0,0 +1,24 @@
+Copyright (c) 2019, threadpoolctl contributors
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+ * Neither the name of copyright holder nor the names of its contributors
+ may be used to endorse or promote products derived from this software
+ without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
\ No newline at end of file
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/threadpoolctl-3.1.0.dist-info/METADATA b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/threadpoolctl-3.1.0.dist-info/METADATA
new file mode 100644
index 0000000000000000000000000000000000000000..6015744980a2df34560f836d20cae373b9ea6b94
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/threadpoolctl-3.1.0.dist-info/METADATA
@@ -0,0 +1,281 @@
+Metadata-Version: 2.1
+Name: threadpoolctl
+Version: 3.1.0
+Summary: threadpoolctl
+Home-page: https://github.com/joblib/threadpoolctl
+License: BSD-3-Clause
+Author: Thomas Moreau
+Author-email: thomas.moreau.2010@gmail.com
+Requires-Python: >=3.6
+Description-Content-Type: text/markdown
+Classifier: Intended Audience :: Developers
+Classifier: License :: OSI Approved :: BSD License
+Classifier: Programming Language :: Python :: 3
+Classifier: Programming Language :: Python :: 3.6
+Classifier: Programming Language :: Python :: 3.7
+Classifier: Programming Language :: Python :: 3.8
+Classifier: Programming Language :: Python :: 3.9
+Classifier: Topic :: Software Development :: Libraries :: Python Modules
+
+# Thread-pool Controls [](https://dev.azure.com/joblib/threadpoolctl/_build/latest?definitionId=1&branchName=master) [](https://codecov.io/gh/joblib/threadpoolctl)
+
+Python helpers to limit the number of threads used in the
+threadpool-backed of common native libraries used for scientific
+computing and data science (e.g. BLAS and OpenMP).
+
+Fine control of the underlying thread-pool size can be useful in
+workloads that involve nested parallelism so as to mitigate
+oversubscription issues.
+
+## Installation
+
+- For users, install the last published version from PyPI:
+
+ ```bash
+ pip install threadpoolctl
+ ```
+
+- For contributors, install from the source repository in developer
+ mode:
+
+ ```bash
+ pip install -r dev-requirements.txt
+ flit install --symlink
+ ```
+
+ then you run the tests with pytest:
+
+ ```bash
+ pytest
+ ```
+
+## Usage
+
+### Command Line Interface
+
+Get a JSON description of thread-pools initialized when importing python
+packages such as numpy or scipy for instance:
+
+```
+python -m threadpoolctl -i numpy scipy.linalg
+[
+ {
+ "filepath": "/home/ogrisel/miniconda3/envs/tmp/lib/libmkl_rt.so",
+ "prefix": "libmkl_rt",
+ "user_api": "blas",
+ "internal_api": "mkl",
+ "version": "2019.0.4",
+ "num_threads": 2,
+ "threading_layer": "intel"
+ },
+ {
+ "filepath": "/home/ogrisel/miniconda3/envs/tmp/lib/libiomp5.so",
+ "prefix": "libiomp",
+ "user_api": "openmp",
+ "internal_api": "openmp",
+ "version": null,
+ "num_threads": 4
+ }
+]
+```
+
+The JSON information is written on STDOUT. If some of the packages are missing,
+a warning message is displayed on STDERR.
+
+### Python Runtime Programmatic Introspection
+
+Introspect the current state of the threadpool-enabled runtime libraries
+that are loaded when importing Python packages:
+
+```python
+>>> from threadpoolctl import threadpool_info
+>>> from pprint import pprint
+>>> pprint(threadpool_info())
+[]
+
+>>> import numpy
+>>> pprint(threadpool_info())
+[{'filepath': '/home/ogrisel/miniconda3/envs/tmp/lib/libmkl_rt.so',
+ 'internal_api': 'mkl',
+ 'num_threads': 2,
+ 'prefix': 'libmkl_rt',
+ 'threading_layer': 'intel',
+ 'user_api': 'blas',
+ 'version': '2019.0.4'},
+ {'filepath': '/home/ogrisel/miniconda3/envs/tmp/lib/libiomp5.so',
+ 'internal_api': 'openmp',
+ 'num_threads': 4,
+ 'prefix': 'libiomp',
+ 'user_api': 'openmp',
+ 'version': None}]
+
+>>> import xgboost
+>>> pprint(threadpool_info())
+[{'filepath': '/home/ogrisel/miniconda3/envs/tmp/lib/libmkl_rt.so',
+ 'internal_api': 'mkl',
+ 'num_threads': 2,
+ 'prefix': 'libmkl_rt',
+ 'threading_layer': 'intel',
+ 'user_api': 'blas',
+ 'version': '2019.0.4'},
+ {'filepath': '/home/ogrisel/miniconda3/envs/tmp/lib/libiomp5.so',
+ 'internal_api': 'openmp',
+ 'num_threads': 4,
+ 'prefix': 'libiomp',
+ 'user_api': 'openmp',
+ 'version': None},
+ {'filepath': '/home/ogrisel/miniconda3/envs/tmp/lib/libgomp.so.1.0.0',
+ 'internal_api': 'openmp',
+ 'num_threads': 4,
+ 'prefix': 'libgomp',
+ 'user_api': 'openmp',
+ 'version': None}]
+```
+
+In the above example, `numpy` was installed from the default anaconda channel and comes
+with MKL and its Intel OpenMP (`libiomp5`) implementation while `xgboost` was installed
+from pypi.org and links against GNU OpenMP (`libgomp`) so both OpenMP runtimes are
+loaded in the same Python program.
+
+The state of these libraries is also accessible through the object oriented API:
+
+```python
+>>> from threadpoolctl import ThreadpoolController, threadpool_info
+>>> from pprint import pprint
+>>> import numpy
+>>> controller = ThreadpoolController()
+>>> pprint(controller.info())
+[{'architecture': 'Haswell',
+ 'filepath': '/home/jeremie/miniconda/envs/dev/lib/libopenblasp-r0.3.17.so',
+ 'internal_api': 'openblas',
+ 'num_threads': 4,
+ 'prefix': 'libopenblas',
+ 'threading_layer': 'pthreads',
+ 'user_api': 'blas',
+ 'version': '0.3.17'}]
+
+>>> controller.info() == threadpool_info()
+True
+```
+
+### Setting the Maximum Size of Thread-Pools
+
+Control the number of threads used by the underlying runtime libraries
+in specific sections of your Python program:
+
+```python
+>>> from threadpoolctl import threadpool_limits
+>>> import numpy as np
+
+>>> with threadpool_limits(limits=1, user_api='blas'):
+... # In this block, calls to blas implementation (like openblas or MKL)
+... # will be limited to use only one thread. They can thus be used jointly
+... # with thread-parallelism.
+... a = np.random.randn(1000, 1000)
+... a_squared = a @ a
+```
+
+The threadpools can also be controlled via the object oriented API, which is especially
+useful to avoid searching through all the loaded shared libraries each time. It will
+however not act on libraries loaded after the instantiation of the
+`ThreadpoolController`:
+
+```python
+>>> from threadpoolctl import ThreadpoolController
+>>> import numpy as np
+>>> controller = ThreadpoolController()
+
+>>> with controller.limit(limits=1, user_api='blas'):
+... a = np.random.randn(1000, 1000)
+... a_squared = a @ a
+```
+
+### Restricting the limits to the scope of a function
+
+`threadpool_limits` and `ThreadpoolController` can also be used as decorators to set
+the maximum number of threads used by the supported libraries at a function level. The
+decorators are accessible through their `wrap` method:
+
+```python
+>>> from threadpoolctl import ThreadpoolController, threadpool_limits
+>>> import numpy as np
+>>> controller = ThreadpoolController()
+
+>>> @controller.wrap(limits=1, user_api='blas')
+... # or @threadpool_limits.wrap(limits=1, user_api='blas')
+... def my_func():
+... # Inside this function, calls to blas implementation (like openblas or MKL)
+... # will be limited to use only one thread.
+... a = np.random.randn(1000, 1000)
+... a_squared = a @ a
+...
+```
+
+### Sequential BLAS within OpenMP parallel region
+
+When one wants to have sequential BLAS calls within an OpenMP parallel region, it's
+safer to set `limits="sequential_blas_under_openmp"` since setting `limits=1` and `user_api="blas"` might not lead to the expected behavior in some configurations
+(e.g. OpenBLAS with the OpenMP threading layer
+https://github.com/xianyi/OpenBLAS/issues/2985).
+
+### Known Limitations
+
+- `threadpool_limits` can fail to limit the number of inner threads when nesting
+ parallel loops managed by distinct OpenMP runtime implementations (for instance
+ libgomp from GCC and libomp from clang/llvm or libiomp from ICC).
+
+ See the `test_openmp_nesting` function in [tests/test_threadpoolctl.py](
+ https://github.com/joblib/threadpoolctl/blob/master/tests/test_threadpoolctl.py)
+ for an example. More information can be found at:
+ https://github.com/jeremiedbb/Nested_OpenMP
+
+ Note however that this problem does not happen when `threadpool_limits` is
+ used to limit the number of threads used internally by BLAS calls that are
+ themselves nested under OpenMP parallel loops. `threadpool_limits` works as
+ expected, even if the inner BLAS implementation relies on a distinct OpenMP
+ implementation.
+
+- Using Intel OpenMP (ICC) and LLVM OpenMP (clang) in the same Python program
+ under Linux is known to cause problems. See the following guide for more details
+ and workarounds:
+ https://github.com/joblib/threadpoolctl/blob/master/multiple_openmp.md
+
+- Setting the maximum number of threads of the OpenMP and BLAS libraries has a global
+ effect and impacts the whole Python process. There is no thread level isolation as
+ these libraries do not offer thread-local APIs to configure the number of threads to
+ use in nested parallel calls.
+
+
+## Maintainers
+
+To make a release:
+
+Bump the version number (`__version__`) in `threadpoolctl.py`.
+
+Build the distribution archives:
+
+```bash
+pip install flit
+flit build
+```
+
+Check the contents of `dist/`.
+
+If everything is fine, make a commit for the release, tag it, push the
+tag to github and then:
+
+```bash
+flit publish
+```
+
+### Credits
+
+The initial dynamic library introspection code was written by @anton-malakhov
+for the smp package available at https://github.com/IntelPython/smp .
+
+threadpoolctl extends this for other operating systems. Contrary to smp,
+threadpoolctl does not attempt to limit the size of Python multiprocessing
+pools (threads or processes) or set operating system-level CPU affinity
+constraints: threadpoolctl only interacts with native libraries via their
+public runtime APIs.
+
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/threadpoolctl-3.1.0.dist-info/RECORD b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/threadpoolctl-3.1.0.dist-info/RECORD
new file mode 100644
index 0000000000000000000000000000000000000000..ea36bc59b94acda514bbc9f9497203c27d8de43f
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/threadpoolctl-3.1.0.dist-info/RECORD
@@ -0,0 +1,8 @@
+__pycache__/threadpoolctl.cpython-38.pyc,,
+threadpoolctl-3.1.0.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
+threadpoolctl-3.1.0.dist-info/LICENSE,sha256=gaxhkHUkiwblNmC2UtEOSF9GdfXQrg-X6iI3DaH34js,1507
+threadpoolctl-3.1.0.dist-info/METADATA,sha256=0uFBgSmHr-7L63_M0E0eilLjoLUDVgwtOpn8gObeA6o,9204
+threadpoolctl-3.1.0.dist-info/RECORD,,
+threadpoolctl-3.1.0.dist-info/REQUESTED,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+threadpoolctl-3.1.0.dist-info/WHEEL,sha256=jPMR_Dzkc4X4icQtmz81lnNY_kAsfog7ry7qoRvYLXw,81
+threadpoolctl.py,sha256=9nah2CGMkXS-jRBBSU8XjLlSvWADeodAktnYb6w4PH0,41112
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/threadpoolctl-3.1.0.dist-info/REQUESTED b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/threadpoolctl-3.1.0.dist-info/REQUESTED
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/threadpoolctl-3.1.0.dist-info/WHEEL b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/threadpoolctl-3.1.0.dist-info/WHEEL
new file mode 100644
index 0000000000000000000000000000000000000000..c727d148239a36b0e8de2b97fdb23fb96da78869
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/threadpoolctl-3.1.0.dist-info/WHEEL
@@ -0,0 +1,4 @@
+Wheel-Version: 1.0
+Generator: flit 3.6.0
+Root-Is-Purelib: true
+Tag: py3-none-any
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/_classes.py b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/_classes.py
new file mode 100644
index 0000000000000000000000000000000000000000..f36463d88198777ad3ffb229273e04e34a24b3c9
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/_classes.py
@@ -0,0 +1,51 @@
+import types
+import torch._C
+
+class _ClassNamespace(types.ModuleType):
+ def __init__(self, name):
+ super(_ClassNamespace, self).__init__('torch.classes' + name)
+ self.name = name
+
+ def __getattr__(self, attr):
+ proxy = torch._C._get_custom_class_python_wrapper(self.name, attr)
+ if proxy is None:
+ raise RuntimeError(f'Class {self.name}.{attr} not registered!')
+ return proxy
+
+class _Classes(types.ModuleType):
+ __file__ = '_classes.py'
+
+ def __init__(self):
+ super(_Classes, self).__init__('torch.classes')
+
+ def __getattr__(self, name):
+ namespace = _ClassNamespace(name)
+ setattr(self, name, namespace)
+ return namespace
+
+ @property
+ def loaded_libraries(self):
+ return torch.ops.loaded_libraries
+
+ def load_library(self, path):
+ """
+ Loads a shared library from the given path into the current process.
+
+ The library being loaded may run global initialization code to register
+ custom classes with the PyTorch JIT runtime. This allows dynamically
+ loading custom classes. For this, you should compile your class
+ and the static registration code into a shared library object, and then
+ call ``torch.classes.load_library('path/to/libcustom.so')`` to load the
+ shared object.
+
+ After the library is loaded, it is added to the
+ ``torch.classes.loaded_libraries`` attribute, a set that may be inspected
+ for the paths of all libraries loaded using this function.
+
+ Args:
+ path (str): A path to a shared library to load.
+ """
+ torch.ops.load_library(path)
+
+# The classes "namespace"
+classes = _Classes()
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/_lobpcg.py b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/_lobpcg.py
new file mode 100644
index 0000000000000000000000000000000000000000..cb7a6723683ab0d2c21d7ba18ec4fbc5c50b8c47
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/_lobpcg.py
@@ -0,0 +1,1117 @@
+"""Locally Optimal Block Preconditioned Conjugate Gradient methods.
+"""
+# Author: Pearu Peterson
+# Created: February 2020
+
+from typing import Dict, Tuple, Optional
+
+import torch
+from torch import Tensor
+from . import _linalg_utils as _utils
+from .overrides import has_torch_function, handle_torch_function
+
+
+__all__ = ['lobpcg']
+
+def _symeig_backward_complete_eigenspace(D_grad, U_grad, A, D, U):
+ # compute F, such that F_ij = (d_j - d_i)^{-1} for i != j, F_ii = 0
+ F = D.unsqueeze(-2) - D.unsqueeze(-1)
+ F.diagonal(dim1=-2, dim2=-1).fill_(float('inf'))
+ F.pow_(-1)
+
+ # A.grad = U (D.grad + (U^T U.grad * F)) U^T
+ Ut = U.mT.contiguous()
+ res = torch.matmul(
+ U,
+ torch.matmul(
+ torch.diag_embed(D_grad) + torch.matmul(Ut, U_grad) * F,
+ Ut
+ )
+ )
+
+ return res
+
+
+def _polynomial_coefficients_given_roots(roots):
+ """
+ Given the `roots` of a polynomial, find the polynomial's coefficients.
+
+ If roots = (r_1, ..., r_n), then the method returns
+ coefficients (a_0, a_1, ..., a_n (== 1)) so that
+ p(x) = (x - r_1) * ... * (x - r_n)
+ = x^n + a_{n-1} * x^{n-1} + ... a_1 * x_1 + a_0
+
+ Note: for better performance requires writing a low-level kernel
+ """
+ poly_order = roots.shape[-1]
+ poly_coeffs_shape = list(roots.shape)
+ # we assume p(x) = x^n + a_{n-1} * x^{n-1} + ... + a_1 * x + a_0,
+ # so poly_coeffs = {a_0, ..., a_n, a_{n+1}(== 1)},
+ # but we insert one extra coefficient to enable better vectorization below
+ poly_coeffs_shape[-1] += 2
+ poly_coeffs = roots.new_zeros(poly_coeffs_shape)
+ poly_coeffs[..., 0] = 1
+ poly_coeffs[..., -1] = 1
+
+ # perform the Horner's rule
+ for i in range(1, poly_order + 1):
+ # note that it is computationally hard to compute backward for this method,
+ # because then given the coefficients it would require finding the roots and/or
+ # calculating the sensitivity based on the Vieta's theorem.
+ # So the code below tries to circumvent the explicit root finding by series
+ # of operations on memory copies imitating the Horner's method.
+ # The memory copies are required to construct nodes in the computational graph
+ # by exploiting the explicit (not in-place, separate node for each step)
+ # recursion of the Horner's method.
+ # Needs more memory, O(... * k^2), but with only O(... * k^2) complexity.
+ poly_coeffs_new = poly_coeffs.clone() if roots.requires_grad else poly_coeffs
+ out = poly_coeffs_new.narrow(-1, poly_order - i, i + 1)
+ out -= roots.narrow(-1, i - 1, 1) * poly_coeffs.narrow(-1, poly_order - i + 1, i + 1)
+ poly_coeffs = poly_coeffs_new
+
+ return poly_coeffs.narrow(-1, 1, poly_order + 1)
+
+
+def _polynomial_value(poly, x, zero_power, transition):
+ """
+ A generic method for computing poly(x) using the Horner's rule.
+
+ Args:
+ poly (Tensor): the (possibly batched) 1D Tensor representing
+ polynomial coefficients such that
+ poly[..., i] = (a_{i_0}, ..., a{i_n} (==1)), and
+ poly(x) = poly[..., 0] * zero_power + ... + poly[..., n] * x^n
+
+ x (Tensor): the value (possible batched) to evaluate the polynomial `poly` at.
+
+ zero_power (Tensor): the representation of `x^0`. It is application-specific.
+
+ transition (Callable): the function that accepts some intermediate result `int_val`,
+ the `x` and a specific polynomial coefficient
+ `poly[..., k]` for some iteration `k`.
+ It basically performs one iteration of the Horner's rule
+ defined as `x * int_val + poly[..., k] * zero_power`.
+ Note that `zero_power` is not a parameter,
+ because the step `+ poly[..., k] * zero_power` depends on `x`,
+ whether it is a vector, a matrix, or something else, so this
+ functionality is delegated to the user.
+ """
+
+ res = zero_power.clone()
+ for k in range(poly.size(-1) - 2, -1, -1):
+ res = transition(res, x, poly[..., k])
+ return res
+
+def _matrix_polynomial_value(poly, x, zero_power=None):
+ """
+ Evaluates `poly(x)` for the (batched) matrix input `x`.
+ Check out `_polynomial_value` function for more details.
+ """
+
+ # matrix-aware Horner's rule iteration
+ def transition(curr_poly_val, x, poly_coeff):
+ res = x.matmul(curr_poly_val)
+ res.diagonal(dim1=-2, dim2=-1).add_(poly_coeff.unsqueeze(-1))
+ return res
+
+ if zero_power is None:
+ zero_power = torch.eye(x.size(-1), x.size(-1), dtype=x.dtype, device=x.device) \
+ .view(*([1] * len(list(x.shape[:-2]))), x.size(-1), x.size(-1))
+
+ return _polynomial_value(poly, x, zero_power, transition)
+
+def _vector_polynomial_value(poly, x, zero_power=None):
+ """
+ Evaluates `poly(x)` for the (batched) vector input `x`.
+ Check out `_polynomial_value` function for more details.
+ """
+
+ # vector-aware Horner's rule iteration
+ def transition(curr_poly_val, x, poly_coeff):
+ res = torch.addcmul(poly_coeff.unsqueeze(-1), x, curr_poly_val)
+ return res
+
+ if zero_power is None:
+ zero_power = x.new_ones(1).expand(x.shape)
+
+ return _polynomial_value(poly, x, zero_power, transition)
+
+def _symeig_backward_partial_eigenspace(D_grad, U_grad, A, D, U, largest):
+ # compute a projection operator onto an orthogonal subspace spanned by the
+ # columns of U defined as (I - UU^T)
+ Ut = U.mT.contiguous()
+ proj_U_ortho = -U.matmul(Ut)
+ proj_U_ortho.diagonal(dim1=-2, dim2=-1).add_(1)
+
+ # compute U_ortho, a basis for the orthogonal complement to the span(U),
+ # by projecting a random [..., m, m - k] matrix onto the subspace spanned
+ # by the columns of U.
+ #
+ # fix generator for determinism
+ gen = torch.Generator(A.device)
+
+ # orthogonal complement to the span(U)
+ U_ortho = proj_U_ortho.matmul(
+ torch.randn(
+ (*A.shape[:-1], A.size(-1) - D.size(-1)),
+ dtype=A.dtype,
+ device=A.device,
+ generator=gen
+ )
+ )
+ U_ortho_t = U_ortho.mT.contiguous()
+
+ # compute the coefficients of the characteristic polynomial of the tensor D.
+ # Note that D is diagonal, so the diagonal elements are exactly the roots
+ # of the characteristic polynomial.
+ chr_poly_D = _polynomial_coefficients_given_roots(D)
+
+ # the code below finds the explicit solution to the Sylvester equation
+ # U_ortho^T A U_ortho dX - dX D = -U_ortho^T A U
+ # and incorporates it into the whole gradient stored in the `res` variable.
+ #
+ # Equivalent to the following naive implementation:
+ # res = A.new_zeros(A.shape)
+ # p_res = A.new_zeros(*A.shape[:-1], D.size(-1))
+ # for k in range(1, chr_poly_D.size(-1)):
+ # p_res.zero_()
+ # for i in range(0, k):
+ # p_res += (A.matrix_power(k - 1 - i) @ U_grad) * D.pow(i).unsqueeze(-2)
+ # res -= chr_poly_D[k] * (U_ortho @ poly_D_at_A.inverse() @ U_ortho_t @ p_res @ U.t())
+ #
+ # Note that dX is a differential, so the gradient contribution comes from the backward sensitivity
+ # Tr(f(U_grad, D_grad, A, U, D)^T dX) = Tr(g(U_grad, A, U, D)^T dA) for some functions f and g,
+ # and we need to compute g(U_grad, A, U, D)
+ #
+ # The naive implementation is based on the paper
+ # Hu, Qingxi, and Daizhan Cheng.
+ # "The polynomial solution to the Sylvester matrix equation."
+ # Applied mathematics letters 19.9 (2006): 859-864.
+ #
+ # We can modify the computation of `p_res` from above in a more efficient way
+ # p_res = U_grad * (chr_poly_D[1] * D.pow(0) + ... + chr_poly_D[k] * D.pow(k)).unsqueeze(-2)
+ # + A U_grad * (chr_poly_D[2] * D.pow(0) + ... + chr_poly_D[k] * D.pow(k - 1)).unsqueeze(-2)
+ # + ...
+ # + A.matrix_power(k - 1) U_grad * chr_poly_D[k]
+ # Note that this saves us from redundant matrix products with A (elimination of matrix_power)
+ U_grad_projected = U_grad
+ series_acc = U_grad_projected.new_zeros(U_grad_projected.shape)
+ for k in range(1, chr_poly_D.size(-1)):
+ poly_D = _vector_polynomial_value(chr_poly_D[..., k:], D)
+ series_acc += U_grad_projected * poly_D.unsqueeze(-2)
+ U_grad_projected = A.matmul(U_grad_projected)
+
+ # compute chr_poly_D(A) which essentially is:
+ #
+ # chr_poly_D_at_A = A.new_zeros(A.shape)
+ # for k in range(chr_poly_D.size(-1)):
+ # chr_poly_D_at_A += chr_poly_D[k] * A.matrix_power(k)
+ #
+ # Note, however, for better performance we use the Horner's rule
+ chr_poly_D_at_A = _matrix_polynomial_value(chr_poly_D, A)
+
+ # compute the action of `chr_poly_D_at_A` restricted to U_ortho_t
+ chr_poly_D_at_A_to_U_ortho = torch.matmul(
+ U_ortho_t,
+ torch.matmul(
+ chr_poly_D_at_A,
+ U_ortho
+ )
+ )
+ # we need to invert 'chr_poly_D_at_A_to_U_ortho`, for that we compute its
+ # Cholesky decomposition and then use `torch.cholesky_solve` for better stability.
+ # Cholesky decomposition requires the input to be positive-definite.
+ # Note that `chr_poly_D_at_A_to_U_ortho` is positive-definite if
+ # 1. `largest` == False, or
+ # 2. `largest` == True and `k` is even
+ # under the assumption that `A` has distinct eigenvalues.
+ #
+ # check if `chr_poly_D_at_A_to_U_ortho` is positive-definite or negative-definite
+ chr_poly_D_at_A_to_U_ortho_sign = -1 if (largest and (k % 2 == 1)) else +1
+ chr_poly_D_at_A_to_U_ortho_L = torch.linalg.cholesky(
+ chr_poly_D_at_A_to_U_ortho_sign * chr_poly_D_at_A_to_U_ortho
+ )
+
+ # compute the gradient part in span(U)
+ res = _symeig_backward_complete_eigenspace(
+ D_grad, U_grad, A, D, U
+ )
+
+ # incorporate the Sylvester equation solution into the full gradient
+ # it resides in span(U_ortho)
+ res -= U_ortho.matmul(
+ chr_poly_D_at_A_to_U_ortho_sign * torch.cholesky_solve(
+ U_ortho_t.matmul(series_acc),
+ chr_poly_D_at_A_to_U_ortho_L
+ )
+ ).matmul(Ut)
+
+ return res
+
+def _symeig_backward(D_grad, U_grad, A, D, U, largest):
+ # if `U` is square, then the columns of `U` form a complete eigenspace
+ if U.size(-1) == U.size(-2):
+ return _symeig_backward_complete_eigenspace(
+ D_grad, U_grad, A, D, U
+ )
+ else:
+ return _symeig_backward_partial_eigenspace(
+ D_grad, U_grad, A, D, U, largest
+ )
+
+class LOBPCGAutogradFunction(torch.autograd.Function):
+
+ @staticmethod
+ def forward(ctx, # type: ignore[override]
+ A: Tensor,
+ k: Optional[int] = None,
+ B: Optional[Tensor] = None,
+ X: Optional[Tensor] = None,
+ n: Optional[int] = None,
+ iK: Optional[Tensor] = None,
+ niter: Optional[int] = None,
+ tol: Optional[float] = None,
+ largest: Optional[bool] = None,
+ method: Optional[str] = None,
+ tracker: None = None,
+ ortho_iparams: Optional[Dict[str, int]] = None,
+ ortho_fparams: Optional[Dict[str, float]] = None,
+ ortho_bparams: Optional[Dict[str, bool]] = None
+ ) -> Tuple[Tensor, Tensor]:
+
+ # makes sure that input is contiguous for efficiency.
+ # Note: autograd does not support dense gradients for sparse input yet.
+ A = A.contiguous() if (not A.is_sparse) else A
+ if B is not None:
+ B = B.contiguous() if (not B.is_sparse) else B
+
+ D, U = _lobpcg(
+ A, k, B, X,
+ n, iK, niter, tol, largest, method, tracker,
+ ortho_iparams, ortho_fparams, ortho_bparams
+ )
+
+ ctx.save_for_backward(A, B, D, U)
+ ctx.largest = largest
+
+ return D, U
+
+ @staticmethod
+ def backward(ctx, D_grad, U_grad):
+ A_grad = B_grad = None
+ grads = [None] * 14
+
+ A, B, D, U = ctx.saved_tensors
+ largest = ctx.largest
+
+ # lobpcg.backward has some limitations. Checks for unsupported input
+ if A.is_sparse or (B is not None and B.is_sparse and ctx.needs_input_grad[2]):
+ raise ValueError(
+ 'lobpcg.backward does not support sparse input yet.'
+ 'Note that lobpcg.forward does though.'
+ )
+ if A.dtype in (torch.complex64, torch.complex128) or \
+ B is not None and B.dtype in (torch.complex64, torch.complex128):
+ raise ValueError(
+ 'lobpcg.backward does not support complex input yet.'
+ 'Note that lobpcg.forward does though.'
+ )
+ if B is not None:
+ raise ValueError(
+ 'lobpcg.backward does not support backward with B != I yet.'
+ )
+
+ if largest is None:
+ largest = True
+
+ # symeig backward
+ if B is None:
+ A_grad = _symeig_backward(
+ D_grad, U_grad, A, D, U, largest
+ )
+
+ # A has index 0
+ grads[0] = A_grad
+ # B has index 2
+ grads[2] = B_grad
+ return tuple(grads)
+
+
+def lobpcg(A: Tensor,
+ k: Optional[int] = None,
+ B: Optional[Tensor] = None,
+ X: Optional[Tensor] = None,
+ n: Optional[int] = None,
+ iK: Optional[Tensor] = None,
+ niter: Optional[int] = None,
+ tol: Optional[float] = None,
+ largest: Optional[bool] = None,
+ method: Optional[str] = None,
+ tracker: None = None,
+ ortho_iparams: Optional[Dict[str, int]] = None,
+ ortho_fparams: Optional[Dict[str, float]] = None,
+ ortho_bparams: Optional[Dict[str, bool]] = None
+ ) -> Tuple[Tensor, Tensor]:
+
+ """Find the k largest (or smallest) eigenvalues and the corresponding
+ eigenvectors of a symmetric positive definite generalized
+ eigenvalue problem using matrix-free LOBPCG methods.
+
+ This function is a front-end to the following LOBPCG algorithms
+ selectable via `method` argument:
+
+ `method="basic"` - the LOBPCG method introduced by Andrew
+ Knyazev, see [Knyazev2001]. A less robust method, may fail when
+ Cholesky is applied to singular input.
+
+ `method="ortho"` - the LOBPCG method with orthogonal basis
+ selection [StathopoulosEtal2002]. A robust method.
+
+ Supported inputs are dense, sparse, and batches of dense matrices.
+
+ .. note:: In general, the basic method spends least time per
+ iteration. However, the robust methods converge much faster and
+ are more stable. So, the usage of the basic method is generally
+ not recommended but there exist cases where the usage of the
+ basic method may be preferred.
+
+ .. warning:: The backward method does not support sparse and complex inputs.
+ It works only when `B` is not provided (i.e. `B == None`).
+ We are actively working on extensions, and the details of
+ the algorithms are going to be published promptly.
+
+ .. warning:: While it is assumed that `A` is symmetric, `A.grad` is not.
+ To make sure that `A.grad` is symmetric, so that `A - t * A.grad` is symmetric
+ in first-order optimization routines, prior to running `lobpcg`
+ we do the following symmetrization map: `A -> (A + A.t()) / 2`.
+ The map is performed only when the `A` requires gradients.
+
+ Args:
+
+ A (Tensor): the input tensor of size :math:`(*, m, m)`
+
+ B (Tensor, optional): the input tensor of size :math:`(*, m,
+ m)`. When not specified, `B` is interpreted as
+ identity matrix.
+
+ X (tensor, optional): the input tensor of size :math:`(*, m, n)`
+ where `k <= n <= m`. When specified, it is used as
+ initial approximation of eigenvectors. X must be a
+ dense tensor.
+
+ iK (tensor, optional): the input tensor of size :math:`(*, m,
+ m)`. When specified, it will be used as preconditioner.
+
+ k (integer, optional): the number of requested
+ eigenpairs. Default is the number of :math:`X`
+ columns (when specified) or `1`.
+
+ n (integer, optional): if :math:`X` is not specified then `n`
+ specifies the size of the generated random
+ approximation of eigenvectors. Default value for `n`
+ is `k`. If :math:`X` is specified, the value of `n`
+ (when specified) must be the number of :math:`X`
+ columns.
+
+ tol (float, optional): residual tolerance for stopping
+ criterion. Default is `feps ** 0.5` where `feps` is
+ smallest non-zero floating-point number of the given
+ input tensor `A` data type.
+
+ largest (bool, optional): when True, solve the eigenproblem for
+ the largest eigenvalues. Otherwise, solve the
+ eigenproblem for smallest eigenvalues. Default is
+ `True`.
+
+ method (str, optional): select LOBPCG method. See the
+ description of the function above. Default is
+ "ortho".
+
+ niter (int, optional): maximum number of iterations. When
+ reached, the iteration process is hard-stopped and
+ the current approximation of eigenpairs is returned.
+ For infinite iteration but until convergence criteria
+ is met, use `-1`.
+
+ tracker (callable, optional) : a function for tracing the
+ iteration process. When specified, it is called at
+ each iteration step with LOBPCG instance as an
+ argument. The LOBPCG instance holds the full state of
+ the iteration process in the following attributes:
+
+ `iparams`, `fparams`, `bparams` - dictionaries of
+ integer, float, and boolean valued input
+ parameters, respectively
+
+ `ivars`, `fvars`, `bvars`, `tvars` - dictionaries
+ of integer, float, boolean, and Tensor valued
+ iteration variables, respectively.
+
+ `A`, `B`, `iK` - input Tensor arguments.
+
+ `E`, `X`, `S`, `R` - iteration Tensor variables.
+
+ For instance:
+
+ `ivars["istep"]` - the current iteration step
+ `X` - the current approximation of eigenvectors
+ `E` - the current approximation of eigenvalues
+ `R` - the current residual
+ `ivars["converged_count"]` - the current number of converged eigenpairs
+ `tvars["rerr"]` - the current state of convergence criteria
+
+ Note that when `tracker` stores Tensor objects from
+ the LOBPCG instance, it must make copies of these.
+
+ If `tracker` sets `bvars["force_stop"] = True`, the
+ iteration process will be hard-stopped.
+
+ ortho_iparams, ortho_fparams, ortho_bparams (dict, optional):
+ various parameters to LOBPCG algorithm when using
+ `method="ortho"`.
+
+ Returns:
+
+ E (Tensor): tensor of eigenvalues of size :math:`(*, k)`
+
+ X (Tensor): tensor of eigenvectors of size :math:`(*, m, k)`
+
+ References:
+
+ [Knyazev2001] Andrew V. Knyazev. (2001) Toward the Optimal
+ Preconditioned Eigensolver: Locally Optimal Block Preconditioned
+ Conjugate Gradient Method. SIAM J. Sci. Comput., 23(2),
+ 517-541. (25 pages)
+ https://epubs.siam.org/doi/abs/10.1137/S1064827500366124
+
+ [StathopoulosEtal2002] Andreas Stathopoulos and Kesheng
+ Wu. (2002) A Block Orthogonalization Procedure with Constant
+ Synchronization Requirements. SIAM J. Sci. Comput., 23(6),
+ 2165-2182. (18 pages)
+ https://epubs.siam.org/doi/10.1137/S1064827500370883
+
+ [DuerschEtal2018] Jed A. Duersch, Meiyue Shao, Chao Yang, Ming
+ Gu. (2018) A Robust and Efficient Implementation of LOBPCG.
+ SIAM J. Sci. Comput., 40(5), C655-C676. (22 pages)
+ https://epubs.siam.org/doi/abs/10.1137/17M1129830
+
+ """
+
+ if not torch.jit.is_scripting():
+ tensor_ops = (A, B, X, iK)
+ if (not set(map(type, tensor_ops)).issubset((torch.Tensor, type(None))) and has_torch_function(tensor_ops)):
+ return handle_torch_function(
+ lobpcg, tensor_ops, A, k=k,
+ B=B, X=X, n=n, iK=iK, niter=niter, tol=tol,
+ largest=largest, method=method, tracker=tracker,
+ ortho_iparams=ortho_iparams,
+ ortho_fparams=ortho_fparams,
+ ortho_bparams=ortho_bparams)
+
+ if not torch._jit_internal.is_scripting():
+ if A.requires_grad or (B is not None and B.requires_grad):
+ # While it is expected that `A` is symmetric,
+ # the `A_grad` might be not. Therefore we perform the trick below,
+ # so that `A_grad` becomes symmetric.
+ # The symmetrization is important for first-order optimization methods,
+ # so that (A - alpha * A_grad) is still a symmetric matrix.
+ # Same holds for `B`.
+ A_sym = (A + A.mT) / 2
+ B_sym = (B + B.mT) / 2 if (B is not None) else None
+
+ return LOBPCGAutogradFunction.apply(
+ A_sym, k, B_sym, X, n, iK, niter, tol, largest,
+ method, tracker, ortho_iparams, ortho_fparams, ortho_bparams
+ )
+ else:
+ if A.requires_grad or (B is not None and B.requires_grad):
+ raise RuntimeError(
+ 'Script and require grads is not supported atm.'
+ 'If you just want to do the forward, use .detach()'
+ 'on A and B before calling into lobpcg'
+ )
+
+ return _lobpcg(
+ A, k, B, X,
+ n, iK, niter, tol, largest, method, tracker,
+ ortho_iparams, ortho_fparams, ortho_bparams
+ )
+
+def _lobpcg(A: Tensor,
+ k: Optional[int] = None,
+ B: Optional[Tensor] = None,
+ X: Optional[Tensor] = None,
+ n: Optional[int] = None,
+ iK: Optional[Tensor] = None,
+ niter: Optional[int] = None,
+ tol: Optional[float] = None,
+ largest: Optional[bool] = None,
+ method: Optional[str] = None,
+ tracker: None = None,
+ ortho_iparams: Optional[Dict[str, int]] = None,
+ ortho_fparams: Optional[Dict[str, float]] = None,
+ ortho_bparams: Optional[Dict[str, bool]] = None
+ ) -> Tuple[Tensor, Tensor]:
+
+ # A must be square:
+ assert A.shape[-2] == A.shape[-1], A.shape
+ if B is not None:
+ # A and B must have the same shapes:
+ assert A.shape == B.shape, (A.shape, B.shape)
+
+ dtype = _utils.get_floating_dtype(A)
+ device = A.device
+ if tol is None:
+ feps = {torch.float32: 1.2e-07,
+ torch.float64: 2.23e-16}[dtype]
+ tol = feps ** 0.5
+
+ m = A.shape[-1]
+ k = (1 if X is None else X.shape[-1]) if k is None else k
+ n = (k if n is None else n) if X is None else X.shape[-1]
+
+ if (m < 3 * n):
+ raise ValueError(
+ 'LPBPCG algorithm is not applicable when the number of A rows (={})'
+ ' is smaller than 3 x the number of requested eigenpairs (={})'
+ .format(m, n))
+
+ method = 'ortho' if method is None else method
+
+ iparams = {
+ 'm': m,
+ 'n': n,
+ 'k': k,
+ 'niter': 1000 if niter is None else niter,
+ }
+
+ fparams = {
+ 'tol': tol,
+ }
+
+ bparams = {
+ 'largest': True if largest is None else largest
+ }
+
+ if method == 'ortho':
+ if ortho_iparams is not None:
+ iparams.update(ortho_iparams)
+ if ortho_fparams is not None:
+ fparams.update(ortho_fparams)
+ if ortho_bparams is not None:
+ bparams.update(ortho_bparams)
+ iparams['ortho_i_max'] = iparams.get('ortho_i_max', 3)
+ iparams['ortho_j_max'] = iparams.get('ortho_j_max', 3)
+ fparams['ortho_tol'] = fparams.get('ortho_tol', tol)
+ fparams['ortho_tol_drop'] = fparams.get('ortho_tol_drop', tol)
+ fparams['ortho_tol_replace'] = fparams.get('ortho_tol_replace', tol)
+ bparams['ortho_use_drop'] = bparams.get('ortho_use_drop', False)
+
+ if not torch.jit.is_scripting():
+ LOBPCG.call_tracker = LOBPCG_call_tracker # type: ignore[assignment]
+
+ if len(A.shape) > 2:
+ N = int(torch.prod(torch.tensor(A.shape[:-2])))
+ bA = A.reshape((N,) + A.shape[-2:])
+ bB = B.reshape((N,) + A.shape[-2:]) if B is not None else None
+ bX = X.reshape((N,) + X.shape[-2:]) if X is not None else None
+ bE = torch.empty((N, k), dtype=dtype, device=device)
+ bXret = torch.empty((N, m, k), dtype=dtype, device=device)
+
+ for i in range(N):
+ A_ = bA[i]
+ B_ = bB[i] if bB is not None else None
+ X_ = torch.randn((m, n), dtype=dtype, device=device) if bX is None else bX[i]
+ assert len(X_.shape) == 2 and X_.shape == (m, n), (X_.shape, (m, n))
+ iparams['batch_index'] = i
+ worker = LOBPCG(A_, B_, X_, iK, iparams, fparams, bparams, method, tracker)
+ worker.run()
+ bE[i] = worker.E[:k]
+ bXret[i] = worker.X[:, :k]
+
+ if not torch.jit.is_scripting():
+ LOBPCG.call_tracker = LOBPCG_call_tracker_orig # type: ignore[assignment]
+
+ return bE.reshape(A.shape[:-2] + (k,)), bXret.reshape(A.shape[:-2] + (m, k))
+
+ X = torch.randn((m, n), dtype=dtype, device=device) if X is None else X
+ assert len(X.shape) == 2 and X.shape == (m, n), (X.shape, (m, n))
+
+ worker = LOBPCG(A, B, X, iK, iparams, fparams, bparams, method, tracker)
+
+ worker.run()
+
+ if not torch.jit.is_scripting():
+ LOBPCG.call_tracker = LOBPCG_call_tracker_orig # type: ignore[assignment]
+
+ return worker.E[:k], worker.X[:, :k]
+
+
+class LOBPCG(object):
+ """Worker class of LOBPCG methods.
+ """
+
+ def __init__(self,
+ A: Optional[Tensor],
+ B: Optional[Tensor],
+ X: Tensor,
+ iK: Optional[Tensor],
+ iparams: Dict[str, int],
+ fparams: Dict[str, float],
+ bparams: Dict[str, bool],
+ method: str,
+ tracker: None
+ ) -> None:
+
+ # constant parameters
+ self.A = A
+ self.B = B
+ self.iK = iK
+ self.iparams = iparams
+ self.fparams = fparams
+ self.bparams = bparams
+ self.method = method
+ self.tracker = tracker
+ m = iparams['m']
+ n = iparams['n']
+
+ # variable parameters
+ self.X = X
+ self.E = torch.zeros((n, ), dtype=X.dtype, device=X.device)
+ self.R = torch.zeros((m, n), dtype=X.dtype, device=X.device)
+ self.S = torch.zeros((m, 3 * n), dtype=X.dtype, device=X.device)
+ self.tvars: Dict[str, Tensor] = {}
+ self.ivars: Dict[str, int] = {'istep': 0}
+ self.fvars: Dict[str, float] = {'_': 0.0}
+ self.bvars: Dict[str, bool] = {'_': False}
+
+ def __str__(self):
+ lines = ['LOPBCG:']
+ lines += [' iparams={}'.format(self.iparams)]
+ lines += [' fparams={}'.format(self.fparams)]
+ lines += [' bparams={}'.format(self.bparams)]
+ lines += [' ivars={}'.format(self.ivars)]
+ lines += [' fvars={}'.format(self.fvars)]
+ lines += [' bvars={}'.format(self.bvars)]
+ lines += [' tvars={}'.format(self.tvars)]
+ lines += [' A={}'.format(self.A)]
+ lines += [' B={}'.format(self.B)]
+ lines += [' iK={}'.format(self.iK)]
+ lines += [' X={}'.format(self.X)]
+ lines += [' E={}'.format(self.E)]
+ r = ''
+ for line in lines:
+ r += line + '\n'
+ return r
+
+ def update(self):
+ """Set and update iteration variables.
+ """
+ if self.ivars['istep'] == 0:
+ X_norm = float(torch.norm(self.X))
+ iX_norm = X_norm ** -1
+ A_norm = float(torch.norm(_utils.matmul(self.A, self.X))) * iX_norm
+ B_norm = float(torch.norm(_utils.matmul(self.B, self.X))) * iX_norm
+ self.fvars['X_norm'] = X_norm
+ self.fvars['A_norm'] = A_norm
+ self.fvars['B_norm'] = B_norm
+ self.ivars['iterations_left'] = self.iparams['niter']
+ self.ivars['converged_count'] = 0
+ self.ivars['converged_end'] = 0
+
+ if self.method == 'ortho':
+ self._update_ortho()
+ else:
+ self._update_basic()
+
+ self.ivars['iterations_left'] = self.ivars['iterations_left'] - 1
+ self.ivars['istep'] = self.ivars['istep'] + 1
+
+ def update_residual(self):
+ """Update residual R from A, B, X, E.
+ """
+ mm = _utils.matmul
+ self.R = mm(self.A, self.X) - mm(self.B, self.X) * self.E
+
+ def update_converged_count(self):
+ """Determine the number of converged eigenpairs using backward stable
+ convergence criterion, see discussion in Sec 4.3 of [DuerschEtal2018].
+
+ Users may redefine this method for custom convergence criteria.
+ """
+ # (...) -> int
+ prev_count = self.ivars['converged_count']
+ tol = self.fparams['tol']
+ A_norm = self.fvars['A_norm']
+ B_norm = self.fvars['B_norm']
+ E, X, R = self.E, self.X, self.R
+ rerr = torch.norm(R, 2, (0, )) * (torch.norm(X, 2, (0, )) * (A_norm + E[:X.shape[-1]] * B_norm)) ** -1
+ converged = rerr < tol
+ count = 0
+ for b in converged:
+ if not b:
+ # ignore convergence of following pairs to ensure
+ # strict ordering of eigenpairs
+ break
+ count += 1
+ assert count >= prev_count, 'the number of converged eigenpairs ' \
+ '(was {}, got {}) cannot decrease'.format(prev_count, count)
+ self.ivars['converged_count'] = count
+ self.tvars['rerr'] = rerr
+ return count
+
+ def stop_iteration(self):
+ """Return True to stop iterations.
+
+ Note that tracker (if defined) can force-stop iterations by
+ setting ``worker.bvars['force_stop'] = True``.
+ """
+ return (self.bvars.get('force_stop', False)
+ or self.ivars['iterations_left'] == 0
+ or self.ivars['converged_count'] >= self.iparams['k'])
+
+ def run(self):
+ """Run LOBPCG iterations.
+
+ Use this method as a template for implementing LOBPCG
+ iteration scheme with custom tracker that is compatible with
+ TorchScript.
+ """
+ self.update()
+
+ if not torch.jit.is_scripting() and self.tracker is not None:
+ self.call_tracker()
+
+ while not self.stop_iteration():
+
+ self.update()
+
+ if not torch.jit.is_scripting() and self.tracker is not None:
+ self.call_tracker()
+
+ @torch.jit.unused
+ def call_tracker(self):
+ """Interface for tracking iteration process in Python mode.
+
+ Tracking the iteration process is disabled in TorchScript
+ mode. In fact, one should specify tracker=None when JIT
+ compiling functions using lobpcg.
+ """
+ # do nothing when in TorchScript mode
+ pass
+
+ # Internal methods
+
    def _update_basic(self):
        """
        Update or initialize iteration variables when `method == "basic"`.
        """
        mm = torch.matmul
        # ns: end index of the active columns in S; nc: number of
        # converged eigenpairs so far; n: number of requested eigenpairs.
        ns = self.ivars['converged_end']
        nc = self.ivars['converged_count']
        n = self.iparams['n']
        largest = self.bparams['largest']

        if self.ivars['istep'] == 0:
            # First step: Rayleigh-Ritz on the initial approximation X,
            # then seed the search subspace S from the rotated X.
            Ri = self._get_rayleigh_ritz_transform(self.X)
            M = _utils.qform(_utils.qform(self.A, self.X), Ri)
            E, Z = _utils.symeig(M, largest)
            self.X[:] = mm(self.X, mm(Ri, Z))
            self.E[:] = E
            np = 0  # column count of the (not yet existing) P block; not numpy
            self.update_residual()
            nc = self.update_converged_count()
            self.S[..., :n] = self.X

            # W = iK @ R — presumably the preconditioned residuals; it is
            # appended after the X (and P) columns of S.
            W = _utils.matmul(self.iK, self.R)
            self.ivars['converged_end'] = ns = n + np + W.shape[-1]
            self.S[:, n + np:ns] = W
        else:
            # Subsequent steps: Rayleigh-Ritz on the active part of S.
            S_ = self.S[:, nc:ns]
            Ri = self._get_rayleigh_ritz_transform(S_)
            M = _utils.qform(_utils.qform(self.A, S_), Ri)
            E_, Z = _utils.symeig(M, largest)
            # Refresh only the not-yet-converged eigenpairs.
            self.X[:, nc:] = mm(S_, mm(Ri, Z[:, :n - nc]))
            self.E[nc:] = E_[:n - nc]
            P = mm(S_, mm(Ri, Z[:, n:2 * n - nc]))
            np = P.shape[-1]  # number of columns in the P block; not numpy

            # Check convergence after the update.
            self.update_residual()
            nc = self.update_converged_count()
            # Rebuild S as [X | P | W].
            self.S[..., :n] = self.X
            self.S[:, n:n + np] = P
            W = _utils.matmul(self.iK, self.R[:, nc:])

            self.ivars['converged_end'] = ns = n + np + W.shape[-1]
            self.S[:, n + np:ns] = W
+
    def _update_ortho(self):
        """
        Update or initialize iteration variables when `method == "ortho"`.
        """
        mm = torch.matmul
        # ns: end index of the active columns in S; nc: number of
        # converged eigenpairs so far; n: number of requested eigenpairs.
        ns = self.ivars['converged_end']
        nc = self.ivars['converged_count']
        n = self.iparams['n']
        largest = self.bparams['largest']

        if self.ivars['istep'] == 0:
            # First step: Rayleigh-Ritz on the initial approximation X.
            Ri = self._get_rayleigh_ritz_transform(self.X)
            M = _utils.qform(_utils.qform(self.A, self.X), Ri)
            E, Z = _utils.symeig(M, largest)
            # NOTE: X is rebound here (not updated in place as in
            # _update_basic).
            self.X = mm(self.X, mm(Ri, Z))
            self.update_residual()
            np = 0  # column count of the (not yet existing) P block; not numpy
            nc = self.update_converged_count()
            self.S[:, :n] = self.X
            # Unlike the "basic" method, the residual block is
            # B-orthonormalized against X before entering S.
            W = self._get_ortho(self.R, self.X)
            ns = self.ivars['converged_end'] = n + np + W.shape[-1]
            self.S[:, n + np:ns] = W

        else:
            S_ = self.S[:, nc:ns]
            # Rayleigh-Ritz procedure
            E_, Z = _utils.symeig(_utils.qform(self.A, S_), largest)

            # Update E, X, P
            self.X[:, nc:] = mm(S_, Z[:, :n - nc])
            self.E[nc:] = E_[:n - nc]
            P = mm(S_, mm(Z[:, n - nc:], _utils.basis(_utils.transpose(Z[:n - nc, n - nc:]))))
            np = P.shape[-1]  # number of columns in the P block; not numpy

            # check convergence
            self.update_residual()
            nc = self.update_converged_count()

            # update S as [X | P | W]
            self.S[:, :n] = self.X
            self.S[:, n:n + np] = P
            W = self._get_ortho(self.R[:, nc:], self.S[:, :n + np])
            ns = self.ivars['converged_end'] = n + np + W.shape[-1]
            self.S[:, n + np:ns] = W
+
+ def _get_rayleigh_ritz_transform(self, S):
+ """Return a transformation matrix that is used in Rayleigh-Ritz
+ procedure for reducing a general eigenvalue problem :math:`(S^TAS)
+ C = (S^TBS) C E` to a standard eigenvalue problem :math: `(Ri^T
+ S^TAS Ri) Z = Z E` where `C = Ri Z`.
+
+ .. note:: In the original Rayleight-Ritz procedure in
+ [DuerschEtal2018], the problem is formulated as follows::
+
+ SAS = S^T A S
+ SBS = S^T B S
+ D = () ** -1/2
+ R^T R = Cholesky(D SBS D)
+ Ri = D R^-1
+ solve symeig problem Ri^T SAS Ri Z = Theta Z
+ C = Ri Z
+
+ To reduce the number of matrix products (denoted by empty
+ space between matrices), here we introduce element-wise
+ products (denoted by symbol `*`) so that the Rayleight-Ritz
+ procedure becomes::
+
+ SAS = S^T A S
+ SBS = S^T B S
+ d = () ** -1/2 # this is 1-d column vector
+ dd = d d^T # this is 2-d matrix
+ R^T R = Cholesky(dd * SBS)
+ Ri = R^-1 * d # broadcasting
+ solve symeig problem Ri^T SAS Ri Z = Theta Z
+ C = Ri Z
+
+ where `dd` is 2-d matrix that replaces matrix products `D M
+ D` with one element-wise product `M * dd`; and `d` replaces
+ matrix product `D M` with element-wise product `M *
+ d`. Also, creating the diagonal matrix `D` is avoided.
+
+ Args:
+ S (Tensor): the matrix basis for the search subspace, size is
+ :math:`(m, n)`.
+
+ Returns:
+ Ri (tensor): upper-triangular transformation matrix of size
+ :math:`(n, n)`.
+
+ """
+ B = self.B
+ mm = torch.matmul
+ SBS = _utils.qform(B, S)
+ d_row = SBS.diagonal(0, -2, -1) ** -0.5
+ d_col = d_row.reshape(d_row.shape[0], 1)
+ # TODO use torch.linalg.cholesky_solve once it is implemented
+ R = torch.linalg.cholesky((SBS * d_row) * d_col, upper=True)
+ return torch.linalg.solve_triangular(R, d_row.diag_embed(), upper=True, left=False)
+
    def _get_svqb(self,
                  U: Tensor,  # Tensor
                  drop: bool,  # bool
                  tau: float  # float
                  ) -> Tensor:
        """Return B-orthonormal U.

        .. note:: When `drop` is `False` then `svqb` is based on the
                  Algorithm 4 from [DuerschPhD2015] that is a slight
                  modification of the corresponding algorithm
                  introduced in [StathopolousWu2002].

        Args:

          U (Tensor) : initial approximation, size is (m, n)
          drop (bool) : when True, drop columns that
                     contribution to the `span([U])` is small.
          tau (float) : positive tolerance

        Returns:

          U (Tensor) : B-orthonormal columns (:math:`U^T B U = I`), size
                       is (m, n1), where `n1 = n` if `drop` is `False`,
                       otherwise `n1 <= n`.

        """
        if torch.numel(U) == 0:
            return U
        UBU = _utils.qform(self.B, U)
        d = UBU.diagonal(0, -2, -1)

        # Detect and drop exact zero columns from U. While the test
        # `abs(d) == 0` is unlikely to be True for random data, it is
        # possible to construct input data to lobpcg where it will be
        # True leading to a failure (notice the `d ** -0.5` operation
        # in the original algorithm). To prevent the failure, we drop
        # the exact zero columns here and then continue with the
        # original algorithm below.
        nz = torch.where(abs(d) != 0.0)
        assert len(nz) == 1, nz
        if len(nz[0]) < len(d):
            U = U[:, nz[0]]
            if torch.numel(U) == 0:
                return U
            # Recompute the Gram matrix and its diagonal for the reduced U.
            UBU = _utils.qform(self.B, U)
            d = UBU.diagonal(0, -2, -1)
            nz = torch.where(abs(d) != 0.0)
            assert len(nz[0]) == len(d)

        # The original algorithm 4 from [DuerschPhD2015].
        # Scale UBU to unit diagonal before the eigendecomposition.
        d_col = (d ** -0.5).reshape(d.shape[0], 1)
        DUBUD = (UBU * d_col) * _utils.transpose(d_col)
        E, Z = _utils.symeig(DUBUD)
        # Tolerance threshold relative to the largest eigenvalue magnitude.
        t = tau * abs(E).max()
        if drop:
            # Drop eigenpairs at or below the threshold.
            keep = torch.where(E > t)
            assert len(keep) == 1, keep
            E = E[keep[0]]
            Z = Z[:, keep[0]]
            d_col = d_col[keep[0]]
        else:
            # Clamp small eigenvalues to the threshold instead of dropping.
            E[(torch.where(E < t))[0]] = t

        return torch.matmul(U * _utils.transpose(d_col), Z * E ** -0.5)
+
+ def _get_ortho(self, U, V):
+ """Return B-orthonormal U with columns are B-orthogonal to V.
+
+ .. note:: When `bparams["ortho_use_drop"] == False` then
+ `_get_ortho` is based on the Algorithm 3 from
+ [DuerschPhD2015] that is a slight modification of
+ the corresponding algorithm introduced in
+ [StathopolousWu2002]. Otherwise, the method
+ implements Algorithm 6 from [DuerschPhD2015]
+
+ .. note:: If all U columns are B-collinear to V then the
+ returned tensor U will be empty.
+
+ Args:
+
+ U (Tensor) : initial approximation, size is (m, n)
+ V (Tensor) : B-orthogonal external basis, size is (m, k)
+
+ Returns:
+
+ U (Tensor) : B-orthonormal columns (:math:`U^T B U = I`)
+ such that :math:`V^T B U=0`, size is (m, n1),
+ where `n1 = n` if `drop` is `False, otherwise
+ `n1 <= n`.
+ """
+ mm = torch.matmul
+ mm_B = _utils.matmul
+ m = self.iparams['m']
+ tau_ortho = self.fparams['ortho_tol']
+ tau_drop = self.fparams['ortho_tol_drop']
+ tau_replace = self.fparams['ortho_tol_replace']
+ i_max = self.iparams['ortho_i_max']
+ j_max = self.iparams['ortho_j_max']
+ # when use_drop==True, enable dropping U columns that have
+ # small contribution to the `span([U, V])`.
+ use_drop = self.bparams['ortho_use_drop']
+
+ # clean up variables from the previous call
+ for vkey in list(self.fvars.keys()):
+ if vkey.startswith('ortho_') and vkey.endswith('_rerr'):
+ self.fvars.pop(vkey)
+ self.ivars.pop('ortho_i', 0)
+ self.ivars.pop('ortho_j', 0)
+
+ BV_norm = torch.norm(mm_B(self.B, V))
+ BU = mm_B(self.B, U)
+ VBU = mm(_utils.transpose(V), BU)
+ i = j = 0
+ stats = ''
+ for i in range(i_max):
+ U = U - mm(V, VBU)
+ drop = False
+ tau_svqb = tau_drop
+ for j in range(j_max):
+ if use_drop:
+ U = self._get_svqb(U, drop, tau_svqb)
+ drop = True
+ tau_svqb = tau_replace
+ else:
+ U = self._get_svqb(U, False, tau_replace)
+ if torch.numel(U) == 0:
+ # all initial U columns are B-collinear to V
+ self.ivars['ortho_i'] = i
+ self.ivars['ortho_j'] = j
+ return U
+ BU = mm_B(self.B, U)
+ UBU = mm(_utils.transpose(U), BU)
+ U_norm = torch.norm(U)
+ BU_norm = torch.norm(BU)
+ R = UBU - torch.eye(UBU.shape[-1],
+ device=UBU.device,
+ dtype=UBU.dtype)
+ R_norm = torch.norm(R)
+ # https://github.com/pytorch/pytorch/issues/33810 workaround:
+ rerr = float(R_norm) * float(BU_norm * U_norm) ** -1
+ vkey = 'ortho_UBUmI_rerr[{}, {}]'.format(i, j)
+ self.fvars[vkey] = rerr
+ if rerr < tau_ortho:
+ break
+ VBU = mm(_utils.transpose(V), BU)
+ VBU_norm = torch.norm(VBU)
+ U_norm = torch.norm(U)
+ rerr = float(VBU_norm) * float(BV_norm * U_norm) ** -1
+ vkey = 'ortho_VBU_rerr[{}]'.format(i)
+ self.fvars[vkey] = rerr
+ if rerr < tau_ortho:
+ break
+ if m < U.shape[-1] + V.shape[-1]:
+ # TorchScript needs the class var to be assigned to a local to
+ # do optional type refinement
+ B = self.B
+ assert B is not None
+ raise ValueError(
+ 'Overdetermined shape of U:'
+ ' #B-cols(={}) >= #U-cols(={}) + #V-cols(={}) must hold'
+ .format(B.shape[-1], U.shape[-1], V.shape[-1]))
+ self.ivars['ortho_i'] = i
+ self.ivars['ortho_j'] = j
+ return U
+
+
# Calling tracker is separated from LOBPCG definitions because
# TorchScript does not support user-defined callback arguments:
LOBPCG_call_tracker_orig = LOBPCG.call_tracker
def LOBPCG_call_tracker(self):
    """Eager-mode ``call_tracker``: forward the LOBPCG worker instance to
    the user-supplied tracker callback (return value is discarded)."""
    self.tracker(self)
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/_python_dispatcher.py b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/_python_dispatcher.py
new file mode 100644
index 0000000000000000000000000000000000000000..ee2c7d279458e3410421f5fc447fc8e20057209a
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/_python_dispatcher.py
@@ -0,0 +1,159 @@
+import re
+import torch._C as C
+
+
+"""
+PythonDispatcher class is a thin python-binding to C++ dispatcher and it
+is designed to show how dispatcher precompute works. In particular,
+it shows for a certain op `foo`, what the computed dispatch table looks
+like after users register their kernels to certain dispatch keys.
+
+In the real C++ dispatcher we support many dispatch keys for different
+functionalities. For simplicity PythonDispatcher only supports dispatch
+keys for a single example of each use case. These use cases are listed below:
+
+- CPU/AutogradCPU: represents in-tree backends which we usually have dedicated inference &
+ autograd kernel in pytorch core library.
+ E.g. CPU, CUDA
+- FPGA/AutogradOther: represents in-tree backends which we usually have backend specific
+ inference kernels, but they share the same autograd kernel specified in AutogradOther.
+ E.g. FPGA, SparseCsrCPU
+- XLA/AutogradXLA: represents out-of-tree backends which we don't have either inference or autograd
+ kernel defined in pytorch core library. Backend owner is responsible for registering both
+ inference & autograd kernels in their extensions(e.g. torch-xla) for the operators they support.
+ E.g. XLA, XPU, MPS
+- CompositeExplicitAutograd: alias key mapped to inference kernels of all backends like CPU, CUDA, XLA etc.
+ Kernels registered to this key MUST work for inference for all backends.
+- Autograd: alias key mapped to autograd of all backends like AutogradCPU, AutogradXLA, AutogradOther.
+ Kernels registered to this key MUST work for autograd for all backends.
+- CompositeImplicitAutograd: alias key CompositeImplicitAutograd = CompositeExplicitAutograd + Autograd
+ Kernels registered to this key MUST work for both inference + autograd for all backends.
+
+Note we only allow registrations to alias keys inside pytorch core library. E.g
+you shouldn't register a CompositeImplicitAutograd or CompositeExplicitAutograd
+kernel from torch-xla extension, instead you should upstream the kernel into
+pytorch/pytorch repo so that it's available for all backends and continuously
+tested even without the extension.
+
+Usage:
+ dispatcher = PythonDispatcher()
+ dispatcher.register(["CPU", "XLA", "CompositeImplicitAutograd"])
+ print(dispatcher.dispatchTable()) # This tells you exactly which kernel is used for certain backend.
+ # For more debugging information
+ # print(dispatcher.keys())
+ # print(dispatcher.registrations())
+ # print(dispatcher.rawRegistrations())
+ # print(dispatcher.rawDispatchTable())
+PythonDispatcher calls the C++ dispatcher under the hood to precompute the dispatch table.
+This file only provides the simplified API for developers; relevant test code is located in
+test/test_dispatch.py
+"""
class PythonDispatcher:
    # Test-only namespace/op name, so registrations cannot clash with
    # real operators.
    namespace = "__test__"
    name = "foo"
    # One representative runtime key per use case described in the
    # module docstring above.
    runtime_keys = [
        "CPU", "AutogradCPU",
        "FPGA", "AutogradOther",
        "XLA", "AutogradXLA",
        "Lazy", "AutogradLazy",
    ]
    # Alias keys may only be registered to inside pytorch core.
    alias_keys = [
        "CompositeExplicitAutograd",
        "Autograd",
        "CompositeImplicitAutograd",
    ]
    supported_keys = runtime_keys + alias_keys

    def __init__(self):
        C._dispatch_check_invariants(self.name)  # type: ignore[attr-defined]
        self.ref = C._dispatch_library("FRAGMENT", self.namespace, "")
        self.ref.def_("foo(Tensor x) -> Tensor")

    def keys(self):
        """Return a list of dispatch keys supported by PythonDispatcher.

        You can register kernels to these keys.
        """
        return self.supported_keys

    def register(self, dispatchKeys):
        """Register kernels to the target dispatchKeys.

        dispatchKeys(list[str]): a list of dispatch keys that you want to
        register your own kernel to. Note that you don't need to write the
        kernel yourself in this PythonDispatcher. E.g. for the CPU key, a
        kernel (e.g. fn_CPU for CPU) is automatically generated and
        registered.
        """
        # Overriding is not supported and triggers a warning in C++ dispatcher.
        if len(set(dispatchKeys)) != len(dispatchKeys):
            raise RuntimeError(f"Overriden is not allowed but found duplicates in {dispatchKeys}.")
        # We currently forbid this in codegen instead of C++ dispatcher.
        if 'CompositeImplicitAutograd' in dispatchKeys and 'CompositeExplicitAutograd' in dispatchKeys:
            raise RuntimeError("Registration to both CompositeImplicitAutograd and CompositeExplicitAutograd is not allowed.")
        for key in dispatchKeys:
            if key not in self.supported_keys:
                raise RuntimeError(f"{key} is not supported, please select a dispatch key in {self.supported_keys}.")
            self.ref.impl_t_t("foo", dispatch=key, debug="fn_" + key)

    def _format_line(self, key, kernel):
        """Format one (key, kernel) table row."""
        return "{:<15} {}\n".format(key, kernel)

    def _format_header(self, header):
        """Format a table header followed by the column names."""
        s = f"""
{header}
"""
        s += self._format_line("key", "kernel")
        s += "---------------------------\n"
        return s

    def rawRegistrations(self):
        """Return raw output of all registration info for debugging only.

        Use registrations() for a simplified version.
        """
        return C._dispatch_dump("{}::{}".format(self.namespace, self.name))  # type: ignore[attr-defined]

    def rawDispatchTable(self):
        """Return raw output of computed dispatch table for debugging only.

        Use dispatchTable() for a simplified version.
        """
        return C._dispatch_dump_table("{}::{}".format(self.namespace, self.name))  # type: ignore[attr-defined]

    def registrations(self):
        """Return a table (str) including all the registrations from users.

        Note this includes registrations to both runtime keys and alias keys.
        """
        output = self._format_header("Registered Kernels")
        state = self.rawRegistrations()
        state_entries = state.split('\n')
        for line in state_entries:
            first = line.split(":")[0]
            # Keep only lines whose key prefix is one of the supported keys.
            if any(first.startswith(k) for k in self.supported_keys):
                kernel = line.split("::")[0].split(" ")[1]
                output += self._format_line(first, kernel)
        return output

    def dispatchTable(self):
        """Return the computed dispatch table (str).

        Note this only includes runtime keys; registrations to alias keys
        have been decoded to their mapped runtime keys.
        """
        output = self._format_header("Computed Dispatch Table")
        table = self.rawDispatchTable()
        table_entries = table.split('\n')
        # Strip the "registered at ...FallbackKernel.cpp..." source
        # location so only the kernel identity remains.
        regex = re.compile(r"registered at .*FallbackKernel\.cpp.*(\[)")
        for line in table_entries:
            k = line.split(":")[0]
            if k in self.runtime_keys:
                entry = regex.sub('[', line)
                output += self._format_line(k, entry.split(": ")[1])
        return output