Add files using upload-large-folder tool
Browse filesThis view is limited to 50 files because it contains too many changes.
See raw diff
- .gitattributes +3 -0
- parrot/lib/libreadline.a +3 -0
- parrot/lib/python3.10/site-packages/aiohttp/_http_writer.cpython-310-x86_64-linux-gnu.so +3 -0
- parrot/lib/python3.10/site-packages/wandb/sdk/interface/__init__.py +0 -0
- parrot/lib/python3.10/site-packages/wandb/sdk/interface/__pycache__/__init__.cpython-310.pyc +0 -0
- parrot/lib/python3.10/site-packages/wandb/sdk/interface/__pycache__/constants.cpython-310.pyc +0 -0
- parrot/lib/python3.10/site-packages/wandb/sdk/interface/__pycache__/interface.cpython-310.pyc +0 -0
- parrot/lib/python3.10/site-packages/wandb/sdk/interface/__pycache__/interface_queue.cpython-310.pyc +0 -0
- parrot/lib/python3.10/site-packages/wandb/sdk/interface/__pycache__/interface_relay.cpython-310.pyc +0 -0
- parrot/lib/python3.10/site-packages/wandb/sdk/interface/__pycache__/interface_shared.cpython-310.pyc +0 -0
- parrot/lib/python3.10/site-packages/wandb/sdk/interface/__pycache__/interface_sock.cpython-310.pyc +0 -0
- parrot/lib/python3.10/site-packages/wandb/sdk/interface/__pycache__/message_future.cpython-310.pyc +0 -0
- parrot/lib/python3.10/site-packages/wandb/sdk/interface/__pycache__/message_future_poll.cpython-310.pyc +0 -0
- parrot/lib/python3.10/site-packages/wandb/sdk/interface/__pycache__/router.cpython-310.pyc +0 -0
- parrot/lib/python3.10/site-packages/wandb/sdk/interface/__pycache__/router_queue.cpython-310.pyc +0 -0
- parrot/lib/python3.10/site-packages/wandb/sdk/interface/__pycache__/router_relay.cpython-310.pyc +0 -0
- parrot/lib/python3.10/site-packages/wandb/sdk/interface/__pycache__/router_sock.cpython-310.pyc +0 -0
- parrot/lib/python3.10/site-packages/wandb/sdk/interface/__pycache__/summary_record.cpython-310.pyc +0 -0
- parrot/lib/python3.10/site-packages/wandb/sdk/interface/constants.py +4 -0
- parrot/lib/python3.10/site-packages/wandb/sdk/interface/interface.py +996 -0
- parrot/lib/python3.10/site-packages/wandb/sdk/interface/interface_queue.py +59 -0
- parrot/lib/python3.10/site-packages/wandb/sdk/interface/interface_relay.py +53 -0
- parrot/lib/python3.10/site-packages/wandb/sdk/interface/interface_shared.py +549 -0
- parrot/lib/python3.10/site-packages/wandb/sdk/interface/interface_sock.py +61 -0
- parrot/lib/python3.10/site-packages/wandb/sdk/interface/message_future.py +27 -0
- parrot/lib/python3.10/site-packages/wandb/sdk/interface/message_future_poll.py +50 -0
- parrot/lib/python3.10/site-packages/wandb/sdk/interface/router.py +118 -0
- parrot/lib/python3.10/site-packages/wandb/sdk/interface/router_queue.py +44 -0
- parrot/lib/python3.10/site-packages/wandb/sdk/interface/router_relay.py +39 -0
- parrot/lib/python3.10/site-packages/wandb/sdk/interface/router_sock.py +36 -0
- parrot/lib/python3.10/site-packages/wandb/sdk/interface/summary_record.py +67 -0
- parrot/lib/python3.10/site-packages/wandb/sdk/internal/__init__.py +0 -0
- parrot/lib/python3.10/site-packages/wandb/sdk/internal/datastore.py +297 -0
- parrot/lib/python3.10/site-packages/wandb/sdk/internal/handler.py +911 -0
- parrot/lib/python3.10/site-packages/wandb/sdk/internal/internal_api.py +0 -0
- parrot/lib/python3.10/site-packages/wandb/sdk/internal/job_builder.py +629 -0
- parrot/lib/python3.10/site-packages/wandb/sdk/internal/progress.py +83 -0
- parrot/lib/python3.10/site-packages/wandb/sdk/internal/thread_local_settings.py +18 -0
- parrot/lib/python3.10/site-packages/wandb/sdk/internal/update.py +113 -0
- parrot/lib/python3.10/site-packages/wandb/sdk/launch/__pycache__/__init__.cpython-310.pyc +0 -0
- parrot/lib/python3.10/site-packages/wandb/sdk/launch/__pycache__/_launch.cpython-310.pyc +0 -0
- parrot/lib/python3.10/site-packages/wandb/sdk/launch/__pycache__/_launch_add.cpython-310.pyc +0 -0
- parrot/lib/python3.10/site-packages/wandb/sdk/launch/__pycache__/_project_spec.cpython-310.pyc +0 -0
- parrot/lib/python3.10/site-packages/wandb/sdk/launch/__pycache__/create_job.cpython-310.pyc +0 -0
- parrot/lib/python3.10/site-packages/wandb/sdk/launch/__pycache__/errors.cpython-310.pyc +0 -0
- parrot/lib/python3.10/site-packages/wandb/sdk/launch/__pycache__/git_reference.cpython-310.pyc +0 -0
- parrot/lib/python3.10/site-packages/wandb/sdk/launch/__pycache__/loader.cpython-310.pyc +0 -0
- parrot/lib/python3.10/site-packages/wandb/sdk/launch/__pycache__/utils.cpython-310.pyc +0 -0
- parrot/lib/python3.10/site-packages/wandb/sdk/launch/__pycache__/wandb_reference.cpython-310.pyc +0 -0
- parrot/lib/python3.10/site-packages/wandb/sdk/launch/builder/__pycache__/__init__.cpython-310.pyc +0 -0
.gitattributes
CHANGED
|
@@ -142,3 +142,6 @@ parrot/lib/python3.10/site-packages/wandb/vendor/pynvml/__pycache__/pynvml.cpyth
|
|
| 142 |
parrot/lib/python3.10/site-packages/cv2/qt/fonts/DejaVuSans-BoldOblique.ttf filter=lfs diff=lfs merge=lfs -text
|
| 143 |
parrot/lib/python3.10/site-packages/pyarrow/_fs.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
|
| 144 |
parrot/lib/python3.10/site-packages/cv2/qt/fonts/DejaVuSans-Bold.ttf filter=lfs diff=lfs merge=lfs -text
|
|
|
|
|
|
|
|
|
|
|
|
| 142 |
parrot/lib/python3.10/site-packages/cv2/qt/fonts/DejaVuSans-BoldOblique.ttf filter=lfs diff=lfs merge=lfs -text
|
| 143 |
parrot/lib/python3.10/site-packages/pyarrow/_fs.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
|
| 144 |
parrot/lib/python3.10/site-packages/cv2/qt/fonts/DejaVuSans-Bold.ttf filter=lfs diff=lfs merge=lfs -text
|
| 145 |
+
parrot/lib/libreadline.a filter=lfs diff=lfs merge=lfs -text
|
| 146 |
+
parrot/lib/python3.10/site-packages/wandb/vendor/pygments/lexers/__pycache__/_php_builtins.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
|
| 147 |
+
parrot/lib/python3.10/site-packages/aiohttp/_http_writer.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
|
parrot/lib/libreadline.a
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:34edb0aaf24f86fa37e869bb46389534179d560e141a744b15d854497148663a
|
| 3 |
+
size 749782
|
parrot/lib/python3.10/site-packages/aiohttp/_http_writer.cpython-310-x86_64-linux-gnu.so
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:5436569093dd69cf0f00b018a9189d08fb2ddd45b65049de429719f1540fa777
|
| 3 |
+
size 459376
|
parrot/lib/python3.10/site-packages/wandb/sdk/interface/__init__.py
ADDED
|
File without changes
|
parrot/lib/python3.10/site-packages/wandb/sdk/interface/__pycache__/__init__.cpython-310.pyc
ADDED
|
Binary file (171 Bytes). View file
|
|
|
parrot/lib/python3.10/site-packages/wandb/sdk/interface/__pycache__/constants.cpython-310.pyc
ADDED
|
Binary file (249 Bytes). View file
|
|
|
parrot/lib/python3.10/site-packages/wandb/sdk/interface/__pycache__/interface.cpython-310.pyc
ADDED
|
Binary file (31.8 kB). View file
|
|
|
parrot/lib/python3.10/site-packages/wandb/sdk/interface/__pycache__/interface_queue.cpython-310.pyc
ADDED
|
Binary file (2.24 kB). View file
|
|
|
parrot/lib/python3.10/site-packages/wandb/sdk/interface/__pycache__/interface_relay.cpython-310.pyc
ADDED
|
Binary file (1.8 kB). View file
|
|
|
parrot/lib/python3.10/site-packages/wandb/sdk/interface/__pycache__/interface_shared.cpython-310.pyc
ADDED
|
Binary file (19.2 kB). View file
|
|
|
parrot/lib/python3.10/site-packages/wandb/sdk/interface/__pycache__/interface_sock.cpython-310.pyc
ADDED
|
Binary file (2.65 kB). View file
|
|
|
parrot/lib/python3.10/site-packages/wandb/sdk/interface/__pycache__/message_future.cpython-310.pyc
ADDED
|
Binary file (1.28 kB). View file
|
|
|
parrot/lib/python3.10/site-packages/wandb/sdk/interface/__pycache__/message_future_poll.cpython-310.pyc
ADDED
|
Binary file (1.79 kB). View file
|
|
|
parrot/lib/python3.10/site-packages/wandb/sdk/interface/__pycache__/router.cpython-310.pyc
ADDED
|
Binary file (3.99 kB). View file
|
|
|
parrot/lib/python3.10/site-packages/wandb/sdk/interface/__pycache__/router_queue.cpython-310.pyc
ADDED
|
Binary file (1.73 kB). View file
|
|
|
parrot/lib/python3.10/site-packages/wandb/sdk/interface/__pycache__/router_relay.cpython-310.pyc
ADDED
|
Binary file (1.48 kB). View file
|
|
|
parrot/lib/python3.10/site-packages/wandb/sdk/interface/__pycache__/router_sock.cpython-310.pyc
ADDED
|
Binary file (1.65 kB). View file
|
|
|
parrot/lib/python3.10/site-packages/wandb/sdk/interface/__pycache__/summary_record.cpython-310.pyc
ADDED
|
Binary file (2.65 kB). View file
|
|
|
parrot/lib/python3.10/site-packages/wandb/sdk/interface/constants.py
ADDED
|
@@ -0,0 +1,4 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#
|
| 2 |
+
NOTIFY_PROCESS = 1
|
| 3 |
+
NOTIFY_SHUTDOWN = 2
|
| 4 |
+
NOTIFY_REQUEST = 3
|
parrot/lib/python3.10/site-packages/wandb/sdk/interface/interface.py
ADDED
|
@@ -0,0 +1,996 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Interface base class - Used to send messages to the internal process.
|
| 2 |
+
|
| 3 |
+
InterfaceBase: The abstract class
|
| 4 |
+
InterfaceShared: Common routines for socket and queue based implementations
|
| 5 |
+
InterfaceQueue: Use multiprocessing queues to send and receive messages
|
| 6 |
+
InterfaceSock: Use socket to send and receive messages
|
| 7 |
+
InterfaceRelay: Responses are routed to a relay queue (not matching uuids)
|
| 8 |
+
|
| 9 |
+
"""
|
| 10 |
+
|
| 11 |
+
import gzip
|
| 12 |
+
import logging
|
| 13 |
+
import os
|
| 14 |
+
import sys
|
| 15 |
+
import time
|
| 16 |
+
from abc import abstractmethod
|
| 17 |
+
from pathlib import Path
|
| 18 |
+
from secrets import token_hex
|
| 19 |
+
from typing import (
|
| 20 |
+
TYPE_CHECKING,
|
| 21 |
+
Any,
|
| 22 |
+
Dict,
|
| 23 |
+
Iterable,
|
| 24 |
+
List,
|
| 25 |
+
NewType,
|
| 26 |
+
Optional,
|
| 27 |
+
Tuple,
|
| 28 |
+
Union,
|
| 29 |
+
)
|
| 30 |
+
|
| 31 |
+
from wandb import termwarn
|
| 32 |
+
from wandb.proto import wandb_internal_pb2 as pb
|
| 33 |
+
from wandb.proto import wandb_telemetry_pb2 as tpb
|
| 34 |
+
from wandb.sdk.artifacts.artifact import Artifact
|
| 35 |
+
from wandb.sdk.artifacts.artifact_manifest import ArtifactManifest
|
| 36 |
+
from wandb.sdk.artifacts.staging import get_staging_dir
|
| 37 |
+
from wandb.sdk.lib import json_util as json
|
| 38 |
+
from wandb.util import (
|
| 39 |
+
WandBJSONEncoderOld,
|
| 40 |
+
get_h5_typename,
|
| 41 |
+
json_dumps_safer,
|
| 42 |
+
json_dumps_safer_history,
|
| 43 |
+
json_friendly,
|
| 44 |
+
json_friendly_val,
|
| 45 |
+
maybe_compress_summary,
|
| 46 |
+
)
|
| 47 |
+
|
| 48 |
+
from ..data_types.utils import history_dict_to_json, val_to_json
|
| 49 |
+
from ..lib.mailbox import MailboxHandle
|
| 50 |
+
from . import summary_record as sr
|
| 51 |
+
from .message_future import MessageFuture
|
| 52 |
+
|
| 53 |
+
MANIFEST_FILE_SIZE_THRESHOLD = 100_000
|
| 54 |
+
|
| 55 |
+
GlobStr = NewType("GlobStr", str)
|
| 56 |
+
|
| 57 |
+
if sys.version_info >= (3, 8):
|
| 58 |
+
from typing import Literal, TypedDict
|
| 59 |
+
else:
|
| 60 |
+
from typing_extensions import Literal, TypedDict
|
| 61 |
+
|
| 62 |
+
PolicyName = Literal["now", "live", "end"]
|
| 63 |
+
|
| 64 |
+
|
| 65 |
+
class FilesDict(TypedDict):
|
| 66 |
+
files: Iterable[Tuple[GlobStr, PolicyName]]
|
| 67 |
+
|
| 68 |
+
|
| 69 |
+
if TYPE_CHECKING:
|
| 70 |
+
from ..wandb_run import Run
|
| 71 |
+
|
| 72 |
+
|
| 73 |
+
logger = logging.getLogger("wandb")
|
| 74 |
+
|
| 75 |
+
|
| 76 |
+
def file_policy_to_enum(policy: "PolicyName") -> "pb.FilesItem.PolicyType.V":
|
| 77 |
+
if policy == "now":
|
| 78 |
+
enum = pb.FilesItem.PolicyType.NOW
|
| 79 |
+
elif policy == "end":
|
| 80 |
+
enum = pb.FilesItem.PolicyType.END
|
| 81 |
+
elif policy == "live":
|
| 82 |
+
enum = pb.FilesItem.PolicyType.LIVE
|
| 83 |
+
return enum
|
| 84 |
+
|
| 85 |
+
|
| 86 |
+
def file_enum_to_policy(enum: "pb.FilesItem.PolicyType.V") -> "PolicyName":
|
| 87 |
+
if enum == pb.FilesItem.PolicyType.NOW:
|
| 88 |
+
policy: PolicyName = "now"
|
| 89 |
+
elif enum == pb.FilesItem.PolicyType.END:
|
| 90 |
+
policy = "end"
|
| 91 |
+
elif enum == pb.FilesItem.PolicyType.LIVE:
|
| 92 |
+
policy = "live"
|
| 93 |
+
return policy
|
| 94 |
+
|
| 95 |
+
|
| 96 |
+
class InterfaceBase:
|
| 97 |
+
_run: Optional["Run"]
|
| 98 |
+
_drop: bool
|
| 99 |
+
|
| 100 |
+
def __init__(self) -> None:
|
| 101 |
+
self._run = None
|
| 102 |
+
self._drop = False
|
| 103 |
+
|
| 104 |
+
def _hack_set_run(self, run: "Run") -> None:
|
| 105 |
+
self._run = run
|
| 106 |
+
current_pid = os.getpid()
|
| 107 |
+
self._run._set_iface_pid(current_pid)
|
| 108 |
+
|
| 109 |
+
def publish_header(self) -> None:
|
| 110 |
+
header = pb.HeaderRecord()
|
| 111 |
+
self._publish_header(header)
|
| 112 |
+
|
| 113 |
+
@abstractmethod
|
| 114 |
+
def _publish_header(self, header: pb.HeaderRecord) -> None:
|
| 115 |
+
raise NotImplementedError
|
| 116 |
+
|
| 117 |
+
def deliver_status(self) -> MailboxHandle:
|
| 118 |
+
return self._deliver_status(pb.StatusRequest())
|
| 119 |
+
|
| 120 |
+
@abstractmethod
|
| 121 |
+
def _deliver_status(
|
| 122 |
+
self,
|
| 123 |
+
status: pb.StatusRequest,
|
| 124 |
+
) -> MailboxHandle:
|
| 125 |
+
raise NotImplementedError
|
| 126 |
+
|
| 127 |
+
def _make_config(
|
| 128 |
+
self,
|
| 129 |
+
data: Optional[dict] = None,
|
| 130 |
+
key: Optional[Union[Tuple[str, ...], str]] = None,
|
| 131 |
+
val: Optional[Any] = None,
|
| 132 |
+
obj: Optional[pb.ConfigRecord] = None,
|
| 133 |
+
) -> pb.ConfigRecord:
|
| 134 |
+
config = obj or pb.ConfigRecord()
|
| 135 |
+
if data:
|
| 136 |
+
for k, v in data.items():
|
| 137 |
+
update = config.update.add()
|
| 138 |
+
update.key = k
|
| 139 |
+
update.value_json = json_dumps_safer(json_friendly(v)[0])
|
| 140 |
+
if key:
|
| 141 |
+
update = config.update.add()
|
| 142 |
+
if isinstance(key, tuple):
|
| 143 |
+
for k in key:
|
| 144 |
+
update.nested_key.append(k)
|
| 145 |
+
else:
|
| 146 |
+
update.key = key
|
| 147 |
+
update.value_json = json_dumps_safer(json_friendly(val)[0])
|
| 148 |
+
return config
|
| 149 |
+
|
| 150 |
+
def _make_run(self, run: "Run") -> pb.RunRecord:
|
| 151 |
+
proto_run = pb.RunRecord()
|
| 152 |
+
run._make_proto_run(proto_run)
|
| 153 |
+
if run._settings.host:
|
| 154 |
+
proto_run.host = run._settings.host
|
| 155 |
+
if run._config is not None:
|
| 156 |
+
config_dict = run._config._as_dict() # type: ignore
|
| 157 |
+
self._make_config(data=config_dict, obj=proto_run.config)
|
| 158 |
+
if run._telemetry_obj:
|
| 159 |
+
proto_run.telemetry.MergeFrom(run._telemetry_obj)
|
| 160 |
+
return proto_run
|
| 161 |
+
|
| 162 |
+
def publish_run(self, run: "Run") -> None:
|
| 163 |
+
run_record = self._make_run(run)
|
| 164 |
+
self._publish_run(run_record)
|
| 165 |
+
|
| 166 |
+
@abstractmethod
|
| 167 |
+
def _publish_run(self, run: pb.RunRecord) -> None:
|
| 168 |
+
raise NotImplementedError
|
| 169 |
+
|
| 170 |
+
def publish_cancel(self, cancel_slot: str) -> None:
|
| 171 |
+
cancel = pb.CancelRequest(cancel_slot=cancel_slot)
|
| 172 |
+
self._publish_cancel(cancel)
|
| 173 |
+
|
| 174 |
+
@abstractmethod
|
| 175 |
+
def _publish_cancel(self, cancel: pb.CancelRequest) -> None:
|
| 176 |
+
raise NotImplementedError
|
| 177 |
+
|
| 178 |
+
def publish_config(
|
| 179 |
+
self,
|
| 180 |
+
data: Optional[dict] = None,
|
| 181 |
+
key: Optional[Union[Tuple[str, ...], str]] = None,
|
| 182 |
+
val: Optional[Any] = None,
|
| 183 |
+
) -> None:
|
| 184 |
+
cfg = self._make_config(data=data, key=key, val=val)
|
| 185 |
+
|
| 186 |
+
self._publish_config(cfg)
|
| 187 |
+
|
| 188 |
+
@abstractmethod
|
| 189 |
+
def _publish_config(self, cfg: pb.ConfigRecord) -> None:
|
| 190 |
+
raise NotImplementedError
|
| 191 |
+
|
| 192 |
+
@abstractmethod
|
| 193 |
+
def _publish_metric(self, metric: pb.MetricRecord) -> None:
|
| 194 |
+
raise NotImplementedError
|
| 195 |
+
|
| 196 |
+
def _make_summary_from_dict(self, summary_dict: dict) -> pb.SummaryRecord:
|
| 197 |
+
summary = pb.SummaryRecord()
|
| 198 |
+
for k, v in summary_dict.items():
|
| 199 |
+
update = summary.update.add()
|
| 200 |
+
update.key = k
|
| 201 |
+
update.value_json = json.dumps(v)
|
| 202 |
+
return summary
|
| 203 |
+
|
| 204 |
+
def _summary_encode(self, value: Any, path_from_root: str) -> dict:
|
| 205 |
+
"""Normalize, compress, and encode sub-objects for backend storage.
|
| 206 |
+
|
| 207 |
+
value: Object to encode.
|
| 208 |
+
path_from_root: `str` dot separated string from the top-level summary to the
|
| 209 |
+
current `value`.
|
| 210 |
+
|
| 211 |
+
Returns:
|
| 212 |
+
A new tree of dict's with large objects replaced with dictionaries
|
| 213 |
+
with "_type" entries that say which type the original data was.
|
| 214 |
+
"""
|
| 215 |
+
# Constructs a new `dict` tree in `json_value` that discards and/or
|
| 216 |
+
# encodes objects that aren't JSON serializable.
|
| 217 |
+
|
| 218 |
+
if isinstance(value, dict):
|
| 219 |
+
json_value = {}
|
| 220 |
+
for key, value in value.items(): # noqa: B020
|
| 221 |
+
json_value[key] = self._summary_encode(
|
| 222 |
+
value, path_from_root + "." + key
|
| 223 |
+
)
|
| 224 |
+
return json_value
|
| 225 |
+
else:
|
| 226 |
+
friendly_value, converted = json_friendly(
|
| 227 |
+
val_to_json(self._run, path_from_root, value, namespace="summary")
|
| 228 |
+
)
|
| 229 |
+
json_value, compressed = maybe_compress_summary(
|
| 230 |
+
friendly_value, get_h5_typename(value)
|
| 231 |
+
)
|
| 232 |
+
if compressed:
|
| 233 |
+
# TODO(jhr): impleement me
|
| 234 |
+
pass
|
| 235 |
+
# self.write_h5(path_from_root, friendly_value)
|
| 236 |
+
|
| 237 |
+
return json_value
|
| 238 |
+
|
| 239 |
+
def _make_summary(self, summary_record: sr.SummaryRecord) -> pb.SummaryRecord:
|
| 240 |
+
pb_summary_record = pb.SummaryRecord()
|
| 241 |
+
|
| 242 |
+
for item in summary_record.update:
|
| 243 |
+
pb_summary_item = pb_summary_record.update.add()
|
| 244 |
+
key_length = len(item.key)
|
| 245 |
+
|
| 246 |
+
assert key_length > 0
|
| 247 |
+
|
| 248 |
+
if key_length > 1:
|
| 249 |
+
pb_summary_item.nested_key.extend(item.key)
|
| 250 |
+
else:
|
| 251 |
+
pb_summary_item.key = item.key[0]
|
| 252 |
+
|
| 253 |
+
path_from_root = ".".join(item.key)
|
| 254 |
+
json_value = self._summary_encode(item.value, path_from_root)
|
| 255 |
+
json_value, _ = json_friendly(json_value) # type: ignore
|
| 256 |
+
|
| 257 |
+
pb_summary_item.value_json = json.dumps(
|
| 258 |
+
json_value,
|
| 259 |
+
cls=WandBJSONEncoderOld,
|
| 260 |
+
)
|
| 261 |
+
|
| 262 |
+
for item in summary_record.remove:
|
| 263 |
+
pb_summary_item = pb_summary_record.remove.add()
|
| 264 |
+
key_length = len(item.key)
|
| 265 |
+
|
| 266 |
+
assert key_length > 0
|
| 267 |
+
|
| 268 |
+
if key_length > 1:
|
| 269 |
+
pb_summary_item.nested_key.extend(item.key)
|
| 270 |
+
else:
|
| 271 |
+
pb_summary_item.key = item.key[0]
|
| 272 |
+
|
| 273 |
+
return pb_summary_record
|
| 274 |
+
|
| 275 |
+
def publish_summary(self, summary_record: sr.SummaryRecord) -> None:
|
| 276 |
+
pb_summary_record = self._make_summary(summary_record)
|
| 277 |
+
self._publish_summary(pb_summary_record)
|
| 278 |
+
|
| 279 |
+
@abstractmethod
|
| 280 |
+
def _publish_summary(self, summary: pb.SummaryRecord) -> None:
|
| 281 |
+
raise NotImplementedError
|
| 282 |
+
|
| 283 |
+
def _make_files(self, files_dict: "FilesDict") -> pb.FilesRecord:
|
| 284 |
+
files = pb.FilesRecord()
|
| 285 |
+
for path, policy in files_dict["files"]:
|
| 286 |
+
f = files.files.add()
|
| 287 |
+
f.path = path
|
| 288 |
+
f.policy = file_policy_to_enum(policy)
|
| 289 |
+
return files
|
| 290 |
+
|
| 291 |
+
def publish_files(self, files_dict: "FilesDict") -> None:
|
| 292 |
+
files = self._make_files(files_dict)
|
| 293 |
+
self._publish_files(files)
|
| 294 |
+
|
| 295 |
+
@abstractmethod
|
| 296 |
+
def _publish_files(self, files: pb.FilesRecord) -> None:
|
| 297 |
+
raise NotImplementedError
|
| 298 |
+
|
| 299 |
+
def publish_python_packages(self, working_set) -> None:
|
| 300 |
+
python_packages = pb.PythonPackagesRequest()
|
| 301 |
+
for pkg in working_set:
|
| 302 |
+
python_packages.package.add(name=pkg.key, version=pkg.version)
|
| 303 |
+
self._publish_python_packages(python_packages)
|
| 304 |
+
|
| 305 |
+
@abstractmethod
|
| 306 |
+
def _publish_python_packages(
|
| 307 |
+
self, python_packages: pb.PythonPackagesRequest
|
| 308 |
+
) -> None:
|
| 309 |
+
raise NotImplementedError
|
| 310 |
+
|
| 311 |
+
def _make_artifact(self, artifact: "Artifact") -> pb.ArtifactRecord:
|
| 312 |
+
proto_artifact = pb.ArtifactRecord()
|
| 313 |
+
proto_artifact.type = artifact.type
|
| 314 |
+
proto_artifact.name = artifact.name
|
| 315 |
+
proto_artifact.client_id = artifact._client_id
|
| 316 |
+
proto_artifact.sequence_client_id = artifact._sequence_client_id
|
| 317 |
+
proto_artifact.digest = artifact.digest
|
| 318 |
+
if artifact.distributed_id:
|
| 319 |
+
proto_artifact.distributed_id = artifact.distributed_id
|
| 320 |
+
if artifact.description:
|
| 321 |
+
proto_artifact.description = artifact.description
|
| 322 |
+
if artifact.metadata:
|
| 323 |
+
proto_artifact.metadata = json.dumps(json_friendly_val(artifact.metadata))
|
| 324 |
+
if artifact._base_id:
|
| 325 |
+
proto_artifact.base_id = artifact._base_id
|
| 326 |
+
|
| 327 |
+
ttl_duration_input = artifact._ttl_duration_seconds_to_gql()
|
| 328 |
+
if ttl_duration_input:
|
| 329 |
+
proto_artifact.ttl_duration_seconds = ttl_duration_input
|
| 330 |
+
proto_artifact.incremental_beta1 = artifact.incremental
|
| 331 |
+
self._make_artifact_manifest(artifact.manifest, obj=proto_artifact.manifest)
|
| 332 |
+
return proto_artifact
|
| 333 |
+
|
| 334 |
+
def _make_artifact_manifest(
|
| 335 |
+
self,
|
| 336 |
+
artifact_manifest: ArtifactManifest,
|
| 337 |
+
obj: Optional[pb.ArtifactManifest] = None,
|
| 338 |
+
) -> pb.ArtifactManifest:
|
| 339 |
+
proto_manifest = obj or pb.ArtifactManifest()
|
| 340 |
+
proto_manifest.version = artifact_manifest.version()
|
| 341 |
+
proto_manifest.storage_policy = artifact_manifest.storage_policy.name()
|
| 342 |
+
|
| 343 |
+
# Very large manifests need to be written to file to avoid protobuf size limits.
|
| 344 |
+
if len(artifact_manifest) > MANIFEST_FILE_SIZE_THRESHOLD:
|
| 345 |
+
path = self._write_artifact_manifest_file(artifact_manifest)
|
| 346 |
+
proto_manifest.manifest_file_path = path
|
| 347 |
+
return proto_manifest
|
| 348 |
+
|
| 349 |
+
for k, v in artifact_manifest.storage_policy.config().items() or {}.items():
|
| 350 |
+
cfg = proto_manifest.storage_policy_config.add()
|
| 351 |
+
cfg.key = k
|
| 352 |
+
cfg.value_json = json.dumps(v)
|
| 353 |
+
|
| 354 |
+
for entry in sorted(artifact_manifest.entries.values(), key=lambda k: k.path):
|
| 355 |
+
proto_entry = proto_manifest.contents.add()
|
| 356 |
+
proto_entry.path = entry.path
|
| 357 |
+
proto_entry.digest = entry.digest
|
| 358 |
+
if entry.size:
|
| 359 |
+
proto_entry.size = entry.size
|
| 360 |
+
if entry.birth_artifact_id:
|
| 361 |
+
proto_entry.birth_artifact_id = entry.birth_artifact_id
|
| 362 |
+
if entry.ref:
|
| 363 |
+
proto_entry.ref = entry.ref
|
| 364 |
+
if entry.local_path:
|
| 365 |
+
proto_entry.local_path = entry.local_path
|
| 366 |
+
proto_entry.skip_cache = entry.skip_cache
|
| 367 |
+
for k, v in entry.extra.items():
|
| 368 |
+
proto_extra = proto_entry.extra.add()
|
| 369 |
+
proto_extra.key = k
|
| 370 |
+
proto_extra.value_json = json.dumps(v)
|
| 371 |
+
return proto_manifest
|
| 372 |
+
|
| 373 |
+
def _write_artifact_manifest_file(self, manifest: ArtifactManifest) -> str:
|
| 374 |
+
manifest_dir = Path(get_staging_dir()) / "artifact_manifests"
|
| 375 |
+
manifest_dir.mkdir(parents=True, exist_ok=True)
|
| 376 |
+
# It would be simpler to use `manifest.to_json()`, but that gets very slow for
|
| 377 |
+
# large manifests since it encodes the whole thing as a single JSON object.
|
| 378 |
+
filename = f"{time.time()}_{token_hex(8)}.manifest_contents.jl.gz"
|
| 379 |
+
manifest_file_path = manifest_dir / filename
|
| 380 |
+
with gzip.open(manifest_file_path, mode="wt", compresslevel=1) as f:
|
| 381 |
+
for entry in manifest.entries.values():
|
| 382 |
+
f.write(f"{json.dumps(entry.to_json())}\n")
|
| 383 |
+
return str(manifest_file_path)
|
| 384 |
+
|
| 385 |
+
def deliver_link_artifact(
|
| 386 |
+
self,
|
| 387 |
+
run: "Run",
|
| 388 |
+
artifact: "Artifact",
|
| 389 |
+
portfolio_name: str,
|
| 390 |
+
aliases: Iterable[str],
|
| 391 |
+
entity: Optional[str] = None,
|
| 392 |
+
project: Optional[str] = None,
|
| 393 |
+
) -> MailboxHandle:
|
| 394 |
+
link_artifact = pb.LinkArtifactRequest()
|
| 395 |
+
if artifact.is_draft():
|
| 396 |
+
link_artifact.client_id = artifact._client_id
|
| 397 |
+
else:
|
| 398 |
+
link_artifact.server_id = artifact.id if artifact.id else ""
|
| 399 |
+
link_artifact.portfolio_name = portfolio_name
|
| 400 |
+
link_artifact.portfolio_entity = entity or run.entity
|
| 401 |
+
link_artifact.portfolio_project = project or run.project
|
| 402 |
+
link_artifact.portfolio_aliases.extend(aliases)
|
| 403 |
+
|
| 404 |
+
return self._deliver_link_artifact(link_artifact)
|
| 405 |
+
|
| 406 |
+
@abstractmethod
|
| 407 |
+
def _deliver_link_artifact(
|
| 408 |
+
self, link_artifact: pb.LinkArtifactRequest
|
| 409 |
+
) -> MailboxHandle:
|
| 410 |
+
raise NotImplementedError
|
| 411 |
+
|
| 412 |
+
@staticmethod
|
| 413 |
+
def _make_partial_source_str(
|
| 414 |
+
source: Any, job_info: Dict[str, Any], metadata: Dict[str, Any]
|
| 415 |
+
) -> str:
|
| 416 |
+
"""Construct use_artifact.partial.source_info.source as str."""
|
| 417 |
+
source_type = job_info.get("source_type", "").strip()
|
| 418 |
+
if source_type == "artifact":
|
| 419 |
+
info_source = job_info.get("source", {})
|
| 420 |
+
source.artifact.artifact = info_source.get("artifact", "")
|
| 421 |
+
source.artifact.entrypoint.extend(info_source.get("entrypoint", []))
|
| 422 |
+
source.artifact.notebook = info_source.get("notebook", False)
|
| 423 |
+
build_context = info_source.get("build_context")
|
| 424 |
+
if build_context:
|
| 425 |
+
source.artifact.build_context = build_context
|
| 426 |
+
dockerfile = info_source.get("dockerfile")
|
| 427 |
+
if dockerfile:
|
| 428 |
+
source.artifact.dockerfile = dockerfile
|
| 429 |
+
elif source_type == "repo":
|
| 430 |
+
source.git.git_info.remote = metadata.get("git", {}).get("remote", "")
|
| 431 |
+
source.git.git_info.commit = metadata.get("git", {}).get("commit", "")
|
| 432 |
+
source.git.entrypoint.extend(metadata.get("entrypoint", []))
|
| 433 |
+
source.git.notebook = metadata.get("notebook", False)
|
| 434 |
+
build_context = metadata.get("build_context")
|
| 435 |
+
if build_context:
|
| 436 |
+
source.git.build_context = build_context
|
| 437 |
+
dockerfile = metadata.get("dockerfile")
|
| 438 |
+
if dockerfile:
|
| 439 |
+
source.git.dockerfile = dockerfile
|
| 440 |
+
elif source_type == "image":
|
| 441 |
+
source.image.image = metadata.get("docker", "")
|
| 442 |
+
else:
|
| 443 |
+
raise ValueError("Invalid source type")
|
| 444 |
+
|
| 445 |
+
source_str: str = source.SerializeToString()
|
| 446 |
+
return source_str
|
| 447 |
+
|
| 448 |
+
def _make_proto_use_artifact(
|
| 449 |
+
self,
|
| 450 |
+
use_artifact: pb.UseArtifactRecord,
|
| 451 |
+
job_name: str,
|
| 452 |
+
job_info: Dict[str, Any],
|
| 453 |
+
metadata: Dict[str, Any],
|
| 454 |
+
) -> pb.UseArtifactRecord:
|
| 455 |
+
use_artifact.partial.job_name = job_name
|
| 456 |
+
use_artifact.partial.source_info._version = job_info.get("_version", "")
|
| 457 |
+
use_artifact.partial.source_info.source_type = job_info.get("source_type", "")
|
| 458 |
+
use_artifact.partial.source_info.runtime = job_info.get("runtime", "")
|
| 459 |
+
|
| 460 |
+
src_str = self._make_partial_source_str(
|
| 461 |
+
source=use_artifact.partial.source_info.source,
|
| 462 |
+
job_info=job_info,
|
| 463 |
+
metadata=metadata,
|
| 464 |
+
)
|
| 465 |
+
use_artifact.partial.source_info.source.ParseFromString(src_str) # type: ignore[arg-type]
|
| 466 |
+
|
| 467 |
+
return use_artifact
|
| 468 |
+
|
| 469 |
+
def publish_use_artifact(
|
| 470 |
+
self,
|
| 471 |
+
artifact: "Artifact",
|
| 472 |
+
) -> None:
|
| 473 |
+
assert artifact.id is not None, "Artifact must have an id"
|
| 474 |
+
|
| 475 |
+
use_artifact = pb.UseArtifactRecord(
|
| 476 |
+
id=artifact.id,
|
| 477 |
+
type=artifact.type,
|
| 478 |
+
name=artifact.name,
|
| 479 |
+
)
|
| 480 |
+
|
| 481 |
+
# TODO(gst): move to internal process
|
| 482 |
+
if "_partial" in artifact.metadata:
|
| 483 |
+
# Download source info from logged partial job artifact
|
| 484 |
+
job_info = {}
|
| 485 |
+
try:
|
| 486 |
+
path = artifact.get_entry("wandb-job.json").download()
|
| 487 |
+
with open(path) as f:
|
| 488 |
+
job_info = json.load(f)
|
| 489 |
+
|
| 490 |
+
except Exception as e:
|
| 491 |
+
logger.warning(
|
| 492 |
+
f"Failed to download partial job info from artifact {artifact}, : {e}"
|
| 493 |
+
)
|
| 494 |
+
termwarn(
|
| 495 |
+
f"Failed to download partial job info from artifact {artifact}, : {e}"
|
| 496 |
+
)
|
| 497 |
+
return
|
| 498 |
+
|
| 499 |
+
try:
|
| 500 |
+
use_artifact = self._make_proto_use_artifact(
|
| 501 |
+
use_artifact=use_artifact,
|
| 502 |
+
job_name=artifact.name,
|
| 503 |
+
job_info=job_info,
|
| 504 |
+
metadata=artifact.metadata,
|
| 505 |
+
)
|
| 506 |
+
except Exception as e:
|
| 507 |
+
logger.warning(f"Failed to construct use artifact proto: {e}")
|
| 508 |
+
termwarn(f"Failed to construct use artifact proto: {e}")
|
| 509 |
+
return
|
| 510 |
+
|
| 511 |
+
self._publish_use_artifact(use_artifact)
|
| 512 |
+
|
| 513 |
+
@abstractmethod
|
| 514 |
+
def _publish_use_artifact(self, proto_artifact: pb.UseArtifactRecord) -> None:
|
| 515 |
+
raise NotImplementedError
|
| 516 |
+
|
| 517 |
+
def communicate_artifact(
|
| 518 |
+
self,
|
| 519 |
+
run: "Run",
|
| 520 |
+
artifact: "Artifact",
|
| 521 |
+
aliases: Iterable[str],
|
| 522 |
+
tags: Optional[Iterable[str]] = None,
|
| 523 |
+
history_step: Optional[int] = None,
|
| 524 |
+
is_user_created: bool = False,
|
| 525 |
+
use_after_commit: bool = False,
|
| 526 |
+
finalize: bool = True,
|
| 527 |
+
) -> MessageFuture:
|
| 528 |
+
proto_run = self._make_run(run)
|
| 529 |
+
proto_artifact = self._make_artifact(artifact)
|
| 530 |
+
proto_artifact.run_id = proto_run.run_id
|
| 531 |
+
proto_artifact.project = proto_run.project
|
| 532 |
+
proto_artifact.entity = proto_run.entity
|
| 533 |
+
proto_artifact.user_created = is_user_created
|
| 534 |
+
proto_artifact.use_after_commit = use_after_commit
|
| 535 |
+
proto_artifact.finalize = finalize
|
| 536 |
+
|
| 537 |
+
proto_artifact.aliases.extend(aliases or [])
|
| 538 |
+
proto_artifact.tags.extend(tags or [])
|
| 539 |
+
|
| 540 |
+
log_artifact = pb.LogArtifactRequest()
|
| 541 |
+
log_artifact.artifact.CopyFrom(proto_artifact)
|
| 542 |
+
if history_step is not None:
|
| 543 |
+
log_artifact.history_step = history_step
|
| 544 |
+
log_artifact.staging_dir = get_staging_dir()
|
| 545 |
+
resp = self._communicate_artifact(log_artifact)
|
| 546 |
+
return resp
|
| 547 |
+
|
| 548 |
+
@abstractmethod
|
| 549 |
+
def _communicate_artifact(
|
| 550 |
+
self, log_artifact: pb.LogArtifactRequest
|
| 551 |
+
) -> MessageFuture:
|
| 552 |
+
raise NotImplementedError
|
| 553 |
+
|
| 554 |
+
def deliver_download_artifact(
|
| 555 |
+
self,
|
| 556 |
+
artifact_id: str,
|
| 557 |
+
download_root: str,
|
| 558 |
+
allow_missing_references: bool,
|
| 559 |
+
skip_cache: bool,
|
| 560 |
+
path_prefix: Optional[str],
|
| 561 |
+
) -> MailboxHandle:
|
| 562 |
+
download_artifact = pb.DownloadArtifactRequest()
|
| 563 |
+
download_artifact.artifact_id = artifact_id
|
| 564 |
+
download_artifact.download_root = download_root
|
| 565 |
+
download_artifact.allow_missing_references = allow_missing_references
|
| 566 |
+
download_artifact.skip_cache = skip_cache
|
| 567 |
+
download_artifact.path_prefix = path_prefix or ""
|
| 568 |
+
resp = self._deliver_download_artifact(download_artifact)
|
| 569 |
+
return resp
|
| 570 |
+
|
| 571 |
+
@abstractmethod
|
| 572 |
+
def _deliver_download_artifact(
|
| 573 |
+
self, download_artifact: pb.DownloadArtifactRequest
|
| 574 |
+
) -> MailboxHandle:
|
| 575 |
+
raise NotImplementedError
|
| 576 |
+
|
| 577 |
+
def publish_artifact(
|
| 578 |
+
self,
|
| 579 |
+
run: "Run",
|
| 580 |
+
artifact: "Artifact",
|
| 581 |
+
aliases: Iterable[str],
|
| 582 |
+
tags: Optional[Iterable[str]] = None,
|
| 583 |
+
is_user_created: bool = False,
|
| 584 |
+
use_after_commit: bool = False,
|
| 585 |
+
finalize: bool = True,
|
| 586 |
+
) -> None:
|
| 587 |
+
proto_run = self._make_run(run)
|
| 588 |
+
proto_artifact = self._make_artifact(artifact)
|
| 589 |
+
proto_artifact.run_id = proto_run.run_id
|
| 590 |
+
proto_artifact.project = proto_run.project
|
| 591 |
+
proto_artifact.entity = proto_run.entity
|
| 592 |
+
proto_artifact.user_created = is_user_created
|
| 593 |
+
proto_artifact.use_after_commit = use_after_commit
|
| 594 |
+
proto_artifact.finalize = finalize
|
| 595 |
+
proto_artifact.aliases.extend(aliases or [])
|
| 596 |
+
proto_artifact.tags.extend(tags or [])
|
| 597 |
+
self._publish_artifact(proto_artifact)
|
| 598 |
+
|
| 599 |
+
@abstractmethod
|
| 600 |
+
def _publish_artifact(self, proto_artifact: pb.ArtifactRecord) -> None:
|
| 601 |
+
raise NotImplementedError
|
| 602 |
+
|
| 603 |
+
def publish_tbdata(self, log_dir: str, save: bool, root_logdir: str = "") -> None:
|
| 604 |
+
tbrecord = pb.TBRecord()
|
| 605 |
+
tbrecord.log_dir = log_dir
|
| 606 |
+
tbrecord.save = save
|
| 607 |
+
tbrecord.root_dir = root_logdir
|
| 608 |
+
self._publish_tbdata(tbrecord)
|
| 609 |
+
|
| 610 |
+
@abstractmethod
|
| 611 |
+
def _publish_tbdata(self, tbrecord: pb.TBRecord) -> None:
|
| 612 |
+
raise NotImplementedError
|
| 613 |
+
|
| 614 |
+
@abstractmethod
|
| 615 |
+
def _publish_telemetry(self, telem: tpb.TelemetryRecord) -> None:
|
| 616 |
+
raise NotImplementedError
|
| 617 |
+
|
| 618 |
+
def publish_partial_history(
|
| 619 |
+
self,
|
| 620 |
+
data: dict,
|
| 621 |
+
user_step: int,
|
| 622 |
+
step: Optional[int] = None,
|
| 623 |
+
flush: Optional[bool] = None,
|
| 624 |
+
publish_step: bool = True,
|
| 625 |
+
run: Optional["Run"] = None,
|
| 626 |
+
) -> None:
|
| 627 |
+
run = run or self._run
|
| 628 |
+
|
| 629 |
+
data = history_dict_to_json(run, data, step=user_step, ignore_copy_err=True)
|
| 630 |
+
data.pop("_step", None)
|
| 631 |
+
|
| 632 |
+
# add timestamp to the history request, if not already present
|
| 633 |
+
# the timestamp might come from the tensorboard log logic
|
| 634 |
+
if "_timestamp" not in data:
|
| 635 |
+
data["_timestamp"] = time.time()
|
| 636 |
+
|
| 637 |
+
partial_history = pb.PartialHistoryRequest()
|
| 638 |
+
for k, v in data.items():
|
| 639 |
+
item = partial_history.item.add()
|
| 640 |
+
item.key = k
|
| 641 |
+
item.value_json = json_dumps_safer_history(v)
|
| 642 |
+
|
| 643 |
+
if publish_step and step is not None:
|
| 644 |
+
partial_history.step.num = step
|
| 645 |
+
if flush is not None:
|
| 646 |
+
partial_history.action.flush = flush
|
| 647 |
+
self._publish_partial_history(partial_history)
|
| 648 |
+
|
| 649 |
+
@abstractmethod
|
| 650 |
+
def _publish_partial_history(self, history: pb.PartialHistoryRequest) -> None:
|
| 651 |
+
raise NotImplementedError
|
| 652 |
+
|
| 653 |
+
def publish_history(
|
| 654 |
+
self,
|
| 655 |
+
data: dict,
|
| 656 |
+
step: Optional[int] = None,
|
| 657 |
+
run: Optional["Run"] = None,
|
| 658 |
+
publish_step: bool = True,
|
| 659 |
+
) -> None:
|
| 660 |
+
run = run or self._run
|
| 661 |
+
data = history_dict_to_json(run, data, step=step)
|
| 662 |
+
history = pb.HistoryRecord()
|
| 663 |
+
if publish_step:
|
| 664 |
+
assert step is not None
|
| 665 |
+
history.step.num = step
|
| 666 |
+
data.pop("_step", None)
|
| 667 |
+
for k, v in data.items():
|
| 668 |
+
item = history.item.add()
|
| 669 |
+
item.key = k
|
| 670 |
+
item.value_json = json_dumps_safer_history(v)
|
| 671 |
+
self._publish_history(history)
|
| 672 |
+
|
| 673 |
+
@abstractmethod
|
| 674 |
+
def _publish_history(self, history: pb.HistoryRecord) -> None:
|
| 675 |
+
raise NotImplementedError
|
| 676 |
+
|
| 677 |
+
def publish_preempting(self) -> None:
|
| 678 |
+
preempt_rec = pb.RunPreemptingRecord()
|
| 679 |
+
self._publish_preempting(preempt_rec)
|
| 680 |
+
|
| 681 |
+
@abstractmethod
|
| 682 |
+
def _publish_preempting(self, preempt_rec: pb.RunPreemptingRecord) -> None:
|
| 683 |
+
raise NotImplementedError
|
| 684 |
+
|
| 685 |
+
def publish_output(self, name: str, data: str) -> None:
|
| 686 |
+
# from vendor.protobuf import google3.protobuf.timestamp
|
| 687 |
+
# ts = timestamp.Timestamp()
|
| 688 |
+
# ts.GetCurrentTime()
|
| 689 |
+
# now = datetime.now()
|
| 690 |
+
if name == "stdout":
|
| 691 |
+
otype = pb.OutputRecord.OutputType.STDOUT
|
| 692 |
+
elif name == "stderr":
|
| 693 |
+
otype = pb.OutputRecord.OutputType.STDERR
|
| 694 |
+
else:
|
| 695 |
+
# TODO(jhr): throw error?
|
| 696 |
+
print("unknown type")
|
| 697 |
+
o = pb.OutputRecord(output_type=otype, line=data)
|
| 698 |
+
o.timestamp.GetCurrentTime()
|
| 699 |
+
self._publish_output(o)
|
| 700 |
+
|
| 701 |
+
@abstractmethod
|
| 702 |
+
def _publish_output(self, outdata: pb.OutputRecord) -> None:
|
| 703 |
+
raise NotImplementedError
|
| 704 |
+
|
| 705 |
+
def publish_output_raw(self, name: str, data: str) -> None:
|
| 706 |
+
# from vendor.protobuf import google3.protobuf.timestamp
|
| 707 |
+
# ts = timestamp.Timestamp()
|
| 708 |
+
# ts.GetCurrentTime()
|
| 709 |
+
# now = datetime.now()
|
| 710 |
+
if name == "stdout":
|
| 711 |
+
otype = pb.OutputRawRecord.OutputType.STDOUT
|
| 712 |
+
elif name == "stderr":
|
| 713 |
+
otype = pb.OutputRawRecord.OutputType.STDERR
|
| 714 |
+
else:
|
| 715 |
+
# TODO(jhr): throw error?
|
| 716 |
+
print("unknown type")
|
| 717 |
+
o = pb.OutputRawRecord(output_type=otype, line=data)
|
| 718 |
+
o.timestamp.GetCurrentTime()
|
| 719 |
+
self._publish_output_raw(o)
|
| 720 |
+
|
| 721 |
+
@abstractmethod
|
| 722 |
+
def _publish_output_raw(self, outdata: pb.OutputRawRecord) -> None:
|
| 723 |
+
raise NotImplementedError
|
| 724 |
+
|
| 725 |
+
def publish_pause(self) -> None:
|
| 726 |
+
pause = pb.PauseRequest()
|
| 727 |
+
self._publish_pause(pause)
|
| 728 |
+
|
| 729 |
+
@abstractmethod
|
| 730 |
+
def _publish_pause(self, pause: pb.PauseRequest) -> None:
|
| 731 |
+
raise NotImplementedError
|
| 732 |
+
|
| 733 |
+
def publish_resume(self) -> None:
|
| 734 |
+
resume = pb.ResumeRequest()
|
| 735 |
+
self._publish_resume(resume)
|
| 736 |
+
|
| 737 |
+
@abstractmethod
|
| 738 |
+
def _publish_resume(self, resume: pb.ResumeRequest) -> None:
|
| 739 |
+
raise NotImplementedError
|
| 740 |
+
|
| 741 |
+
def publish_alert(
|
| 742 |
+
self, title: str, text: str, level: str, wait_duration: int
|
| 743 |
+
) -> None:
|
| 744 |
+
proto_alert = pb.AlertRecord()
|
| 745 |
+
proto_alert.title = title
|
| 746 |
+
proto_alert.text = text
|
| 747 |
+
proto_alert.level = level
|
| 748 |
+
proto_alert.wait_duration = wait_duration
|
| 749 |
+
self._publish_alert(proto_alert)
|
| 750 |
+
|
| 751 |
+
@abstractmethod
|
| 752 |
+
def _publish_alert(self, alert: pb.AlertRecord) -> None:
|
| 753 |
+
raise NotImplementedError
|
| 754 |
+
|
| 755 |
+
def _make_exit(self, exit_code: Optional[int]) -> pb.RunExitRecord:
|
| 756 |
+
exit = pb.RunExitRecord()
|
| 757 |
+
if exit_code is not None:
|
| 758 |
+
exit.exit_code = exit_code
|
| 759 |
+
return exit
|
| 760 |
+
|
| 761 |
+
def publish_exit(self, exit_code: Optional[int]) -> None:
|
| 762 |
+
exit_data = self._make_exit(exit_code)
|
| 763 |
+
self._publish_exit(exit_data)
|
| 764 |
+
|
| 765 |
+
@abstractmethod
|
| 766 |
+
def _publish_exit(self, exit_data: pb.RunExitRecord) -> None:
|
| 767 |
+
raise NotImplementedError
|
| 768 |
+
|
| 769 |
+
def publish_keepalive(self) -> None:
|
| 770 |
+
keepalive = pb.KeepaliveRequest()
|
| 771 |
+
self._publish_keepalive(keepalive)
|
| 772 |
+
|
| 773 |
+
@abstractmethod
|
| 774 |
+
def _publish_keepalive(self, keepalive: pb.KeepaliveRequest) -> None:
|
| 775 |
+
raise NotImplementedError
|
| 776 |
+
|
| 777 |
+
def publish_job_input(
|
| 778 |
+
self,
|
| 779 |
+
include_paths: List[List[str]],
|
| 780 |
+
exclude_paths: List[List[str]],
|
| 781 |
+
input_schema: Optional[dict],
|
| 782 |
+
run_config: bool = False,
|
| 783 |
+
file_path: str = "",
|
| 784 |
+
):
|
| 785 |
+
"""Publishes a request to add inputs to the job.
|
| 786 |
+
|
| 787 |
+
If run_config is True, the wandb.config will be added as a job input.
|
| 788 |
+
If file_path is provided, the file at file_path will be added as a job
|
| 789 |
+
input.
|
| 790 |
+
|
| 791 |
+
The paths provided as arguments are sequences of dictionary keys that
|
| 792 |
+
specify a path within the wandb.config. If a path is included, the
|
| 793 |
+
corresponding field will be treated as a job input. If a path is
|
| 794 |
+
excluded, the corresponding field will not be treated as a job input.
|
| 795 |
+
|
| 796 |
+
Args:
|
| 797 |
+
include_paths: paths within config to include as job inputs.
|
| 798 |
+
exclude_paths: paths within config to exclude as job inputs.
|
| 799 |
+
input_schema: A JSON Schema describing which attributes will be
|
| 800 |
+
editable from the Launch drawer.
|
| 801 |
+
run_config: bool indicating whether wandb.config is the input source.
|
| 802 |
+
file_path: path to file to include as a job input.
|
| 803 |
+
"""
|
| 804 |
+
if run_config and file_path:
|
| 805 |
+
raise ValueError(
|
| 806 |
+
"run_config and file_path are mutually exclusive arguments."
|
| 807 |
+
)
|
| 808 |
+
request = pb.JobInputRequest()
|
| 809 |
+
include_records = [pb.JobInputPath(path=path) for path in include_paths]
|
| 810 |
+
exclude_records = [pb.JobInputPath(path=path) for path in exclude_paths]
|
| 811 |
+
request.include_paths.extend(include_records)
|
| 812 |
+
request.exclude_paths.extend(exclude_records)
|
| 813 |
+
source = pb.JobInputSource(
|
| 814 |
+
run_config=pb.JobInputSource.RunConfigSource(),
|
| 815 |
+
)
|
| 816 |
+
if run_config:
|
| 817 |
+
source.run_config.CopyFrom(pb.JobInputSource.RunConfigSource())
|
| 818 |
+
else:
|
| 819 |
+
source.file.CopyFrom(
|
| 820 |
+
pb.JobInputSource.ConfigFileSource(path=file_path),
|
| 821 |
+
)
|
| 822 |
+
request.input_source.CopyFrom(source)
|
| 823 |
+
if input_schema:
|
| 824 |
+
request.input_schema = json_dumps_safer(input_schema)
|
| 825 |
+
|
| 826 |
+
return self._publish_job_input(request)
|
| 827 |
+
|
| 828 |
+
@abstractmethod
|
| 829 |
+
def _publish_job_input(self, request: pb.JobInputRequest) -> MailboxHandle:
|
| 830 |
+
raise NotImplementedError
|
| 831 |
+
|
| 832 |
+
def join(self) -> None:
|
| 833 |
+
# Drop indicates that the internal process has already been shutdown
|
| 834 |
+
if self._drop:
|
| 835 |
+
return
|
| 836 |
+
_ = self._communicate_shutdown()
|
| 837 |
+
|
| 838 |
+
@abstractmethod
|
| 839 |
+
def _communicate_shutdown(self) -> None:
|
| 840 |
+
raise NotImplementedError
|
| 841 |
+
|
| 842 |
+
def deliver_run(self, run: "Run") -> MailboxHandle:
|
| 843 |
+
run_record = self._make_run(run)
|
| 844 |
+
return self._deliver_run(run_record)
|
| 845 |
+
|
| 846 |
+
def deliver_sync(
|
| 847 |
+
self,
|
| 848 |
+
start_offset: int,
|
| 849 |
+
final_offset: int,
|
| 850 |
+
entity: Optional[str] = None,
|
| 851 |
+
project: Optional[str] = None,
|
| 852 |
+
run_id: Optional[str] = None,
|
| 853 |
+
skip_output_raw: Optional[bool] = None,
|
| 854 |
+
) -> MailboxHandle:
|
| 855 |
+
sync = pb.SyncRequest(
|
| 856 |
+
start_offset=start_offset,
|
| 857 |
+
final_offset=final_offset,
|
| 858 |
+
)
|
| 859 |
+
if entity:
|
| 860 |
+
sync.overwrite.entity = entity
|
| 861 |
+
if project:
|
| 862 |
+
sync.overwrite.project = project
|
| 863 |
+
if run_id:
|
| 864 |
+
sync.overwrite.run_id = run_id
|
| 865 |
+
if skip_output_raw:
|
| 866 |
+
sync.skip.output_raw = skip_output_raw
|
| 867 |
+
return self._deliver_sync(sync)
|
| 868 |
+
|
| 869 |
+
@abstractmethod
|
| 870 |
+
def _deliver_sync(self, sync: pb.SyncRequest) -> MailboxHandle:
|
| 871 |
+
raise NotImplementedError
|
| 872 |
+
|
| 873 |
+
@abstractmethod
|
| 874 |
+
def _deliver_run(self, run: pb.RunRecord) -> MailboxHandle:
|
| 875 |
+
raise NotImplementedError
|
| 876 |
+
|
| 877 |
+
def deliver_run_start(self, run_pb: pb.RunRecord) -> MailboxHandle:
|
| 878 |
+
run_start = pb.RunStartRequest()
|
| 879 |
+
run_start.run.CopyFrom(run_pb)
|
| 880 |
+
return self._deliver_run_start(run_start)
|
| 881 |
+
|
| 882 |
+
@abstractmethod
|
| 883 |
+
def _deliver_run_start(self, run_start: pb.RunStartRequest) -> MailboxHandle:
|
| 884 |
+
raise NotImplementedError
|
| 885 |
+
|
| 886 |
+
def deliver_attach(self, attach_id: str) -> MailboxHandle:
|
| 887 |
+
attach = pb.AttachRequest(attach_id=attach_id)
|
| 888 |
+
return self._deliver_attach(attach)
|
| 889 |
+
|
| 890 |
+
@abstractmethod
|
| 891 |
+
def _deliver_attach(self, status: pb.AttachRequest) -> MailboxHandle:
|
| 892 |
+
raise NotImplementedError
|
| 893 |
+
|
| 894 |
+
def deliver_check_version(
|
| 895 |
+
self, current_version: Optional[str] = None
|
| 896 |
+
) -> MailboxHandle:
|
| 897 |
+
check_version = pb.CheckVersionRequest()
|
| 898 |
+
if current_version:
|
| 899 |
+
check_version.current_version = current_version
|
| 900 |
+
return self._deliver_check_version(check_version)
|
| 901 |
+
|
| 902 |
+
@abstractmethod
|
| 903 |
+
def _deliver_check_version(
|
| 904 |
+
self, check_version: pb.CheckVersionRequest
|
| 905 |
+
) -> MailboxHandle:
|
| 906 |
+
raise NotImplementedError
|
| 907 |
+
|
| 908 |
+
def deliver_stop_status(self) -> MailboxHandle:
|
| 909 |
+
status = pb.StopStatusRequest()
|
| 910 |
+
return self._deliver_stop_status(status)
|
| 911 |
+
|
| 912 |
+
@abstractmethod
|
| 913 |
+
def _deliver_stop_status(self, status: pb.StopStatusRequest) -> MailboxHandle:
|
| 914 |
+
raise NotImplementedError
|
| 915 |
+
|
| 916 |
+
def deliver_network_status(self) -> MailboxHandle:
|
| 917 |
+
status = pb.NetworkStatusRequest()
|
| 918 |
+
return self._deliver_network_status(status)
|
| 919 |
+
|
| 920 |
+
@abstractmethod
|
| 921 |
+
def _deliver_network_status(self, status: pb.NetworkStatusRequest) -> MailboxHandle:
|
| 922 |
+
raise NotImplementedError
|
| 923 |
+
|
| 924 |
+
def deliver_internal_messages(self) -> MailboxHandle:
|
| 925 |
+
internal_message = pb.InternalMessagesRequest()
|
| 926 |
+
return self._deliver_internal_messages(internal_message)
|
| 927 |
+
|
| 928 |
+
@abstractmethod
|
| 929 |
+
def _deliver_internal_messages(
|
| 930 |
+
self, internal_message: pb.InternalMessagesRequest
|
| 931 |
+
) -> MailboxHandle:
|
| 932 |
+
raise NotImplementedError
|
| 933 |
+
|
| 934 |
+
def deliver_get_summary(self) -> MailboxHandle:
|
| 935 |
+
get_summary = pb.GetSummaryRequest()
|
| 936 |
+
return self._deliver_get_summary(get_summary)
|
| 937 |
+
|
| 938 |
+
@abstractmethod
|
| 939 |
+
def _deliver_get_summary(self, get_summary: pb.GetSummaryRequest) -> MailboxHandle:
|
| 940 |
+
raise NotImplementedError
|
| 941 |
+
|
| 942 |
+
def deliver_get_system_metrics(self) -> MailboxHandle:
|
| 943 |
+
get_summary = pb.GetSystemMetricsRequest()
|
| 944 |
+
return self._deliver_get_system_metrics(get_summary)
|
| 945 |
+
|
| 946 |
+
@abstractmethod
|
| 947 |
+
def _deliver_get_system_metrics(
|
| 948 |
+
self, get_summary: pb.GetSystemMetricsRequest
|
| 949 |
+
) -> MailboxHandle:
|
| 950 |
+
raise NotImplementedError
|
| 951 |
+
|
| 952 |
+
def deliver_exit(self, exit_code: Optional[int]) -> MailboxHandle:
|
| 953 |
+
exit_data = self._make_exit(exit_code)
|
| 954 |
+
return self._deliver_exit(exit_data)
|
| 955 |
+
|
| 956 |
+
@abstractmethod
|
| 957 |
+
def _deliver_exit(self, exit_data: pb.RunExitRecord) -> MailboxHandle:
|
| 958 |
+
raise NotImplementedError
|
| 959 |
+
|
| 960 |
+
def deliver_poll_exit(self) -> MailboxHandle:
|
| 961 |
+
poll_exit = pb.PollExitRequest()
|
| 962 |
+
return self._deliver_poll_exit(poll_exit)
|
| 963 |
+
|
| 964 |
+
@abstractmethod
|
| 965 |
+
def _deliver_poll_exit(self, poll_exit: pb.PollExitRequest) -> MailboxHandle:
|
| 966 |
+
raise NotImplementedError
|
| 967 |
+
|
| 968 |
+
def deliver_request_server_info(self) -> MailboxHandle:
|
| 969 |
+
server_info = pb.ServerInfoRequest()
|
| 970 |
+
return self._deliver_request_server_info(server_info)
|
| 971 |
+
|
| 972 |
+
@abstractmethod
|
| 973 |
+
def _deliver_request_server_info(
|
| 974 |
+
self, server_info: pb.ServerInfoRequest
|
| 975 |
+
) -> MailboxHandle:
|
| 976 |
+
raise NotImplementedError
|
| 977 |
+
|
| 978 |
+
def deliver_request_sampled_history(self) -> MailboxHandle:
|
| 979 |
+
sampled_history = pb.SampledHistoryRequest()
|
| 980 |
+
return self._deliver_request_sampled_history(sampled_history)
|
| 981 |
+
|
| 982 |
+
@abstractmethod
|
| 983 |
+
def _deliver_request_sampled_history(
|
| 984 |
+
self, sampled_history: pb.SampledHistoryRequest
|
| 985 |
+
) -> MailboxHandle:
|
| 986 |
+
raise NotImplementedError
|
| 987 |
+
|
| 988 |
+
def deliver_request_run_status(self) -> MailboxHandle:
|
| 989 |
+
run_status = pb.RunStatusRequest()
|
| 990 |
+
return self._deliver_request_run_status(run_status)
|
| 991 |
+
|
| 992 |
+
@abstractmethod
|
| 993 |
+
def _deliver_request_run_status(
|
| 994 |
+
self, run_status: pb.RunStatusRequest
|
| 995 |
+
) -> MailboxHandle:
|
| 996 |
+
raise NotImplementedError
|
parrot/lib/python3.10/site-packages/wandb/sdk/interface/interface_queue.py
ADDED
|
@@ -0,0 +1,59 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""InterfaceQueue - Derived from InterfaceShared using queues to send to internal thread.
|
| 2 |
+
|
| 3 |
+
See interface.py for how interface classes relate to each other.
|
| 4 |
+
|
| 5 |
+
"""
|
| 6 |
+
|
| 7 |
+
import logging
|
| 8 |
+
from multiprocessing.process import BaseProcess
|
| 9 |
+
from typing import TYPE_CHECKING, Optional
|
| 10 |
+
|
| 11 |
+
from ..lib import tracelog
|
| 12 |
+
from ..lib.mailbox import Mailbox
|
| 13 |
+
from .interface_shared import InterfaceShared
|
| 14 |
+
from .router_queue import MessageQueueRouter
|
| 15 |
+
|
| 16 |
+
if TYPE_CHECKING:
|
| 17 |
+
from queue import Queue
|
| 18 |
+
|
| 19 |
+
from wandb.proto import wandb_internal_pb2 as pb
|
| 20 |
+
|
| 21 |
+
|
| 22 |
+
logger = logging.getLogger("wandb")
|
| 23 |
+
|
| 24 |
+
|
| 25 |
+
class InterfaceQueue(InterfaceShared):
|
| 26 |
+
record_q: Optional["Queue[pb.Record]"]
|
| 27 |
+
result_q: Optional["Queue[pb.Result]"]
|
| 28 |
+
_mailbox: Optional[Mailbox]
|
| 29 |
+
|
| 30 |
+
def __init__(
|
| 31 |
+
self,
|
| 32 |
+
record_q: Optional["Queue[pb.Record]"] = None,
|
| 33 |
+
result_q: Optional["Queue[pb.Result]"] = None,
|
| 34 |
+
process: Optional[BaseProcess] = None,
|
| 35 |
+
process_check: bool = True,
|
| 36 |
+
mailbox: Optional[Mailbox] = None,
|
| 37 |
+
) -> None:
|
| 38 |
+
self.record_q = record_q
|
| 39 |
+
self.result_q = result_q
|
| 40 |
+
if self.record_q:
|
| 41 |
+
tracelog.annotate_queue(self.record_q, "record_q")
|
| 42 |
+
if self.result_q:
|
| 43 |
+
tracelog.annotate_queue(self.result_q, "result_q")
|
| 44 |
+
super().__init__(process=process, process_check=process_check, mailbox=mailbox)
|
| 45 |
+
|
| 46 |
+
def _init_router(self) -> None:
|
| 47 |
+
if self.record_q and self.result_q:
|
| 48 |
+
self._router = MessageQueueRouter(
|
| 49 |
+
self.record_q, self.result_q, mailbox=self._mailbox
|
| 50 |
+
)
|
| 51 |
+
|
| 52 |
+
def _publish(self, record: "pb.Record", local: Optional[bool] = None) -> None:
|
| 53 |
+
if self._process_check and self._process and not self._process.is_alive():
|
| 54 |
+
raise Exception("The wandb backend process has shutdown")
|
| 55 |
+
if local:
|
| 56 |
+
record.control.local = local
|
| 57 |
+
if self.record_q:
|
| 58 |
+
tracelog.log_message_queue(record, self.record_q)
|
| 59 |
+
self.record_q.put(record)
|
parrot/lib/python3.10/site-packages/wandb/sdk/interface/interface_relay.py
ADDED
|
@@ -0,0 +1,53 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""InterfaceRelay - Derived from InterfaceQueue using RelayRouter to preserve uuid req/resp.
|
| 2 |
+
|
| 3 |
+
See interface.py for how interface classes relate to each other.
|
| 4 |
+
|
| 5 |
+
"""
|
| 6 |
+
|
| 7 |
+
import logging
|
| 8 |
+
from multiprocessing.process import BaseProcess
|
| 9 |
+
from typing import TYPE_CHECKING, Optional
|
| 10 |
+
|
| 11 |
+
from wandb.proto import wandb_internal_pb2 as pb
|
| 12 |
+
|
| 13 |
+
from ..lib.mailbox import Mailbox
|
| 14 |
+
from .interface_queue import InterfaceQueue
|
| 15 |
+
from .router_relay import MessageRelayRouter
|
| 16 |
+
|
| 17 |
+
if TYPE_CHECKING:
|
| 18 |
+
from queue import Queue
|
| 19 |
+
|
| 20 |
+
|
| 21 |
+
logger = logging.getLogger("wandb")
|
| 22 |
+
|
| 23 |
+
|
| 24 |
+
class InterfaceRelay(InterfaceQueue):
|
| 25 |
+
_mailbox: Mailbox
|
| 26 |
+
relay_q: Optional["Queue[pb.Result]"]
|
| 27 |
+
|
| 28 |
+
def __init__(
|
| 29 |
+
self,
|
| 30 |
+
mailbox: Mailbox,
|
| 31 |
+
record_q: Optional["Queue[pb.Record]"] = None,
|
| 32 |
+
result_q: Optional["Queue[pb.Result]"] = None,
|
| 33 |
+
relay_q: Optional["Queue[pb.Result]"] = None,
|
| 34 |
+
process: Optional[BaseProcess] = None,
|
| 35 |
+
process_check: bool = True,
|
| 36 |
+
) -> None:
|
| 37 |
+
self.relay_q = relay_q
|
| 38 |
+
super().__init__(
|
| 39 |
+
record_q=record_q,
|
| 40 |
+
result_q=result_q,
|
| 41 |
+
process=process,
|
| 42 |
+
process_check=process_check,
|
| 43 |
+
mailbox=mailbox,
|
| 44 |
+
)
|
| 45 |
+
|
| 46 |
+
def _init_router(self) -> None:
|
| 47 |
+
if self.record_q and self.result_q and self.relay_q:
|
| 48 |
+
self._router = MessageRelayRouter(
|
| 49 |
+
request_queue=self.record_q,
|
| 50 |
+
response_queue=self.result_q,
|
| 51 |
+
relay_queue=self.relay_q,
|
| 52 |
+
mailbox=self._mailbox,
|
| 53 |
+
)
|
parrot/lib/python3.10/site-packages/wandb/sdk/interface/interface_shared.py
ADDED
|
@@ -0,0 +1,549 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""InterfaceShared - Derived from InterfaceBase - shared with InterfaceQueue and InterfaceSock.
|
| 2 |
+
|
| 3 |
+
See interface.py for how interface classes relate to each other.
|
| 4 |
+
|
| 5 |
+
"""
|
| 6 |
+
|
| 7 |
+
import logging
|
| 8 |
+
import time
|
| 9 |
+
from abc import abstractmethod
|
| 10 |
+
from multiprocessing.process import BaseProcess
|
| 11 |
+
from typing import Any, Optional, cast
|
| 12 |
+
|
| 13 |
+
import wandb
|
| 14 |
+
from wandb.proto import wandb_internal_pb2 as pb
|
| 15 |
+
from wandb.proto import wandb_telemetry_pb2 as tpb
|
| 16 |
+
from wandb.util import json_dumps_safer, json_friendly
|
| 17 |
+
|
| 18 |
+
from ..lib.mailbox import Mailbox, MailboxHandle
|
| 19 |
+
from .interface import InterfaceBase
|
| 20 |
+
from .message_future import MessageFuture
|
| 21 |
+
from .router import MessageRouter
|
| 22 |
+
|
| 23 |
+
logger = logging.getLogger("wandb")
|
| 24 |
+
|
| 25 |
+
|
| 26 |
+
class InterfaceShared(InterfaceBase):
|
| 27 |
+
process: Optional[BaseProcess]
|
| 28 |
+
_process_check: bool
|
| 29 |
+
_router: Optional[MessageRouter]
|
| 30 |
+
_mailbox: Optional[Mailbox]
|
| 31 |
+
_transport_success_timestamp: float
|
| 32 |
+
_transport_failed: bool
|
| 33 |
+
|
| 34 |
+
def __init__(
|
| 35 |
+
self,
|
| 36 |
+
process: Optional[BaseProcess] = None,
|
| 37 |
+
process_check: bool = True,
|
| 38 |
+
mailbox: Optional[Any] = None,
|
| 39 |
+
) -> None:
|
| 40 |
+
super().__init__()
|
| 41 |
+
self._transport_success_timestamp = time.monotonic()
|
| 42 |
+
self._transport_failed = False
|
| 43 |
+
self._process = process
|
| 44 |
+
self._router = None
|
| 45 |
+
self._process_check = process_check
|
| 46 |
+
self._mailbox = mailbox
|
| 47 |
+
self._init_router()
|
| 48 |
+
|
| 49 |
+
@abstractmethod
|
| 50 |
+
def _init_router(self) -> None:
|
| 51 |
+
raise NotImplementedError
|
| 52 |
+
|
| 53 |
+
@property
|
| 54 |
+
def transport_failed(self) -> bool:
|
| 55 |
+
return self._transport_failed
|
| 56 |
+
|
| 57 |
+
@property
|
| 58 |
+
def transport_success_timestamp(self) -> float:
|
| 59 |
+
return self._transport_success_timestamp
|
| 60 |
+
|
| 61 |
+
def _transport_mark_failed(self) -> None:
|
| 62 |
+
self._transport_failed = True
|
| 63 |
+
|
| 64 |
+
def _transport_mark_success(self) -> None:
|
| 65 |
+
self._transport_success_timestamp = time.monotonic()
|
| 66 |
+
|
| 67 |
+
def _publish_output(self, outdata: pb.OutputRecord) -> None:
|
| 68 |
+
rec = pb.Record()
|
| 69 |
+
rec.output.CopyFrom(outdata)
|
| 70 |
+
self._publish(rec)
|
| 71 |
+
|
| 72 |
+
def _publish_cancel(self, cancel: pb.CancelRequest) -> None:
|
| 73 |
+
rec = self._make_request(cancel=cancel)
|
| 74 |
+
self._publish(rec)
|
| 75 |
+
|
| 76 |
+
def _publish_output_raw(self, outdata: pb.OutputRawRecord) -> None:
|
| 77 |
+
rec = pb.Record()
|
| 78 |
+
rec.output_raw.CopyFrom(outdata)
|
| 79 |
+
self._publish(rec)
|
| 80 |
+
|
| 81 |
+
def _publish_tbdata(self, tbrecord: pb.TBRecord) -> None:
|
| 82 |
+
rec = self._make_record(tbrecord=tbrecord)
|
| 83 |
+
self._publish(rec)
|
| 84 |
+
|
| 85 |
+
def _publish_partial_history(
|
| 86 |
+
self, partial_history: pb.PartialHistoryRequest
|
| 87 |
+
) -> None:
|
| 88 |
+
rec = self._make_request(partial_history=partial_history)
|
| 89 |
+
self._publish(rec)
|
| 90 |
+
|
| 91 |
+
def _publish_history(self, history: pb.HistoryRecord) -> None:
|
| 92 |
+
rec = self._make_record(history=history)
|
| 93 |
+
self._publish(rec)
|
| 94 |
+
|
| 95 |
+
def _publish_preempting(self, preempt_rec: pb.RunPreemptingRecord) -> None:
|
| 96 |
+
rec = self._make_record(preempting=preempt_rec)
|
| 97 |
+
self._publish(rec)
|
| 98 |
+
|
| 99 |
+
def _publish_telemetry(self, telem: tpb.TelemetryRecord) -> None:
|
| 100 |
+
rec = self._make_record(telemetry=telem)
|
| 101 |
+
self._publish(rec)
|
| 102 |
+
|
| 103 |
+
def _publish_job_input(self, job_input: pb.JobInputRequest) -> MailboxHandle:
|
| 104 |
+
record = self._make_request(job_input=job_input)
|
| 105 |
+
return self._deliver_record(record)
|
| 106 |
+
|
| 107 |
+
def _make_stats(self, stats_dict: dict) -> pb.StatsRecord:
|
| 108 |
+
stats = pb.StatsRecord()
|
| 109 |
+
stats.stats_type = pb.StatsRecord.StatsType.SYSTEM
|
| 110 |
+
stats.timestamp.GetCurrentTime() # todo: fix this, this is wrong :)
|
| 111 |
+
for k, v in stats_dict.items():
|
| 112 |
+
item = stats.item.add()
|
| 113 |
+
item.key = k
|
| 114 |
+
item.value_json = json_dumps_safer(json_friendly(v)[0])
|
| 115 |
+
return stats
|
| 116 |
+
|
| 117 |
+
def _make_login(self, api_key: Optional[str] = None) -> pb.LoginRequest:
|
| 118 |
+
login = pb.LoginRequest()
|
| 119 |
+
if api_key:
|
| 120 |
+
login.api_key = api_key
|
| 121 |
+
return login
|
| 122 |
+
|
| 123 |
+
def _make_request( # noqa: C901
|
| 124 |
+
self,
|
| 125 |
+
login: Optional[pb.LoginRequest] = None,
|
| 126 |
+
get_summary: Optional[pb.GetSummaryRequest] = None,
|
| 127 |
+
pause: Optional[pb.PauseRequest] = None,
|
| 128 |
+
resume: Optional[pb.ResumeRequest] = None,
|
| 129 |
+
status: Optional[pb.StatusRequest] = None,
|
| 130 |
+
stop_status: Optional[pb.StopStatusRequest] = None,
|
| 131 |
+
internal_messages: Optional[pb.InternalMessagesRequest] = None,
|
| 132 |
+
network_status: Optional[pb.NetworkStatusRequest] = None,
|
| 133 |
+
poll_exit: Optional[pb.PollExitRequest] = None,
|
| 134 |
+
partial_history: Optional[pb.PartialHistoryRequest] = None,
|
| 135 |
+
sampled_history: Optional[pb.SampledHistoryRequest] = None,
|
| 136 |
+
run_start: Optional[pb.RunStartRequest] = None,
|
| 137 |
+
check_version: Optional[pb.CheckVersionRequest] = None,
|
| 138 |
+
log_artifact: Optional[pb.LogArtifactRequest] = None,
|
| 139 |
+
download_artifact: Optional[pb.DownloadArtifactRequest] = None,
|
| 140 |
+
link_artifact: Optional[pb.LinkArtifactRequest] = None,
|
| 141 |
+
defer: Optional[pb.DeferRequest] = None,
|
| 142 |
+
attach: Optional[pb.AttachRequest] = None,
|
| 143 |
+
server_info: Optional[pb.ServerInfoRequest] = None,
|
| 144 |
+
keepalive: Optional[pb.KeepaliveRequest] = None,
|
| 145 |
+
run_status: Optional[pb.RunStatusRequest] = None,
|
| 146 |
+
sender_mark: Optional[pb.SenderMarkRequest] = None,
|
| 147 |
+
sender_read: Optional[pb.SenderReadRequest] = None,
|
| 148 |
+
sync: Optional[pb.SyncRequest] = None,
|
| 149 |
+
status_report: Optional[pb.StatusReportRequest] = None,
|
| 150 |
+
cancel: Optional[pb.CancelRequest] = None,
|
| 151 |
+
summary_record: Optional[pb.SummaryRecordRequest] = None,
|
| 152 |
+
telemetry_record: Optional[pb.TelemetryRecordRequest] = None,
|
| 153 |
+
get_system_metrics: Optional[pb.GetSystemMetricsRequest] = None,
|
| 154 |
+
python_packages: Optional[pb.PythonPackagesRequest] = None,
|
| 155 |
+
job_input: Optional[pb.JobInputRequest] = None,
|
| 156 |
+
) -> pb.Record:
|
| 157 |
+
request = pb.Request()
|
| 158 |
+
if login:
|
| 159 |
+
request.login.CopyFrom(login)
|
| 160 |
+
elif get_summary:
|
| 161 |
+
request.get_summary.CopyFrom(get_summary)
|
| 162 |
+
elif pause:
|
| 163 |
+
request.pause.CopyFrom(pause)
|
| 164 |
+
elif resume:
|
| 165 |
+
request.resume.CopyFrom(resume)
|
| 166 |
+
elif status:
|
| 167 |
+
request.status.CopyFrom(status)
|
| 168 |
+
elif stop_status:
|
| 169 |
+
request.stop_status.CopyFrom(stop_status)
|
| 170 |
+
elif internal_messages:
|
| 171 |
+
request.internal_messages.CopyFrom(internal_messages)
|
| 172 |
+
elif network_status:
|
| 173 |
+
request.network_status.CopyFrom(network_status)
|
| 174 |
+
elif poll_exit:
|
| 175 |
+
request.poll_exit.CopyFrom(poll_exit)
|
| 176 |
+
elif partial_history:
|
| 177 |
+
request.partial_history.CopyFrom(partial_history)
|
| 178 |
+
elif sampled_history:
|
| 179 |
+
request.sampled_history.CopyFrom(sampled_history)
|
| 180 |
+
elif run_start:
|
| 181 |
+
request.run_start.CopyFrom(run_start)
|
| 182 |
+
elif check_version:
|
| 183 |
+
request.check_version.CopyFrom(check_version)
|
| 184 |
+
elif log_artifact:
|
| 185 |
+
request.log_artifact.CopyFrom(log_artifact)
|
| 186 |
+
elif download_artifact:
|
| 187 |
+
request.download_artifact.CopyFrom(download_artifact)
|
| 188 |
+
elif link_artifact:
|
| 189 |
+
request.link_artifact.CopyFrom(link_artifact)
|
| 190 |
+
elif defer:
|
| 191 |
+
request.defer.CopyFrom(defer)
|
| 192 |
+
elif attach:
|
| 193 |
+
request.attach.CopyFrom(attach)
|
| 194 |
+
elif server_info:
|
| 195 |
+
request.server_info.CopyFrom(server_info)
|
| 196 |
+
elif keepalive:
|
| 197 |
+
request.keepalive.CopyFrom(keepalive)
|
| 198 |
+
elif run_status:
|
| 199 |
+
request.run_status.CopyFrom(run_status)
|
| 200 |
+
elif sender_mark:
|
| 201 |
+
request.sender_mark.CopyFrom(sender_mark)
|
| 202 |
+
elif sender_read:
|
| 203 |
+
request.sender_read.CopyFrom(sender_read)
|
| 204 |
+
elif cancel:
|
| 205 |
+
request.cancel.CopyFrom(cancel)
|
| 206 |
+
elif status_report:
|
| 207 |
+
request.status_report.CopyFrom(status_report)
|
| 208 |
+
elif summary_record:
|
| 209 |
+
request.summary_record.CopyFrom(summary_record)
|
| 210 |
+
elif telemetry_record:
|
| 211 |
+
request.telemetry_record.CopyFrom(telemetry_record)
|
| 212 |
+
elif get_system_metrics:
|
| 213 |
+
request.get_system_metrics.CopyFrom(get_system_metrics)
|
| 214 |
+
elif sync:
|
| 215 |
+
request.sync.CopyFrom(sync)
|
| 216 |
+
elif python_packages:
|
| 217 |
+
request.python_packages.CopyFrom(python_packages)
|
| 218 |
+
elif job_input:
|
| 219 |
+
request.job_input.CopyFrom(job_input)
|
| 220 |
+
else:
|
| 221 |
+
raise Exception("Invalid request")
|
| 222 |
+
record = self._make_record(request=request)
|
| 223 |
+
# All requests do not get persisted
|
| 224 |
+
record.control.local = True
|
| 225 |
+
if status_report:
|
| 226 |
+
record.control.flow_control = True
|
| 227 |
+
return record
|
| 228 |
+
|
| 229 |
+
def _make_record( # noqa: C901
|
| 230 |
+
self,
|
| 231 |
+
run: Optional[pb.RunRecord] = None,
|
| 232 |
+
config: Optional[pb.ConfigRecord] = None,
|
| 233 |
+
files: Optional[pb.FilesRecord] = None,
|
| 234 |
+
summary: Optional[pb.SummaryRecord] = None,
|
| 235 |
+
history: Optional[pb.HistoryRecord] = None,
|
| 236 |
+
stats: Optional[pb.StatsRecord] = None,
|
| 237 |
+
exit: Optional[pb.RunExitRecord] = None,
|
| 238 |
+
artifact: Optional[pb.ArtifactRecord] = None,
|
| 239 |
+
tbrecord: Optional[pb.TBRecord] = None,
|
| 240 |
+
alert: Optional[pb.AlertRecord] = None,
|
| 241 |
+
final: Optional[pb.FinalRecord] = None,
|
| 242 |
+
metric: Optional[pb.MetricRecord] = None,
|
| 243 |
+
header: Optional[pb.HeaderRecord] = None,
|
| 244 |
+
footer: Optional[pb.FooterRecord] = None,
|
| 245 |
+
request: Optional[pb.Request] = None,
|
| 246 |
+
telemetry: Optional[tpb.TelemetryRecord] = None,
|
| 247 |
+
preempting: Optional[pb.RunPreemptingRecord] = None,
|
| 248 |
+
use_artifact: Optional[pb.UseArtifactRecord] = None,
|
| 249 |
+
output: Optional[pb.OutputRecord] = None,
|
| 250 |
+
output_raw: Optional[pb.OutputRawRecord] = None,
|
| 251 |
+
) -> pb.Record:
|
| 252 |
+
record = pb.Record()
|
| 253 |
+
if run:
|
| 254 |
+
record.run.CopyFrom(run)
|
| 255 |
+
elif config:
|
| 256 |
+
record.config.CopyFrom(config)
|
| 257 |
+
elif summary:
|
| 258 |
+
record.summary.CopyFrom(summary)
|
| 259 |
+
elif history:
|
| 260 |
+
record.history.CopyFrom(history)
|
| 261 |
+
elif files:
|
| 262 |
+
record.files.CopyFrom(files)
|
| 263 |
+
elif stats:
|
| 264 |
+
record.stats.CopyFrom(stats)
|
| 265 |
+
elif exit:
|
| 266 |
+
record.exit.CopyFrom(exit)
|
| 267 |
+
elif artifact:
|
| 268 |
+
record.artifact.CopyFrom(artifact)
|
| 269 |
+
elif tbrecord:
|
| 270 |
+
record.tbrecord.CopyFrom(tbrecord)
|
| 271 |
+
elif alert:
|
| 272 |
+
record.alert.CopyFrom(alert)
|
| 273 |
+
elif final:
|
| 274 |
+
record.final.CopyFrom(final)
|
| 275 |
+
elif header:
|
| 276 |
+
record.header.CopyFrom(header)
|
| 277 |
+
elif footer:
|
| 278 |
+
record.footer.CopyFrom(footer)
|
| 279 |
+
elif request:
|
| 280 |
+
record.request.CopyFrom(request)
|
| 281 |
+
elif telemetry:
|
| 282 |
+
record.telemetry.CopyFrom(telemetry)
|
| 283 |
+
elif metric:
|
| 284 |
+
record.metric.CopyFrom(metric)
|
| 285 |
+
elif preempting:
|
| 286 |
+
record.preempting.CopyFrom(preempting)
|
| 287 |
+
elif use_artifact:
|
| 288 |
+
record.use_artifact.CopyFrom(use_artifact)
|
| 289 |
+
elif output:
|
| 290 |
+
record.output.CopyFrom(output)
|
| 291 |
+
elif output_raw:
|
| 292 |
+
record.output_raw.CopyFrom(output_raw)
|
| 293 |
+
else:
|
| 294 |
+
raise Exception("Invalid record")
|
| 295 |
+
return record
|
| 296 |
+
|
| 297 |
+
@abstractmethod
|
| 298 |
+
def _publish(self, record: pb.Record, local: Optional[bool] = None) -> None:
|
| 299 |
+
raise NotImplementedError
|
| 300 |
+
|
| 301 |
+
def _communicate(
|
| 302 |
+
self, rec: pb.Record, timeout: Optional[int] = 30, local: Optional[bool] = None
|
| 303 |
+
) -> Optional[pb.Result]:
|
| 304 |
+
return self._communicate_async(rec, local=local).get(timeout=timeout)
|
| 305 |
+
|
| 306 |
+
def _communicate_async(
|
| 307 |
+
self, rec: pb.Record, local: Optional[bool] = None
|
| 308 |
+
) -> MessageFuture:
|
| 309 |
+
assert self._router
|
| 310 |
+
if self._process_check and self._process and not self._process.is_alive():
|
| 311 |
+
raise Exception("The wandb backend process has shutdown")
|
| 312 |
+
future = self._router.send_and_receive(rec, local=local)
|
| 313 |
+
return future
|
| 314 |
+
|
| 315 |
+
def communicate_login(
|
| 316 |
+
self, api_key: Optional[str] = None, timeout: Optional[int] = 15
|
| 317 |
+
) -> pb.LoginResponse:
|
| 318 |
+
login = self._make_login(api_key)
|
| 319 |
+
rec = self._make_request(login=login)
|
| 320 |
+
result = self._communicate(rec, timeout=timeout)
|
| 321 |
+
if result is None:
|
| 322 |
+
# TODO: friendlier error message here
|
| 323 |
+
raise wandb.Error(
|
| 324 |
+
"Couldn't communicate with backend after {} seconds".format(timeout)
|
| 325 |
+
)
|
| 326 |
+
login_response = result.response.login_response
|
| 327 |
+
assert login_response
|
| 328 |
+
return login_response
|
| 329 |
+
|
| 330 |
+
def _publish_defer(self, state: "pb.DeferRequest.DeferState.V") -> None:
|
| 331 |
+
defer = pb.DeferRequest(state=state)
|
| 332 |
+
rec = self._make_request(defer=defer)
|
| 333 |
+
self._publish(rec, local=True)
|
| 334 |
+
|
| 335 |
+
def publish_defer(self, state: int = 0) -> None:
|
| 336 |
+
self._publish_defer(cast("pb.DeferRequest.DeferState.V", state))
|
| 337 |
+
|
| 338 |
+
def _publish_header(self, header: pb.HeaderRecord) -> None:
|
| 339 |
+
rec = self._make_record(header=header)
|
| 340 |
+
self._publish(rec)
|
| 341 |
+
|
| 342 |
+
def publish_footer(self) -> None:
|
| 343 |
+
footer = pb.FooterRecord()
|
| 344 |
+
rec = self._make_record(footer=footer)
|
| 345 |
+
self._publish(rec)
|
| 346 |
+
|
| 347 |
+
def publish_final(self) -> None:
|
| 348 |
+
final = pb.FinalRecord()
|
| 349 |
+
rec = self._make_record(final=final)
|
| 350 |
+
self._publish(rec)
|
| 351 |
+
|
| 352 |
+
def publish_login(self, api_key: Optional[str] = None) -> None:
|
| 353 |
+
login = self._make_login(api_key)
|
| 354 |
+
rec = self._make_request(login=login)
|
| 355 |
+
self._publish(rec)
|
| 356 |
+
|
| 357 |
+
def _publish_pause(self, pause: pb.PauseRequest) -> None:
|
| 358 |
+
rec = self._make_request(pause=pause)
|
| 359 |
+
self._publish(rec)
|
| 360 |
+
|
| 361 |
+
def _publish_resume(self, resume: pb.ResumeRequest) -> None:
|
| 362 |
+
rec = self._make_request(resume=resume)
|
| 363 |
+
self._publish(rec)
|
| 364 |
+
|
| 365 |
+
def _publish_run(self, run: pb.RunRecord) -> None:
|
| 366 |
+
rec = self._make_record(run=run)
|
| 367 |
+
self._publish(rec)
|
| 368 |
+
|
| 369 |
+
def _publish_config(self, cfg: pb.ConfigRecord) -> None:
|
| 370 |
+
rec = self._make_record(config=cfg)
|
| 371 |
+
self._publish(rec)
|
| 372 |
+
|
| 373 |
+
def _publish_summary(self, summary: pb.SummaryRecord) -> None:
|
| 374 |
+
rec = self._make_record(summary=summary)
|
| 375 |
+
self._publish(rec)
|
| 376 |
+
|
| 377 |
+
def _publish_metric(self, metric: pb.MetricRecord) -> None:
|
| 378 |
+
rec = self._make_record(metric=metric)
|
| 379 |
+
self._publish(rec)
|
| 380 |
+
|
| 381 |
+
def publish_stats(self, stats_dict: dict) -> None:
|
| 382 |
+
stats = self._make_stats(stats_dict)
|
| 383 |
+
rec = self._make_record(stats=stats)
|
| 384 |
+
self._publish(rec)
|
| 385 |
+
|
| 386 |
+
def _publish_python_packages(
|
| 387 |
+
self, python_packages: pb.PythonPackagesRequest
|
| 388 |
+
) -> None:
|
| 389 |
+
rec = self._make_request(python_packages=python_packages)
|
| 390 |
+
self._publish(rec)
|
| 391 |
+
|
| 392 |
+
def _publish_files(self, files: pb.FilesRecord) -> None:
|
| 393 |
+
rec = self._make_record(files=files)
|
| 394 |
+
self._publish(rec)
|
| 395 |
+
|
| 396 |
+
def _publish_use_artifact(self, use_artifact: pb.UseArtifactRecord) -> Any:
|
| 397 |
+
rec = self._make_record(use_artifact=use_artifact)
|
| 398 |
+
self._publish(rec)
|
| 399 |
+
|
| 400 |
+
def _communicate_artifact(self, log_artifact: pb.LogArtifactRequest) -> Any:
|
| 401 |
+
rec = self._make_request(log_artifact=log_artifact)
|
| 402 |
+
return self._communicate_async(rec)
|
| 403 |
+
|
| 404 |
+
def _deliver_download_artifact(
|
| 405 |
+
self, download_artifact: pb.DownloadArtifactRequest
|
| 406 |
+
) -> MailboxHandle:
|
| 407 |
+
rec = self._make_request(download_artifact=download_artifact)
|
| 408 |
+
return self._deliver_record(rec)
|
| 409 |
+
|
| 410 |
+
def _deliver_link_artifact(
|
| 411 |
+
self, link_artifact: pb.LinkArtifactRequest
|
| 412 |
+
) -> MailboxHandle:
|
| 413 |
+
rec = self._make_request(link_artifact=link_artifact)
|
| 414 |
+
return self._deliver_record(rec)
|
| 415 |
+
|
| 416 |
+
def _publish_artifact(self, proto_artifact: pb.ArtifactRecord) -> None:
|
| 417 |
+
rec = self._make_record(artifact=proto_artifact)
|
| 418 |
+
self._publish(rec)
|
| 419 |
+
|
| 420 |
+
def _publish_alert(self, proto_alert: pb.AlertRecord) -> None:
|
| 421 |
+
rec = self._make_record(alert=proto_alert)
|
| 422 |
+
self._publish(rec)
|
| 423 |
+
|
| 424 |
+
def _deliver_status(
|
| 425 |
+
self,
|
| 426 |
+
status: pb.StatusRequest,
|
| 427 |
+
) -> MailboxHandle:
|
| 428 |
+
req = self._make_request(status=status)
|
| 429 |
+
return self._deliver_record(req)
|
| 430 |
+
|
| 431 |
+
def _publish_exit(self, exit_data: pb.RunExitRecord) -> None:
|
| 432 |
+
rec = self._make_record(exit=exit_data)
|
| 433 |
+
self._publish(rec)
|
| 434 |
+
|
| 435 |
+
def _publish_keepalive(self, keepalive: pb.KeepaliveRequest) -> None:
|
| 436 |
+
record = self._make_request(keepalive=keepalive)
|
| 437 |
+
self._publish(record)
|
| 438 |
+
|
| 439 |
+
def _communicate_shutdown(self) -> None:
|
| 440 |
+
# shutdown
|
| 441 |
+
request = pb.Request(shutdown=pb.ShutdownRequest())
|
| 442 |
+
record = self._make_record(request=request)
|
| 443 |
+
_ = self._communicate(record)
|
| 444 |
+
|
| 445 |
+
def _get_mailbox(self) -> Mailbox:
|
| 446 |
+
mailbox = self._mailbox
|
| 447 |
+
assert mailbox
|
| 448 |
+
return mailbox
|
| 449 |
+
|
| 450 |
+
def _deliver_record(self, record: pb.Record) -> MailboxHandle:
|
| 451 |
+
mailbox = self._get_mailbox()
|
| 452 |
+
handle = mailbox._deliver_record(record, interface=self)
|
| 453 |
+
return handle
|
| 454 |
+
|
| 455 |
+
def _deliver_run(self, run: pb.RunRecord) -> MailboxHandle:
|
| 456 |
+
record = self._make_record(run=run)
|
| 457 |
+
return self._deliver_record(record)
|
| 458 |
+
|
| 459 |
+
def _deliver_sync(self, sync: pb.SyncRequest) -> MailboxHandle:
|
| 460 |
+
record = self._make_request(sync=sync)
|
| 461 |
+
return self._deliver_record(record)
|
| 462 |
+
|
| 463 |
+
def _deliver_run_start(self, run_start: pb.RunStartRequest) -> MailboxHandle:
|
| 464 |
+
record = self._make_request(run_start=run_start)
|
| 465 |
+
return self._deliver_record(record)
|
| 466 |
+
|
| 467 |
+
def _deliver_get_summary(self, get_summary: pb.GetSummaryRequest) -> MailboxHandle:
|
| 468 |
+
record = self._make_request(get_summary=get_summary)
|
| 469 |
+
return self._deliver_record(record)
|
| 470 |
+
|
| 471 |
+
def _deliver_get_system_metrics(
|
| 472 |
+
self, get_system_metrics: pb.GetSystemMetricsRequest
|
| 473 |
+
) -> MailboxHandle:
|
| 474 |
+
record = self._make_request(get_system_metrics=get_system_metrics)
|
| 475 |
+
return self._deliver_record(record)
|
| 476 |
+
|
| 477 |
+
def _deliver_exit(self, exit_data: pb.RunExitRecord) -> MailboxHandle:
|
| 478 |
+
record = self._make_record(exit=exit_data)
|
| 479 |
+
return self._deliver_record(record)
|
| 480 |
+
|
| 481 |
+
def _deliver_poll_exit(self, poll_exit: pb.PollExitRequest) -> MailboxHandle:
|
| 482 |
+
record = self._make_request(poll_exit=poll_exit)
|
| 483 |
+
return self._deliver_record(record)
|
| 484 |
+
|
| 485 |
+
def _deliver_stop_status(self, stop_status: pb.StopStatusRequest) -> MailboxHandle:
|
| 486 |
+
record = self._make_request(stop_status=stop_status)
|
| 487 |
+
return self._deliver_record(record)
|
| 488 |
+
|
| 489 |
+
def _deliver_attach(self, attach: pb.AttachRequest) -> MailboxHandle:
|
| 490 |
+
record = self._make_request(attach=attach)
|
| 491 |
+
return self._deliver_record(record)
|
| 492 |
+
|
| 493 |
+
def _deliver_check_version(
|
| 494 |
+
self, check_version: pb.CheckVersionRequest
|
| 495 |
+
) -> MailboxHandle:
|
| 496 |
+
record = self._make_request(check_version=check_version)
|
| 497 |
+
return self._deliver_record(record)
|
| 498 |
+
|
| 499 |
+
def _deliver_network_status(
|
| 500 |
+
self, network_status: pb.NetworkStatusRequest
|
| 501 |
+
) -> MailboxHandle:
|
| 502 |
+
record = self._make_request(network_status=network_status)
|
| 503 |
+
return self._deliver_record(record)
|
| 504 |
+
|
| 505 |
+
def _deliver_internal_messages(
|
| 506 |
+
self, internal_message: pb.InternalMessagesRequest
|
| 507 |
+
) -> MailboxHandle:
|
| 508 |
+
record = self._make_request(internal_messages=internal_message)
|
| 509 |
+
return self._deliver_record(record)
|
| 510 |
+
|
| 511 |
+
def _deliver_request_server_info(
|
| 512 |
+
self, server_info: pb.ServerInfoRequest
|
| 513 |
+
) -> MailboxHandle:
|
| 514 |
+
record = self._make_request(server_info=server_info)
|
| 515 |
+
return self._deliver_record(record)
|
| 516 |
+
|
| 517 |
+
def _deliver_request_sampled_history(
|
| 518 |
+
self, sampled_history: pb.SampledHistoryRequest
|
| 519 |
+
) -> MailboxHandle:
|
| 520 |
+
record = self._make_request(sampled_history=sampled_history)
|
| 521 |
+
return self._deliver_record(record)
|
| 522 |
+
|
| 523 |
+
def _deliver_request_run_status(
|
| 524 |
+
self, run_status: pb.RunStatusRequest
|
| 525 |
+
) -> MailboxHandle:
|
| 526 |
+
record = self._make_request(run_status=run_status)
|
| 527 |
+
return self._deliver_record(record)
|
| 528 |
+
|
| 529 |
+
def _transport_keepalive_failed(self, keepalive_interval: int = 5) -> bool:
|
| 530 |
+
if self._transport_failed:
|
| 531 |
+
return True
|
| 532 |
+
|
| 533 |
+
now = time.monotonic()
|
| 534 |
+
if now < self._transport_success_timestamp + keepalive_interval:
|
| 535 |
+
return False
|
| 536 |
+
|
| 537 |
+
try:
|
| 538 |
+
self.publish_keepalive()
|
| 539 |
+
except Exception:
|
| 540 |
+
self._transport_mark_failed()
|
| 541 |
+
else:
|
| 542 |
+
self._transport_mark_success()
|
| 543 |
+
return self._transport_failed
|
| 544 |
+
|
| 545 |
+
def join(self) -> None:
|
| 546 |
+
super().join()
|
| 547 |
+
|
| 548 |
+
if self._router:
|
| 549 |
+
self._router.join()
|
parrot/lib/python3.10/site-packages/wandb/sdk/interface/interface_sock.py
ADDED
|
@@ -0,0 +1,61 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""InterfaceSock - Derived from InterfaceShared using a socket to send to internal thread.
|
| 2 |
+
|
| 3 |
+
See interface.py for how interface classes relate to each other.
|
| 4 |
+
|
| 5 |
+
"""
|
| 6 |
+
|
| 7 |
+
import logging
|
| 8 |
+
from typing import TYPE_CHECKING, Any, Optional
|
| 9 |
+
|
| 10 |
+
from ..lib.mailbox import Mailbox
|
| 11 |
+
from ..lib.sock_client import SockClient
|
| 12 |
+
from .interface_shared import InterfaceShared
|
| 13 |
+
from .message_future import MessageFuture
|
| 14 |
+
from .router_sock import MessageSockRouter
|
| 15 |
+
|
| 16 |
+
if TYPE_CHECKING:
|
| 17 |
+
from wandb.proto import wandb_internal_pb2 as pb
|
| 18 |
+
|
| 19 |
+
from ..wandb_run import Run
|
| 20 |
+
|
| 21 |
+
|
| 22 |
+
logger = logging.getLogger("wandb")
|
| 23 |
+
|
| 24 |
+
|
| 25 |
+
class InterfaceSock(InterfaceShared):
|
| 26 |
+
_stream_id: Optional[str]
|
| 27 |
+
_sock_client: SockClient
|
| 28 |
+
_mailbox: Mailbox
|
| 29 |
+
|
| 30 |
+
def __init__(self, sock_client: SockClient, mailbox: Mailbox) -> None:
|
| 31 |
+
# _sock_client is used when abstract method _init_router() is called by constructor
|
| 32 |
+
self._sock_client = sock_client
|
| 33 |
+
super().__init__(mailbox=mailbox)
|
| 34 |
+
self._process_check = False
|
| 35 |
+
self._stream_id = None
|
| 36 |
+
|
| 37 |
+
def _init_router(self) -> None:
|
| 38 |
+
self._router = MessageSockRouter(self._sock_client, mailbox=self._mailbox)
|
| 39 |
+
|
| 40 |
+
def _hack_set_run(self, run: "Run") -> None:
|
| 41 |
+
super()._hack_set_run(run)
|
| 42 |
+
assert run._run_id
|
| 43 |
+
self._stream_id = run._run_id
|
| 44 |
+
|
| 45 |
+
def _assign(self, record: Any) -> None:
|
| 46 |
+
assert self._stream_id
|
| 47 |
+
record._info.stream_id = self._stream_id
|
| 48 |
+
|
| 49 |
+
def _publish(self, record: "pb.Record", local: Optional[bool] = None) -> None:
|
| 50 |
+
self._assign(record)
|
| 51 |
+
self._sock_client.send_record_publish(record)
|
| 52 |
+
|
| 53 |
+
def _communicate_async(
|
| 54 |
+
self, rec: "pb.Record", local: Optional[bool] = None
|
| 55 |
+
) -> MessageFuture:
|
| 56 |
+
self._assign(rec)
|
| 57 |
+
assert self._router
|
| 58 |
+
if self._process_check and self._process and not self._process.is_alive():
|
| 59 |
+
raise Exception("The wandb backend process has shutdown")
|
| 60 |
+
future = self._router.send_and_receive(rec, local=local)
|
| 61 |
+
return future
|
parrot/lib/python3.10/site-packages/wandb/sdk/interface/message_future.py
ADDED
|
@@ -0,0 +1,27 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""MessageFuture - represents a message result of an asynchronous operation.
|
| 2 |
+
|
| 3 |
+
Base class MessageFuture for MessageFutureObject and MessageFuturePoll
|
| 4 |
+
|
| 5 |
+
"""
|
| 6 |
+
|
| 7 |
+
import threading
|
| 8 |
+
from abc import abstractmethod
|
| 9 |
+
from typing import Optional
|
| 10 |
+
|
| 11 |
+
from wandb.proto import wandb_internal_pb2 as pb
|
| 12 |
+
|
| 13 |
+
|
| 14 |
+
class MessageFuture:
|
| 15 |
+
_object: Optional[pb.Result]
|
| 16 |
+
|
| 17 |
+
def __init__(self) -> None:
|
| 18 |
+
self._object = None
|
| 19 |
+
self._object_ready = threading.Event()
|
| 20 |
+
|
| 21 |
+
def _set_object(self, obj: pb.Result) -> None:
|
| 22 |
+
self._object = obj
|
| 23 |
+
self._object_ready.set()
|
| 24 |
+
|
| 25 |
+
@abstractmethod
|
| 26 |
+
def get(self, timeout: Optional[int] = None) -> Optional[pb.Result]:
|
| 27 |
+
raise NotImplementedError
|
parrot/lib/python3.10/site-packages/wandb/sdk/interface/message_future_poll.py
ADDED
|
@@ -0,0 +1,50 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""MessageFuturePoll - Derived from MessageFuture but implementing polling loop.
|
| 2 |
+
|
| 3 |
+
MessageFuture represents a message result of an asynchronous operation.
|
| 4 |
+
|
| 5 |
+
MessageFuturePoll implements a polling loop to periodically query for a
|
| 6 |
+
completed async operation.
|
| 7 |
+
|
| 8 |
+
"""
|
| 9 |
+
|
| 10 |
+
import time
|
| 11 |
+
from typing import Any, Optional
|
| 12 |
+
|
| 13 |
+
from wandb.proto import wandb_internal_pb2 as pb
|
| 14 |
+
|
| 15 |
+
from .message_future import MessageFuture
|
| 16 |
+
|
| 17 |
+
|
| 18 |
+
class MessageFuturePoll(MessageFuture):
|
| 19 |
+
_fn: Any
|
| 20 |
+
_xid: str
|
| 21 |
+
|
| 22 |
+
def __init__(self, fn: Any, xid: str) -> None:
|
| 23 |
+
super().__init__()
|
| 24 |
+
self._fn = fn
|
| 25 |
+
self._xid = xid
|
| 26 |
+
|
| 27 |
+
def get(self, timeout: Optional[int] = None) -> Optional[pb.Result]:
|
| 28 |
+
self._poll(timeout=timeout)
|
| 29 |
+
if self._object_ready.is_set():
|
| 30 |
+
return self._object
|
| 31 |
+
return None
|
| 32 |
+
|
| 33 |
+
def _poll(self, timeout: Optional[int] = None) -> None:
|
| 34 |
+
if self._object_ready.is_set():
|
| 35 |
+
return
|
| 36 |
+
done = False
|
| 37 |
+
start_time = time.time()
|
| 38 |
+
sleep_time = 0.5
|
| 39 |
+
while not done:
|
| 40 |
+
result = self._fn(xid=self._xid)
|
| 41 |
+
if result:
|
| 42 |
+
self._set_object(result)
|
| 43 |
+
done = True
|
| 44 |
+
continue
|
| 45 |
+
now_time = time.time()
|
| 46 |
+
if timeout and start_time - now_time > timeout:
|
| 47 |
+
done = True
|
| 48 |
+
continue
|
| 49 |
+
time.sleep(sleep_time)
|
| 50 |
+
sleep_time = min(sleep_time * 2, 5)
|
parrot/lib/python3.10/site-packages/wandb/sdk/interface/router.py
ADDED
|
@@ -0,0 +1,118 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Router - handle message router (base class).
|
| 2 |
+
|
| 3 |
+
Router to manage responses.
|
| 4 |
+
|
| 5 |
+
"""
|
| 6 |
+
|
| 7 |
+
import logging
|
| 8 |
+
import threading
|
| 9 |
+
import uuid
|
| 10 |
+
from abc import abstractmethod
|
| 11 |
+
from typing import TYPE_CHECKING, Dict, Optional
|
| 12 |
+
|
| 13 |
+
from ..lib import mailbox, tracelog
|
| 14 |
+
from .message_future import MessageFuture
|
| 15 |
+
|
| 16 |
+
if TYPE_CHECKING:
|
| 17 |
+
from queue import Queue
|
| 18 |
+
|
| 19 |
+
from wandb.proto import wandb_internal_pb2 as pb
|
| 20 |
+
|
| 21 |
+
|
| 22 |
+
logger = logging.getLogger("wandb")
|
| 23 |
+
|
| 24 |
+
|
| 25 |
+
class MessageRouterClosedError(Exception):
|
| 26 |
+
"""Router has been closed."""
|
| 27 |
+
|
| 28 |
+
pass
|
| 29 |
+
|
| 30 |
+
|
| 31 |
+
class MessageFutureObject(MessageFuture):
|
| 32 |
+
def __init__(self) -> None:
|
| 33 |
+
super().__init__()
|
| 34 |
+
|
| 35 |
+
def get(self, timeout: Optional[int] = None) -> Optional["pb.Result"]:
|
| 36 |
+
is_set = self._object_ready.wait(timeout)
|
| 37 |
+
if is_set and self._object:
|
| 38 |
+
return self._object
|
| 39 |
+
return None
|
| 40 |
+
|
| 41 |
+
|
| 42 |
+
class MessageRouter:
|
| 43 |
+
_pending_reqs: Dict[str, MessageFutureObject]
|
| 44 |
+
_request_queue: "Queue[pb.Record]"
|
| 45 |
+
_response_queue: "Queue[pb.Result]"
|
| 46 |
+
_mailbox: Optional[mailbox.Mailbox]
|
| 47 |
+
|
| 48 |
+
def __init__(self, mailbox: Optional[mailbox.Mailbox] = None) -> None:
|
| 49 |
+
self._mailbox = mailbox
|
| 50 |
+
self._pending_reqs = {}
|
| 51 |
+
self._lock = threading.Lock()
|
| 52 |
+
|
| 53 |
+
self._join_event = threading.Event()
|
| 54 |
+
self._thread = threading.Thread(target=self.message_loop)
|
| 55 |
+
self._thread.name = "MsgRouterThr"
|
| 56 |
+
self._thread.daemon = True
|
| 57 |
+
self._thread.start()
|
| 58 |
+
|
| 59 |
+
@abstractmethod
|
| 60 |
+
def _read_message(self) -> Optional["pb.Result"]:
|
| 61 |
+
raise NotImplementedError
|
| 62 |
+
|
| 63 |
+
@abstractmethod
|
| 64 |
+
def _send_message(self, record: "pb.Record") -> None:
|
| 65 |
+
raise NotImplementedError
|
| 66 |
+
|
| 67 |
+
def message_loop(self) -> None:
|
| 68 |
+
while not self._join_event.is_set():
|
| 69 |
+
try:
|
| 70 |
+
msg = self._read_message()
|
| 71 |
+
except EOFError:
|
| 72 |
+
# On abnormal shutdown the queue will be destroyed underneath
|
| 73 |
+
# resulting in EOFError. message_loop needs to exit..
|
| 74 |
+
logger.warning("EOFError seen in message_loop")
|
| 75 |
+
break
|
| 76 |
+
except MessageRouterClosedError:
|
| 77 |
+
logger.warning("message_loop has been closed")
|
| 78 |
+
break
|
| 79 |
+
if not msg:
|
| 80 |
+
continue
|
| 81 |
+
self._handle_msg_rcv(msg)
|
| 82 |
+
|
| 83 |
+
def send_and_receive(
|
| 84 |
+
self, rec: "pb.Record", local: Optional[bool] = None
|
| 85 |
+
) -> MessageFuture:
|
| 86 |
+
rec.control.req_resp = True
|
| 87 |
+
if local:
|
| 88 |
+
rec.control.local = local
|
| 89 |
+
rec.uuid = uuid.uuid4().hex
|
| 90 |
+
future = MessageFutureObject()
|
| 91 |
+
with self._lock:
|
| 92 |
+
self._pending_reqs[rec.uuid] = future
|
| 93 |
+
|
| 94 |
+
self._send_message(rec)
|
| 95 |
+
|
| 96 |
+
return future
|
| 97 |
+
|
| 98 |
+
def join(self) -> None:
|
| 99 |
+
self._join_event.set()
|
| 100 |
+
self._thread.join()
|
| 101 |
+
|
| 102 |
+
def _handle_msg_rcv(self, msg: "pb.Result") -> None:
|
| 103 |
+
# deliver mailbox addressed messages to mailbox
|
| 104 |
+
if self._mailbox and msg.control.mailbox_slot:
|
| 105 |
+
self._mailbox.deliver(msg)
|
| 106 |
+
return
|
| 107 |
+
with self._lock:
|
| 108 |
+
future = self._pending_reqs.pop(msg.uuid, None)
|
| 109 |
+
if future is None:
|
| 110 |
+
# TODO (cvp): saw this in tests, seemed benign enough to ignore, but
|
| 111 |
+
# could point to other issues.
|
| 112 |
+
if msg.uuid != "":
|
| 113 |
+
tracelog.log_message_assert(msg)
|
| 114 |
+
logger.warning(
|
| 115 |
+
"No listener found for msg with uuid %s (%s)", msg.uuid, msg
|
| 116 |
+
)
|
| 117 |
+
return
|
| 118 |
+
future._set_object(msg)
|
parrot/lib/python3.10/site-packages/wandb/sdk/interface/router_queue.py
ADDED
|
@@ -0,0 +1,44 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Router - handle message router (queue).
|
| 2 |
+
|
| 3 |
+
Router to manage responses from a queue.
|
| 4 |
+
|
| 5 |
+
"""
|
| 6 |
+
|
| 7 |
+
import queue
|
| 8 |
+
from typing import TYPE_CHECKING, Optional
|
| 9 |
+
|
| 10 |
+
from ..lib import tracelog
|
| 11 |
+
from ..lib.mailbox import Mailbox
|
| 12 |
+
from .router import MessageRouter
|
| 13 |
+
|
| 14 |
+
if TYPE_CHECKING:
|
| 15 |
+
from queue import Queue
|
| 16 |
+
|
| 17 |
+
from wandb.proto import wandb_internal_pb2 as pb
|
| 18 |
+
|
| 19 |
+
|
| 20 |
+
class MessageQueueRouter(MessageRouter):
|
| 21 |
+
_request_queue: "Queue[pb.Record]"
|
| 22 |
+
_response_queue: "Queue[pb.Result]"
|
| 23 |
+
|
| 24 |
+
def __init__(
|
| 25 |
+
self,
|
| 26 |
+
request_queue: "Queue[pb.Record]",
|
| 27 |
+
response_queue: "Queue[pb.Result]",
|
| 28 |
+
mailbox: Optional[Mailbox] = None,
|
| 29 |
+
) -> None:
|
| 30 |
+
self._request_queue = request_queue
|
| 31 |
+
self._response_queue = response_queue
|
| 32 |
+
super().__init__(mailbox=mailbox)
|
| 33 |
+
|
| 34 |
+
def _read_message(self) -> Optional["pb.Result"]:
|
| 35 |
+
try:
|
| 36 |
+
msg = self._response_queue.get(timeout=1)
|
| 37 |
+
except queue.Empty:
|
| 38 |
+
return None
|
| 39 |
+
tracelog.log_message_dequeue(msg, self._response_queue)
|
| 40 |
+
return msg
|
| 41 |
+
|
| 42 |
+
def _send_message(self, record: "pb.Record") -> None:
|
| 43 |
+
tracelog.log_message_queue(record, self._request_queue)
|
| 44 |
+
self._request_queue.put(record)
|
parrot/lib/python3.10/site-packages/wandb/sdk/interface/router_relay.py
ADDED
|
@@ -0,0 +1,39 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Router - handle message router (relay).
|
| 2 |
+
|
| 3 |
+
Router to manage responses from a queue with relay.
|
| 4 |
+
|
| 5 |
+
"""
|
| 6 |
+
|
| 7 |
+
from typing import TYPE_CHECKING
|
| 8 |
+
|
| 9 |
+
from ..lib import tracelog
|
| 10 |
+
from ..lib.mailbox import Mailbox
|
| 11 |
+
from .router_queue import MessageQueueRouter
|
| 12 |
+
|
| 13 |
+
if TYPE_CHECKING:
|
| 14 |
+
from queue import Queue
|
| 15 |
+
|
| 16 |
+
from wandb.proto import wandb_internal_pb2 as pb
|
| 17 |
+
|
| 18 |
+
|
| 19 |
+
class MessageRelayRouter(MessageQueueRouter):
|
| 20 |
+
_relay_queue: "Queue[pb.Result]"
|
| 21 |
+
|
| 22 |
+
def __init__(
|
| 23 |
+
self,
|
| 24 |
+
request_queue: "Queue[pb.Record]",
|
| 25 |
+
response_queue: "Queue[pb.Result]",
|
| 26 |
+
relay_queue: "Queue[pb.Result]",
|
| 27 |
+
mailbox: Mailbox,
|
| 28 |
+
) -> None:
|
| 29 |
+
self._relay_queue = relay_queue
|
| 30 |
+
super().__init__(
|
| 31 |
+
request_queue=request_queue, response_queue=response_queue, mailbox=mailbox
|
| 32 |
+
)
|
| 33 |
+
|
| 34 |
+
def _handle_msg_rcv(self, msg: "pb.Result") -> None:
|
| 35 |
+
if msg.control.relay_id:
|
| 36 |
+
tracelog.log_message_queue(msg, self._relay_queue)
|
| 37 |
+
self._relay_queue.put(msg)
|
| 38 |
+
return
|
| 39 |
+
super()._handle_msg_rcv(msg)
|
parrot/lib/python3.10/site-packages/wandb/sdk/interface/router_sock.py
ADDED
|
@@ -0,0 +1,36 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Router - handle message router (sock).
|
| 2 |
+
|
| 3 |
+
Router to manage responses from a socket client.
|
| 4 |
+
|
| 5 |
+
"""
|
| 6 |
+
|
| 7 |
+
from typing import TYPE_CHECKING, Optional
|
| 8 |
+
|
| 9 |
+
from ..lib.mailbox import Mailbox
|
| 10 |
+
from ..lib.sock_client import SockClient, SockClientClosedError
|
| 11 |
+
from .router import MessageRouter, MessageRouterClosedError
|
| 12 |
+
|
| 13 |
+
if TYPE_CHECKING:
|
| 14 |
+
from wandb.proto import wandb_internal_pb2 as pb
|
| 15 |
+
|
| 16 |
+
|
| 17 |
+
class MessageSockRouter(MessageRouter):
|
| 18 |
+
_sock_client: SockClient
|
| 19 |
+
_mailbox: Mailbox
|
| 20 |
+
|
| 21 |
+
def __init__(self, sock_client: SockClient, mailbox: Mailbox) -> None:
|
| 22 |
+
self._sock_client = sock_client
|
| 23 |
+
super().__init__(mailbox=mailbox)
|
| 24 |
+
|
| 25 |
+
def _read_message(self) -> Optional["pb.Result"]:
|
| 26 |
+
try:
|
| 27 |
+
resp = self._sock_client.read_server_response(timeout=1)
|
| 28 |
+
except SockClientClosedError:
|
| 29 |
+
raise MessageRouterClosedError
|
| 30 |
+
if not resp:
|
| 31 |
+
return None
|
| 32 |
+
msg = resp.result_communicate
|
| 33 |
+
return msg
|
| 34 |
+
|
| 35 |
+
def _send_message(self, record: "pb.Record") -> None:
|
| 36 |
+
self._sock_client.send_record_communicate(record)
|
parrot/lib/python3.10/site-packages/wandb/sdk/interface/summary_record.py
ADDED
|
@@ -0,0 +1,67 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Summary Record.
|
| 2 |
+
|
| 3 |
+
This module implements a summary record as an intermediate format before being converted
|
| 4 |
+
to a protocol buffer.
|
| 5 |
+
"""
|
| 6 |
+
|
| 7 |
+
import typing as t
|
| 8 |
+
|
| 9 |
+
|
| 10 |
+
class SummaryRecord:
|
| 11 |
+
"""Encodes a diff -- analogous to the SummaryRecord protobuf message."""
|
| 12 |
+
|
| 13 |
+
update: t.List["SummaryItem"]
|
| 14 |
+
remove: t.List["SummaryItem"]
|
| 15 |
+
|
| 16 |
+
def __init__(self):
|
| 17 |
+
self.update = []
|
| 18 |
+
self.remove = []
|
| 19 |
+
|
| 20 |
+
def __str__(self):
|
| 21 |
+
s = "SummaryRecord:\n Update:\n "
|
| 22 |
+
s += "\n ".join([str(item) for item in self.update])
|
| 23 |
+
s += "\n Remove:\n "
|
| 24 |
+
s += "\n ".join([str(item) for item in self.remove])
|
| 25 |
+
s += "\n"
|
| 26 |
+
return s
|
| 27 |
+
|
| 28 |
+
__repr__ = __str__
|
| 29 |
+
|
| 30 |
+
def _add_next_parent(self, parent_key):
|
| 31 |
+
with_next_parent = SummaryRecord()
|
| 32 |
+
with_next_parent.update = [
|
| 33 |
+
item._add_next_parent(parent_key) for item in self.update
|
| 34 |
+
]
|
| 35 |
+
with_next_parent.remove = [
|
| 36 |
+
item._add_next_parent(parent_key) for item in self.remove
|
| 37 |
+
]
|
| 38 |
+
|
| 39 |
+
return with_next_parent
|
| 40 |
+
|
| 41 |
+
|
| 42 |
+
class SummaryItem:
|
| 43 |
+
"""Analogous to the SummaryItem protobuf message."""
|
| 44 |
+
|
| 45 |
+
key: t.Tuple[str]
|
| 46 |
+
value: t.Any
|
| 47 |
+
|
| 48 |
+
def __init__(self):
|
| 49 |
+
self.key = tuple()
|
| 50 |
+
self.value = None
|
| 51 |
+
|
| 52 |
+
def __str__(self):
|
| 53 |
+
return "SummaryItem: key: " + str(self.key) + " value: " + str(self.value)
|
| 54 |
+
|
| 55 |
+
__repr__ = __str__
|
| 56 |
+
|
| 57 |
+
def _add_next_parent(self, parent_key):
|
| 58 |
+
with_next_parent = SummaryItem()
|
| 59 |
+
|
| 60 |
+
key = self.key
|
| 61 |
+
if not isinstance(key, tuple):
|
| 62 |
+
key = (key,)
|
| 63 |
+
|
| 64 |
+
with_next_parent.key = (parent_key,) + self.key
|
| 65 |
+
with_next_parent.value = self.value
|
| 66 |
+
|
| 67 |
+
return with_next_parent
|
parrot/lib/python3.10/site-packages/wandb/sdk/internal/__init__.py
ADDED
|
File without changes
|
parrot/lib/python3.10/site-packages/wandb/sdk/internal/datastore.py
ADDED
|
@@ -0,0 +1,297 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""leveldb log datastore.
|
| 2 |
+
|
| 3 |
+
Format is described at:
|
| 4 |
+
https://github.com/google/leveldb/blob/master/doc/log_format.md
|
| 5 |
+
|
| 6 |
+
block := record* trailer?
|
| 7 |
+
record :=
|
| 8 |
+
checksum: uint32 // crc32c of type and data[] ; little-endian
|
| 9 |
+
length: uint16 // little-endian
|
| 10 |
+
type: uint8 // One of FULL, FIRST, MIDDLE, LAST
|
| 11 |
+
data: uint8[length]
|
| 12 |
+
|
| 13 |
+
header :=
|
| 14 |
+
ident: char[4]
|
| 15 |
+
magic: uint16
|
| 16 |
+
version: uint8
|
| 17 |
+
"""
|
| 18 |
+
|
| 19 |
+
# TODO: possibly restructure code by porting the C++ or go implementation
|
| 20 |
+
|
| 21 |
+
import logging
|
| 22 |
+
import os
|
| 23 |
+
import struct
|
| 24 |
+
import zlib
|
| 25 |
+
from typing import TYPE_CHECKING, Optional, Tuple
|
| 26 |
+
|
| 27 |
+
import wandb
|
| 28 |
+
|
| 29 |
+
if TYPE_CHECKING:
|
| 30 |
+
from typing import IO, Any
|
| 31 |
+
|
| 32 |
+
from wandb.proto.wandb_internal_pb2 import Record
|
| 33 |
+
|
| 34 |
+
logger = logging.getLogger(__name__)
|
| 35 |
+
|
| 36 |
+
LEVELDBLOG_HEADER_LEN = 7
|
| 37 |
+
LEVELDBLOG_BLOCK_LEN = 32768
|
| 38 |
+
LEVELDBLOG_DATA_LEN = LEVELDBLOG_BLOCK_LEN - LEVELDBLOG_HEADER_LEN
|
| 39 |
+
|
| 40 |
+
LEVELDBLOG_FULL = 1
|
| 41 |
+
LEVELDBLOG_FIRST = 2
|
| 42 |
+
LEVELDBLOG_MIDDLE = 3
|
| 43 |
+
LEVELDBLOG_LAST = 4
|
| 44 |
+
|
| 45 |
+
LEVELDBLOG_HEADER_IDENT = ":W&B"
|
| 46 |
+
LEVELDBLOG_HEADER_MAGIC = (
|
| 47 |
+
0xBEE1 # zlib.crc32(bytes("Weights & Biases", 'iso8859-1')) & 0xffff
|
| 48 |
+
)
|
| 49 |
+
LEVELDBLOG_HEADER_VERSION = 0
|
| 50 |
+
|
| 51 |
+
try:
|
| 52 |
+
bytes("", "ascii")
|
| 53 |
+
|
| 54 |
+
def strtobytes(x):
|
| 55 |
+
"""Strtobytes."""
|
| 56 |
+
return bytes(x, "iso8859-1")
|
| 57 |
+
|
| 58 |
+
# def bytestostr(x):
|
| 59 |
+
# return str(x, 'iso8859-1')
|
| 60 |
+
|
| 61 |
+
except Exception:
|
| 62 |
+
strtobytes = str
|
| 63 |
+
# bytestostr = str
|
| 64 |
+
|
| 65 |
+
|
| 66 |
+
class DataStore:
|
| 67 |
+
_index: int
|
| 68 |
+
_flush_offset: int
|
| 69 |
+
|
| 70 |
+
def __init__(self) -> None:
|
| 71 |
+
self._opened_for_scan = False
|
| 72 |
+
self._fp: Optional[IO[Any]] = None
|
| 73 |
+
self._index = 0
|
| 74 |
+
self._flush_offset = 0
|
| 75 |
+
self._size_bytes = 0
|
| 76 |
+
|
| 77 |
+
self._crc = [0] * (LEVELDBLOG_LAST + 1)
|
| 78 |
+
for x in range(1, LEVELDBLOG_LAST + 1):
|
| 79 |
+
self._crc[x] = zlib.crc32(strtobytes(chr(x))) & 0xFFFFFFFF
|
| 80 |
+
|
| 81 |
+
assert (
|
| 82 |
+
wandb._assert_is_internal_process # type: ignore
|
| 83 |
+
), "DataStore can only be used in the internal process"
|
| 84 |
+
|
| 85 |
+
def open_for_write(self, fname: str) -> None:
|
| 86 |
+
self._fname = fname
|
| 87 |
+
logger.info("open: %s", fname)
|
| 88 |
+
open_flags = "xb"
|
| 89 |
+
self._fp = open(fname, open_flags)
|
| 90 |
+
self._write_header()
|
| 91 |
+
|
| 92 |
+
def open_for_append(self, fname):
|
| 93 |
+
# TODO: implement
|
| 94 |
+
self._fname = fname
|
| 95 |
+
logger.info("open: %s", fname)
|
| 96 |
+
self._fp = open(fname, "wb")
|
| 97 |
+
# do something with _index
|
| 98 |
+
|
| 99 |
+
def open_for_scan(self, fname):
|
| 100 |
+
self._fname = fname
|
| 101 |
+
logger.info("open for scan: %s", fname)
|
| 102 |
+
self._fp = open(fname, "r+b")
|
| 103 |
+
self._index = 0
|
| 104 |
+
self._size_bytes = os.stat(fname).st_size
|
| 105 |
+
self._opened_for_scan = True
|
| 106 |
+
self._read_header()
|
| 107 |
+
|
| 108 |
+
def seek(self, offset: int) -> None:
|
| 109 |
+
self._fp.seek(offset) # type: ignore
|
| 110 |
+
self._index = offset
|
| 111 |
+
|
| 112 |
+
def get_offset(self) -> int:
|
| 113 |
+
offset = self._fp.tell() # type: ignore
|
| 114 |
+
return offset
|
| 115 |
+
|
| 116 |
+
def in_last_block(self):
|
| 117 |
+
"""Determine if we're in the last block to handle in-progress writes."""
|
| 118 |
+
return self._index > self._size_bytes - LEVELDBLOG_DATA_LEN
|
| 119 |
+
|
| 120 |
+
def scan_record(self):
|
| 121 |
+
assert self._opened_for_scan, "file not open for scanning"
|
| 122 |
+
# TODO(jhr): handle some assertions as file corruption issues
|
| 123 |
+
# assume we have enough room to read header, checked by caller?
|
| 124 |
+
header = self._fp.read(LEVELDBLOG_HEADER_LEN)
|
| 125 |
+
if len(header) == 0:
|
| 126 |
+
return None
|
| 127 |
+
assert (
|
| 128 |
+
len(header) == LEVELDBLOG_HEADER_LEN
|
| 129 |
+
), "record header is {} bytes instead of the expected {}".format(
|
| 130 |
+
len(header), LEVELDBLOG_HEADER_LEN
|
| 131 |
+
)
|
| 132 |
+
fields = struct.unpack("<IHB", header)
|
| 133 |
+
checksum, dlength, dtype = fields
|
| 134 |
+
# check len, better fit in the block
|
| 135 |
+
self._index += LEVELDBLOG_HEADER_LEN
|
| 136 |
+
data = self._fp.read(dlength)
|
| 137 |
+
checksum_computed = zlib.crc32(data, self._crc[dtype]) & 0xFFFFFFFF
|
| 138 |
+
assert (
|
| 139 |
+
checksum == checksum_computed
|
| 140 |
+
), "record checksum is invalid, data may be corrupt"
|
| 141 |
+
self._index += dlength
|
| 142 |
+
return dtype, data
|
| 143 |
+
|
| 144 |
+
def scan_data(self):
|
| 145 |
+
# TODO(jhr): handle some assertions as file corruption issues
|
| 146 |
+
# how much left in the block. if less than header len, read as pad,
|
| 147 |
+
offset = self._index % LEVELDBLOG_BLOCK_LEN
|
| 148 |
+
space_left = LEVELDBLOG_BLOCK_LEN - offset
|
| 149 |
+
if space_left < LEVELDBLOG_HEADER_LEN:
|
| 150 |
+
pad_check = strtobytes("\x00" * space_left)
|
| 151 |
+
pad = self._fp.read(space_left)
|
| 152 |
+
# verify they are zero
|
| 153 |
+
assert pad == pad_check, "invalid padding"
|
| 154 |
+
self._index += space_left
|
| 155 |
+
|
| 156 |
+
record = self.scan_record()
|
| 157 |
+
if record is None: # eof
|
| 158 |
+
return None
|
| 159 |
+
dtype, data = record
|
| 160 |
+
if dtype == LEVELDBLOG_FULL:
|
| 161 |
+
return data
|
| 162 |
+
|
| 163 |
+
assert (
|
| 164 |
+
dtype == LEVELDBLOG_FIRST
|
| 165 |
+
), f"expected record to be type {LEVELDBLOG_FIRST} but found {dtype}"
|
| 166 |
+
while True:
|
| 167 |
+
offset = self._index % LEVELDBLOG_BLOCK_LEN
|
| 168 |
+
record = self.scan_record()
|
| 169 |
+
if record is None: # eof
|
| 170 |
+
return None
|
| 171 |
+
dtype, new_data = record
|
| 172 |
+
if dtype == LEVELDBLOG_LAST:
|
| 173 |
+
data += new_data
|
| 174 |
+
break
|
| 175 |
+
assert (
|
| 176 |
+
dtype == LEVELDBLOG_MIDDLE
|
| 177 |
+
), f"expected record to be type {LEVELDBLOG_MIDDLE} but found {dtype}"
|
| 178 |
+
data += new_data
|
| 179 |
+
return data
|
| 180 |
+
|
| 181 |
+
def _write_header(self):
|
| 182 |
+
data = struct.pack(
|
| 183 |
+
"<4sHB",
|
| 184 |
+
strtobytes(LEVELDBLOG_HEADER_IDENT),
|
| 185 |
+
LEVELDBLOG_HEADER_MAGIC,
|
| 186 |
+
LEVELDBLOG_HEADER_VERSION,
|
| 187 |
+
)
|
| 188 |
+
assert (
|
| 189 |
+
len(data) == LEVELDBLOG_HEADER_LEN
|
| 190 |
+
), f"header size is {len(data)} bytes, expected {LEVELDBLOG_HEADER_LEN}"
|
| 191 |
+
self._fp.write(data)
|
| 192 |
+
self._index += len(data)
|
| 193 |
+
|
| 194 |
+
def _read_header(self):
|
| 195 |
+
header = self._fp.read(LEVELDBLOG_HEADER_LEN)
|
| 196 |
+
assert (
|
| 197 |
+
len(header) == LEVELDBLOG_HEADER_LEN
|
| 198 |
+
), "header is {} bytes instead of the expected {}".format(
|
| 199 |
+
len(header), LEVELDBLOG_HEADER_LEN
|
| 200 |
+
)
|
| 201 |
+
ident, magic, version = struct.unpack("<4sHB", header)
|
| 202 |
+
if ident != strtobytes(LEVELDBLOG_HEADER_IDENT):
|
| 203 |
+
raise Exception("Invalid header")
|
| 204 |
+
if magic != LEVELDBLOG_HEADER_MAGIC:
|
| 205 |
+
raise Exception("Invalid header")
|
| 206 |
+
if version != LEVELDBLOG_HEADER_VERSION:
|
| 207 |
+
raise Exception("Invalid header")
|
| 208 |
+
self._index += len(header)
|
| 209 |
+
|
| 210 |
+
def _write_record(self, s, dtype=None):
|
| 211 |
+
"""Write record that must fit into a block."""
|
| 212 |
+
# double check that there is enough space
|
| 213 |
+
# (this is a precondition to calling this method)
|
| 214 |
+
assert len(s) + LEVELDBLOG_HEADER_LEN <= (
|
| 215 |
+
LEVELDBLOG_BLOCK_LEN - self._index % LEVELDBLOG_BLOCK_LEN
|
| 216 |
+
), "not enough space to write new records"
|
| 217 |
+
|
| 218 |
+
dlength = len(s)
|
| 219 |
+
dtype = dtype or LEVELDBLOG_FULL
|
| 220 |
+
# print("record: length={} type={}".format(dlength, dtype))
|
| 221 |
+
checksum = zlib.crc32(s, self._crc[dtype]) & 0xFFFFFFFF
|
| 222 |
+
# logger.info("write_record: index=%d len=%d dtype=%d",
|
| 223 |
+
# self._index, dlength, dtype)
|
| 224 |
+
self._fp.write(struct.pack("<IHB", checksum, dlength, dtype))
|
| 225 |
+
if dlength:
|
| 226 |
+
self._fp.write(s)
|
| 227 |
+
self._index += LEVELDBLOG_HEADER_LEN + len(s)
|
| 228 |
+
|
| 229 |
+
def _write_data(self, s):
|
| 230 |
+
start_offset = self._index
|
| 231 |
+
|
| 232 |
+
offset = self._index % LEVELDBLOG_BLOCK_LEN
|
| 233 |
+
space_left = LEVELDBLOG_BLOCK_LEN - offset
|
| 234 |
+
data_used = 0
|
| 235 |
+
data_left = len(s)
|
| 236 |
+
# logger.info("write_data: index=%d offset=%d len=%d",
|
| 237 |
+
# self._index, offset, data_left)
|
| 238 |
+
if space_left < LEVELDBLOG_HEADER_LEN:
|
| 239 |
+
pad = "\x00" * space_left
|
| 240 |
+
self._fp.write(strtobytes(pad))
|
| 241 |
+
self._index += space_left
|
| 242 |
+
offset = 0
|
| 243 |
+
space_left = LEVELDBLOG_BLOCK_LEN
|
| 244 |
+
|
| 245 |
+
# does it fit in first (possibly partial) block?
|
| 246 |
+
if data_left + LEVELDBLOG_HEADER_LEN <= space_left:
|
| 247 |
+
self._write_record(s)
|
| 248 |
+
else:
|
| 249 |
+
# write first record (we could still be in the middle of a block,
|
| 250 |
+
# but this write will end on a block boundary)
|
| 251 |
+
data_room = space_left - LEVELDBLOG_HEADER_LEN
|
| 252 |
+
self._write_record(s[:data_room], LEVELDBLOG_FIRST)
|
| 253 |
+
data_used += data_room
|
| 254 |
+
data_left -= data_room
|
| 255 |
+
assert data_left, "data_left should be non-zero"
|
| 256 |
+
|
| 257 |
+
# write middles (if any)
|
| 258 |
+
while data_left > LEVELDBLOG_DATA_LEN:
|
| 259 |
+
self._write_record(
|
| 260 |
+
s[data_used : data_used + LEVELDBLOG_DATA_LEN],
|
| 261 |
+
LEVELDBLOG_MIDDLE,
|
| 262 |
+
)
|
| 263 |
+
data_used += LEVELDBLOG_DATA_LEN
|
| 264 |
+
data_left -= LEVELDBLOG_DATA_LEN
|
| 265 |
+
|
| 266 |
+
# write last and flush the entire block to disk
|
| 267 |
+
self._write_record(s[data_used:], LEVELDBLOG_LAST)
|
| 268 |
+
self._fp.flush()
|
| 269 |
+
os.fsync(self._fp.fileno())
|
| 270 |
+
self._flush_offset = self._index
|
| 271 |
+
|
| 272 |
+
return start_offset, self._index, self._flush_offset
|
| 273 |
+
|
| 274 |
+
def ensure_flushed(self, off: int) -> None:
|
| 275 |
+
self._fp.flush() # type: ignore
|
| 276 |
+
|
| 277 |
+
def write(self, obj: "Record") -> Tuple[int, int, int]:
|
| 278 |
+
"""Write a protocol buffer.
|
| 279 |
+
|
| 280 |
+
Arguments:
|
| 281 |
+
obj: Protocol buffer to write.
|
| 282 |
+
|
| 283 |
+
Returns:
|
| 284 |
+
(start_offset, end_offset, flush_offset) if successful,
|
| 285 |
+
None otherwise
|
| 286 |
+
|
| 287 |
+
"""
|
| 288 |
+
raw_size = obj.ByteSize()
|
| 289 |
+
s = obj.SerializeToString()
|
| 290 |
+
assert len(s) == raw_size, "invalid serialization"
|
| 291 |
+
ret = self._write_data(s)
|
| 292 |
+
return ret
|
| 293 |
+
|
| 294 |
+
def close(self) -> None:
|
| 295 |
+
if self._fp is not None:
|
| 296 |
+
logger.info("close: %s", self._fname)
|
| 297 |
+
self._fp.close()
|
parrot/lib/python3.10/site-packages/wandb/sdk/internal/handler.py
ADDED
|
@@ -0,0 +1,911 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Handle Manager."""
|
| 2 |
+
|
| 3 |
+
import json
|
| 4 |
+
import logging
|
| 5 |
+
import math
|
| 6 |
+
import numbers
|
| 7 |
+
import time
|
| 8 |
+
from collections import defaultdict
|
| 9 |
+
from queue import Queue
|
| 10 |
+
from threading import Event
|
| 11 |
+
from typing import (
|
| 12 |
+
TYPE_CHECKING,
|
| 13 |
+
Any,
|
| 14 |
+
Callable,
|
| 15 |
+
Dict,
|
| 16 |
+
Iterable,
|
| 17 |
+
List,
|
| 18 |
+
Optional,
|
| 19 |
+
Sequence,
|
| 20 |
+
Tuple,
|
| 21 |
+
cast,
|
| 22 |
+
)
|
| 23 |
+
|
| 24 |
+
from wandb.proto.wandb_internal_pb2 import (
|
| 25 |
+
HistoryRecord,
|
| 26 |
+
InternalMessages,
|
| 27 |
+
MetricRecord,
|
| 28 |
+
Record,
|
| 29 |
+
Result,
|
| 30 |
+
RunRecord,
|
| 31 |
+
SampledHistoryItem,
|
| 32 |
+
SummaryItem,
|
| 33 |
+
SummaryRecord,
|
| 34 |
+
SummaryRecordRequest,
|
| 35 |
+
SystemMetricSample,
|
| 36 |
+
SystemMetricsBuffer,
|
| 37 |
+
)
|
| 38 |
+
|
| 39 |
+
from ..interface.interface_queue import InterfaceQueue
|
| 40 |
+
from ..lib import handler_util, proto_util, tracelog, wburls
|
| 41 |
+
from . import context, sample, tb_watcher
|
| 42 |
+
from .settings_static import SettingsStatic
|
| 43 |
+
from .system.system_monitor import SystemMonitor
|
| 44 |
+
|
| 45 |
+
if TYPE_CHECKING:
|
| 46 |
+
from wandb.proto.wandb_internal_pb2 import MetricSummary
|
| 47 |
+
|
| 48 |
+
|
| 49 |
+
SummaryDict = Dict[str, Any]
|
| 50 |
+
|
| 51 |
+
logger = logging.getLogger(__name__)
|
| 52 |
+
|
| 53 |
+
# Update (March 5, 2024): Since ~2020/2021, when constructing the summary
|
| 54 |
+
# object, we had replaced the artifact path for media types with the latest
|
| 55 |
+
# artifact path. The primary purpose of this was to support live updating of
|
| 56 |
+
# media objects in the UI (since the default artifact path was fully qualified
|
| 57 |
+
# and would not update). However, in March of 2024, a bug was discovered with
|
| 58 |
+
# this approach which causes this path to be incorrect in cases where the media
|
| 59 |
+
# object is logged to another artifact before being logged to the run. Setting
|
| 60 |
+
# this to `False` disables this copy behavior. The impact is that users will
|
| 61 |
+
# need to refresh to see updates. Ironically, this updating behavior is not
|
| 62 |
+
# currently supported in the UI, so the impact of this change is minimal.
|
| 63 |
+
REPLACE_SUMMARY_ART_PATH_WITH_LATEST = False
|
| 64 |
+
|
| 65 |
+
|
| 66 |
+
def _dict_nested_set(target: Dict[str, Any], key_list: Sequence[str], v: Any) -> None:
|
| 67 |
+
# recurse down the dictionary structure:
|
| 68 |
+
|
| 69 |
+
for k in key_list[:-1]:
|
| 70 |
+
target.setdefault(k, {})
|
| 71 |
+
new_target = target.get(k)
|
| 72 |
+
if TYPE_CHECKING:
|
| 73 |
+
new_target = cast(Dict[str, Any], new_target)
|
| 74 |
+
target = new_target
|
| 75 |
+
# use the last element of the key to write the leaf:
|
| 76 |
+
target[key_list[-1]] = v
|
| 77 |
+
|
| 78 |
+
|
| 79 |
+
class HandleManager:
|
| 80 |
+
_consolidated_summary: SummaryDict
|
| 81 |
+
_sampled_history: Dict[str, sample.UniformSampleAccumulator]
|
| 82 |
+
_partial_history: Dict[str, Any]
|
| 83 |
+
_run_proto: Optional[RunRecord]
|
| 84 |
+
_settings: SettingsStatic
|
| 85 |
+
_record_q: "Queue[Record]"
|
| 86 |
+
_result_q: "Queue[Result]"
|
| 87 |
+
_stopped: Event
|
| 88 |
+
_writer_q: "Queue[Record]"
|
| 89 |
+
_interface: InterfaceQueue
|
| 90 |
+
_system_monitor: Optional[SystemMonitor]
|
| 91 |
+
_tb_watcher: Optional[tb_watcher.TBWatcher]
|
| 92 |
+
_metric_defines: Dict[str, MetricRecord]
|
| 93 |
+
_metric_globs: Dict[str, MetricRecord]
|
| 94 |
+
_metric_track: Dict[Tuple[str, ...], float]
|
| 95 |
+
_metric_copy: Dict[Tuple[str, ...], Any]
|
| 96 |
+
_track_time: Optional[float]
|
| 97 |
+
_accumulate_time: float
|
| 98 |
+
_run_start_time: Optional[float]
|
| 99 |
+
_context_keeper: context.ContextKeeper
|
| 100 |
+
|
| 101 |
+
def __init__(
|
| 102 |
+
self,
|
| 103 |
+
settings: SettingsStatic,
|
| 104 |
+
record_q: "Queue[Record]",
|
| 105 |
+
result_q: "Queue[Result]",
|
| 106 |
+
stopped: Event,
|
| 107 |
+
writer_q: "Queue[Record]",
|
| 108 |
+
interface: InterfaceQueue,
|
| 109 |
+
context_keeper: context.ContextKeeper,
|
| 110 |
+
) -> None:
|
| 111 |
+
self._settings = settings
|
| 112 |
+
self._record_q = record_q
|
| 113 |
+
self._result_q = result_q
|
| 114 |
+
self._stopped = stopped
|
| 115 |
+
self._writer_q = writer_q
|
| 116 |
+
self._interface = interface
|
| 117 |
+
self._context_keeper = context_keeper
|
| 118 |
+
|
| 119 |
+
self._tb_watcher = None
|
| 120 |
+
self._system_monitor = None
|
| 121 |
+
self._step = 0
|
| 122 |
+
|
| 123 |
+
self._track_time = None
|
| 124 |
+
self._accumulate_time = 0
|
| 125 |
+
self._run_start_time = None
|
| 126 |
+
|
| 127 |
+
# keep track of summary from key/val updates
|
| 128 |
+
self._consolidated_summary = dict()
|
| 129 |
+
self._sampled_history = defaultdict(sample.UniformSampleAccumulator)
|
| 130 |
+
self._run_proto = None
|
| 131 |
+
self._partial_history = dict()
|
| 132 |
+
self._metric_defines = defaultdict(MetricRecord)
|
| 133 |
+
self._metric_globs = defaultdict(MetricRecord)
|
| 134 |
+
self._metric_track = dict()
|
| 135 |
+
self._metric_copy = dict()
|
| 136 |
+
self._internal_messages = InternalMessages()
|
| 137 |
+
|
| 138 |
+
self._dropped_history = False
|
| 139 |
+
|
| 140 |
+
def __len__(self) -> int:
|
| 141 |
+
return self._record_q.qsize()
|
| 142 |
+
|
| 143 |
+
def handle(self, record: Record) -> None:
|
| 144 |
+
self._context_keeper.add_from_record(record)
|
| 145 |
+
record_type = record.WhichOneof("record_type")
|
| 146 |
+
assert record_type
|
| 147 |
+
handler_str = "handle_" + record_type
|
| 148 |
+
handler: Callable[[Record], None] = getattr(self, handler_str, None) # type: ignore
|
| 149 |
+
assert handler, f"unknown handle: {handler_str}" # type: ignore
|
| 150 |
+
handler(record)
|
| 151 |
+
|
| 152 |
+
def handle_request(self, record: Record) -> None:
|
| 153 |
+
request_type = record.request.WhichOneof("request_type")
|
| 154 |
+
assert request_type
|
| 155 |
+
handler_str = "handle_request_" + request_type
|
| 156 |
+
handler: Callable[[Record], None] = getattr(self, handler_str, None) # type: ignore
|
| 157 |
+
if request_type != "network_status":
|
| 158 |
+
logger.debug(f"handle_request: {request_type}")
|
| 159 |
+
assert handler, f"unknown handle: {handler_str}" # type: ignore
|
| 160 |
+
handler(record)
|
| 161 |
+
|
| 162 |
+
def _dispatch_record(self, record: Record, always_send: bool = False) -> None:
|
| 163 |
+
if always_send:
|
| 164 |
+
record.control.always_send = True
|
| 165 |
+
tracelog.log_message_queue(record, self._writer_q)
|
| 166 |
+
self._writer_q.put(record)
|
| 167 |
+
|
| 168 |
+
def _respond_result(self, result: Result) -> None:
|
| 169 |
+
tracelog.log_message_queue(result, self._result_q)
|
| 170 |
+
context_id = context.context_id_from_result(result)
|
| 171 |
+
self._context_keeper.release(context_id)
|
| 172 |
+
self._result_q.put(result)
|
| 173 |
+
|
| 174 |
+
def debounce(self) -> None:
|
| 175 |
+
pass
|
| 176 |
+
|
| 177 |
+
def handle_request_cancel(self, record: Record) -> None:
|
| 178 |
+
self._dispatch_record(record)
|
| 179 |
+
|
| 180 |
+
def handle_request_defer(self, record: Record) -> None:
|
| 181 |
+
defer = record.request.defer
|
| 182 |
+
state = defer.state
|
| 183 |
+
|
| 184 |
+
logger.info(f"handle defer: {state}")
|
| 185 |
+
# only handle flush tb (sender handles the rest)
|
| 186 |
+
if state == defer.FLUSH_STATS:
|
| 187 |
+
# TODO(jhr): this could block so we dont really want to call shutdown
|
| 188 |
+
# from handler thread
|
| 189 |
+
if self._system_monitor is not None:
|
| 190 |
+
self._system_monitor.finish()
|
| 191 |
+
elif state == defer.FLUSH_TB:
|
| 192 |
+
if self._tb_watcher:
|
| 193 |
+
# shutdown tensorboard workers so we get all metrics flushed
|
| 194 |
+
self._tb_watcher.finish()
|
| 195 |
+
self._tb_watcher = None
|
| 196 |
+
elif state == defer.FLUSH_PARTIAL_HISTORY:
|
| 197 |
+
self._flush_partial_history()
|
| 198 |
+
elif state == defer.FLUSH_SUM:
|
| 199 |
+
self._save_summary(self._consolidated_summary, flush=True)
|
| 200 |
+
|
| 201 |
+
# defer is used to drive the sender finish state machine
|
| 202 |
+
self._dispatch_record(record, always_send=True)
|
| 203 |
+
|
| 204 |
+
def handle_request_login(self, record: Record) -> None:
|
| 205 |
+
self._dispatch_record(record)
|
| 206 |
+
|
| 207 |
+
def handle_request_python_packages(self, record: Record) -> None:
|
| 208 |
+
self._dispatch_record(record)
|
| 209 |
+
|
| 210 |
+
def handle_run(self, record: Record) -> None:
|
| 211 |
+
if self._settings._offline:
|
| 212 |
+
self._run_proto = record.run
|
| 213 |
+
result = proto_util._result_from_record(record)
|
| 214 |
+
result.run_result.run.CopyFrom(record.run)
|
| 215 |
+
self._respond_result(result)
|
| 216 |
+
self._dispatch_record(record)
|
| 217 |
+
|
| 218 |
+
def handle_stats(self, record: Record) -> None:
|
| 219 |
+
self._dispatch_record(record)
|
| 220 |
+
|
| 221 |
+
def handle_config(self, record: Record) -> None:
|
| 222 |
+
self._dispatch_record(record)
|
| 223 |
+
|
| 224 |
+
def handle_output(self, record: Record) -> None:
|
| 225 |
+
self._dispatch_record(record)
|
| 226 |
+
|
| 227 |
+
def handle_output_raw(self, record: Record) -> None:
|
| 228 |
+
self._dispatch_record(record)
|
| 229 |
+
|
| 230 |
+
def handle_files(self, record: Record) -> None:
|
| 231 |
+
self._dispatch_record(record)
|
| 232 |
+
|
| 233 |
+
def handle_request_link_artifact(self, record: Record) -> None:
|
| 234 |
+
self._dispatch_record(record)
|
| 235 |
+
|
| 236 |
+
def handle_use_artifact(self, record: Record) -> None:
|
| 237 |
+
self._dispatch_record(record)
|
| 238 |
+
|
| 239 |
+
def handle_artifact(self, record: Record) -> None:
|
| 240 |
+
self._dispatch_record(record)
|
| 241 |
+
|
| 242 |
+
def handle_alert(self, record: Record) -> None:
|
| 243 |
+
self._dispatch_record(record)
|
| 244 |
+
|
| 245 |
+
def _save_summary(self, summary_dict: SummaryDict, flush: bool = False) -> None:
|
| 246 |
+
summary = SummaryRecord()
|
| 247 |
+
for k, v in summary_dict.items():
|
| 248 |
+
update = summary.update.add()
|
| 249 |
+
update.key = k
|
| 250 |
+
update.value_json = json.dumps(v)
|
| 251 |
+
if flush:
|
| 252 |
+
record = Record(summary=summary)
|
| 253 |
+
self._dispatch_record(record)
|
| 254 |
+
elif not self._settings._offline:
|
| 255 |
+
# Send this summary update as a request since we aren't persisting every update
|
| 256 |
+
summary_record = SummaryRecordRequest(summary=summary)
|
| 257 |
+
request_record = self._interface._make_request(
|
| 258 |
+
summary_record=summary_record
|
| 259 |
+
)
|
| 260 |
+
self._dispatch_record(request_record)
|
| 261 |
+
|
| 262 |
+
def _save_history(
|
| 263 |
+
self,
|
| 264 |
+
history: HistoryRecord,
|
| 265 |
+
) -> None:
|
| 266 |
+
for item in history.item:
|
| 267 |
+
# TODO(jhr) save nested keys?
|
| 268 |
+
k = item.key
|
| 269 |
+
v = json.loads(item.value_json)
|
| 270 |
+
if isinstance(v, numbers.Real):
|
| 271 |
+
self._sampled_history[k].add(v)
|
| 272 |
+
|
| 273 |
+
def _update_summary_metrics(
|
| 274 |
+
self,
|
| 275 |
+
s: "MetricSummary",
|
| 276 |
+
kl: List[str],
|
| 277 |
+
v: "numbers.Real",
|
| 278 |
+
float_v: float,
|
| 279 |
+
goal_max: Optional[bool],
|
| 280 |
+
) -> bool:
|
| 281 |
+
updated = False
|
| 282 |
+
best_key: Optional[Tuple[str, ...]] = None
|
| 283 |
+
if s.none:
|
| 284 |
+
return False
|
| 285 |
+
if s.copy:
|
| 286 |
+
# non-key list copy already done in _update_summary
|
| 287 |
+
if len(kl) > 1:
|
| 288 |
+
_dict_nested_set(self._consolidated_summary, kl, v)
|
| 289 |
+
return True
|
| 290 |
+
if s.last:
|
| 291 |
+
last_key = tuple(kl + ["last"])
|
| 292 |
+
old_last = self._metric_track.get(last_key)
|
| 293 |
+
if old_last is None or float_v != old_last:
|
| 294 |
+
self._metric_track[last_key] = float_v
|
| 295 |
+
_dict_nested_set(self._consolidated_summary, last_key, v)
|
| 296 |
+
updated = True
|
| 297 |
+
if s.best:
|
| 298 |
+
best_key = tuple(kl + ["best"])
|
| 299 |
+
if s.max or best_key and goal_max:
|
| 300 |
+
max_key = tuple(kl + ["max"])
|
| 301 |
+
old_max = self._metric_track.get(max_key)
|
| 302 |
+
if old_max is None or float_v > old_max:
|
| 303 |
+
self._metric_track[max_key] = float_v
|
| 304 |
+
if s.max:
|
| 305 |
+
_dict_nested_set(self._consolidated_summary, max_key, v)
|
| 306 |
+
updated = True
|
| 307 |
+
if best_key:
|
| 308 |
+
_dict_nested_set(self._consolidated_summary, best_key, v)
|
| 309 |
+
updated = True
|
| 310 |
+
# defaulting to minimize if goal is not specified
|
| 311 |
+
if s.min or best_key and not goal_max:
|
| 312 |
+
min_key = tuple(kl + ["min"])
|
| 313 |
+
old_min = self._metric_track.get(min_key)
|
| 314 |
+
if old_min is None or float_v < old_min:
|
| 315 |
+
self._metric_track[min_key] = float_v
|
| 316 |
+
if s.min:
|
| 317 |
+
_dict_nested_set(self._consolidated_summary, min_key, v)
|
| 318 |
+
updated = True
|
| 319 |
+
if best_key:
|
| 320 |
+
_dict_nested_set(self._consolidated_summary, best_key, v)
|
| 321 |
+
updated = True
|
| 322 |
+
if s.mean:
|
| 323 |
+
tot_key = tuple(kl + ["tot"])
|
| 324 |
+
num_key = tuple(kl + ["num"])
|
| 325 |
+
avg_key = tuple(kl + ["mean"])
|
| 326 |
+
tot = self._metric_track.get(tot_key, 0.0)
|
| 327 |
+
num = self._metric_track.get(num_key, 0)
|
| 328 |
+
tot += float_v
|
| 329 |
+
num += 1
|
| 330 |
+
self._metric_track[tot_key] = tot
|
| 331 |
+
self._metric_track[num_key] = num
|
| 332 |
+
_dict_nested_set(self._consolidated_summary, avg_key, tot / num)
|
| 333 |
+
updated = True
|
| 334 |
+
return updated
|
| 335 |
+
|
| 336 |
+
def _update_summary_leaf(
|
| 337 |
+
self,
|
| 338 |
+
kl: List[str],
|
| 339 |
+
v: Any,
|
| 340 |
+
d: Optional[MetricRecord] = None,
|
| 341 |
+
) -> bool:
|
| 342 |
+
has_summary = d and d.HasField("summary")
|
| 343 |
+
if len(kl) == 1:
|
| 344 |
+
copy_key = tuple(kl)
|
| 345 |
+
old_copy = self._metric_copy.get(copy_key)
|
| 346 |
+
if old_copy is None or v != old_copy:
|
| 347 |
+
self._metric_copy[copy_key] = v
|
| 348 |
+
# Store copy metric if not specified, or copy behavior
|
| 349 |
+
if not has_summary or (d and d.summary.copy):
|
| 350 |
+
self._consolidated_summary[kl[0]] = v
|
| 351 |
+
return True
|
| 352 |
+
if not d:
|
| 353 |
+
return False
|
| 354 |
+
if not has_summary:
|
| 355 |
+
return False
|
| 356 |
+
if not isinstance(v, numbers.Real):
|
| 357 |
+
return False
|
| 358 |
+
if math.isnan(v):
|
| 359 |
+
return False
|
| 360 |
+
float_v = float(v)
|
| 361 |
+
goal_max = None
|
| 362 |
+
if d.goal:
|
| 363 |
+
goal_max = d.goal == d.GOAL_MAXIMIZE
|
| 364 |
+
if self._update_summary_metrics(
|
| 365 |
+
d.summary, kl=kl, v=v, float_v=float_v, goal_max=goal_max
|
| 366 |
+
):
|
| 367 |
+
return True
|
| 368 |
+
return False
|
| 369 |
+
|
| 370 |
+
def _update_summary_list(
|
| 371 |
+
self,
|
| 372 |
+
kl: List[str],
|
| 373 |
+
v: Any,
|
| 374 |
+
d: Optional[MetricRecord] = None,
|
| 375 |
+
) -> bool:
|
| 376 |
+
metric_key = ".".join([k.replace(".", "\\.") for k in kl])
|
| 377 |
+
d = self._metric_defines.get(metric_key, d)
|
| 378 |
+
# if the dict has _type key, it's a wandb table object
|
| 379 |
+
if isinstance(v, dict) and not handler_util.metric_is_wandb_dict(v):
|
| 380 |
+
updated = False
|
| 381 |
+
for nk, nv in v.items():
|
| 382 |
+
if self._update_summary_list(kl=kl[:] + [nk], v=nv, d=d):
|
| 383 |
+
updated = True
|
| 384 |
+
return updated
|
| 385 |
+
# If the dict is a media object, update the pointer to the latest alias
|
| 386 |
+
elif (
|
| 387 |
+
REPLACE_SUMMARY_ART_PATH_WITH_LATEST
|
| 388 |
+
and isinstance(v, dict)
|
| 389 |
+
and handler_util.metric_is_wandb_dict(v)
|
| 390 |
+
):
|
| 391 |
+
if "_latest_artifact_path" in v and "artifact_path" in v:
|
| 392 |
+
# TODO: Make non-destructive?
|
| 393 |
+
v["artifact_path"] = v["_latest_artifact_path"]
|
| 394 |
+
updated = self._update_summary_leaf(kl=kl, v=v, d=d)
|
| 395 |
+
return updated
|
| 396 |
+
|
| 397 |
+
def _update_summary_media_objects(self, v: Dict[str, Any]) -> Dict[str, Any]:
|
| 398 |
+
# For now, non-recursive - just top level
|
| 399 |
+
for nk, nv in v.items():
|
| 400 |
+
if REPLACE_SUMMARY_ART_PATH_WITH_LATEST and (
|
| 401 |
+
isinstance(nv, dict)
|
| 402 |
+
and handler_util.metric_is_wandb_dict(nv)
|
| 403 |
+
and "_latest_artifact_path" in nv
|
| 404 |
+
and "artifact_path" in nv
|
| 405 |
+
):
|
| 406 |
+
# TODO: Make non-destructive?
|
| 407 |
+
nv["artifact_path"] = nv["_latest_artifact_path"]
|
| 408 |
+
v[nk] = nv
|
| 409 |
+
return v
|
| 410 |
+
|
| 411 |
+
def _update_summary(self, history_dict: Dict[str, Any]) -> List[str]:
|
| 412 |
+
# keep old behavior fast path if no define metrics have been used
|
| 413 |
+
if not self._metric_defines:
|
| 414 |
+
history_dict = self._update_summary_media_objects(history_dict)
|
| 415 |
+
self._consolidated_summary.update(history_dict)
|
| 416 |
+
return list(history_dict.keys())
|
| 417 |
+
updated_keys = []
|
| 418 |
+
for k, v in history_dict.items():
|
| 419 |
+
if self._update_summary_list(kl=[k], v=v):
|
| 420 |
+
updated_keys.append(k)
|
| 421 |
+
return updated_keys
|
| 422 |
+
|
| 423 |
+
def _history_assign_step(
|
| 424 |
+
self,
|
| 425 |
+
history: HistoryRecord,
|
| 426 |
+
history_dict: Dict[str, Any],
|
| 427 |
+
) -> None:
|
| 428 |
+
has_step = history.HasField("step")
|
| 429 |
+
item = history.item.add()
|
| 430 |
+
item.key = "_step"
|
| 431 |
+
if has_step:
|
| 432 |
+
step = history.step.num
|
| 433 |
+
history_dict["_step"] = step
|
| 434 |
+
item.value_json = json.dumps(step)
|
| 435 |
+
self._step = step + 1
|
| 436 |
+
else:
|
| 437 |
+
history_dict["_step"] = self._step
|
| 438 |
+
item.value_json = json.dumps(self._step)
|
| 439 |
+
self._step += 1
|
| 440 |
+
|
| 441 |
+
def _history_define_metric(self, hkey: str) -> Optional[MetricRecord]:
|
| 442 |
+
"""Check for hkey match in glob metrics and return the defined metric."""
|
| 443 |
+
# Dont define metric for internal metrics
|
| 444 |
+
if hkey.startswith("_"):
|
| 445 |
+
return None
|
| 446 |
+
for k, mglob in self._metric_globs.items():
|
| 447 |
+
if k.endswith("*"):
|
| 448 |
+
if hkey.startswith(k[:-1]):
|
| 449 |
+
m = MetricRecord()
|
| 450 |
+
m.CopyFrom(mglob)
|
| 451 |
+
m.ClearField("glob_name")
|
| 452 |
+
m.options.defined = False
|
| 453 |
+
m.name = hkey
|
| 454 |
+
return m
|
| 455 |
+
return None
|
| 456 |
+
|
| 457 |
+
def _history_update_leaf(
|
| 458 |
+
self,
|
| 459 |
+
kl: List[str],
|
| 460 |
+
v: Any,
|
| 461 |
+
history_dict: Dict[str, Any],
|
| 462 |
+
update_history: Dict[str, Any],
|
| 463 |
+
) -> None:
|
| 464 |
+
hkey = ".".join([k.replace(".", "\\.") for k in kl])
|
| 465 |
+
m = self._metric_defines.get(hkey)
|
| 466 |
+
if not m:
|
| 467 |
+
m = self._history_define_metric(hkey)
|
| 468 |
+
if not m:
|
| 469 |
+
return
|
| 470 |
+
mr = Record()
|
| 471 |
+
mr.metric.CopyFrom(m)
|
| 472 |
+
mr.control.local = True # Dont store this, just send it
|
| 473 |
+
self._handle_defined_metric(mr)
|
| 474 |
+
|
| 475 |
+
if m.options.step_sync and m.step_metric:
|
| 476 |
+
if m.step_metric not in history_dict:
|
| 477 |
+
copy_key = tuple([m.step_metric])
|
| 478 |
+
step = self._metric_copy.get(copy_key)
|
| 479 |
+
if step is not None:
|
| 480 |
+
update_history[m.step_metric] = step
|
| 481 |
+
|
| 482 |
+
def _history_update_list(
|
| 483 |
+
self,
|
| 484 |
+
kl: List[str],
|
| 485 |
+
v: Any,
|
| 486 |
+
history_dict: Dict[str, Any],
|
| 487 |
+
update_history: Dict[str, Any],
|
| 488 |
+
) -> None:
|
| 489 |
+
if isinstance(v, dict):
|
| 490 |
+
for nk, nv in v.items():
|
| 491 |
+
self._history_update_list(
|
| 492 |
+
kl=kl[:] + [nk],
|
| 493 |
+
v=nv,
|
| 494 |
+
history_dict=history_dict,
|
| 495 |
+
update_history=update_history,
|
| 496 |
+
)
|
| 497 |
+
return
|
| 498 |
+
self._history_update_leaf(
|
| 499 |
+
kl=kl, v=v, history_dict=history_dict, update_history=update_history
|
| 500 |
+
)
|
| 501 |
+
|
| 502 |
+
def _history_update(
|
| 503 |
+
self,
|
| 504 |
+
history: HistoryRecord,
|
| 505 |
+
history_dict: Dict[str, Any],
|
| 506 |
+
) -> None:
|
| 507 |
+
# if syncing an old run, we can skip this logic
|
| 508 |
+
if history_dict.get("_step") is None:
|
| 509 |
+
self._history_assign_step(history, history_dict)
|
| 510 |
+
|
| 511 |
+
update_history: Dict[str, Any] = {}
|
| 512 |
+
# Look for metric matches
|
| 513 |
+
if self._metric_defines or self._metric_globs:
|
| 514 |
+
for hkey, hval in history_dict.items():
|
| 515 |
+
self._history_update_list([hkey], hval, history_dict, update_history)
|
| 516 |
+
|
| 517 |
+
if update_history:
|
| 518 |
+
history_dict.update(update_history)
|
| 519 |
+
for k, v in update_history.items():
|
| 520 |
+
item = history.item.add()
|
| 521 |
+
item.key = k
|
| 522 |
+
item.value_json = json.dumps(v)
|
| 523 |
+
|
| 524 |
+
def handle_history(self, record: Record) -> None:
|
| 525 |
+
history_dict = proto_util.dict_from_proto_list(record.history.item)
|
| 526 |
+
|
| 527 |
+
# Inject _runtime if it is not present
|
| 528 |
+
if history_dict is not None:
|
| 529 |
+
if "_runtime" not in history_dict:
|
| 530 |
+
self._history_assign_runtime(record.history, history_dict)
|
| 531 |
+
|
| 532 |
+
self._history_update(record.history, history_dict)
|
| 533 |
+
self._dispatch_record(record)
|
| 534 |
+
self._save_history(record.history)
|
| 535 |
+
# update summary from history
|
| 536 |
+
updated_keys = self._update_summary(history_dict)
|
| 537 |
+
if updated_keys:
|
| 538 |
+
updated_items = {k: self._consolidated_summary[k] for k in updated_keys}
|
| 539 |
+
self._save_summary(updated_items)
|
| 540 |
+
|
| 541 |
+
def _flush_partial_history(
|
| 542 |
+
self,
|
| 543 |
+
step: Optional[int] = None,
|
| 544 |
+
) -> None:
|
| 545 |
+
if not self._partial_history:
|
| 546 |
+
return
|
| 547 |
+
|
| 548 |
+
history = HistoryRecord()
|
| 549 |
+
for k, v in self._partial_history.items():
|
| 550 |
+
item = history.item.add()
|
| 551 |
+
item.key = k
|
| 552 |
+
item.value_json = json.dumps(v)
|
| 553 |
+
if step is not None:
|
| 554 |
+
history.step.num = step
|
| 555 |
+
self.handle_history(Record(history=history))
|
| 556 |
+
self._partial_history = {}
|
| 557 |
+
|
| 558 |
+
def handle_request_sender_mark_report(self, record: Record) -> None:
|
| 559 |
+
self._dispatch_record(record, always_send=True)
|
| 560 |
+
|
| 561 |
+
def handle_request_status_report(self, record: Record) -> None:
|
| 562 |
+
self._dispatch_record(record, always_send=True)
|
| 563 |
+
|
| 564 |
+
def handle_request_partial_history(self, record: Record) -> None:
|
| 565 |
+
partial_history = record.request.partial_history
|
| 566 |
+
|
| 567 |
+
flush = None
|
| 568 |
+
if partial_history.HasField("action"):
|
| 569 |
+
flush = partial_history.action.flush
|
| 570 |
+
|
| 571 |
+
step = None
|
| 572 |
+
if partial_history.HasField("step"):
|
| 573 |
+
step = partial_history.step.num
|
| 574 |
+
|
| 575 |
+
history_dict = proto_util.dict_from_proto_list(partial_history.item)
|
| 576 |
+
if step is not None:
|
| 577 |
+
if step < self._step:
|
| 578 |
+
if not self._dropped_history:
|
| 579 |
+
message = (
|
| 580 |
+
"Step only supports monotonically increasing values, use define_metric to set a custom x "
|
| 581 |
+
f"axis. For details see: {wburls.wburls.get('wandb_define_metric')}"
|
| 582 |
+
)
|
| 583 |
+
self._internal_messages.warning.append(message)
|
| 584 |
+
self._dropped_history = True
|
| 585 |
+
message = (
|
| 586 |
+
f"(User provided step: {step} is less than current step: {self._step}. "
|
| 587 |
+
f"Dropping entry: {history_dict})."
|
| 588 |
+
)
|
| 589 |
+
self._internal_messages.warning.append(message)
|
| 590 |
+
return
|
| 591 |
+
elif step > self._step:
|
| 592 |
+
self._flush_partial_history()
|
| 593 |
+
self._step = step
|
| 594 |
+
elif flush is None:
|
| 595 |
+
flush = True
|
| 596 |
+
|
| 597 |
+
self._partial_history.update(history_dict)
|
| 598 |
+
|
| 599 |
+
if flush:
|
| 600 |
+
self._flush_partial_history(self._step)
|
| 601 |
+
|
| 602 |
+
def handle_summary(self, record: Record) -> None:
|
| 603 |
+
summary = record.summary
|
| 604 |
+
for item in summary.update:
|
| 605 |
+
if len(item.nested_key) > 0:
|
| 606 |
+
# we use either key or nested_key -- not both
|
| 607 |
+
assert item.key == ""
|
| 608 |
+
key = tuple(item.nested_key)
|
| 609 |
+
else:
|
| 610 |
+
# no counter-assertion here, because technically
|
| 611 |
+
# summary[""] is valid
|
| 612 |
+
key = (item.key,)
|
| 613 |
+
|
| 614 |
+
target = self._consolidated_summary
|
| 615 |
+
|
| 616 |
+
# recurse down the dictionary structure:
|
| 617 |
+
for prop in key[:-1]:
|
| 618 |
+
target = target[prop]
|
| 619 |
+
|
| 620 |
+
# use the last element of the key to write the leaf:
|
| 621 |
+
target[key[-1]] = json.loads(item.value_json)
|
| 622 |
+
|
| 623 |
+
for item in summary.remove:
|
| 624 |
+
if len(item.nested_key) > 0:
|
| 625 |
+
# we use either key or nested_key -- not both
|
| 626 |
+
assert item.key == ""
|
| 627 |
+
key = tuple(item.nested_key)
|
| 628 |
+
else:
|
| 629 |
+
# no counter-assertion here, because technically
|
| 630 |
+
# summary[""] is valid
|
| 631 |
+
key = (item.key,)
|
| 632 |
+
|
| 633 |
+
target = self._consolidated_summary
|
| 634 |
+
|
| 635 |
+
# recurse down the dictionary structure:
|
| 636 |
+
for prop in key[:-1]:
|
| 637 |
+
target = target[prop]
|
| 638 |
+
|
| 639 |
+
# use the last element of the key to erase the leaf:
|
| 640 |
+
del target[key[-1]]
|
| 641 |
+
|
| 642 |
+
self._save_summary(self._consolidated_summary)
|
| 643 |
+
|
| 644 |
+
def handle_exit(self, record: Record) -> None:
|
| 645 |
+
if self._track_time is not None:
|
| 646 |
+
self._accumulate_time += time.time() - self._track_time
|
| 647 |
+
record.exit.runtime = int(self._accumulate_time)
|
| 648 |
+
self._dispatch_record(record, always_send=True)
|
| 649 |
+
|
| 650 |
+
def handle_final(self, record: Record) -> None:
|
| 651 |
+
self._dispatch_record(record, always_send=True)
|
| 652 |
+
|
| 653 |
+
def handle_preempting(self, record: Record) -> None:
|
| 654 |
+
self._dispatch_record(record)
|
| 655 |
+
|
| 656 |
+
def handle_header(self, record: Record) -> None:
|
| 657 |
+
self._dispatch_record(record)
|
| 658 |
+
|
| 659 |
+
def handle_footer(self, record: Record) -> None:
|
| 660 |
+
self._dispatch_record(record)
|
| 661 |
+
|
| 662 |
+
def handle_request_check_version(self, record: Record) -> None:
|
| 663 |
+
if self._settings._offline:
|
| 664 |
+
result = proto_util._result_from_record(record)
|
| 665 |
+
self._respond_result(result)
|
| 666 |
+
else:
|
| 667 |
+
self._dispatch_record(record)
|
| 668 |
+
|
| 669 |
+
def handle_request_attach(self, record: Record) -> None:
|
| 670 |
+
result = proto_util._result_from_record(record)
|
| 671 |
+
attach_id = record.request.attach.attach_id
|
| 672 |
+
assert attach_id
|
| 673 |
+
assert self._run_proto
|
| 674 |
+
result.response.attach_response.run.CopyFrom(self._run_proto)
|
| 675 |
+
self._respond_result(result)
|
| 676 |
+
|
| 677 |
+
def handle_request_log_artifact(self, record: Record) -> None:
|
| 678 |
+
self._dispatch_record(record)
|
| 679 |
+
|
| 680 |
+
def handle_telemetry(self, record: Record) -> None:
|
| 681 |
+
self._dispatch_record(record)
|
| 682 |
+
|
| 683 |
+
def handle_request_run_start(self, record: Record) -> None:
|
| 684 |
+
run_start = record.request.run_start
|
| 685 |
+
assert run_start
|
| 686 |
+
assert run_start.run
|
| 687 |
+
|
| 688 |
+
self._run_proto = run_start.run
|
| 689 |
+
|
| 690 |
+
self._run_start_time = run_start.run.start_time.ToMicroseconds() / 1e6
|
| 691 |
+
|
| 692 |
+
self._track_time = time.time()
|
| 693 |
+
if run_start.run.resumed and run_start.run.runtime:
|
| 694 |
+
self._accumulate_time = run_start.run.runtime
|
| 695 |
+
else:
|
| 696 |
+
self._accumulate_time = 0
|
| 697 |
+
|
| 698 |
+
# system monitor
|
| 699 |
+
self._system_monitor = SystemMonitor(
|
| 700 |
+
self._settings,
|
| 701 |
+
self._interface,
|
| 702 |
+
)
|
| 703 |
+
if not self._settings._disable_stats:
|
| 704 |
+
self._system_monitor.start()
|
| 705 |
+
if not self._settings._disable_meta and not run_start.run.resumed:
|
| 706 |
+
self._system_monitor.probe(publish=True)
|
| 707 |
+
|
| 708 |
+
self._tb_watcher = tb_watcher.TBWatcher(
|
| 709 |
+
self._settings, interface=self._interface, run_proto=run_start.run
|
| 710 |
+
)
|
| 711 |
+
|
| 712 |
+
if run_start.run.resumed or run_start.run.forked:
|
| 713 |
+
self._step = run_start.run.starting_step
|
| 714 |
+
result = proto_util._result_from_record(record)
|
| 715 |
+
self._respond_result(result)
|
| 716 |
+
|
| 717 |
+
def handle_request_resume(self, record: Record) -> None:
|
| 718 |
+
if self._system_monitor is not None:
|
| 719 |
+
logger.info("starting system metrics thread")
|
| 720 |
+
self._system_monitor.start()
|
| 721 |
+
|
| 722 |
+
if self._track_time is not None:
|
| 723 |
+
self._accumulate_time += time.time() - self._track_time
|
| 724 |
+
self._track_time = time.time()
|
| 725 |
+
|
| 726 |
+
def handle_request_pause(self, record: Record) -> None:
|
| 727 |
+
if self._system_monitor is not None:
|
| 728 |
+
logger.info("stopping system metrics thread")
|
| 729 |
+
self._system_monitor.finish()
|
| 730 |
+
if self._track_time is not None:
|
| 731 |
+
self._accumulate_time += time.time() - self._track_time
|
| 732 |
+
self._track_time = None
|
| 733 |
+
|
| 734 |
+
def handle_request_poll_exit(self, record: Record) -> None:
|
| 735 |
+
self._dispatch_record(record, always_send=True)
|
| 736 |
+
|
| 737 |
+
def handle_request_stop_status(self, record: Record) -> None:
|
| 738 |
+
self._dispatch_record(record)
|
| 739 |
+
|
| 740 |
+
def handle_request_network_status(self, record: Record) -> None:
|
| 741 |
+
self._dispatch_record(record)
|
| 742 |
+
|
| 743 |
+
def handle_request_internal_messages(self, record: Record) -> None:
|
| 744 |
+
result = proto_util._result_from_record(record)
|
| 745 |
+
result.response.internal_messages_response.messages.CopyFrom(
|
| 746 |
+
self._internal_messages
|
| 747 |
+
)
|
| 748 |
+
self._internal_messages.Clear()
|
| 749 |
+
self._respond_result(result)
|
| 750 |
+
|
| 751 |
+
def handle_request_status(self, record: Record) -> None:
|
| 752 |
+
result = proto_util._result_from_record(record)
|
| 753 |
+
self._respond_result(result)
|
| 754 |
+
|
| 755 |
+
def handle_request_get_summary(self, record: Record) -> None:
|
| 756 |
+
result = proto_util._result_from_record(record)
|
| 757 |
+
for key, value in self._consolidated_summary.items():
|
| 758 |
+
item = SummaryItem()
|
| 759 |
+
item.key = key
|
| 760 |
+
item.value_json = json.dumps(value)
|
| 761 |
+
result.response.get_summary_response.item.append(item)
|
| 762 |
+
self._respond_result(result)
|
| 763 |
+
|
| 764 |
+
def handle_request_get_system_metrics(self, record: Record) -> None:
|
| 765 |
+
result = proto_util._result_from_record(record)
|
| 766 |
+
if self._system_monitor is None:
|
| 767 |
+
return
|
| 768 |
+
|
| 769 |
+
buffer = self._system_monitor.buffer
|
| 770 |
+
for key, samples in buffer.items():
|
| 771 |
+
buff = []
|
| 772 |
+
for s in samples:
|
| 773 |
+
sms = SystemMetricSample()
|
| 774 |
+
sms.timestamp.FromMicroseconds(int(s[0] * 1e6))
|
| 775 |
+
sms.value = s[1]
|
| 776 |
+
buff.append(sms)
|
| 777 |
+
|
| 778 |
+
result.response.get_system_metrics_response.system_metrics[key].CopyFrom(
|
| 779 |
+
SystemMetricsBuffer(record=buff)
|
| 780 |
+
)
|
| 781 |
+
|
| 782 |
+
self._respond_result(result)
|
| 783 |
+
|
| 784 |
+
def handle_tbrecord(self, record: Record) -> None:
|
| 785 |
+
logger.info("handling tbrecord: %s", record)
|
| 786 |
+
if self._tb_watcher:
|
| 787 |
+
tbrecord = record.tbrecord
|
| 788 |
+
self._tb_watcher.add(tbrecord.log_dir, tbrecord.save, tbrecord.root_dir)
|
| 789 |
+
self._dispatch_record(record)
|
| 790 |
+
|
| 791 |
+
def _handle_defined_metric(self, record: Record) -> None:
|
| 792 |
+
metric = record.metric
|
| 793 |
+
if metric._control.overwrite:
|
| 794 |
+
self._metric_defines[metric.name].CopyFrom(metric)
|
| 795 |
+
else:
|
| 796 |
+
self._metric_defines[metric.name].MergeFrom(metric)
|
| 797 |
+
|
| 798 |
+
# before dispatching, make sure step_metric is defined, if not define it and
|
| 799 |
+
# dispatch it locally first
|
| 800 |
+
metric = self._metric_defines[metric.name]
|
| 801 |
+
if metric.step_metric and metric.step_metric not in self._metric_defines:
|
| 802 |
+
m = MetricRecord(name=metric.step_metric)
|
| 803 |
+
self._metric_defines[metric.step_metric] = m
|
| 804 |
+
mr = Record()
|
| 805 |
+
mr.metric.CopyFrom(m)
|
| 806 |
+
mr.control.local = True # Don't store this, just send it
|
| 807 |
+
self._dispatch_record(mr)
|
| 808 |
+
|
| 809 |
+
self._dispatch_record(record)
|
| 810 |
+
|
| 811 |
+
def _handle_glob_metric(self, record: Record) -> None:
|
| 812 |
+
metric = record.metric
|
| 813 |
+
if metric._control.overwrite:
|
| 814 |
+
self._metric_globs[metric.glob_name].CopyFrom(metric)
|
| 815 |
+
else:
|
| 816 |
+
self._metric_globs[metric.glob_name].MergeFrom(metric)
|
| 817 |
+
self._dispatch_record(record)
|
| 818 |
+
|
| 819 |
+
def handle_metric(self, record: Record) -> None:
|
| 820 |
+
"""Handle MetricRecord.
|
| 821 |
+
|
| 822 |
+
Walkthrough of the life of a MetricRecord:
|
| 823 |
+
|
| 824 |
+
Metric defined:
|
| 825 |
+
- run.define_metric() parses arguments create wandb_metric.Metric
|
| 826 |
+
- build MetricRecord publish to interface
|
| 827 |
+
- handler (this function) keeps list of metrics published:
|
| 828 |
+
- self._metric_defines: Fully defined metrics
|
| 829 |
+
- self._metric_globs: metrics that have a wildcard
|
| 830 |
+
- dispatch writer and sender thread
|
| 831 |
+
- writer: records are saved to persistent store
|
| 832 |
+
- sender: fully defined metrics get mapped into metadata for UI
|
| 833 |
+
|
| 834 |
+
History logged:
|
| 835 |
+
- handle_history
|
| 836 |
+
- check if metric matches _metric_defines
|
| 837 |
+
- if not, check if metric matches _metric_globs
|
| 838 |
+
- if _metric globs match, generate defined metric and call _handle_metric
|
| 839 |
+
|
| 840 |
+
Args:
|
| 841 |
+
record (Record): Metric record to process
|
| 842 |
+
"""
|
| 843 |
+
if record.metric.name:
|
| 844 |
+
self._handle_defined_metric(record)
|
| 845 |
+
elif record.metric.glob_name:
|
| 846 |
+
self._handle_glob_metric(record)
|
| 847 |
+
|
| 848 |
+
def handle_request_sampled_history(self, record: Record) -> None:
|
| 849 |
+
result = proto_util._result_from_record(record)
|
| 850 |
+
for key, sampled in self._sampled_history.items():
|
| 851 |
+
item = SampledHistoryItem()
|
| 852 |
+
item.key = key
|
| 853 |
+
values: Iterable[Any] = sampled.get()
|
| 854 |
+
if all(isinstance(i, numbers.Integral) for i in values):
|
| 855 |
+
try:
|
| 856 |
+
item.values_int.extend(values)
|
| 857 |
+
except ValueError:
|
| 858 |
+
# it is safe to ignore these as this is for display information
|
| 859 |
+
pass
|
| 860 |
+
elif all(isinstance(i, numbers.Real) for i in values):
|
| 861 |
+
item.values_float.extend(values)
|
| 862 |
+
result.response.sampled_history_response.item.append(item)
|
| 863 |
+
self._respond_result(result)
|
| 864 |
+
|
| 865 |
+
def handle_request_server_info(self, record: Record) -> None:
    """Forward a server-info request downstream unconditionally."""
    self._dispatch_record(record, always_send=True)
|
| 867 |
+
|
| 868 |
+
def handle_request_keepalive(self, record: Record) -> None:
    """Handle a keepalive request.

    Keepalive is a noop, we just want to verify transport is alive.
    """
    # record is intentionally unused: receiving it at all is the signal.
|
| 873 |
+
|
| 874 |
+
def handle_request_run_status(self, record: Record) -> None:
    """Forward a run-status request downstream unconditionally."""
    self._dispatch_record(record, always_send=True)
|
| 876 |
+
|
| 877 |
+
def handle_request_shutdown(self, record: Record) -> None:
    """Acknowledge a shutdown request, then signal the handler loop to stop."""
    # TODO(jhr): should we drain things and stop new requests from coming in?
    result = proto_util._result_from_record(record)
    # Respond first so the requester receives an ack before we stop.
    self._respond_result(result)
    self._stopped.set()
|
| 882 |
+
|
| 883 |
+
def finish(self) -> None:
    """Shut down components owned by the handler (system monitor, TB watcher)."""
    logger.info("shutting down handler")
    if self._system_monitor is not None:
        self._system_monitor.finish()
    if self._tb_watcher:
        self._tb_watcher.finish()
    # self._context_keeper._debug_print_orphans()
|
| 890 |
+
|
| 891 |
+
def __next__(self) -> Record:
    """Block until the next record is available on the input queue."""
    return self._record_q.get(block=True)

# Legacy iterator alias (Python 2 style); kept for backward compatibility.
next = __next__
|
| 895 |
+
|
| 896 |
+
def _history_assign_runtime(
    self,
    history: HistoryRecord,
    history_dict: Dict[str, Any],
) -> None:
    """Derive a ``_runtime`` value from ``_timestamp`` and attach it.

    Writes the computed runtime into ``history_dict`` and appends a
    matching JSON-encoded item to the history proto. No-op when the
    record carries no timestamp.
    """
    # _runtime calculation is meaningless if there is no _timestamp
    if "_timestamp" not in history_dict:
        return
    start = self._run_start_time
    # Offline sync has no recorded start time; anchor on the first
    # tfevent timestamp we see.
    if start is None:
        start = history_dict["_timestamp"]
        self._run_start_time = start
    runtime = history_dict["_timestamp"] - start
    history_dict["_runtime"] = runtime
    entry = history.item.add()
    entry.key = "_runtime"
    entry.value_json = json.dumps(runtime)
|
parrot/lib/python3.10/site-packages/wandb/sdk/internal/internal_api.py
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
parrot/lib/python3.10/site-packages/wandb/sdk/internal/job_builder.py
ADDED
|
@@ -0,0 +1,629 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""job builder."""
|
| 2 |
+
|
| 3 |
+
import json
|
| 4 |
+
import logging
|
| 5 |
+
import os
|
| 6 |
+
import re
|
| 7 |
+
import sys
|
| 8 |
+
from typing import TYPE_CHECKING, Any, Callable, Dict, List, Optional, Tuple, Union
|
| 9 |
+
|
| 10 |
+
import wandb
|
| 11 |
+
from wandb.sdk.artifacts.artifact import Artifact
|
| 12 |
+
from wandb.sdk.data_types._dtypes import TypeRegistry
|
| 13 |
+
from wandb.sdk.internal.internal_api import Api
|
| 14 |
+
from wandb.sdk.lib.filenames import DIFF_FNAME, METADATA_FNAME, REQUIREMENTS_FNAME
|
| 15 |
+
from wandb.util import make_artifact_name_safe
|
| 16 |
+
|
| 17 |
+
from .settings_static import SettingsStatic
|
| 18 |
+
|
| 19 |
+
if sys.version_info >= (3, 8):
|
| 20 |
+
from typing import Literal, TypedDict
|
| 21 |
+
else:
|
| 22 |
+
from typing_extensions import Literal, TypedDict
|
| 23 |
+
|
| 24 |
+
_logger = logging.getLogger(__name__)
|
| 25 |
+
|
| 26 |
+
if TYPE_CHECKING:
|
| 27 |
+
from wandb.proto.wandb_internal_pb2 import ArtifactRecord
|
| 28 |
+
|
| 29 |
+
FROZEN_REQUIREMENTS_FNAME = "requirements.frozen.txt"
|
| 30 |
+
JOB_FNAME = "wandb-job.json"
|
| 31 |
+
JOB_ARTIFACT_TYPE = "job"
|
| 32 |
+
|
| 33 |
+
LOG_LEVEL = Literal["log", "warn", "error"]
|
| 34 |
+
|
| 35 |
+
|
| 36 |
+
class Version:
    """A minimal major.minor.patch version supporting ``<`` and ``==``.

    Used to pick the lowest wandb version that understands a given
    job-source dict; only ordering and equality are needed.
    """

    def __init__(self, major: int, minor: int, patch: int):
        self._major = major
        self._minor = minor
        self._patch = patch

    def _as_tuple(self) -> Tuple[int, int, int]:
        # Lexicographic tuple comparison matches semver ordering.
        return (self._major, self._minor, self._patch)

    def __repr__(self) -> str:
        return "{}.{}.{}".format(self._major, self._minor, self._patch)

    def __lt__(self, other: "Version") -> bool:
        return self._as_tuple() < other._as_tuple()

    def __eq__(self, other: object) -> bool:
        if not isinstance(other, Version):
            return NotImplemented
        return self._as_tuple() == other._as_tuple()
|
| 64 |
+
|
| 65 |
+
|
| 66 |
+
# Minimum supported wandb version for keys in the source dict of wandb-job.json
# (keys absent from this map carry no version requirement).
SOURCE_KEYS_MIN_SUPPORTED_VERSION = {
    "dockerfile": Version(0, 17, 0),
    "build_context": Version(0, 17, 0),
}
|
| 71 |
+
|
| 72 |
+
|
| 73 |
+
class GitInfo(TypedDict):
    # Remote URL and commit hash pinning the job's git source.
    remote: str
    commit: str


class GitSourceDict(TypedDict):
    # Job source backed by a git repository checkout.
    git: GitInfo
    entrypoint: List[str]
    notebook: bool
    build_context: Optional[str]
    dockerfile: Optional[str]


class ArtifactSourceDict(TypedDict):
    # Job source backed by a logged code artifact.
    artifact: str
    entrypoint: List[str]
    notebook: bool
    build_context: Optional[str]
    dockerfile: Optional[str]


class ImageSourceDict(TypedDict):
    # Job source backed by a prebuilt container image.
    image: str


class JobSourceDict(TypedDict, total=False):
    # Top-level schema of wandb-job.json (all keys optional).
    _version: str
    source_type: str
    source: Union[GitSourceDict, ArtifactSourceDict, ImageSourceDict]
    input_types: Dict[str, Any]
    output_types: Dict[str, Any]
    runtime: Optional[str]


class ArtifactInfoForJob(TypedDict):
    # Identifier pair for a logged code artifact referenced by a job.
    id: str
    name: str
|
| 110 |
+
|
| 111 |
+
|
| 112 |
+
def get_min_supported_for_source_dict(
    source: Union[GitSourceDict, ArtifactSourceDict, ImageSourceDict],
) -> Optional[Version]:
    """Get the minimum supported wandb version the source dict of wandb-job.json."""
    # Collect the version requirement of every key that has one, then
    # take the smallest; None when no key imposes a requirement.
    required = [
        SOURCE_KEYS_MIN_SUPPORTED_VERSION[key]
        for key in source
        if key in SOURCE_KEYS_MIN_SUPPORTED_VERSION
    ]
    return min(required) if required else None
|
| 123 |
+
|
| 124 |
+
|
| 125 |
+
class JobArtifact(Artifact):
    """Artifact subclass whose type is forced to "job".

    The base Artifact constructor restricts the allowed type strings, so a
    placeholder is passed and the real type is assigned afterwards.
    """

    def __init__(self, name: str, *args: Any, **kwargs: Any):
        super().__init__(name, "placeholder", *args, **kwargs)
        self._type = JOB_ARTIFACT_TYPE  # Get around type restriction.
|
| 129 |
+
|
| 130 |
+
|
| 131 |
+
class JobBuilder:
    """Accumulates run state and builds a Launch job artifact from it."""

    _settings: SettingsStatic
    _metadatafile_path: Optional[str]
    _requirements_path: Optional[str]
    # Run config, used to derive the job's input types.
    _config: Optional[Dict[str, Any]]
    # Run summary, used to derive the job's output types.
    _summary: Optional[Dict[str, Any]]
    # Set when a "code" artifact is logged; enables artifact-sourced jobs.
    _logged_code_artifact: Optional[ArtifactInfoForJob]
    _disable: bool
    _partial_source_id: Optional[str]  # Partial job source artifact id.
    _aliases: List[str]
    _job_seq_id: Optional[str]
    _job_version_alias: Optional[str]
    _is_notebook_run: bool
    _verbose: bool
|
| 145 |
+
|
| 146 |
+
def __init__(self, settings: SettingsStatic, verbose: bool = False):
    """Initialize builder state from run settings.

    Args:
        settings: Static run settings (job source, files dir, flags).
        verbose: When True, also echo log messages to the user terminal.
    """
    self._settings = settings
    self._metadatafile_path = None
    self._requirements_path = None
    self._config = None
    self._summary = None
    self._logged_code_artifact = None
    self._job_seq_id = None
    self._job_version_alias = None
    self._disable = settings.disable_job_creation
    self._partial_source_id = None
    self._aliases = []
    # Explicit user-selected source type, if any; otherwise inferred later.
    self._source_type: Optional[Literal["repo", "artifact", "image"]] = (
        settings.job_source  # type: ignore[assignment]
    )
    self._is_notebook_run = self._get_is_notebook_run()
    self._verbose = verbose
    # True when building a partial job from the CLI rather than a run.
    self._partial = False
|
| 164 |
+
|
| 165 |
+
def set_config(self, config: Dict[str, Any]) -> None:
    # Stash the run config; later converted into the job's input types.
    self._config = config

def set_summary(self, summary: Dict[str, Any]) -> None:
    # Stash the run summary; later converted into the job's output types.
    self._summary = summary

@property
def disable(self) -> bool:
    """Whether job artifact creation is disabled."""
    return self._disable

@disable.setter
def disable(self, val: bool) -> None:
    self._disable = val

@property
def input_types(self) -> Dict[str, Any]:
    """JSON type description derived from the run config."""
    return TypeRegistry.type_of(self._config).to_json()

@property
def output_types(self) -> Dict[str, Any]:
    """JSON type description derived from the run summary."""
    return TypeRegistry.type_of(self._summary).to_json()

def set_partial_source_id(self, source_id: str) -> None:
    # Record the artifact id of a pre-created partial job.
    self._partial_source_id = source_id
|
| 189 |
+
|
| 190 |
+
def _handle_server_artifact(
    self, res: Optional[Dict], artifact: "ArtifactRecord"
) -> None:
    """Record job/code artifact bookkeeping from an ArtifactSaver.save response.

    For "job" artifacts, derives the next version alias (v0 for a fresh
    sequence, vN for the latest, vN+1 otherwise) and the sequence id.
    For "code" artifacts, remembers the artifact so an artifact-sourced
    job can reference it.
    """
    if artifact.type == "job" and res is not None:
        try:
            if res["artifactSequence"]["latestArtifact"] is None:
                # First artifact in the sequence.
                self._job_version_alias = "v0"
            elif res["artifactSequence"]["latestArtifact"]["id"] == res["id"]:
                # This save IS the latest version; reuse its index.
                self._job_version_alias = (
                    f"v{res['artifactSequence']['latestArtifact']['versionIndex']}"
                )
            else:
                # A newer version will follow the current latest.
                self._job_version_alias = f"v{res['artifactSequence']['latestArtifact']['versionIndex'] + 1}"
            self._job_seq_id = res["artifactSequence"]["id"]
        except KeyError as e:
            _logger.info(f"Malformed response from ArtifactSaver.save {e}")
    if artifact.type == "code" and res is not None:
        self._logged_code_artifact = ArtifactInfoForJob(
            {
                "id": res["id"],
                "name": artifact.name,
            }
        )
|
| 213 |
+
|
| 214 |
+
def _build_repo_job_source(
    self,
    program_relpath: str,
    metadata: Dict[str, Any],
) -> Tuple[Optional[GitSourceDict], Optional[str]]:
    """Build a git-repo-backed job source dict and job name.

    Returns (None, None) when a notebook run's target file or roots
    cannot be resolved. Requires git remote and commit in metadata.
    """
    git_info: Dict[str, str] = metadata.get("git", {})
    remote = git_info.get("remote")
    commit = git_info.get("commit")
    root = metadata.get("root")
    assert remote is not None
    assert commit is not None
    if self._is_notebook_run:
        if not os.path.exists(
            os.path.join(os.getcwd(), os.path.basename(program_relpath))
        ):
            return None, None

        if root is None or self._settings._jupyter_root is None:
            _logger.info("target path does not exist, exiting")
            return None, None
        assert self._settings._jupyter_root is not None
        # git notebooks set the root to the git root,
        # jupyter_root contains the path where the jupyter notebook was started
        # program_relpath contains the path from jupyter_root to the file
        # full program path here is actually the relpath from the program to the git root
        full_program_path = os.path.join(
            os.path.relpath(str(self._settings._jupyter_root), root),
            program_relpath,
        )
        full_program_path = os.path.normpath(full_program_path)
        # if the notebook server is started above the git repo need to clear all the ..s
        if full_program_path.startswith(".."):
            split_path = full_program_path.split("/")
            count_dots = 0
            for p in split_path:
                if p == "..":
                    count_dots += 1
            # NOTE(review): each leading ".." pairs with one named component
            # contributed by relpath(jupyter_root, root), hence dropping
            # 2 * count_dots components — deliberate, not an off-by-one.
            full_program_path = "/".join(split_path[2 * count_dots :])
    else:
        full_program_path = program_relpath

    entrypoint = self._get_entrypoint(full_program_path, metadata)
    # TODO: update executable to a method that supports pex
    source: GitSourceDict = {
        "git": {"remote": remote, "commit": commit},
        "entrypoint": entrypoint,
        "notebook": self._is_notebook_run,
        "build_context": metadata.get("build_context"),
        "dockerfile": metadata.get("dockerfile"),
    }
    name = self._make_job_name(f"{remote}_{program_relpath}")

    return source, name
|
| 267 |
+
|
| 268 |
+
def _log_if_verbose(self, message: str, level: LOG_LEVEL) -> None:
    """Log a message at the given level, echoing to the terminal when verbose.

    Always writes to the module logger; additionally forwards to the
    matching wandb terminal printer when the builder is verbose.
    Unknown levels are silently ignored.
    """
    dispatch = {
        "log": (_logger.info, wandb.termlog),
        "warn": (_logger.warning, wandb.termwarn),
        "error": (_logger.error, wandb.termerror),
    }
    handlers = dispatch.get(level)
    if handlers is None:
        return
    logger_fn, term_fn = handlers
    logger_fn(message)
    if self._verbose:
        term_fn(message)
|
| 282 |
+
|
| 283 |
+
def _build_artifact_job_source(
    self,
    program_relpath: str,
    metadata: Dict[str, Any],
) -> Tuple[Optional[ArtifactSourceDict], Optional[str]]:
    """Build a code-artifact-backed job source dict and job name.

    Requires a previously logged code artifact. Returns (None, None)
    when a non-colab notebook run's program file cannot be located.
    """
    assert isinstance(self._logged_code_artifact, dict)
    # TODO: should we just always exit early if the path doesn't exist?
    if self._is_notebook_run and not self._is_colab_run():
        full_program_relpath = os.path.relpath(program_relpath, os.getcwd())
        # if the resolved path doesn't exist, then we shouldn't make a job because it will fail
        if not os.path.exists(full_program_relpath):
            # when users call log code in a notebook the code artifact starts
            # at the directory the notebook is in instead of the jupyter core
            if not os.path.exists(os.path.basename(program_relpath)):
                _logger.info("target path does not exist, exiting")
                self._log_if_verbose(
                    "No program path found when generating artifact job source for a non-colab notebook run. See https://docs.wandb.ai/guides/launch/create-job",
                    "warn",
                )
                return None, None
            full_program_relpath = os.path.basename(program_relpath)
    else:
        full_program_relpath = program_relpath

    entrypoint = self._get_entrypoint(full_program_relpath, metadata)
    # TODO: update executable to a method that supports pex
    source: ArtifactSourceDict = {
        "entrypoint": entrypoint,
        "notebook": self._is_notebook_run,
        "artifact": f"wandb-artifact://_id/{self._logged_code_artifact['id']}",
        "build_context": metadata.get("build_context"),
        "dockerfile": metadata.get("dockerfile"),
    }
    name = self._make_job_name(self._logged_code_artifact["name"])

    return source, name
|
| 319 |
+
|
| 320 |
+
def _build_image_job_source(
    self, metadata: Dict[str, Any]
) -> Tuple[ImageSourceDict, str]:
    """Build a container-image-backed job source dict and job name.

    When the image name carries a well-formed tag, the tag is recorded
    as an artifact alias and stripped from the name used for the job.
    """
    image_name = metadata.get("docker")
    assert isinstance(image_name, str)

    raw_image_name = image_name
    if ":" in image_name:
        tag = image_name.split(":")[-1]

        # if tag looks properly formatted, assume its a tag
        # regex: alphanumeric and "_" "-" "."
        if re.fullmatch(r"([a-zA-Z0-9_\-\.]+)", tag):
            raw_image_name = raw_image_name.replace(f":{tag}", "")
            self._aliases += [tag]

    source: ImageSourceDict = {
        "image": image_name,
    }
    name = self._make_job_name(raw_image_name)

    return source, name
|
| 342 |
+
|
| 343 |
+
def _make_job_name(self, input_str: str) -> str:
    """Use job name from settings if provided, else use programmatic name."""
    configured = self._settings.job_name
    if configured:
        return configured
    return make_artifact_name_safe("job-" + input_str)
|
| 349 |
+
|
| 350 |
+
def _get_entrypoint(
    self,
    program_relpath: str,
    metadata: Dict[str, Any],
) -> List[str]:
    """Return the argv-style command used to launch the job."""
    # if building a partial job from CLI, overwrite entrypoint and notebook
    # should already be in metadata from create_job
    if self._partial:
        if metadata.get("entrypoint"):
            entrypoint: List[str] = metadata["entrypoint"]
            return entrypoint
    # job is being built from a run
    entrypoint = [os.path.basename(sys.executable), program_relpath]

    return entrypoint
|
| 365 |
+
|
| 366 |
+
def _get_is_notebook_run(self) -> bool:
    # True when the settings carry a truthy Jupyter flag.
    return hasattr(self._settings, "_jupyter") and bool(self._settings._jupyter)

def _is_colab_run(self) -> bool:
    # True when the settings carry a truthy Colab flag.
    return hasattr(self._settings, "_colab") and bool(self._settings._colab)
|
| 371 |
+
|
| 372 |
+
def _build_job_source(
    self,
    source_type: str,
    program_relpath: Optional[str],
    metadata: Dict[str, Any],
) -> Tuple[
    Union[GitSourceDict, ArtifactSourceDict, ImageSourceDict, None],
    Optional[str],
]:
    """Construct a job source dict and name from the current run.

    Arguments:
        source_type (str): The type of source to build the job from. One of
            "repo", "artifact", or "image".
        program_relpath: Relative path to the program, when applicable.
        metadata: Parsed wandb-metadata.json contents.

    Returns:
        (source, name), or (None, None) when the environment lacks the
        required ingredients (a warning is emitted in that case).
    """
    source: Union[
        GitSourceDict,
        ArtifactSourceDict,
        ImageSourceDict,
        None,
    ] = None

    if source_type == "repo":
        source, name = self._build_repo_job_source(
            program_relpath or "",
            metadata,
        )
    elif source_type == "artifact":
        source, name = self._build_artifact_job_source(
            program_relpath or "",
            metadata,
        )
    elif source_type == "image" and self._has_image_job_ingredients(metadata):
        source, name = self._build_image_job_source(metadata)
    else:
        source = None

    if source is None:
        if source_type:
            self._log_if_verbose(
                f"Source type is set to '{source_type}' but some required information is missing "
                "from the environment. A job will not be created from this run. See "
                "https://docs.wandb.ai/guides/launch/create-job",
                "warn",
            )
        return None, None

    return source, name
|
| 420 |
+
|
| 421 |
+
def build(
    self,
    api: Api,
    build_context: Optional[str] = None,
    dockerfile: Optional[str] = None,
    base_image: Optional[str] = None,
) -> Optional[Artifact]:
    """Build a job artifact from the current run.

    Arguments:
        api (Api): The API object to use to create the job artifact.
        build_context (Optional[str]): Path within the job source code to
            the image build context. Saved as part of the job for future
            builds.
        dockerfile (Optional[str]): Path within the build context the
            Dockerfile. Saved as part of the job for future builds.
        base_image (Optional[str]): The base image used to run the job code.

    Returns:
        Optional[Artifact]: The job artifact if it was successfully built,
        otherwise None.
    """
    _logger.info("Attempting to build job artifact")

    # If a partial job was used, write the input/output types to the metadata
    # rather than building a new job version.
    if self._partial_source_id is not None:
        new_metadata = {
            "input_types": {"@wandb.config": self.input_types},
            "output_types": self.output_types,
        }
        api.update_artifact_metadata(
            self._partial_source_id,
            new_metadata,
        )
        return None

    # A frozen requirements file is mandatory for a reproducible job.
    if not os.path.exists(
        os.path.join(self._settings.files_dir, REQUIREMENTS_FNAME)
    ):
        self._log_if_verbose(
            "No requirements.txt found, not creating job artifact. See https://docs.wandb.ai/guides/launch/create-job",
            "warn",
        )
        return None
    metadata = self._handle_metadata_file()
    if metadata is None:
        self._log_if_verbose(
            f"Ensure read and write access to run files dir: {self._settings.files_dir}, control this via the WANDB_DIR env var. See https://docs.wandb.ai/guides/track/environment-variables",
            "warn",
        )
        return None

    runtime: Optional[str] = metadata.get("python")
    # can't build a job without a python version
    if runtime is None:
        self._log_if_verbose(
            "No python version found in metadata, not creating job artifact. See https://docs.wandb.ai/guides/launch/create-job",
            "warn",
        )
        return None

    input_types = TypeRegistry.type_of(self._config).to_json()
    output_types = TypeRegistry.type_of(self._summary).to_json()

    name: Optional[str] = None
    source_info: Optional[JobSourceDict] = None

    # configure job from environment
    source_type = self._get_source_type(metadata)
    if not source_type:
        # if source_type is None, then we don't have enough information to build a job
        # if the user intended to create a job, warn.
        if (
            self._settings.job_name
            or self._settings.job_source
            or self._source_type
        ):
            self._log_if_verbose(
                "No source type found, not creating job artifact", "warn"
            )
        return None

    program_relpath = self._get_program_relpath(source_type, metadata)
    # Image-sourced and partial jobs do not need a program path.
    if not self._partial and source_type != "image" and not program_relpath:
        self._log_if_verbose(
            "No program path found, not creating job artifact. See https://docs.wandb.ai/guides/launch/create-job",
            "warn",
        )
        return None

    source, name = self._build_job_source(
        source_type,
        program_relpath,
        metadata,
    )
    if source is None:
        return None

    # Caller-supplied build settings override anything from metadata.
    if build_context:
        source["build_context"] = build_context  # type: ignore[typeddict-item]
    if dockerfile:
        source["dockerfile"] = dockerfile  # type: ignore[typeddict-item]
    if base_image:
        source["base_image"] = base_image  # type: ignore[typeddict-item]

    # Pop any keys that are initialized to None. The current TypedDict
    # system for source dicts requires all keys to be present, but we
    # don't want to include keys that are None in the final dict.
    for key in list(source.keys()):
        if source[key] is None:  # type: ignore[literal-required]
            source.pop(key)  # type: ignore[literal-require,misc]

    source_info = {
        "_version": str(get_min_supported_for_source_dict(source) or "v0"),
        "source_type": source_type,
        "source": source,
        "input_types": input_types,
        "output_types": output_types,
        "runtime": runtime,
    }

    assert source_info is not None
    assert name is not None

    artifact = JobArtifact(name)

    _logger.info("adding wandb-job metadata file")
    with artifact.new_file("wandb-job.json") as f:
        f.write(json.dumps(source_info, indent=4))

    artifact.add_file(
        os.path.join(self._settings.files_dir, REQUIREMENTS_FNAME),
        name=FROZEN_REQUIREMENTS_FNAME,
    )

    if source_type == "repo":
        # add diff
        if os.path.exists(os.path.join(self._settings.files_dir, DIFF_FNAME)):
            artifact.add_file(
                os.path.join(self._settings.files_dir, DIFF_FNAME),
                name=DIFF_FNAME,
            )

    return artifact
|
| 566 |
+
|
| 567 |
+
def _get_source_type(self, metadata: Dict[str, Any]) -> Optional[str]:
    """Pick the job source type: explicit setting first, else inferred.

    Inference order is repo -> artifact -> image; None when no source
    ingredients are available.
    """
    if self._source_type:
        return self._source_type

    if self._has_git_job_ingredients(metadata):
        _logger.info("is repo sourced job")
        return "repo"

    if self._has_artifact_job_ingredients():
        _logger.info("is artifact sourced job")
        return "artifact"

    if self._has_image_job_ingredients(metadata):
        _logger.info("is image sourced job")
        return "image"

    _logger.info("no source found")
    return None
|
| 585 |
+
|
| 586 |
+
def _get_program_relpath(
    self, source_type: str, metadata: Dict[str, Any]
) -> Optional[str]:
    """Resolve the program's relative path from run metadata."""
    if self._is_notebook_run:
        _logger.info("run is notebook based run")
        program = metadata.get("program")

        if not program:
            self._log_if_verbose(
                "Notebook 'program' path not found in metadata. See https://docs.wandb.ai/guides/launch/create-job",
                "warn",
            )

        return program

    if source_type == "artifact" or self._settings.job_source == "artifact":
        # if the job is set to be an artifact, use relpath guaranteed
        # to be correct. 'codePath' uses the root path when in git repo
        # fallback to codePath if strictly local relpath not present
        return metadata.get("codePathLocal") or metadata.get("codePath")

    return metadata.get("codePath")
|
| 608 |
+
|
| 609 |
+
def _handle_metadata_file(
    self,
) -> Optional[Dict]:
    """Load the run's wandb-metadata file if present, else return None."""
    metadata_path = os.path.join(self._settings.files_dir, METADATA_FNAME)
    if not os.path.exists(metadata_path):
        return None
    with open(metadata_path) as f:
        metadata: Dict = json.load(f)
    return metadata
|
| 618 |
+
|
| 619 |
+
def _has_git_job_ingredients(self, metadata: Dict[str, Any]) -> bool:
    """A repo job needs a git remote + commit (and a root for notebooks)."""
    git_info: Dict[str, str] = metadata.get("git", {})
    if self._is_notebook_run and metadata.get("root") is None:
        return False
    return git_info.get("remote") is not None and git_info.get("commit") is not None

def _has_artifact_job_ingredients(self) -> bool:
    # An artifact job needs a previously logged code artifact.
    return self._logged_code_artifact is not None

def _has_image_job_ingredients(self, metadata: Dict[str, Any]) -> bool:
    # An image job needs a docker image name in the metadata.
    return metadata.get("docker") is not None
|
parrot/lib/python3.10/site-packages/wandb/sdk/internal/progress.py
ADDED
|
@@ -0,0 +1,83 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""progress."""
|
| 2 |
+
|
| 3 |
+
import os
|
| 4 |
+
import sys
|
| 5 |
+
from typing import IO, TYPE_CHECKING, Optional
|
| 6 |
+
|
| 7 |
+
from wandb.errors import CommError
|
| 8 |
+
|
| 9 |
+
if TYPE_CHECKING:
    # Protocol is only needed for static type checking; it moved into
    # typing in 3.8, so older interpreters fall back to typing_extensions.
    if sys.version_info >= (3, 8):
        from typing import Protocol
    else:
        from typing_extensions import Protocol

    class ProgressFn(Protocol):
        # Callback signature used to report upload progress: invoked with the
        # number of newly transferred bytes and a running byte count.
        def __call__(self, new_bytes: int, total_bytes: int) -> None:
            pass
|
| 18 |
+
|
| 19 |
+
|
| 20 |
+
class Progress:
    """A helper class for displaying progress.

    Wraps a binary file object; every ``read`` invokes
    ``callback(new_bytes, bytes_read_so_far)`` so callers can surface
    upload progress. Attributes not defined here are proxied to the
    underlying file object, so instances are usable as file-likes.
    """

    # Chunk size used when iterating (1 MiB).
    ITER_BYTES = 1024 * 1024

    def __init__(
        self, file: IO[bytes], callback: Optional["ProgressFn"] = None
    ) -> None:
        """Wrap *file*; *callback* defaults to a no-op."""
        self.file = file
        if callback is None:

            def callback_(new_bytes: int, total_bytes: int) -> None:
                pass

            callback = callback_

        self.callback: "ProgressFn" = callback
        self.bytes_read = 0
        # Size is captured once up front; read() treats a file that later
        # shrinks below this as an error.
        self.len = os.fstat(file.fileno()).st_size

    def read(self, size: int = -1) -> bytes:
        """Read bytes and call the callback."""
        bites = self.file.read(size)
        self.bytes_read += len(bites)
        if not bites and self.bytes_read < self.len:
            # Files shrinking during uploads causes request timeouts. Maybe
            # we could avoid those by updating the self.len in real-time, but
            # files getting truncated while uploading seems like something
            # that shouldn't really be happening anyway.
            raise CommError(
                "File {} size shrank from {} to {} while it was being uploaded.".format(
                    self.file.name, self.len, self.bytes_read
                )
            )
        # Growing files are also likely to be bad, but our code didn't break
        # on those in the past, so it's riskier to make that an error now.
        self.callback(len(bites), self.bytes_read)
        return bites

    def rewind(self) -> None:
        """Seek back to the start and report the negative progress delta."""
        self.callback(-self.bytes_read, 0)
        self.bytes_read = 0
        self.file.seek(0)

    def __getattr__(self, name: str):
        """Fallback to the file object for attrs not defined here."""
        if hasattr(self.file, name):
            return getattr(self.file, name)
        else:
            # Include the attribute name so failures are diagnosable (a bare
            # AttributeError hides which lookup failed).
            raise AttributeError(name)

    def __iter__(self):
        return self

    def __next__(self):
        bites = self.read(self.ITER_BYTES)
        if len(bites) == 0:
            raise StopIteration
        return bites

    def __len__(self) -> int:
        return self.len

    # Python 2-style iterator alias, kept for backward compatibility.
    next = __next__
|
parrot/lib/python3.10/site-packages/wandb/sdk/internal/thread_local_settings.py
ADDED
|
@@ -0,0 +1,18 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import threading
|
| 2 |
+
from typing import Dict, Optional
|
| 3 |
+
|
| 4 |
+
|
| 5 |
+
# Context variable for setting API settings (api keys, etc.) for internal and public apis thread-locally
|
| 6 |
+
# TODO: move this into actual settings
|
| 7 |
+
class _ThreadLocalApiSettings(threading.local):
|
| 8 |
+
api_key: Optional[str]
|
| 9 |
+
cookies: Optional[Dict]
|
| 10 |
+
headers: Optional[Dict]
|
| 11 |
+
|
| 12 |
+
def __init__(self) -> None:
|
| 13 |
+
self.api_key = None
|
| 14 |
+
self.cookies = None
|
| 15 |
+
self.headers = None
|
| 16 |
+
|
| 17 |
+
|
| 18 |
+
_thread_local_api_settings: _ThreadLocalApiSettings = _ThreadLocalApiSettings()
|
parrot/lib/python3.10/site-packages/wandb/sdk/internal/update.py
ADDED
|
@@ -0,0 +1,113 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from typing import Dict, Optional, Tuple
|
| 2 |
+
|
| 3 |
+
import requests
|
| 4 |
+
|
| 5 |
+
import wandb
|
| 6 |
+
|
| 7 |
+
|
| 8 |
+
def _find_available(
    current_version: str,
) -> Optional[Tuple[str, bool, bool, bool, Optional[str]]]:
    """Query PyPI for a newer wandb release than *current_version*.

    Returns:
        None when no upgrade is available (or the check fails for any
        reason), otherwise a tuple ``(latest_version, pip_prerelease,
        deleted, yanked, yanked_reason)``.
    """
    from wandb.util import parse_version

    pypi_url = "https://pypi.org/pypi/wandb/json"

    yanked_dict = {}
    try:
        # Fetch asynchronously with a hard 5s cap so a slow PyPI cannot
        # block startup; any failure just skips the version check.
        async_requests_get = wandb.util.async_call(requests.get, timeout=5)
        data, _thread = async_requests_get(pypi_url, timeout=3)
        if not data or isinstance(data, Exception):
            return None
        data = data.json()
        latest_version = data["info"]["version"]
        release_list = data["releases"].keys()
        # Record the yank reason for every release that has a yanked file.
        for version, fields in data["releases"].items():
            for item in fields:
                if item.get("yanked"):
                    yanked_dict[version] = item.get("yanked_reason")
    except Exception:
        # Any issues whatsoever, just skip the latest version check.
        return None

    # Return if no update is available
    pip_prerelease = False
    deleted = False
    yanked = False
    yanked_reason = None
    parsed_current_version = parse_version(current_version)

    # Check if current version has been yanked or deleted
    # NOTE: we will not return yanked or deleted if there is nothing to upgrade to
    if current_version in release_list:
        yanked = current_version in yanked_dict
        yanked_reason = yanked_dict.get(current_version)
    else:
        deleted = True

    # Check pre-releases
    if parse_version(latest_version) <= parsed_current_version:
        # pre-releases are not included in latest_version
        # so if we are currently running a pre-release we check more
        if not parsed_current_version.is_prerelease:
            return None
        # Candidates are pre-releases sharing the current base_version.
        candidates = sorted(
            v
            for v in map(parse_version, release_list)
            if v.is_prerelease
            and v.base_version == parsed_current_version.base_version
        )
        if not candidates:
            return None

        parsed_latest_version = candidates[-1]
        if parsed_latest_version <= parsed_current_version:
            return None
        latest_version = str(parsed_latest_version)
        pip_prerelease = True

    return latest_version, pip_prerelease, deleted, yanked, yanked_reason
|
| 74 |
+
|
| 75 |
+
|
| 76 |
+
def check_available(current_version: str) -> Optional[Dict[str, Optional[str]]]:
    """Build user-facing upgrade/yank/delete messages for *current_version*.

    Returns None when there is nothing to report (up to date, or PyPI was
    unreachable); otherwise a dict with "upgrade_message", "yank_message"
    and "delete_message" entries, each possibly None.
    """
    package_info = _find_available(current_version)
    if not package_info:
        return None

    wandb_module_name = "wandb"

    latest_version, pip_prerelease, deleted, yanked, yanked_reason = package_info

    pre_flag = " --pre" if pip_prerelease else ""
    upgrade_message = (
        "{} version {} is available! To upgrade, please run:\n"
        " $ pip install {} --upgrade{}".format(
            wandb_module_name,
            latest_version,
            wandb_module_name,
            pre_flag,
        )
    )

    delete_message = (
        "{} version {} has been retired! Please upgrade.".format(
            wandb_module_name,
            current_version,
        )
        if deleted
        else None
    )

    yank_message = None
    if yanked:
        reason_message = "({}) ".format(yanked_reason) if yanked_reason else ""
        yank_message = "{} version {} has been recalled! {}Please upgrade.".format(
            wandb_module_name,
            current_version,
            reason_message,
        )

    # A new version is available!
    return {
        "upgrade_message": upgrade_message,
        "yank_message": yank_message,
        "delete_message": delete_message,
    }
|
parrot/lib/python3.10/site-packages/wandb/sdk/launch/__pycache__/__init__.cpython-310.pyc
ADDED
|
Binary file (483 Bytes). View file
|
|
|
parrot/lib/python3.10/site-packages/wandb/sdk/launch/__pycache__/_launch.cpython-310.pyc
ADDED
|
Binary file (9.52 kB). View file
|
|
|
parrot/lib/python3.10/site-packages/wandb/sdk/launch/__pycache__/_launch_add.cpython-310.pyc
ADDED
|
Binary file (6.84 kB). View file
|
|
|
parrot/lib/python3.10/site-packages/wandb/sdk/launch/__pycache__/_project_spec.cpython-310.pyc
ADDED
|
Binary file (18.9 kB). View file
|
|
|
parrot/lib/python3.10/site-packages/wandb/sdk/launch/__pycache__/create_job.cpython-310.pyc
ADDED
|
Binary file (12.4 kB). View file
|
|
|
parrot/lib/python3.10/site-packages/wandb/sdk/launch/__pycache__/errors.cpython-310.pyc
ADDED
|
Binary file (744 Bytes). View file
|
|
|
parrot/lib/python3.10/site-packages/wandb/sdk/launch/__pycache__/git_reference.cpython-310.pyc
ADDED
|
Binary file (3.12 kB). View file
|
|
|
parrot/lib/python3.10/site-packages/wandb/sdk/launch/__pycache__/loader.cpython-310.pyc
ADDED
|
Binary file (7.13 kB). View file
|
|
|
parrot/lib/python3.10/site-packages/wandb/sdk/launch/__pycache__/utils.cpython-310.pyc
ADDED
|
Binary file (21.6 kB). View file
|
|
|
parrot/lib/python3.10/site-packages/wandb/sdk/launch/__pycache__/wandb_reference.cpython-310.pyc
ADDED
|
Binary file (3.77 kB). View file
|
|
|
parrot/lib/python3.10/site-packages/wandb/sdk/launch/builder/__pycache__/__init__.cpython-310.pyc
ADDED
|
Binary file (176 Bytes). View file
|
|
|