Add files using upload-large-folder tool
Browse filesThis view is limited to 50 files because it contains too many changes.
See raw diff
- .gitattributes +1 -0
- parrot/lib/python3.10/site-packages/pyarrow/_dataset_parquet.cpython-310-x86_64-linux-gnu.so +3 -0
- parrot/lib/python3.10/site-packages/wandb/sdk/internal/__pycache__/internal_util.cpython-310.pyc +0 -0
- parrot/lib/python3.10/site-packages/wandb/sdk/internal/__pycache__/job_builder.cpython-310.pyc +0 -0
- parrot/lib/python3.10/site-packages/wandb/sdk/internal/__pycache__/profiler.cpython-310.pyc +0 -0
- parrot/lib/python3.10/site-packages/wandb/sdk/internal/__pycache__/sender.cpython-310.pyc +0 -0
- parrot/lib/python3.10/site-packages/wandb/sdk/internal/__pycache__/settings_static.cpython-310.pyc +0 -0
- parrot/lib/python3.10/site-packages/wandb/sdk/internal/__pycache__/writer.cpython-310.pyc +0 -0
- parrot/lib/python3.10/site-packages/wandb/sdk/launch/builder/__init__.py +0 -0
- parrot/lib/python3.10/site-packages/wandb/sdk/launch/builder/__pycache__/abstract.cpython-310.pyc +0 -0
- parrot/lib/python3.10/site-packages/wandb/sdk/launch/builder/__pycache__/build.cpython-310.pyc +0 -0
- parrot/lib/python3.10/site-packages/wandb/sdk/launch/builder/__pycache__/context_manager.cpython-310.pyc +0 -0
- parrot/lib/python3.10/site-packages/wandb/sdk/launch/builder/__pycache__/kaniko_builder.cpython-310.pyc +0 -0
- parrot/lib/python3.10/site-packages/wandb/sdk/launch/builder/__pycache__/noop.cpython-310.pyc +0 -0
- parrot/lib/python3.10/site-packages/wandb/sdk/launch/builder/build.py +297 -0
- parrot/lib/python3.10/site-packages/wandb/sdk/launch/builder/docker_builder.py +177 -0
- parrot/lib/python3.10/site-packages/wandb/sdk/launch/builder/templates/__pycache__/dockerfile.cpython-310.pyc +0 -0
- parrot/lib/python3.10/site-packages/wandb/sdk/launch/builder/templates/dockerfile.py +92 -0
- parrot/lib/python3.10/site-packages/wandb/sdk/launch/inputs/__pycache__/files.cpython-310.pyc +0 -0
- parrot/lib/python3.10/site-packages/wandb/sdk/launch/inputs/__pycache__/internal.cpython-310.pyc +0 -0
- parrot/lib/python3.10/site-packages/wandb/sdk/launch/runner/__init__.py +0 -0
- parrot/lib/python3.10/site-packages/wandb/sdk/launch/runner/abstract.py +195 -0
- parrot/lib/python3.10/site-packages/wandb/sdk/launch/runner/kubernetes_monitor.py +474 -0
- parrot/lib/python3.10/site-packages/wandb/sdk/launch/runner/kubernetes_runner.py +963 -0
- parrot/lib/python3.10/site-packages/wandb/sdk/launch/runner/local_container.py +301 -0
- parrot/lib/python3.10/site-packages/wandb/sdk/lib/__init__.py +5 -0
- parrot/lib/python3.10/site-packages/wandb/sdk/lib/__pycache__/_settings_toposort_generated.cpython-310.pyc +0 -0
- parrot/lib/python3.10/site-packages/wandb/sdk/lib/__pycache__/_wburls_generated.cpython-310.pyc +0 -0
- parrot/lib/python3.10/site-packages/wandb/sdk/lib/__pycache__/config_util.cpython-310.pyc +0 -0
- parrot/lib/python3.10/site-packages/wandb/sdk/lib/__pycache__/credentials.cpython-310.pyc +0 -0
- parrot/lib/python3.10/site-packages/wandb/sdk/lib/__pycache__/disabled.cpython-310.pyc +0 -0
- parrot/lib/python3.10/site-packages/wandb/sdk/lib/__pycache__/lazyloader.cpython-310.pyc +0 -0
- parrot/lib/python3.10/site-packages/wandb/sdk/lib/__pycache__/paths.cpython-310.pyc +0 -0
- parrot/lib/python3.10/site-packages/wandb/sdk/lib/__pycache__/proto_util.cpython-310.pyc +0 -0
- parrot/lib/python3.10/site-packages/wandb/sdk/lib/__pycache__/reporting.cpython-310.pyc +0 -0
- parrot/lib/python3.10/site-packages/wandb/sdk/lib/__pycache__/sock_client.cpython-310.pyc +0 -0
- parrot/lib/python3.10/site-packages/wandb/sdk/lib/__pycache__/sparkline.cpython-310.pyc +0 -0
- parrot/lib/python3.10/site-packages/wandb/sdk/lib/__pycache__/timer.cpython-310.pyc +0 -0
- parrot/lib/python3.10/site-packages/wandb/sdk/lib/_settings_toposort_generate.py +159 -0
- parrot/lib/python3.10/site-packages/wandb/sdk/lib/_settings_toposort_generated.py +249 -0
- parrot/lib/python3.10/site-packages/wandb/sdk/lib/_wburls_generate.py +25 -0
- parrot/lib/python3.10/site-packages/wandb/sdk/lib/apikey.py +273 -0
- parrot/lib/python3.10/site-packages/wandb/sdk/lib/capped_dict.py +26 -0
- parrot/lib/python3.10/site-packages/wandb/sdk/lib/config_util.py +101 -0
- parrot/lib/python3.10/site-packages/wandb/sdk/lib/credentials.py +141 -0
- parrot/lib/python3.10/site-packages/wandb/sdk/lib/deprecate.py +42 -0
- parrot/lib/python3.10/site-packages/wandb/sdk/lib/disabled.py +29 -0
- parrot/lib/python3.10/site-packages/wandb/sdk/lib/exit_hooks.py +54 -0
- parrot/lib/python3.10/site-packages/wandb/sdk/lib/filenames.py +64 -0
- parrot/lib/python3.10/site-packages/wandb/sdk/lib/filesystem.py +372 -0
.gitattributes
CHANGED
|
@@ -146,3 +146,4 @@ parrot/lib/libreadline.a filter=lfs diff=lfs merge=lfs -text
|
|
| 146 |
parrot/lib/python3.10/site-packages/wandb/vendor/pygments/lexers/__pycache__/_php_builtins.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
|
| 147 |
parrot/lib/python3.10/site-packages/aiohttp/_http_writer.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
|
| 148 |
parrot/lib/libquadmath.so filter=lfs diff=lfs merge=lfs -text
|
|
|
|
|
|
| 146 |
parrot/lib/python3.10/site-packages/wandb/vendor/pygments/lexers/__pycache__/_php_builtins.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
|
| 147 |
parrot/lib/python3.10/site-packages/aiohttp/_http_writer.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
|
| 148 |
parrot/lib/libquadmath.so filter=lfs diff=lfs merge=lfs -text
|
| 149 |
+
parrot/lib/python3.10/site-packages/pyarrow/_dataset_parquet.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
|
parrot/lib/python3.10/site-packages/pyarrow/_dataset_parquet.cpython-310-x86_64-linux-gnu.so
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:178e9547694aa62399bc0cf44c530a767396b5da9344416107b76c83bfe05d02
|
| 3 |
+
size 364184
|
parrot/lib/python3.10/site-packages/wandb/sdk/internal/__pycache__/internal_util.cpython-310.pyc
ADDED
|
Binary file (3.58 kB). View file
|
|
|
parrot/lib/python3.10/site-packages/wandb/sdk/internal/__pycache__/job_builder.cpython-310.pyc
ADDED
|
Binary file (16.5 kB). View file
|
|
|
parrot/lib/python3.10/site-packages/wandb/sdk/internal/__pycache__/profiler.cpython-310.pyc
ADDED
|
Binary file (2.5 kB). View file
|
|
|
parrot/lib/python3.10/site-packages/wandb/sdk/internal/__pycache__/sender.cpython-310.pyc
ADDED
|
Binary file (46.4 kB). View file
|
|
|
parrot/lib/python3.10/site-packages/wandb/sdk/internal/__pycache__/settings_static.cpython-310.pyc
ADDED
|
Binary file (3.31 kB). View file
|
|
|
parrot/lib/python3.10/site-packages/wandb/sdk/internal/__pycache__/writer.cpython-310.pyc
ADDED
|
Binary file (6.54 kB). View file
|
|
|
parrot/lib/python3.10/site-packages/wandb/sdk/launch/builder/__init__.py
ADDED
|
File without changes
|
parrot/lib/python3.10/site-packages/wandb/sdk/launch/builder/__pycache__/abstract.cpython-310.pyc
ADDED
|
Binary file (5.21 kB). View file
|
|
|
parrot/lib/python3.10/site-packages/wandb/sdk/launch/builder/__pycache__/build.cpython-310.pyc
ADDED
|
Binary file (8.29 kB). View file
|
|
|
parrot/lib/python3.10/site-packages/wandb/sdk/launch/builder/__pycache__/context_manager.cpython-310.pyc
ADDED
|
Binary file (5.5 kB). View file
|
|
|
parrot/lib/python3.10/site-packages/wandb/sdk/launch/builder/__pycache__/kaniko_builder.cpython-310.pyc
ADDED
|
Binary file (15.2 kB). View file
|
|
|
parrot/lib/python3.10/site-packages/wandb/sdk/launch/builder/__pycache__/noop.cpython-310.pyc
ADDED
|
Binary file (2.34 kB). View file
|
|
|
parrot/lib/python3.10/site-packages/wandb/sdk/launch/builder/build.py
ADDED
|
@@ -0,0 +1,297 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import hashlib
|
| 2 |
+
import json
|
| 3 |
+
import logging
|
| 4 |
+
import os
|
| 5 |
+
import pathlib
|
| 6 |
+
import shlex
|
| 7 |
+
from typing import Any, Dict, List, Tuple
|
| 8 |
+
|
| 9 |
+
from dockerpycreds.utils import find_executable # type: ignore
|
| 10 |
+
|
| 11 |
+
import wandb
|
| 12 |
+
import wandb.env
|
| 13 |
+
from wandb import docker
|
| 14 |
+
from wandb.apis.internal import Api
|
| 15 |
+
from wandb.sdk.launch.loader import (
|
| 16 |
+
builder_from_config,
|
| 17 |
+
environment_from_config,
|
| 18 |
+
registry_from_config,
|
| 19 |
+
)
|
| 20 |
+
from wandb.util import get_module
|
| 21 |
+
|
| 22 |
+
from .._project_spec import EntryPoint, LaunchProject
|
| 23 |
+
from ..errors import ExecutionError, LaunchError
|
| 24 |
+
from ..utils import LOG_PREFIX, event_loop_thread_exec
|
| 25 |
+
from .templates.dockerfile import (
|
| 26 |
+
ACCELERATOR_SETUP_TEMPLATE,
|
| 27 |
+
ENTRYPOINT_TEMPLATE,
|
| 28 |
+
PIP_TEMPLATE,
|
| 29 |
+
PYTHON_SETUP_TEMPLATE,
|
| 30 |
+
USER_CREATE_TEMPLATE,
|
| 31 |
+
)
|
| 32 |
+
|
| 33 |
+
_logger = logging.getLogger(__name__)
|
| 34 |
+
|
| 35 |
+
|
| 36 |
+
_WANDB_DOCKERFILE_NAME = "Dockerfile.wandb"
|
| 37 |
+
|
| 38 |
+
|
| 39 |
+
async def validate_docker_installation() -> None:
|
| 40 |
+
"""Verify if Docker is installed on host machine."""
|
| 41 |
+
find_exec = event_loop_thread_exec(find_executable)
|
| 42 |
+
if not await find_exec("docker"):
|
| 43 |
+
raise ExecutionError(
|
| 44 |
+
"Could not find Docker executable. "
|
| 45 |
+
"Ensure Docker is installed as per the instructions "
|
| 46 |
+
"at https://docs.docker.com/install/overview/."
|
| 47 |
+
)
|
| 48 |
+
|
| 49 |
+
|
| 50 |
+
def join(split_command: List[str]) -> str:
|
| 51 |
+
"""Return a shell-escaped string from *split_command*.
|
| 52 |
+
|
| 53 |
+
Also remove quotes from double quoted strings. Ex:
|
| 54 |
+
"'local container queue'" --> "local container queue"
|
| 55 |
+
"""
|
| 56 |
+
return " ".join(shlex.quote(arg.replace("'", "")) for arg in split_command)
|
| 57 |
+
|
| 58 |
+
|
| 59 |
+
async def build_image_from_project(
|
| 60 |
+
launch_project: LaunchProject,
|
| 61 |
+
api: Api,
|
| 62 |
+
launch_config: Dict[str, Any],
|
| 63 |
+
) -> str:
|
| 64 |
+
"""Construct a docker image from a project and returns the URI of the image.
|
| 65 |
+
|
| 66 |
+
Arguments:
|
| 67 |
+
launch_project: The project to build an image from.
|
| 68 |
+
api: The API object to use for fetching the project.
|
| 69 |
+
launch_config: The launch config to use for building the image.
|
| 70 |
+
|
| 71 |
+
Returns:
|
| 72 |
+
The URI of the built image.
|
| 73 |
+
"""
|
| 74 |
+
assert launch_project.uri, "To build an image on queue a URI must be set."
|
| 75 |
+
launch_config = launch_config or {}
|
| 76 |
+
env_config = launch_config.get("environment", {})
|
| 77 |
+
if not isinstance(env_config, dict):
|
| 78 |
+
wrong_type = type(env_config).__name__
|
| 79 |
+
raise LaunchError(
|
| 80 |
+
f"Invalid environment config: {env_config} of type {wrong_type} "
|
| 81 |
+
"loaded from launch config. Expected dict."
|
| 82 |
+
)
|
| 83 |
+
environment = environment_from_config(env_config)
|
| 84 |
+
|
| 85 |
+
registry_config = launch_config.get("registry", {})
|
| 86 |
+
if not isinstance(registry_config, dict):
|
| 87 |
+
wrong_type = type(registry_config).__name__
|
| 88 |
+
raise LaunchError(
|
| 89 |
+
f"Invalid registry config: {registry_config} of type {wrong_type}"
|
| 90 |
+
" loaded from launch config. Expected dict."
|
| 91 |
+
)
|
| 92 |
+
registry = registry_from_config(registry_config, environment)
|
| 93 |
+
|
| 94 |
+
builder_config = launch_config.get("builder", {})
|
| 95 |
+
if not isinstance(builder_config, dict):
|
| 96 |
+
wrong_type = type(builder_config).__name__
|
| 97 |
+
raise LaunchError(
|
| 98 |
+
f"Invalid builder config: {builder_config} of type {wrong_type} "
|
| 99 |
+
"loaded from launch config. Expected dict."
|
| 100 |
+
)
|
| 101 |
+
builder = builder_from_config(builder_config, environment, registry)
|
| 102 |
+
|
| 103 |
+
if not builder:
|
| 104 |
+
raise LaunchError("Unable to build image. No builder found.")
|
| 105 |
+
|
| 106 |
+
launch_project.fetch_and_validate_project()
|
| 107 |
+
|
| 108 |
+
entry_point = (
|
| 109 |
+
launch_project.get_job_entry_point() or launch_project.override_entrypoint
|
| 110 |
+
)
|
| 111 |
+
assert entry_point is not None
|
| 112 |
+
wandb.termlog(f"{LOG_PREFIX}Building docker image from uri source")
|
| 113 |
+
image_uri = await builder.build_image(launch_project, entry_point)
|
| 114 |
+
if not image_uri:
|
| 115 |
+
raise LaunchError("Error building image uri")
|
| 116 |
+
else:
|
| 117 |
+
return image_uri
|
| 118 |
+
|
| 119 |
+
|
| 120 |
+
def image_tag_from_dockerfile_and_source(
|
| 121 |
+
launch_project: LaunchProject, dockerfile_contents: str
|
| 122 |
+
) -> str:
|
| 123 |
+
"""Hashes the source and dockerfile contents into a unique tag."""
|
| 124 |
+
image_source_string = launch_project.get_image_source_string()
|
| 125 |
+
unique_id_string = image_source_string + dockerfile_contents
|
| 126 |
+
image_tag = hashlib.sha256(unique_id_string.encode("utf-8")).hexdigest()[:8]
|
| 127 |
+
return image_tag
|
| 128 |
+
|
| 129 |
+
|
| 130 |
+
def get_docker_user(launch_project: LaunchProject, runner_type: str) -> Tuple[str, int]:
|
| 131 |
+
import getpass
|
| 132 |
+
|
| 133 |
+
username = getpass.getuser()
|
| 134 |
+
|
| 135 |
+
if runner_type == "sagemaker" and not launch_project.docker_image:
|
| 136 |
+
# unless user has provided their own image, sagemaker must run as root but keep the name for workdir etc
|
| 137 |
+
return username, 0
|
| 138 |
+
|
| 139 |
+
userid = launch_project.docker_user_id or os.geteuid()
|
| 140 |
+
return username, userid
|
| 141 |
+
|
| 142 |
+
|
| 143 |
+
def get_base_setup(
|
| 144 |
+
launch_project: LaunchProject, py_version: str, py_major: str
|
| 145 |
+
) -> str:
|
| 146 |
+
"""Fill in the Dockerfile templates for stage 2 of build.
|
| 147 |
+
|
| 148 |
+
CPU version is built on python, Accelerator version is built on user provided.
|
| 149 |
+
"""
|
| 150 |
+
minor = int(py_version.split(".")[1])
|
| 151 |
+
if minor < 12:
|
| 152 |
+
python_base_image = f"python:{py_version}-buster"
|
| 153 |
+
else:
|
| 154 |
+
python_base_image = f"python:{py_version}-bookworm"
|
| 155 |
+
if launch_project.accelerator_base_image:
|
| 156 |
+
_logger.info(
|
| 157 |
+
f"Using accelerator base image: {launch_project.accelerator_base_image}"
|
| 158 |
+
)
|
| 159 |
+
python_packages = [
|
| 160 |
+
f"python{py_version}",
|
| 161 |
+
f"libpython{py_version}",
|
| 162 |
+
"python3-pip",
|
| 163 |
+
"python3-setuptools",
|
| 164 |
+
]
|
| 165 |
+
base_setup = ACCELERATOR_SETUP_TEMPLATE.format(
|
| 166 |
+
accelerator_base_image=launch_project.accelerator_base_image,
|
| 167 |
+
python_packages=" \\\n".join(python_packages),
|
| 168 |
+
py_version=py_version,
|
| 169 |
+
)
|
| 170 |
+
else:
|
| 171 |
+
python_packages = [
|
| 172 |
+
"python3-dev",
|
| 173 |
+
"gcc",
|
| 174 |
+
] # gcc required for python < 3.7 for some reason
|
| 175 |
+
base_setup = PYTHON_SETUP_TEMPLATE.format(py_base_image=python_base_image)
|
| 176 |
+
return base_setup
|
| 177 |
+
|
| 178 |
+
|
| 179 |
+
# Move this into the build context manager.
|
| 180 |
+
def get_requirements_section(
|
| 181 |
+
launch_project: LaunchProject, build_context_dir: str, builder_type: str
|
| 182 |
+
) -> str:
|
| 183 |
+
if builder_type == "docker":
|
| 184 |
+
buildx_installed = docker.is_buildx_installed()
|
| 185 |
+
if not buildx_installed:
|
| 186 |
+
wandb.termwarn(
|
| 187 |
+
"Docker BuildX is not installed, for faster builds upgrade docker: https://github.com/docker/buildx#installing"
|
| 188 |
+
)
|
| 189 |
+
prefix = "RUN WANDB_DISABLE_CACHE=true"
|
| 190 |
+
elif builder_type == "kaniko":
|
| 191 |
+
prefix = "RUN WANDB_DISABLE_CACHE=true"
|
| 192 |
+
buildx_installed = False
|
| 193 |
+
|
| 194 |
+
if buildx_installed:
|
| 195 |
+
prefix = "RUN --mount=type=cache,mode=0777,target=/root/.cache/pip"
|
| 196 |
+
|
| 197 |
+
requirements_files = []
|
| 198 |
+
deps_install_line = None
|
| 199 |
+
|
| 200 |
+
base_path = pathlib.Path(build_context_dir)
|
| 201 |
+
# If there is a requirements.txt at root of build context, use that.
|
| 202 |
+
if (base_path / "src" / "requirements.txt").exists():
|
| 203 |
+
requirements_files += ["src/requirements.txt"]
|
| 204 |
+
deps_install_line = "pip install uv && uv pip install -r requirements.txt"
|
| 205 |
+
with open(base_path / "src" / "requirements.txt") as f:
|
| 206 |
+
requirements = f.readlines()
|
| 207 |
+
if not any(["wandb" in r for r in requirements]):
|
| 208 |
+
wandb.termwarn(f"{LOG_PREFIX}wandb is not present in requirements.txt.")
|
| 209 |
+
return PIP_TEMPLATE.format(
|
| 210 |
+
buildx_optional_prefix=prefix,
|
| 211 |
+
requirements_files=" ".join(requirements_files),
|
| 212 |
+
pip_install=deps_install_line,
|
| 213 |
+
)
|
| 214 |
+
|
| 215 |
+
# Elif there is pyproject.toml at build context, convert the dependencies
|
| 216 |
+
# section to a requirements.txt and use that.
|
| 217 |
+
elif (base_path / "src" / "pyproject.toml").exists():
|
| 218 |
+
tomli = get_module("tomli")
|
| 219 |
+
if tomli is None:
|
| 220 |
+
wandb.termwarn(
|
| 221 |
+
"pyproject.toml found but tomli could not be loaded. To "
|
| 222 |
+
"install dependencies from pyproject.toml please run "
|
| 223 |
+
"`pip install tomli` and try again."
|
| 224 |
+
)
|
| 225 |
+
else:
|
| 226 |
+
# First try to read deps from standard pyproject format.
|
| 227 |
+
with open(base_path / "src" / "pyproject.toml", "rb") as f:
|
| 228 |
+
contents = tomli.load(f)
|
| 229 |
+
project_deps = [
|
| 230 |
+
str(d) for d in contents.get("project", {}).get("dependencies", [])
|
| 231 |
+
]
|
| 232 |
+
if project_deps:
|
| 233 |
+
if not any(["wandb" in d for d in project_deps]):
|
| 234 |
+
wandb.termwarn(
|
| 235 |
+
f"{LOG_PREFIX}wandb is not present as a dependency in pyproject.toml."
|
| 236 |
+
)
|
| 237 |
+
with open(base_path / "src" / "requirements.txt", "w") as f:
|
| 238 |
+
f.write("\n".join(project_deps))
|
| 239 |
+
requirements_files += ["src/requirements.txt"]
|
| 240 |
+
deps_install_line = (
|
| 241 |
+
"pip install uv && uv pip install -r requirements.txt"
|
| 242 |
+
)
|
| 243 |
+
return PIP_TEMPLATE.format(
|
| 244 |
+
buildx_optional_prefix=prefix,
|
| 245 |
+
requirements_files=" ".join(requirements_files),
|
| 246 |
+
pip_install=deps_install_line,
|
| 247 |
+
)
|
| 248 |
+
|
| 249 |
+
# Else use frozen requirements from wandb run.
|
| 250 |
+
if (
|
| 251 |
+
not deps_install_line
|
| 252 |
+
and (base_path / "src" / "requirements.frozen.txt").exists()
|
| 253 |
+
):
|
| 254 |
+
requirements_files += [
|
| 255 |
+
"src/requirements.frozen.txt",
|
| 256 |
+
"_wandb_bootstrap.py",
|
| 257 |
+
]
|
| 258 |
+
deps_install_line = (
|
| 259 |
+
launch_project.parse_existing_requirements() + "python _wandb_bootstrap.py"
|
| 260 |
+
)
|
| 261 |
+
|
| 262 |
+
if not deps_install_line:
|
| 263 |
+
raise LaunchError(f"No dependency sources found for {launch_project}")
|
| 264 |
+
|
| 265 |
+
with open(base_path / "src" / "requirements.frozen.txt") as f:
|
| 266 |
+
requirements = f.readlines()
|
| 267 |
+
if not any(["wandb" in r for r in requirements]):
|
| 268 |
+
wandb.termwarn(
|
| 269 |
+
f"{LOG_PREFIX}wandb is not present in requirements.frozen.txt."
|
| 270 |
+
)
|
| 271 |
+
|
| 272 |
+
return PIP_TEMPLATE.format(
|
| 273 |
+
buildx_optional_prefix=prefix,
|
| 274 |
+
requirements_files=" ".join(requirements_files),
|
| 275 |
+
pip_install=deps_install_line,
|
| 276 |
+
)
|
| 277 |
+
|
| 278 |
+
else:
|
| 279 |
+
# this means no deps file was found
|
| 280 |
+
requirements_line = "RUN mkdir -p env/" # Docker fails otherwise
|
| 281 |
+
wandb.termwarn("No requirements file found. No packages will be installed.")
|
| 282 |
+
return requirements_line
|
| 283 |
+
|
| 284 |
+
|
| 285 |
+
def get_user_setup(username: str, userid: int, runner_type: str) -> str:
|
| 286 |
+
if runner_type == "sagemaker":
|
| 287 |
+
# sagemaker must run as root
|
| 288 |
+
return "USER root"
|
| 289 |
+
user_create = USER_CREATE_TEMPLATE.format(uid=userid, user=username)
|
| 290 |
+
user_create += f"\nUSER {username}"
|
| 291 |
+
return user_create
|
| 292 |
+
|
| 293 |
+
|
| 294 |
+
def get_entrypoint_setup(
|
| 295 |
+
entry_point: EntryPoint,
|
| 296 |
+
) -> str:
|
| 297 |
+
return ENTRYPOINT_TEMPLATE.format(entrypoint=json.dumps(entry_point.command))
|
parrot/lib/python3.10/site-packages/wandb/sdk/launch/builder/docker_builder.py
ADDED
|
@@ -0,0 +1,177 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Implementation of the docker builder."""
|
| 2 |
+
|
| 3 |
+
import logging
|
| 4 |
+
import os
|
| 5 |
+
from typing import Any, Dict, Optional
|
| 6 |
+
|
| 7 |
+
import wandb
|
| 8 |
+
import wandb.docker as docker
|
| 9 |
+
from wandb.sdk.launch.agent.job_status_tracker import JobAndRunStatusTracker
|
| 10 |
+
from wandb.sdk.launch.builder.abstract import AbstractBuilder, registry_from_uri
|
| 11 |
+
from wandb.sdk.launch.environment.abstract import AbstractEnvironment
|
| 12 |
+
from wandb.sdk.launch.registry.abstract import AbstractRegistry
|
| 13 |
+
|
| 14 |
+
from .._project_spec import EntryPoint, LaunchProject
|
| 15 |
+
from ..errors import LaunchDockerError, LaunchError
|
| 16 |
+
from ..registry.anon import AnonynmousRegistry
|
| 17 |
+
from ..registry.local_registry import LocalRegistry
|
| 18 |
+
from ..utils import (
|
| 19 |
+
LOG_PREFIX,
|
| 20 |
+
event_loop_thread_exec,
|
| 21 |
+
warn_failed_packages_from_build_logs,
|
| 22 |
+
)
|
| 23 |
+
from .build import _WANDB_DOCKERFILE_NAME, validate_docker_installation
|
| 24 |
+
from .context_manager import BuildContextManager
|
| 25 |
+
|
| 26 |
+
_logger = logging.getLogger(__name__)
|
| 27 |
+
|
| 28 |
+
|
| 29 |
+
class DockerBuilder(AbstractBuilder):
|
| 30 |
+
"""Builds a docker image for a project.
|
| 31 |
+
|
| 32 |
+
Attributes:
|
| 33 |
+
builder_config (Dict[str, Any]): The builder config.
|
| 34 |
+
|
| 35 |
+
"""
|
| 36 |
+
|
| 37 |
+
builder_type = "docker"
|
| 38 |
+
target_platform = "linux/amd64"
|
| 39 |
+
|
| 40 |
+
def __init__(
|
| 41 |
+
self,
|
| 42 |
+
environment: AbstractEnvironment,
|
| 43 |
+
registry: AbstractRegistry,
|
| 44 |
+
config: Dict[str, Any],
|
| 45 |
+
):
|
| 46 |
+
"""Initialize a DockerBuilder.
|
| 47 |
+
|
| 48 |
+
Arguments:
|
| 49 |
+
environment (AbstractEnvironment): The environment to use.
|
| 50 |
+
registry (AbstractRegistry): The registry to use.
|
| 51 |
+
|
| 52 |
+
Raises:
|
| 53 |
+
LaunchError: If docker is not installed
|
| 54 |
+
"""
|
| 55 |
+
self.environment = environment # Docker builder doesn't actually use this.
|
| 56 |
+
self.registry = registry
|
| 57 |
+
self.config = config
|
| 58 |
+
|
| 59 |
+
@classmethod
|
| 60 |
+
def from_config(
|
| 61 |
+
cls,
|
| 62 |
+
config: Dict[str, Any],
|
| 63 |
+
environment: AbstractEnvironment,
|
| 64 |
+
registry: AbstractRegistry,
|
| 65 |
+
) -> "DockerBuilder":
|
| 66 |
+
"""Create a DockerBuilder from a config.
|
| 67 |
+
|
| 68 |
+
Arguments:
|
| 69 |
+
config (Dict[str, Any]): The config.
|
| 70 |
+
registry (AbstractRegistry): The registry to use.
|
| 71 |
+
verify (bool, optional): Whether to verify the functionality of the builder.
|
| 72 |
+
login (bool, optional): Whether to login to the registry.
|
| 73 |
+
|
| 74 |
+
Returns:
|
| 75 |
+
DockerBuilder: The DockerBuilder.
|
| 76 |
+
"""
|
| 77 |
+
# If the user provided a destination URI in the builder config
|
| 78 |
+
# we use that as the registry.
|
| 79 |
+
image_uri = config.get("destination")
|
| 80 |
+
if image_uri:
|
| 81 |
+
if registry is not None:
|
| 82 |
+
wandb.termwarn(
|
| 83 |
+
f"{LOG_PREFIX}Overriding registry from registry config"
|
| 84 |
+
f" with {image_uri} from builder config."
|
| 85 |
+
)
|
| 86 |
+
registry = registry_from_uri(image_uri)
|
| 87 |
+
|
| 88 |
+
return cls(environment, registry, config)
|
| 89 |
+
|
| 90 |
+
async def verify(self) -> None:
|
| 91 |
+
"""Verify the builder."""
|
| 92 |
+
await validate_docker_installation()
|
| 93 |
+
|
| 94 |
+
async def login(self) -> None:
|
| 95 |
+
"""Login to the registry."""
|
| 96 |
+
if isinstance(self.registry, LocalRegistry):
|
| 97 |
+
_logger.info(f"{LOG_PREFIX}No registry configured, skipping login.")
|
| 98 |
+
elif isinstance(self.registry, AnonynmousRegistry):
|
| 99 |
+
_logger.info(f"{LOG_PREFIX}Anonymous registry, skipping login.")
|
| 100 |
+
else:
|
| 101 |
+
username, password = await self.registry.get_username_password()
|
| 102 |
+
login = event_loop_thread_exec(docker.login)
|
| 103 |
+
await login(username, password, self.registry.uri)
|
| 104 |
+
|
| 105 |
+
async def build_image(
|
| 106 |
+
self,
|
| 107 |
+
launch_project: LaunchProject,
|
| 108 |
+
entrypoint: EntryPoint,
|
| 109 |
+
job_tracker: Optional[JobAndRunStatusTracker] = None,
|
| 110 |
+
) -> str:
|
| 111 |
+
"""Build the image for the given project.
|
| 112 |
+
|
| 113 |
+
Arguments:
|
| 114 |
+
launch_project (LaunchProject): The project to build.
|
| 115 |
+
entrypoint (EntryPoint): The entrypoint to use.
|
| 116 |
+
"""
|
| 117 |
+
await self.verify()
|
| 118 |
+
await self.login()
|
| 119 |
+
|
| 120 |
+
build_context_manager = BuildContextManager(launch_project=launch_project)
|
| 121 |
+
build_ctx_path, image_tag = build_context_manager.create_build_context("docker")
|
| 122 |
+
dockerfile = os.path.join(build_ctx_path, _WANDB_DOCKERFILE_NAME)
|
| 123 |
+
repository = None if not self.registry else await self.registry.get_repo_uri()
|
| 124 |
+
|
| 125 |
+
# if repo is set, use the repo name as the image name
|
| 126 |
+
if repository:
|
| 127 |
+
image_uri = f"{repository}:{image_tag}"
|
| 128 |
+
# otherwise, base the image name off of the source
|
| 129 |
+
# which the launch_project checks in image_name
|
| 130 |
+
else:
|
| 131 |
+
image_uri = f"{launch_project.image_name}:{image_tag}"
|
| 132 |
+
|
| 133 |
+
if (
|
| 134 |
+
not launch_project.build_required()
|
| 135 |
+
and await self.registry.check_image_exists(image_uri)
|
| 136 |
+
):
|
| 137 |
+
return image_uri
|
| 138 |
+
|
| 139 |
+
_logger.info(
|
| 140 |
+
f"image {image_uri} does not already exist in repository, building."
|
| 141 |
+
)
|
| 142 |
+
try:
|
| 143 |
+
output = await event_loop_thread_exec(docker.build)(
|
| 144 |
+
tags=[image_uri],
|
| 145 |
+
file=dockerfile,
|
| 146 |
+
context_path=build_ctx_path,
|
| 147 |
+
platform=self.config.get("platform"),
|
| 148 |
+
)
|
| 149 |
+
|
| 150 |
+
warn_failed_packages_from_build_logs(
|
| 151 |
+
output, image_uri, launch_project.api, job_tracker
|
| 152 |
+
)
|
| 153 |
+
|
| 154 |
+
except docker.DockerError as e:
|
| 155 |
+
if job_tracker:
|
| 156 |
+
job_tracker.set_err_stage("build")
|
| 157 |
+
raise LaunchDockerError(f"Error communicating with docker client: {e}")
|
| 158 |
+
|
| 159 |
+
try:
|
| 160 |
+
os.remove(build_ctx_path)
|
| 161 |
+
except Exception:
|
| 162 |
+
_msg = f"{LOG_PREFIX}Temporary docker context file {build_ctx_path} was not deleted."
|
| 163 |
+
_logger.info(_msg)
|
| 164 |
+
|
| 165 |
+
if repository:
|
| 166 |
+
reg, tag = image_uri.split(":")
|
| 167 |
+
wandb.termlog(f"{LOG_PREFIX}Pushing image {image_uri}")
|
| 168 |
+
push_resp = await event_loop_thread_exec(docker.push)(reg, tag)
|
| 169 |
+
if push_resp is None:
|
| 170 |
+
raise LaunchError("Failed to push image to repository")
|
| 171 |
+
elif (
|
| 172 |
+
launch_project.resource == "sagemaker"
|
| 173 |
+
and f"The push refers to repository [{repository}]" not in push_resp
|
| 174 |
+
):
|
| 175 |
+
raise LaunchError(f"Unable to push image to ECR, response: {push_resp}")
|
| 176 |
+
|
| 177 |
+
return image_uri
|
parrot/lib/python3.10/site-packages/wandb/sdk/launch/builder/templates/__pycache__/dockerfile.cpython-310.pyc
ADDED
|
Binary file (2.25 kB). View file
|
|
|
parrot/lib/python3.10/site-packages/wandb/sdk/launch/builder/templates/dockerfile.py
ADDED
|
@@ -0,0 +1,92 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
DOCKERFILE_TEMPLATE = """
|
| 2 |
+
# ----- stage 1: build -----
|
| 3 |
+
FROM {py_build_image} as build
|
| 4 |
+
|
| 5 |
+
# requirements section depends on pip vs conda, and presence of buildx
|
| 6 |
+
ENV PIP_PROGRESS_BAR off
|
| 7 |
+
{requirements_section}
|
| 8 |
+
|
| 9 |
+
# ----- stage 2: base -----
|
| 10 |
+
{base_setup}
|
| 11 |
+
|
| 12 |
+
COPY --from=build /env /env
|
| 13 |
+
ENV PATH="/env/bin:$PATH"
|
| 14 |
+
|
| 15 |
+
ENV SHELL /bin/bash
|
| 16 |
+
|
| 17 |
+
# some resources (eg sagemaker) must run on root
|
| 18 |
+
{user_setup}
|
| 19 |
+
|
| 20 |
+
WORKDIR {workdir}
|
| 21 |
+
RUN chown -R {uid} {workdir}
|
| 22 |
+
|
| 23 |
+
# make artifacts cache dir unrelated to build
|
| 24 |
+
RUN mkdir -p {workdir}/.cache && chown -R {uid} {workdir}/.cache
|
| 25 |
+
|
| 26 |
+
# copy code/etc
|
| 27 |
+
COPY --chown={uid} src/ {workdir}
|
| 28 |
+
|
| 29 |
+
ENV PYTHONUNBUFFERED=1
|
| 30 |
+
|
| 31 |
+
{entrypoint_section}
|
| 32 |
+
"""
|
| 33 |
+
|
| 34 |
+
# this goes into base_setup in TEMPLATE
|
| 35 |
+
PYTHON_SETUP_TEMPLATE = """
|
| 36 |
+
FROM {py_base_image} as base
|
| 37 |
+
"""
|
| 38 |
+
|
| 39 |
+
# this goes into base_setup in TEMPLATE
|
| 40 |
+
ACCELERATOR_SETUP_TEMPLATE = """
|
| 41 |
+
FROM {accelerator_base_image} as base
|
| 42 |
+
|
| 43 |
+
# make non-interactive so build doesn't block on questions
|
| 44 |
+
ENV DEBIAN_FRONTEND=noninteractive
|
| 45 |
+
|
| 46 |
+
# install python
|
| 47 |
+
RUN apt-get update -qq && apt-get install --no-install-recommends -y \
|
| 48 |
+
{python_packages} \
|
| 49 |
+
&& apt-get -qq purge && apt-get -qq clean \
|
| 50 |
+
&& rm -rf /var/lib/apt/lists/*
|
| 51 |
+
|
| 52 |
+
# make sure `python` points at the right version
|
| 53 |
+
RUN update-alternatives --install /usr/bin/python python /usr/bin/python{py_version} 1 \
|
| 54 |
+
&& update-alternatives --install /usr/local/bin/python python /usr/bin/python{py_version} 1
|
| 55 |
+
"""
|
| 56 |
+
|
| 57 |
+
# this goes into requirements_section in TEMPLATE
|
| 58 |
+
PIP_TEMPLATE = """
|
| 59 |
+
RUN python -m venv /env
|
| 60 |
+
# make sure we install into the env
|
| 61 |
+
ENV PATH="/env/bin:$PATH"
|
| 62 |
+
|
| 63 |
+
COPY {requirements_files} ./
|
| 64 |
+
{buildx_optional_prefix} {pip_install}
|
| 65 |
+
"""
|
| 66 |
+
|
| 67 |
+
# this goes into requirements_section in TEMPLATE
|
| 68 |
+
CONDA_TEMPLATE = """
|
| 69 |
+
COPY src/environment.yml .
|
| 70 |
+
{buildx_optional_prefix} conda env create -f environment.yml -n env
|
| 71 |
+
|
| 72 |
+
# pack the environment so that we can transfer to the base image
|
| 73 |
+
RUN conda install -c conda-forge conda-pack
|
| 74 |
+
RUN conda pack -n env -o /tmp/env.tar && \
|
| 75 |
+
mkdir /env && cd /env && tar xf /tmp/env.tar && \
|
| 76 |
+
rm /tmp/env.tar
|
| 77 |
+
RUN /env/bin/conda-unpack
|
| 78 |
+
"""
|
| 79 |
+
|
| 80 |
+
USER_CREATE_TEMPLATE = """
|
| 81 |
+
RUN useradd \
|
| 82 |
+
--create-home \
|
| 83 |
+
--no-log-init \
|
| 84 |
+
--shell /bin/bash \
|
| 85 |
+
--gid 0 \
|
| 86 |
+
--uid {uid} \
|
| 87 |
+
{user} || echo ""
|
| 88 |
+
"""
|
| 89 |
+
|
| 90 |
+
ENTRYPOINT_TEMPLATE = """
|
| 91 |
+
ENTRYPOINT {entrypoint}
|
| 92 |
+
"""
|
parrot/lib/python3.10/site-packages/wandb/sdk/launch/inputs/__pycache__/files.cpython-310.pyc
ADDED
|
Binary file (4.68 kB). View file
|
|
|
parrot/lib/python3.10/site-packages/wandb/sdk/launch/inputs/__pycache__/internal.cpython-310.pyc
ADDED
|
Binary file (8.78 kB). View file
|
|
|
parrot/lib/python3.10/site-packages/wandb/sdk/launch/runner/__init__.py
ADDED
|
File without changes
|
parrot/lib/python3.10/site-packages/wandb/sdk/launch/runner/abstract.py
ADDED
|
@@ -0,0 +1,195 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Implementation of the abstract runner class.
|
| 2 |
+
|
| 3 |
+
This class defines the interface that the W&B launch runner uses to manage the lifecycle
|
| 4 |
+
of runs launched in different environments (e.g. runs launched locally or in a cluster).
|
| 5 |
+
"""
|
| 6 |
+
|
| 7 |
+
import logging
|
| 8 |
+
import os
|
| 9 |
+
import subprocess
|
| 10 |
+
import sys
|
| 11 |
+
from abc import ABC, abstractmethod
|
| 12 |
+
from typing import Any, Dict, List, Optional, Union
|
| 13 |
+
|
| 14 |
+
from dockerpycreds.utils import find_executable # type: ignore
|
| 15 |
+
|
| 16 |
+
import wandb
|
| 17 |
+
from wandb.apis.internal import Api
|
| 18 |
+
from wandb.sdk.lib import runid
|
| 19 |
+
|
| 20 |
+
from .._project_spec import LaunchProject
|
| 21 |
+
|
| 22 |
+
_logger = logging.getLogger(__name__)
|
| 23 |
+
|
| 24 |
+
|
| 25 |
+
if sys.version_info >= (3, 8):
|
| 26 |
+
from typing import Literal
|
| 27 |
+
else:
|
| 28 |
+
from typing_extensions import Literal
|
| 29 |
+
|
| 30 |
+
# Closed set of states a launched run can be in, as reported by runner
# implementations (see Status and the runner State dictionaries).
State = Literal[
    "unknown",
    "starting",
    "running",
    "failed",
    "finished",
    "stopping",
    "stopped",
    "preempted",
]
|
| 40 |
+
|
| 41 |
+
|
| 42 |
+
class Status:
    """Mutable (state, messages) pair describing a launched run.

    Compares equal to other ``Status`` objects with the same state, and also
    to bare state strings; hashes on the state string alone so equal objects
    hash alike.
    """

    def __init__(
        self, state: "State" = "unknown", messages: Optional[List[str]] = None
    ):
        # Annotation fixed: the default is None, so the parameter is
        # Optional[List[str]] (the old `List[str] = None` needed type: ignore).
        # A fresh list is created per instance to avoid a shared mutable default.
        self.state = state
        self.messages = messages or []

    def __repr__(self) -> str:
        # repr must be a plain string; the old "-> State" annotation was
        # misleading even though State values are themselves strings.
        return self.state

    def __str__(self) -> str:
        return self.state

    def __eq__(self, __value: object) -> bool:
        # Equal to another Status with the same state, or to the raw state
        # string itself (messages are deliberately ignored).
        if isinstance(__value, Status):
            return self.state == __value.state
        else:
            return self.state == __value

    def __hash__(self) -> int:
        # Consistent with __eq__: hash only the state string.
        return hash(self.state)
|
| 61 |
+
|
| 62 |
+
|
| 63 |
+
class AbstractRun(ABC):
    """Wrapper around a W&B launch run.

    A launched run is a subprocess running an entry point
    command, that exposes methods for waiting on and cancelling the run.
    This class defines the interface that the W&B launch runner uses to manage the lifecycle
    of runs launched in different environments (e.g. runs launched locally or in a cluster).
    ``AbstractRun`` is not thread-safe. That is, concurrent calls to wait() / cancel()
    from multiple threads may inadvertently kill resources (e.g. local processes) unrelated to the
    run.
    """

    def __init__(self) -> None:
        # Cached status; concrete runners update this as the job progresses.
        self._status = Status()

    @property
    def status(self) -> "Status":
        """Return the last cached status of the run."""
        return self._status

    @abstractmethod
    async def get_logs(self) -> Optional[str]:
        """Return the logs associated with the run."""
        pass

    def _run_cmd(
        self, cmd: List[str], output_only: Optional[bool] = False
    ) -> Optional[Union["subprocess.Popen[bytes]", bytes]]:
        """Run the command and returns a popen object or the stdout of the command.

        Arguments:
            cmd: The command to run
            output_only: If true just return the stdout bytes

        Returns:
            The ``Popen`` handle (default), the captured stdout bytes when
            ``output_only`` is set, or ``None`` when the command cannot be run.
        """
        try:
            env = os.environ
            popen = subprocess.Popen(cmd, env=env, stdout=subprocess.PIPE)
            if output_only:
                popen.wait()
                if popen.stdout is not None:
                    return popen.stdout.read()
            return popen
        except (subprocess.CalledProcessError, OSError) as e:
            # Fix: Popen never raises CalledProcessError (that comes from
            # check_call/run) — a missing or non-executable binary raises
            # OSError, which previously escaped instead of being reported.
            wandb.termerror(f"Command failed: {e}")
            return None

    @abstractmethod
    async def wait(self) -> bool:
        """Wait for the run to finish, returning True if the run succeeded and false otherwise.

        Note that in some cases, we may wait until the remote job completes rather than until the W&B run completes.
        """
        pass

    @abstractmethod
    async def get_status(self) -> "Status":
        """Get status of the run."""
        pass

    @abstractmethod
    async def cancel(self) -> None:
        """Cancel the run (interrupts the command subprocess, cancels the run, etc).

        Cancels the run and waits for it to terminate. The W&B run status may not be
        set correctly upon run cancellation.
        """
        pass

    @property
    @abstractmethod
    def id(self) -> Optional[str]:
        """Backend-specific identifier of the launched run, if known."""
        pass
|
| 134 |
+
|
| 135 |
+
|
| 136 |
+
class AbstractRunner(ABC):
    """Abstract plugin class defining the interface needed to execute W&B Launches.

    You can define subclasses of ``AbstractRunner`` and expose them as third-party
    plugins to enable running W&B projects against custom execution backends
    (e.g. to run projects against your team's in-house cluster or job scheduler).
    """

    _type: str

    def __init__(
        self,
        api: Api,
        backend_config: Dict[str, Any],
    ) -> None:
        # Snapshot the working directory and mint a fresh namespace id up front.
        self._cwd = os.getcwd()
        self._namespace = runid.generate_id()
        self._api = api
        self.backend_config = backend_config

    def find_executable(
        self, cmd: str
    ) -> Any:  # should return a string, but mypy doesn't trust find_executable
        """Cross platform utility for checking if a program is available."""
        return find_executable(cmd)

    @property
    def api_key(self) -> Any:
        """API key of the wrapped internal API object."""
        return self._api.api_key

    def verify(self) -> bool:
        """This is called on first boot to verify the needed commands, and permissions are available.

        For now just call `wandb.termerror` and `sys.exit(1)`
        """
        # Guard clause: with a key present there is nothing to verify yet.
        if self._api.api_key is not None:
            return True
        wandb.termerror(
            "Couldn't find W&B api key, run wandb login or set WANDB_API_KEY"
        )
        sys.exit(1)

    @abstractmethod
    async def run(
        self,
        launch_project: LaunchProject,
        image_uri: str,
    ) -> Optional[AbstractRun]:
        """Submit an LaunchProject to be run.

        Returns a SubmittedRun object to track the execution
        Arguments:
            launch_project: Object of _project_spec.LaunchProject class representing a wandb launch project

        Returns:
            A :py:class:`wandb.sdk.launch.runners.SubmittedRun`. This function is expected to run
            the project asynchronously, i.e. it should trigger project execution and then
            immediately return a `SubmittedRun` to track execution status.
        """
        pass
|
parrot/lib/python3.10/site-packages/wandb/sdk/launch/runner/kubernetes_monitor.py
ADDED
|
@@ -0,0 +1,474 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Monitors kubernetes resources managed by the launch agent."""
|
| 2 |
+
|
| 3 |
+
import asyncio
|
| 4 |
+
import logging
|
| 5 |
+
import sys
|
| 6 |
+
import traceback
|
| 7 |
+
from typing import Any, Dict, List, Optional, Tuple, Union
|
| 8 |
+
|
| 9 |
+
import kubernetes_asyncio # type: ignore # noqa: F401
|
| 10 |
+
import urllib3
|
| 11 |
+
from kubernetes_asyncio import watch
|
| 12 |
+
from kubernetes_asyncio.client import ( # type: ignore # noqa: F401
|
| 13 |
+
ApiException,
|
| 14 |
+
BatchV1Api,
|
| 15 |
+
CoreV1Api,
|
| 16 |
+
CustomObjectsApi,
|
| 17 |
+
V1Pod,
|
| 18 |
+
V1PodStatus,
|
| 19 |
+
)
|
| 20 |
+
|
| 21 |
+
import wandb
|
| 22 |
+
from wandb.sdk.launch.agent import LaunchAgent
|
| 23 |
+
from wandb.sdk.launch.errors import LaunchError
|
| 24 |
+
from wandb.sdk.launch.runner.abstract import State, Status
|
| 25 |
+
from wandb.sdk.launch.utils import get_kube_context_and_api_client
|
| 26 |
+
|
| 27 |
+
# Namespace prefix for all labels wandb attaches to Kubernetes objects.
WANDB_K8S_LABEL_NAMESPACE = "wandb.ai"
# Label carrying the associated W&B run id (not read in this module).
WANDB_K8S_RUN_ID = f"{WANDB_K8S_LABEL_NAMESPACE}/run-id"
# Label identifying the launch agent that created a resource; appended to the
# watch selector when an agent is running (see ensure_initialized).
WANDB_K8S_LABEL_AGENT = f"{WANDB_K8S_LABEL_NAMESPACE}/agent"
# Label marking a resource as one the monitor should watch ("=true" selector).
WANDB_K8S_LABEL_MONITOR = f"{WANDB_K8S_LABEL_NAMESPACE}/monitor"
|
| 31 |
+
|
| 32 |
+
|
| 33 |
+
class Resources:
    """Names of the built-in Kubernetes resource kinds this monitor watches."""

    JOBS = "jobs"
    PODS = "pods"
|
| 36 |
+
|
| 37 |
+
|
| 38 |
+
class CustomResource:
    """Identifies a Kubernetes custom resource type by group/version/plural.

    Instances are used as dict keys (see ``_monitor_tasks``), so equality and
    hashing are value-based and consistent with each other.
    """

    def __init__(self, group: str, version: str, plural: str) -> None:
        """Initialize the CustomResource."""
        self.group = group
        self.version = version
        self.plural = plural

    def __str__(self) -> str:
        """Return a string representation of the CustomResource."""
        return f"{self.group}/{self.version}/{self.plural}"

    def __eq__(self, other: object) -> bool:
        """Compare by group/version/plural.

        Fix: the class defined ``__hash__`` without ``__eq__``, so two
        equal-valued instances hashed alike but compared by identity —
        dict-key deduplication silently failed for separately constructed
        resources.
        """
        if not isinstance(other, CustomResource):
            return NotImplemented
        return (self.group, self.version, self.plural) == (
            other.group,
            other.version,
            other.plural,
        )

    def __hash__(self) -> int:
        """Return a hash of the CustomResource, consistent with __eq__."""
        return hash(str(self))
|
| 54 |
+
|
| 55 |
+
|
| 56 |
+
# Maps phases and conditions of custom objects to agent's internal run states.
|
| 57 |
+
CRD_STATE_DICT: Dict[str, State] = {
    # Not-yet-running phases.
    "created": "starting",
    "pending": "starting",
    # In-flight phases.
    "running": "running",
    "completing": "running",
    # Terminal success.
    "succeeded": "finished",
    "completed": "finished",
    # Terminal failure.
    "failed": "failed",
    "aborted": "failed",
    "timeout": "failed",
    "terminated": "failed",
    # Shutdown in progress.
    "terminating": "stopping",
}
|
| 70 |
+
|
| 71 |
+
_logger = logging.getLogger(__name__)
|
| 72 |
+
|
| 73 |
+
|
| 74 |
+
def create_named_task(name: str, coro: Any, *args: Any, **kwargs: Any) -> asyncio.Task:
    """Schedule ``coro(*args, **kwargs)`` as a task with a debug name.

    The task gets ``_log_err_task_callback`` as a done-callback so any
    exception it raises is surfaced rather than silently dropped.
    """
    new_task = asyncio.create_task(coro(*args, **kwargs))
    # Task.set_name only exists on Python 3.8+.
    if sys.version_info >= (3, 8):
        new_task.set_name(name)
    new_task.add_done_callback(_log_err_task_callback)
    return new_task
|
| 81 |
+
|
| 82 |
+
|
| 83 |
+
def _log_err_task_callback(task: asyncio.Task) -> None:
    """Done-callback that reports a task's cancellation or exception.

    Fix: ``Task.exception()`` *raises* ``CancelledError`` for a cancelled
    task instead of returning it, so the original ``isinstance`` check could
    never fire and the callback itself blew up on cancelled tasks. Check
    ``task.cancelled()`` before calling ``exception()``. Also renamed the
    local ``exec``, which shadowed the builtin.
    """
    if task.cancelled():
        wandb.termlog(f"Task {task.get_name()} was cancelled")
        return
    exc = task.exception()
    if exc is not None:
        # Task names require 3.8+; fall back to the task repr on older Pythons.
        name = str(task) if sys.version_info < (3, 8) else task.get_name()
        wandb.termerror(f"Exception in task {name}")
        tb = exc.__traceback__
        tb_str = "".join(traceback.format_tb(tb))
        wandb.termerror(tb_str)
|
| 95 |
+
|
| 96 |
+
|
| 97 |
+
def _is_preempted(status: "V1PodStatus") -> bool:
|
| 98 |
+
"""Check if this pod has been preempted."""
|
| 99 |
+
if hasattr(status, "conditions") and status.conditions is not None:
|
| 100 |
+
for condition in status.conditions:
|
| 101 |
+
if condition.type == "DisruptionTarget" and condition.reason in [
|
| 102 |
+
"EvictionByEvictionAPI",
|
| 103 |
+
"PreemptionByScheduler",
|
| 104 |
+
"TerminationByKubelet",
|
| 105 |
+
]:
|
| 106 |
+
return True
|
| 107 |
+
return False
|
| 108 |
+
|
| 109 |
+
|
| 110 |
+
def _is_container_creating(status: "V1PodStatus") -> bool:
|
| 111 |
+
"""Check if this pod has started creating containers."""
|
| 112 |
+
for container_status in status.container_statuses or []:
|
| 113 |
+
if (
|
| 114 |
+
container_status.state
|
| 115 |
+
and container_status.state.waiting
|
| 116 |
+
and container_status.state.waiting.reason == "ContainerCreating"
|
| 117 |
+
):
|
| 118 |
+
return True
|
| 119 |
+
return False
|
| 120 |
+
|
| 121 |
+
|
| 122 |
+
def _is_pod_unschedulable(status: "V1PodStatus") -> Tuple[bool, str]:
|
| 123 |
+
"""Return whether the pod is unschedulable along with the reason message."""
|
| 124 |
+
if not status.conditions:
|
| 125 |
+
return False, ""
|
| 126 |
+
for condition in status.conditions:
|
| 127 |
+
if (
|
| 128 |
+
condition.type == "PodScheduled"
|
| 129 |
+
and condition.status == "False"
|
| 130 |
+
and condition.reason == "Unschedulable"
|
| 131 |
+
):
|
| 132 |
+
return True, condition.message
|
| 133 |
+
return False, ""
|
| 134 |
+
|
| 135 |
+
|
| 136 |
+
def _get_crd_job_name(object: "V1Pod") -> Optional[str]:
|
| 137 |
+
refs = object.metadata.owner_references
|
| 138 |
+
if refs:
|
| 139 |
+
return refs[0].name
|
| 140 |
+
return None
|
| 141 |
+
|
| 142 |
+
|
| 143 |
+
def _state_from_conditions(conditions: List[Dict[str, Any]]) -> Optional[State]:
    """Get the status from the pod conditions."""
    # Lower-cased types of every condition whose status is "True".
    active_types = (
        c.get("type", "").lower() for c in conditions if c.get("status") == "True"
    )
    detected = {CRD_STATE_DICT[t] for t in active_types if t in CRD_STATE_DICT}
    # Ordered so the first match most accurately reflects the job's state.
    for candidate in ("finished", "failed", "stopping", "running", "starting"):
        if candidate in detected:
            return candidate
    return None
|
| 164 |
+
|
| 165 |
+
|
| 166 |
+
def _state_from_replicated_status(status_dict: Dict[str, int]) -> Optional[State]:
|
| 167 |
+
"""Infer overall job status from replicated job status for jobsets.
|
| 168 |
+
|
| 169 |
+
More info on jobset:
|
| 170 |
+
https://github.com/kubernetes-sigs/jobset/blob/main/docs/concepts/README.md
|
| 171 |
+
|
| 172 |
+
This is useful for detecting when jobsets are starting.
|
| 173 |
+
"""
|
| 174 |
+
pods_ready = status_dict.get("ready", 0)
|
| 175 |
+
pods_active = status_dict.get("active", 0)
|
| 176 |
+
if pods_ready >= 1:
|
| 177 |
+
return "running"
|
| 178 |
+
elif pods_active >= 1:
|
| 179 |
+
return "starting"
|
| 180 |
+
return None
|
| 181 |
+
|
| 182 |
+
|
| 183 |
+
class LaunchKubernetesMonitor:
    """Monitors kubernetes resources managed by the launch agent.

    Note: this class is forced to be a singleton in order to prevent multiple
    threads from being created that monitor the same kubernetes resources.
    """

    _instance = None  # This is used to ensure only one instance is created.

    def __new__(cls, *args: Any, **kwargs: Any) -> "LaunchKubernetesMonitor":
        """Create a new instance of the LaunchKubernetesMonitor.

        This method ensures that only one instance of the LaunchKubernetesMonitor
        is created. This is done to prevent multiple threads from being created
        that monitor the same kubernetes resources.
        """
        # NOTE(review): __new__ returns the cached instance, but Python still
        # runs __init__ on every construction, resetting its state — confirm
        # callers only construct this once (ensure_initialized guards this).
        if cls._instance is None:
            cls._instance = super().__new__(cls)
        return cls._instance

    def __init__(
        self,
        core_api: CoreV1Api,
        batch_api: BatchV1Api,
        custom_api: CustomObjectsApi,
        label_selector: str,
    ):
        """Initialize the LaunchKubernetesMonitor."""
        self._core_api: CoreV1Api = core_api
        self._batch_api: BatchV1Api = batch_api
        self._custom_api: CustomObjectsApi = custom_api

        # Only resources matching this label selector are watched.
        self._label_selector: str = label_selector

        # Dict mapping a tuple of (namespace, resource_type) to an
        # asyncio.Task that is monitoring that resource type in that namespace.
        self._monitor_tasks: Dict[
            Tuple[str, Union[str, CustomResource]], asyncio.Task
        ] = dict()

        # Map from job name to job state.
        self._job_states: Dict[str, Status] = dict()

    @classmethod
    async def ensure_initialized(
        cls,
    ) -> None:
        """Initialize the LaunchKubernetesMonitor."""
        if cls._instance is None:
            _, api_client = await get_kube_context_and_api_client(
                kubernetes_asyncio, {}
            )
            core_api = CoreV1Api(api_client)
            batch_api = BatchV1Api(api_client)
            custom_api = CustomObjectsApi(api_client)
            # Watch only resources flagged for monitoring; narrow further to
            # this agent's resources when running inside a launch agent.
            label_selector = f"{WANDB_K8S_LABEL_MONITOR}=true"
            if LaunchAgent.initialized():
                label_selector += f",{WANDB_K8S_LABEL_AGENT}={LaunchAgent.name()}"
            cls(
                core_api=core_api,
                batch_api=batch_api,
                custom_api=custom_api,
                label_selector=label_selector,
            )

    @classmethod
    def monitor_namespace(
        cls, namespace: str, custom_resource: Optional[CustomResource] = None
    ) -> None:
        """Start monitoring a namespaces for resources."""
        if cls._instance is None:
            raise LaunchError(
                "LaunchKubernetesMonitor not initialized, cannot monitor namespace."
            )
        cls._instance.__monitor_namespace(namespace, custom_resource=custom_resource)

    @classmethod
    def get_status(cls, job_name: str) -> Status:
        """Get the status of a job."""
        if cls._instance is None:
            raise LaunchError(
                "LaunchKubernetesMonitor not initialized, cannot get status."
            )
        return cls._instance.__get_status(job_name)

    @classmethod
    def status_count(cls) -> Dict[State, int]:
        """Get a dictionary mapping statuses to the # monitored jobs with each status."""
        # NOTE(review): raises ValueError here but LaunchError in the sibling
        # accessors above — looks like an unintentional inconsistency; confirm.
        if cls._instance is None:
            raise ValueError(
                "LaunchKubernetesMonitor not initialized, cannot get status counts."
            )
        return cls._instance.__status_count()

    def __monitor_namespace(
        self, namespace: str, custom_resource: Optional[CustomResource] = None
    ) -> None:
        """Start monitoring a namespaces for resources."""
        # Pods are always watched (for running/preempted detection).
        if (namespace, Resources.PODS) not in self._monitor_tasks:
            self._monitor_tasks[(namespace, Resources.PODS)] = create_named_task(
                f"monitor_pods_{namespace}",
                self._monitor_pods,
                namespace,
            )
        # If a custom resource is specified then we will start monitoring
        # that resource type in the namespace instead of jobs.
        if custom_resource is not None:
            if (namespace, custom_resource) not in self._monitor_tasks:
                self._monitor_tasks[(namespace, custom_resource)] = create_named_task(
                    f"monitor_{custom_resource}_{namespace}",
                    self._monitor_crd,
                    namespace,
                    custom_resource=custom_resource,
                )
        else:
            if (namespace, Resources.JOBS) not in self._monitor_tasks:
                self._monitor_tasks[(namespace, Resources.JOBS)] = create_named_task(
                    f"monitor_jobs_{namespace}",
                    self._monitor_jobs,
                    namespace,
                )

    def __get_status(self, job_name: str) -> Status:
        """Get the status of a job."""
        # Jobs we have never seen an event for report as "unknown".
        if job_name not in self._job_states:
            return Status("unknown")
        state = self._job_states[job_name]
        return state

    def __status_count(self) -> Dict[State, int]:
        """Get a dictionary mapping statuses to the # monitored jobs with each status."""
        counts = dict()
        for _, status in self._job_states.items():
            state = status.state
            if state not in counts:
                counts[state] = 1
            else:
                counts[state] += 1
        return counts

    def _set_status_state(self, job_name: str, state: State) -> None:
        """Set the status of the run."""
        if job_name not in self._job_states:
            self._job_states[job_name] = Status(state)
        elif self._job_states[job_name].state != state:
            # Mutate in place so callers holding the Status object see updates.
            self._job_states[job_name].state = state

    def _add_status_message(self, job_name: str, message: str) -> None:
        # Record a warning (e.g. an unschedulable reason) on the job's status,
        # creating an "unknown" status entry if none exists yet.
        if job_name not in self._job_states:
            self._job_states[job_name] = Status("unknown")
        wandb.termwarn(f"Warning from Kubernetes for job {job_name}: {message}")
        self._job_states[job_name].messages.append(message)

    async def _monitor_pods(self, namespace: str) -> None:
        """Monitor a namespace for changes."""
        watcher = SafeWatch(watch.Watch())
        async for event in watcher.stream(
            self._core_api.list_namespaced_pod,
            namespace=namespace,
            label_selector=self._label_selector,
        ):
            obj = event.get("object")
            # Pods created by a Job carry a "job-name" label; CRD-owned pods
            # are resolved through their owner reference instead.
            job_name = obj.metadata.labels.get("job-name") or _get_crd_job_name(obj)
            if job_name is None or not hasattr(obj, "status"):
                continue
            # Don't regress a job that already reached a terminal state.
            if self.__get_status(job_name) in ["finished", "failed"]:
                continue

            is_unschedulable, reason = _is_pod_unschedulable(obj.status)
            if is_unschedulable:
                self._add_status_message(job_name, reason)
            if obj.status.phase == "Running" or _is_container_creating(obj.status):
                self._set_status_state(job_name, "running")
            elif _is_preempted(obj.status):
                self._set_status_state(job_name, "preempted")

    async def _monitor_jobs(self, namespace: str) -> None:
        """Monitor a namespace for changes."""
        watcher = SafeWatch(watch.Watch())
        async for event in watcher.stream(
            self._batch_api.list_namespaced_job,
            namespace=namespace,
            label_selector=self._label_selector,
        ):
            obj = event.get("object")
            job_name = obj.metadata.name

            if obj.status.succeeded == 1:
                self._set_status_state(job_name, "finished")
            elif obj.status.failed is not None and obj.status.failed >= 1:
                self._set_status_state(job_name, "failed")

            # If the job is deleted and we haven't seen a terminal state
            # then we will consider the job failed.
            if event.get("type") == "DELETED":
                if self._job_states.get(job_name) != Status("finished"):
                    self._set_status_state(job_name, "failed")

    async def _monitor_crd(
        self, namespace: str, custom_resource: CustomResource
    ) -> None:
        """Monitor a namespace for changes."""
        watcher = SafeWatch(watch.Watch())
        async for event in watcher.stream(
            self._custom_api.list_namespaced_custom_object,
            namespace=namespace,
            plural=custom_resource.plural,
            group=custom_resource.group,
            version=custom_resource.version,
            label_selector=self._label_selector,
        ):
            object = event.get("object")
            name = object.get("metadata", dict()).get("name")
            status = object.get("status")
            state = None
            if status is None:
                continue
            # Jobset-style CRDs report per-replicated-job pod counts.
            replicated_jobs_status = status.get("ReplicatedJobsStatus")
            if isinstance(replicated_jobs_status, dict):
                state = _state_from_replicated_status(replicated_jobs_status)
            # Prefer an explicit state.phase when present; otherwise fall back
            # to scanning the status conditions.
            state_dict = status.get("state")
            if isinstance(state_dict, dict):
                phase = state_dict.get("phase")
                if phase:
                    state = CRD_STATE_DICT.get(phase.lower())
            else:
                conditions = status.get("conditions")
                if isinstance(conditions, list):
                    state = _state_from_conditions(conditions)
                else:
                    # This should never happen.
                    _logger.warning(
                        f"Unexpected conditions type {type(conditions)} "
                        f"for CRD watcher in {namespace}"
                    )
            if state is None:
                continue
            self._set_status_state(name, state)
|
| 421 |
+
|
| 422 |
+
|
| 423 |
+
class SafeWatch:
    """Wrapper for the kubernetes watch class that can recover in more situations."""

    def __init__(self, watcher: "watch.Watch") -> None:
        """Initialize the SafeWatch.

        Arguments:
            watcher: The kubernetes watch object streaming is delegated to.
        """
        self._watcher = watcher
        self._last_seen_resource_version: Optional[str] = None
        self._stopped = False

    async def stream(self, func: Any, *args: Any, **kwargs: Any) -> Any:
        """Stream the watcher.

        This method will automatically resume the stream if it breaks. It will
        also save the resource version so that the stream can be resumed from
        the last seen resource version.
        """
        while True:
            try:
                async for event in self._watcher.stream(
                    func, *args, **kwargs, timeout_seconds=30
                ):
                    if self._stopped:
                        break
                    # Save the resource version so that we can resume the stream
                    # if it breaks.
                    object = event.get("object")
                    if isinstance(object, dict):
                        self._last_seen_resource_version = object.get(
                            "metadata", dict()
                        ).get("resourceVersion")
                    else:
                        self._last_seen_resource_version = (
                            object.metadata.resource_version
                        )
                    kwargs["resource_version"] = self._last_seen_resource_version
                    yield event
                # If stream ends after stop just break
                if self._stopped:
                    break
            except urllib3.exceptions.ProtocolError as e:
                wandb.termwarn(f"Broken event stream: {e}, attempting to recover")
            except ApiException as e:
                if e.status == 410:
                    # If resource version is too old we need to start over.
                    # Fix: a 410 can arrive before any event was seen, in which
                    # case "resource_version" was never added to kwargs and the
                    # original `del kwargs[...]` raised KeyError, killing the
                    # watcher instead of recovering.
                    kwargs.pop("resource_version", None)
                    self._last_seen_resource_version = None
            except Exception as e:
                # Catch-all so the watch loop survives unexpected failures;
                # the error is reported with its full stack trace.
                exc_type = type(e).__name__
                stack_trace = traceback.format_exc()
                wandb.termerror(
                    f"Unknown exception in event stream of type {exc_type}: {e}, attempting to recover. Stack trace: {stack_trace}"
                )
|
parrot/lib/python3.10/site-packages/wandb/sdk/launch/runner/kubernetes_runner.py
ADDED
|
@@ -0,0 +1,963 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Implementation of KubernetesRunner class for wandb launch."""
|
| 2 |
+
|
| 3 |
+
import asyncio
|
| 4 |
+
import base64
|
| 5 |
+
import datetime
|
| 6 |
+
import json
|
| 7 |
+
import logging
|
| 8 |
+
import os
|
| 9 |
+
from typing import Any, Dict, Iterator, List, Optional, Tuple, Union
|
| 10 |
+
|
| 11 |
+
import yaml
|
| 12 |
+
|
| 13 |
+
import wandb
|
| 14 |
+
from wandb.apis.internal import Api
|
| 15 |
+
from wandb.sdk.launch.agent.agent import LaunchAgent
|
| 16 |
+
from wandb.sdk.launch.environment.abstract import AbstractEnvironment
|
| 17 |
+
from wandb.sdk.launch.registry.abstract import AbstractRegistry
|
| 18 |
+
from wandb.sdk.launch.registry.azure_container_registry import AzureContainerRegistry
|
| 19 |
+
from wandb.sdk.launch.registry.local_registry import LocalRegistry
|
| 20 |
+
from wandb.sdk.launch.runner.abstract import Status
|
| 21 |
+
from wandb.sdk.launch.runner.kubernetes_monitor import (
|
| 22 |
+
WANDB_K8S_LABEL_AGENT,
|
| 23 |
+
WANDB_K8S_LABEL_MONITOR,
|
| 24 |
+
WANDB_K8S_RUN_ID,
|
| 25 |
+
CustomResource,
|
| 26 |
+
LaunchKubernetesMonitor,
|
| 27 |
+
)
|
| 28 |
+
from wandb.sdk.lib.retry import ExponentialBackoff, retry_async
|
| 29 |
+
from wandb.util import get_module
|
| 30 |
+
|
| 31 |
+
from .._project_spec import EntryPoint, LaunchProject
|
| 32 |
+
from ..errors import LaunchError
|
| 33 |
+
from ..utils import (
|
| 34 |
+
CODE_MOUNT_DIR,
|
| 35 |
+
LOG_PREFIX,
|
| 36 |
+
MAX_ENV_LENGTHS,
|
| 37 |
+
PROJECT_SYNCHRONOUS,
|
| 38 |
+
get_kube_context_and_api_client,
|
| 39 |
+
make_name_dns_safe,
|
| 40 |
+
)
|
| 41 |
+
from .abstract import AbstractRun, AbstractRunner
|
| 42 |
+
|
| 43 |
+
# Fail fast with an actionable message if the optional kubernetes dependency
# is missing; the unconditional `import kubernetes_asyncio` below would
# otherwise raise a bare ImportError.
get_module(
    "kubernetes_asyncio",
    required="Kubernetes runner requires the kubernetes package. Please install it with `pip install wandb[launch]`.",
)
|
| 47 |
+
|
| 48 |
+
import kubernetes_asyncio # type: ignore # noqa: E402
|
| 49 |
+
from kubernetes_asyncio import client # noqa: E402
|
| 50 |
+
from kubernetes_asyncio.client.api.batch_v1_api import ( # type: ignore # noqa: E402
|
| 51 |
+
BatchV1Api,
|
| 52 |
+
)
|
| 53 |
+
from kubernetes_asyncio.client.api.core_v1_api import ( # type: ignore # noqa: E402
|
| 54 |
+
CoreV1Api,
|
| 55 |
+
)
|
| 56 |
+
from kubernetes_asyncio.client.api.custom_objects_api import ( # type: ignore # noqa: E402
|
| 57 |
+
CustomObjectsApi,
|
| 58 |
+
)
|
| 59 |
+
from kubernetes_asyncio.client.models.v1_secret import ( # type: ignore # noqa: E402
|
| 60 |
+
V1Secret,
|
| 61 |
+
)
|
| 62 |
+
from kubernetes_asyncio.client.rest import ApiException # type: ignore # noqa: E402
|
| 63 |
+
|
| 64 |
+
# Generic timeout (seconds) for kubernetes API interactions.
TIMEOUT = 5
# Maximum attempts when creating/updating the wandb API key secret.
API_KEY_SECRET_MAX_RETRIES = 5

_logger = logging.getLogger(__name__)


# PVC used to share source code with base-image jobs. Both variables are set
# by the launch agent helm chart; base-image jobs are rejected when unset.
SOURCE_CODE_PVC_MOUNT_PATH = os.environ.get("WANDB_LAUNCH_CODE_PVC_MOUNT_PATH")
SOURCE_CODE_PVC_NAME = os.environ.get("WANDB_LAUNCH_CODE_PVC_NAME")
|
| 72 |
+
|
| 73 |
+
|
| 74 |
+
class KubernetesSubmittedRun(AbstractRun):
    """Wrapper for a launched run on Kubernetes."""

    def __init__(
        self,
        batch_api: "BatchV1Api",
        core_api: "CoreV1Api",
        name: str,
        namespace: Optional[str] = "default",
        secret: Optional["V1Secret"] = None,
    ) -> None:
        """Initialize a KubernetesSubmittedRun.

        Other implementations of the AbstractRun interface poll on the run
        when `get_status` is called, but KubernetesSubmittedRun uses
        Kubernetes watch streams to update the run status. One thread handles
        events from the job object and another thread handles events from the
        rank 0 pod. These threads updated the `_status` attributed of the
        KubernetesSubmittedRun object. When `get_status` is called, the
        `_status` attribute is returned.

        Arguments:
            batch_api: Kubernetes BatchV1Api object.
            core_api: Kubernetes CoreV1Api object.
            name: Name of the job.
            namespace: Kubernetes namespace.
            secret: Kubernetes secret.

        Returns:
            None.
        """
        self.batch_api = batch_api
        self.core_api = core_api
        self.name = name
        self.namespace = namespace
        self._fail_count = 0
        self.secret = secret

    @property
    def id(self) -> str:
        """Return the run id."""
        return self.name

    async def get_logs(self) -> Optional[str]:
        """Return logs of the job's first pod, or None if unavailable.

        Errors are reported to the terminal rather than raised so that log
        retrieval failures never abort the surrounding launch flow.
        """
        try:
            pods = await self.core_api.list_namespaced_pod(
                label_selector=f"job-name={self.name}", namespace=self.namespace
            )
            pod_names = [pi.metadata.name for pi in pods.items]
            if not pod_names:
                wandb.termwarn(f"Found no pods for kubernetes job: {self.name}")
                return None
            # NOTE(review): only the first pod's logs are read — presumably
            # the rank-0 pod; multi-pod jobs lose the other pods' logs.
            logs = await self.core_api.read_namespaced_pod_log(
                name=pod_names[0], namespace=self.namespace
            )
            if logs:
                return str(logs)
            else:
                wandb.termwarn(f"No logs for kubernetes pod(s): {pod_names}")
                return None
        except Exception as e:
            wandb.termerror(f"{LOG_PREFIX}Failed to get pod logs: {e}")
            return None

    async def wait(self) -> bool:
        """Wait for the run to finish.

        Polls `get_status` every 5 seconds until a terminal state is seen,
        then deletes the API key secret.

        Returns:
            True if the run finished successfully, False otherwise.
        """
        while True:
            status = await self.get_status()
            wandb.termlog(f"{LOG_PREFIX}Job {self.name} status: {status.state}")
            if status.state in ["finished", "failed", "preempted"]:
                break
            await asyncio.sleep(5)

        await self._delete_secret()
        return (
            status.state == "finished"
        )  # todo: not sure if this (copied from aws runner) is the right approach? should we return false on failure

    async def get_status(self) -> Status:
        """Return the monitor-reported status, cleaning up on terminal states."""
        status = LaunchKubernetesMonitor.get_status(self.name)
        # NOTE(review): comparing a Status object against strings — assumes
        # Status defines string equality; confirm against Status.__eq__.
        if status in ["stopped", "failed", "finished", "preempted"]:
            await self._delete_secret()
        return status

    async def cancel(self) -> None:
        """Cancel the run."""
        try:
            await self.batch_api.delete_namespaced_job(
                namespace=self.namespace,
                name=self.name,
            )
            await self._delete_secret()
        except ApiException as e:
            raise LaunchError(
                f"Failed to delete Kubernetes Job {self.name} in namespace {self.namespace}: {str(e)}"
            ) from e

    async def _delete_secret(self) -> None:
        # Cleanup secret if not running in a helm-managed context
        # (helm owns the secret lifecycle when WANDB_RELEASE_NAME is set).
        if not os.environ.get("WANDB_RELEASE_NAME") and self.secret:
            await self.core_api.delete_namespaced_secret(
                name=self.secret.metadata.name,
                namespace=self.secret.metadata.namespace,
            )
            # Drop the reference so repeated calls are no-ops.
            self.secret = None
|
| 183 |
+
|
| 184 |
+
|
| 185 |
+
class CrdSubmittedRun(AbstractRun):
    """Run submitted to a CRD backend, e.g. Volcano."""

    def __init__(
        self,
        group: str,
        version: str,
        plural: str,
        name: str,
        namespace: str,
        core_api: CoreV1Api,
        custom_api: CustomObjectsApi,
    ) -> None:
        """Create a run object for tracking the progress of a CRD.

        Arguments:
            group: The API group of the CRD.
            version: The API version of the CRD.
            plural: The plural name of the CRD.
            name: The name of the CRD instance.
            namespace: The namespace of the CRD instance.
            core_api: The Kubernetes core API client.
            custom_api: The Kubernetes custom object API client.

        Raises:
            LaunchError: If the CRD instance does not exist.
        """
        self.group = group
        self.version = version
        self.plural = plural
        self.name = name
        self.namespace = namespace
        self.core_api = core_api
        self.custom_api = custom_api
        self._fail_count = 0

    @property
    def id(self) -> str:
        """Get the name of the custom object."""
        return self.name

    async def get_logs(self) -> Optional[str]:
        """Get logs for custom object."""
        # TODO: test more carefully once we release multi-node support
        logs: Dict[str, Optional[str]] = {}
        try:
            # Pods of a CRD-backed run are found via the wandb run-id label
            # applied at submission time, not via job-name.
            pods = await self.core_api.list_namespaced_pod(
                label_selector=f"wandb/run-id={self.name}", namespace=self.namespace
            )
            pod_names = [pi.metadata.name for pi in pods.items]
            for pod_name in pod_names:
                logs[pod_name] = await self.core_api.read_namespaced_pod_log(
                    name=pod_name, namespace=self.namespace
                )
        except ApiException as e:
            wandb.termwarn(f"Failed to get logs for {self.name}: {str(e)}")
            return None
        if not logs:
            return None
        # Concatenate per-pod logs into a single labeled transcript.
        logs_as_array = [f"Pod {pod_name}:\n{log}" for pod_name, log in logs.items()]
        return "\n".join(logs_as_array)

    async def get_status(self) -> Status:
        """Get status of custom object."""
        return LaunchKubernetesMonitor.get_status(self.name)

    async def cancel(self) -> None:
        """Cancel the custom object."""
        try:
            await self.custom_api.delete_namespaced_custom_object(
                group=self.group,
                version=self.version,
                namespace=self.namespace,
                plural=self.plural,
                name=self.name,
            )
        except ApiException as e:
            raise LaunchError(
                f"Failed to delete CRD {self.name} in namespace {self.namespace}: {str(e)}"
            ) from e

    async def wait(self) -> bool:
        """Wait for this custom object to finish running.

        Polls `get_status` every 5 seconds; returns True only on a
        "finished" terminal state.
        """
        while True:
            status = await self.get_status()
            wandb.termlog(f"{LOG_PREFIX}Job {self.name} status: {status}")
            if status.state in ["finished", "failed", "preempted"]:
                return status.state == "finished"
            await asyncio.sleep(5)
|
| 274 |
+
|
| 275 |
+
|
| 276 |
+
class KubernetesRunner(AbstractRunner):
    """Launches runs onto kubernetes."""

    def __init__(
        self,
        api: Api,
        backend_config: Dict[str, Any],
        environment: AbstractEnvironment,
        registry: AbstractRegistry,
    ) -> None:
        """Create a Kubernetes runner.

        Arguments:
            api: The API client object.
            backend_config: The backend configuration.
            environment: The environment to launch runs into.
            registry: The container registry used to resolve image pull
                credentials.

        Raises:
            LaunchError: If the Kubernetes configuration is invalid.
        """
        super().__init__(api, backend_config)
        self.environment = environment
        self.registry = registry

    def get_namespace(
        self, resource_args: Dict[str, Any], context: Dict[str, Any]
    ) -> str:
        """Get the namespace to launch into.

        Precedence: resource args metadata.namespace > top-level
        resource-args namespace (legacy) > agent runner config > the
        kubeconfig context's namespace > "default".

        Arguments:
            resource_args: The resource args to launch.
            context: The k8s config context.

        Returns:
            The namespace to launch into.
        """
        default_namespace = (
            context["context"].get("namespace", "default") if context else "default"
        )
        return (  # type: ignore[no-any-return]
            resource_args.get("metadata", {}).get("namespace")
            or resource_args.get(
                "namespace"
            )  # continue support for malformed namespace
            or self.backend_config.get("runner", {}).get("namespace")
            or default_namespace
        )

    async def _inject_defaults(
        self,
        resource_args: Dict[str, Any],
        launch_project: LaunchProject,
        image_uri: str,
        namespace: str,
        core_api: "CoreV1Api",
    ) -> Tuple[Dict[str, Any], Optional["V1Secret"]]:
        """Apply our default values, return job dict and api key secret.

        Arguments:
            resource_args (Dict[str, Any]): The resource args to launch.
            launch_project (LaunchProject): The launch project.
            image_uri (str): The image to run in the job's container.
            namespace (str): The namespace.
            core_api (CoreV1Api): The core api.

        Returns:
            Tuple[Dict[str, Any], Optional["V1Secret"]]: The resource args and api key secret.
        """
        job: Dict[str, Any] = {
            "apiVersion": "batch/v1",
            "kind": "Job",
        }
        # User-supplied resource args take precedence over the defaults.
        job.update(resource_args)

        job_metadata: Dict[str, Any] = job.get("metadata", {})
        # Defaults: no retries, clean up finished jobs after 60s.
        job_spec: Dict[str, Any] = {"backoffLimit": 0, "ttlSecondsAfterFinished": 60}
        job_spec.update(job.get("spec", {}))
        pod_template: Dict[str, Any] = job_spec.get("template", {})
        pod_spec: Dict[str, Any] = {"restartPolicy": "Never"}
        pod_spec.update(pod_template.get("spec", {}))
        containers: List[Dict[str, Any]] = pod_spec.get("containers", [{}])

        # Add labels to job metadata
        job_metadata.setdefault("labels", {})
        job_metadata["labels"][WANDB_K8S_RUN_ID] = launch_project.run_id
        job_metadata["labels"][WANDB_K8S_LABEL_MONITOR] = "true"
        if LaunchAgent.initialized():
            job_metadata["labels"][WANDB_K8S_LABEL_AGENT] = LaunchAgent.name()
        # name precedence: name in spec > generated name
        if not job_metadata.get("name"):
            job_metadata["generateName"] = make_name_dns_safe(
                f"launch-{launch_project.target_entity}-{launch_project.target_project}-"
            )

        for i, cont in enumerate(containers):
            if "name" not in cont:
                cont["name"] = cont.get("name", "launch" + str(i))
            if "securityContext" not in cont:
                # Restrictive default security context; only applied when the
                # user did not specify one.
                cont["securityContext"] = {
                    "allowPrivilegeEscalation": False,
                    "capabilities": {"drop": ["ALL"]},
                    "seccompProfile": {"type": "RuntimeDefault"},
                }

        entry_point = (
            launch_project.override_entrypoint or launch_project.get_job_entry_point()
        )
        if launch_project.docker_image:
            # dont specify run id if user provided image, could have multiple runs
            containers[0]["image"] = image_uri
            # TODO: handle secret pulling image from registry
        elif not any(["image" in cont for cont in containers]):
            assert entry_point is not None
            # in the non instance case we need to make an imagePullSecret
            # so the new job can pull the image
            containers[0]["image"] = image_uri
            secret = await maybe_create_imagepull_secret(
                core_api, self.registry, launch_project.run_id, namespace
            )
            if secret is not None:
                pod_spec["imagePullSecrets"] = [
                    {"name": f"regcred-{launch_project.run_id}"}
                ]

        inject_entrypoint_and_args(
            containers,
            entry_point,
            launch_project.override_args,
            launch_project.override_entrypoint is not None,
        )

        env_vars = launch_project.get_env_vars_dict(
            self._api, MAX_ENV_LENGTHS[self.__class__.__name__]
        )
        api_key_secret = None
        for cont in containers:
            # Add our env vars to user supplied env vars
            env = cont.get("env") or []
            for key, value in env_vars.items():
                if (
                    key == "WANDB_API_KEY"
                    and value
                    and (
                        LaunchAgent.initialized()
                        or self.backend_config[PROJECT_SYNCHRONOUS]
                    )
                ):
                    # Override API key with secret. TODO: Do the same for other runners
                    release_name = os.environ.get("WANDB_RELEASE_NAME")
                    secret_name = "wandb-api-key"
                    if release_name:
                        secret_name += f"-{release_name}"
                    else:
                        secret_name += f"-{launch_project.run_id}"

                    def handle_exception(e):
                        # Retry callback: warn and let retry_async back off.
                        wandb.termwarn(
                            f"Exception when ensuring Kubernetes API key secret: {e}. Retrying..."
                        )

                    api_key_secret = await retry_async(
                        backoff=ExponentialBackoff(
                            initial_sleep=datetime.timedelta(seconds=1),
                            max_sleep=datetime.timedelta(minutes=1),
                            max_retries=API_KEY_SECRET_MAX_RETRIES,
                        ),
                        fn=ensure_api_key_secret,
                        on_exc=handle_exception,
                        core_api=core_api,
                        secret_name=secret_name,
                        namespace=namespace,
                        api_key=value,
                    )
                    # Reference the secret instead of embedding the raw key.
                    env.append(
                        {
                            "name": key,
                            "valueFrom": {
                                "secretKeyRef": {
                                    "name": secret_name,
                                    "key": "password",
                                }
                            },
                        }
                    )
                else:
                    env.append({"name": key, "value": value})
            cont["env"] = env

        # Reassemble the job dict from the (possibly defaulted) pieces.
        pod_spec["containers"] = containers
        pod_template["spec"] = pod_spec
        job_spec["template"] = pod_template
        job["spec"] = job_spec
        job["metadata"] = job_metadata

        add_label_to_pods(
            job,
            WANDB_K8S_LABEL_MONITOR,
            "true",
        )

        if launch_project.job_base_image:
            apply_code_mount_configuration(
                job,
                launch_project,
            )

        # Add wandb.ai/agent: current agent label on all pods
        if LaunchAgent.initialized():
            add_label_to_pods(
                job,
                WANDB_K8S_LABEL_AGENT,
                LaunchAgent.name(),
            )

        return job, api_key_secret

    async def run(
        self, launch_project: LaunchProject, image_uri: str
    ) -> Optional[AbstractRun]:  # noqa: C901
        """Execute a launch project on Kubernetes.

        Arguments:
            launch_project: The launch project to execute.
            image_uri: The image to run.

        Returns:
            The run object if the run was successful, otherwise None.
        """
        await LaunchKubernetesMonitor.ensure_initialized()
        resource_args = launch_project.fill_macros(image_uri).get("kubernetes", {})
        if not resource_args:
            wandb.termlog(
                f"{LOG_PREFIX}Note: no resource args specified. Add a "
                "Kubernetes yaml spec or other options in a json file "
                "with --resource-args <json>."
            )
        _logger.info(f"Running Kubernetes job with resource args: {resource_args}")

        context, api_client = await get_kube_context_and_api_client(
            kubernetes_asyncio, resource_args
        )

        # If using pvc for code mount, move code there.
        if launch_project.job_base_image is not None:
            if SOURCE_CODE_PVC_NAME is None or SOURCE_CODE_PVC_MOUNT_PATH is None:
                # NOTE(review): the env vars actually read at module level are
                # WANDB_LAUNCH_CODE_PVC_* — this message's prefix looks stale;
                # confirm before relying on it.
                raise LaunchError(
                    "WANDB_LAUNCH_SOURCE_CODE_PVC_ environment variables not set. "
                    "Unable to mount source code PVC into base image. "
                    "Use the `codeMountPvcName` variable in the agent helm chart "
                    "to enable base image jobs for this agent. See "
                    "https://github.com/wandb/helm-charts/tree/main/charts/launch-agent "
                    "for more information."
                )
            code_subdir = launch_project.get_image_source_string()
            launch_project.change_project_dir(
                os.path.join(SOURCE_CODE_PVC_MOUNT_PATH, code_subdir)
            )

        # If the user specified an alternate api, we need will execute this
        # run by creating a custom object.
        api_version = resource_args.get("apiVersion", "batch/v1")

        if api_version not in ["batch/v1", "batch/v1beta1"]:
            env_vars = launch_project.get_env_vars_dict(
                self._api, MAX_ENV_LENGTHS[self.__class__.__name__]
            )
            # Crawl the resource args and add our env vars to the containers.
            add_wandb_env(resource_args, env_vars)

            # Add our labels to the resource args. This is necessary for the
            # agent to find the custom object later on.
            resource_args["metadata"] = resource_args.get("metadata", {})
            resource_args["metadata"]["labels"] = resource_args["metadata"].get(
                "labels", {}
            )
            resource_args["metadata"]["labels"][WANDB_K8S_LABEL_MONITOR] = "true"

            # Crawl the resource arsg and add our labels to the pods. This is
            # necessary for the agent to find the pods later on.
            add_label_to_pods(
                resource_args,
                WANDB_K8S_LABEL_MONITOR,
                "true",
            )

            # Add wandb.ai/agent: current agent label on all pods
            if LaunchAgent.initialized():
                add_label_to_pods(
                    resource_args,
                    WANDB_K8S_LABEL_AGENT,
                    LaunchAgent.name(),
                )
                resource_args["metadata"]["labels"][WANDB_K8S_LABEL_AGENT] = (
                    LaunchAgent.name()
                )

            if launch_project.job_base_image:
                apply_code_mount_configuration(resource_args, launch_project)

            overrides = {}
            if launch_project.override_args:
                overrides["args"] = launch_project.override_args
            if launch_project.override_entrypoint:
                overrides["command"] = launch_project.override_entrypoint.command
            add_entrypoint_args_overrides(
                resource_args,
                overrides,
            )
            api = client.CustomObjectsApi(api_client)
            # Infer the attributes of a custom object from the apiVersion and/or
            # a kind: attribute in the resource args.
            namespace = self.get_namespace(resource_args, context)
            group, version, *_ = api_version.split("/")
            group = resource_args.get("group", group)
            version = resource_args.get("version", version)
            # Falls back to the version string when no kind is given; the
            # plural is a naive lowercased-plus-"s" guess.
            kind = resource_args.get("kind", version)
            plural = f"{kind.lower()}s"
            custom_resource = CustomResource(
                group=group,
                version=version,
                plural=plural,
            )
            LaunchKubernetesMonitor.monitor_namespace(
                namespace, custom_resource=custom_resource
            )

            try:
                response = await api.create_namespaced_custom_object(
                    group=group,
                    version=version,
                    namespace=namespace,
                    plural=plural,
                    body=resource_args,
                )
            except ApiException as e:
                body = json.loads(e.body)
                body_yaml = yaml.dump(body)
                raise LaunchError(
                    f"Error creating CRD of kind {kind}: {e.status} {e.reason}\n{body_yaml}"
                ) from e
            name = response.get("metadata", {}).get("name")
            _logger.info(f"Created {kind} {response['metadata']['name']}")
            submitted_run = CrdSubmittedRun(
                name=name,
                group=group,
                version=version,
                namespace=namespace,
                plural=plural,
                core_api=client.CoreV1Api(api_client),
                custom_api=api,
            )
            if self.backend_config[PROJECT_SYNCHRONOUS]:
                await submitted_run.wait()
            return submitted_run

        # batch/v1 (or v1beta1) path: plain Kubernetes Job.
        batch_api = kubernetes_asyncio.client.BatchV1Api(api_client)
        core_api = kubernetes_asyncio.client.CoreV1Api(api_client)
        namespace = self.get_namespace(resource_args, context)
        job, secret = await self._inject_defaults(
            resource_args, launch_project, image_uri, namespace, core_api
        )
        msg = "Creating Kubernetes job"
        if "name" in resource_args:
            msg += f": {resource_args['name']}"
        _logger.info(msg)
        try:
            response = await kubernetes_asyncio.utils.create_from_dict(
                api_client, job, namespace=namespace
            )
        except kubernetes_asyncio.utils.FailToCreateError as e:
            # NOTE(review): raises on the first API exception, so any further
            # exceptions in e.api_exceptions are never reported.
            for exc in e.api_exceptions:
                resp = json.loads(exc.body)
                msg = resp.get("message")
                code = resp.get("code")
                raise LaunchError(
                    f"Failed to create Kubernetes job for run {launch_project.run_id} ({code} {exc.reason}): {msg}"
                )
        except Exception as e:
            raise LaunchError(
                f"Unexpected exception when creating Kubernetes job: {str(e)}\n"
            )
        # create_from_dict returns a list; the job is the only object created.
        job_response = response[0]
        job_name = job_response.metadata.name
        LaunchKubernetesMonitor.monitor_namespace(namespace)
        submitted_job = KubernetesSubmittedRun(
            batch_api, core_api, job_name, namespace, secret
        )
        if self.backend_config[PROJECT_SYNCHRONOUS]:
            await submitted_job.wait()

        return submitted_job
|
| 667 |
+
|
| 668 |
+
|
| 669 |
+
def inject_entrypoint_and_args(
    containers: List[dict],
    entry_point: Optional[EntryPoint],
    override_args: List[str],
    should_override_entrypoint: bool,
) -> None:
    """Write entrypoint and args overrides into each container spec in place.

    Arguments:
        containers: Container spec dicts to modify.
        entry_point: Entrypoint whose command replaces the container command.
        override_args: Replacement args (applied only when non-empty).
        should_override_entrypoint: Replace the command even when the
            container already defines one.

    Returns:
        None
    """
    for container in containers:
        if override_args:
            container["args"] = override_args
        replace_command = should_override_entrypoint or not container.get("command")
        if entry_point and replace_command:
            container["command"] = entry_point.command
|
| 693 |
+
|
| 694 |
+
|
| 695 |
+
async def ensure_api_key_secret(
    core_api: "CoreV1Api",
    secret_name: str,
    namespace: str,
    api_key: str,
) -> "V1Secret":
    """Create a secret containing a user's wandb API key.

    Arguments:
        core_api: The Kubernetes CoreV1Api object.
        secret_name: The name to use for the secret.
        namespace: The namespace to create the secret in.
        api_key: The user's wandb API key

    Returns:
        The created secret

    Raises:
        LaunchError: If the secret cannot be created, or if a secret with the
            same name but different data exists and was not created by the
            launch agent.
    """
    # Kubernetes secret data values must be base64-encoded strings.
    secret_data = {"password": base64.b64encode(api_key.encode()).decode()}
    # This label marks the secret as launch-agent-owned so a stale one can be
    # safely deleted and recreated below.
    labels = {"wandb.ai/created-by": "launch-agent"}
    secret = client.V1Secret(
        data=secret_data,
        metadata=client.V1ObjectMeta(
            name=secret_name, namespace=namespace, labels=labels
        ),
        kind="Secret",
        type="kubernetes.io/basic-auth",
    )

    try:
        try:
            return await core_api.create_namespaced_secret(namespace, secret)
        except ApiException as e:
            # 409 = conflict = secret already exists
            if e.status == 409:
                existing_secret = await core_api.read_namespaced_secret(
                    name=secret_name, namespace=namespace
                )
                if existing_secret.data != secret_data:
                    # If it's a previous secret made by launch agent, clean it up
                    if (
                        existing_secret.metadata.labels.get("wandb.ai/created-by")
                        == "launch-agent"
                    ):
                        await core_api.delete_namespaced_secret(
                            name=secret_name, namespace=namespace
                        )
                        return await core_api.create_namespaced_secret(
                            namespace, secret
                        )
                    else:
                        raise LaunchError(
                            f"Kubernetes secret already exists in namespace {namespace} with incorrect data: {secret_name}"
                        )
                # Existing secret already holds the desired data; reuse it.
                return existing_secret
            # Non-409 API errors propagate to the outer handler.
            raise
    except Exception as e:
        # NOTE(review): this also re-wraps the LaunchError raised above into a
        # second, more generic LaunchError message.
        raise LaunchError(
            f"Exception when ensuring Kubernetes API key secret: {str(e)}\n"
        )
|
| 754 |
+
|
| 755 |
+
|
| 756 |
+
async def maybe_create_imagepull_secret(
    core_api: "CoreV1Api",
    registry: AbstractRegistry,
    run_id: str,
    namespace: str,
) -> Optional["V1Secret"]:
    """Create a secret for pulling images from a private registry.

    Arguments:
        core_api: The Kubernetes CoreV1Api object.
        registry: The registry to pull from.
        run_id: The run id.
        namespace: The namespace to create the secret in.

    Returns:
        A secret if one was created, otherwise None.

    Raises:
        LaunchError: If the secret can be neither created nor read.
    """
    # Local and Azure container registries are reachable without docker
    # credentials, so no imagePullSecret is needed for them.
    if isinstance(registry, (LocalRegistry, AzureContainerRegistry)):
        # Secret not required
        return None
    uname, token = await registry.get_username_password()
    # Standard docker config format consumed by kubernetes.io/dockerconfigjson.
    creds_info = {
        "auths": {
            registry.uri: {
                "auth": base64.b64encode(f"{uname}:{token}".encode()).decode(),
                # need an email but the use is deprecated
                "email": "deprecated@wandblaunch.com",
            }
        }
    }
    secret_data = {
        ".dockerconfigjson": base64.b64encode(json.dumps(creds_info).encode()).decode()
    }
    secret = client.V1Secret(
        data=secret_data,
        metadata=client.V1ObjectMeta(name=f"regcred-{run_id}", namespace=namespace),
        kind="Secret",
        type="kubernetes.io/dockerconfigjson",
    )
    try:
        try:
            return await core_api.create_namespaced_secret(namespace, secret)
        except ApiException as e:
            # 409 = conflict = secret already exists; reuse the existing one.
            if e.status == 409:
                return await core_api.read_namespaced_secret(
                    name=f"regcred-{run_id}", namespace=namespace
                )
            raise
    except Exception as e:
        raise LaunchError(f"Exception when creating Kubernetes secret: {str(e)}\n")
|
| 810 |
+
|
| 811 |
+
|
| 812 |
+
def yield_containers(root: Any) -> Iterator[dict]:
    """Recursively yield every container spec found in *root*.

    A container list is recognized by a "containers" key whose value is a
    list; all other dict and list values are searched recursively.
    """
    if isinstance(root, list):
        for element in root:
            yield from yield_containers(element)
    elif isinstance(root, dict):
        for key, value in root.items():
            if key == "containers":
                if isinstance(value, list):
                    yield from value
            elif isinstance(value, (dict, list)):
                yield from yield_containers(value)
|
| 828 |
+
|
| 829 |
+
|
| 830 |
+
def add_wandb_env(root: Union[dict, list], env_vars: Dict[str, str]) -> None:
    """Injects wandb environment variables into specs.

    Recursively walks the spec and appends the given environment variables to
    every container spec (containers are found via ``yield_containers``).

    WANDB_RUN_ID is treated specially: it is removed from *env_vars* after the
    first container is processed, so it is only injected into that container.
    Note that *env_vars* is therefore mutated by this call.

    Arguments:
        root: The spec to modify.
        env_vars: The environment variables to inject.

    Returns: None.
    """
    for container in yield_containers(root):
        entries = container.setdefault("env", [])
        for name, value in env_vars.items():
            entries.append({"name": name, "value": value})
        # Only the first container touched receives WANDB_RUN_ID; drop it
        # from the pool once it has been injected.
        env_vars.pop("WANDB_RUN_ID", None)
|
| 854 |
+
|
| 855 |
+
|
| 856 |
+
def yield_pods(manifest: Any) -> Iterator[dict]:
    """Recursively yield every pod spec found in *manifest*.

    A pod is recognized as a dict carrying a "spec" key whose value contains
    a "containers" key; lists and nested dict values are searched in turn.
    """
    if isinstance(manifest, dict):
        if "spec" in manifest and "containers" in manifest["spec"]:
            yield manifest
        for child in manifest.values():
            if isinstance(child, (dict, list)):
                yield from yield_pods(child)
    elif isinstance(manifest, list):
        for entry in manifest:
            yield from yield_pods(entry)
|
| 872 |
+
|
| 873 |
+
|
| 874 |
+
def add_label_to_pods(
    manifest: Union[dict, list], label_key: str, label_value: str
) -> None:
    """Attach one metadata label to every pod spec in *manifest*.

    Pods are located with ``yield_pods`` (a dict with a "spec" key whose
    value has a "containers" key); missing "metadata"/"labels" dicts are
    created as needed.

    Arguments:
        manifest: The manifest to modify.
        label_key: The label key to add.
        label_value: The label value to add.

    Returns: None.
    """
    for pod_spec in yield_pods(manifest):
        pod_labels = pod_spec.setdefault("metadata", {}).setdefault("labels", {})
        pod_labels[label_key] = label_value
|
| 894 |
+
|
| 895 |
+
|
| 896 |
+
def add_entrypoint_args_overrides(manifest: Union[dict, list], overrides: dict) -> None:
    """Apply command/args overrides to every container in *manifest*.

    Recursively walks the manifest; any dict carrying a "spec" key whose
    value has a "containers" key has each of its containers rewritten with
    the "command" and/or "args" entries present in *overrides*.

    Arguments:
        manifest: The manifest to modify.
        overrides: Dictionary with args and entrypoint keys.

    Returns: None.
    """
    if isinstance(manifest, dict):
        if "spec" in manifest and "containers" in manifest["spec"]:
            for container in manifest["spec"]["containers"]:
                for field in ("command", "args"):
                    if field in overrides:
                        container[field] = overrides[field]
        for child in manifest.values():
            add_entrypoint_args_overrides(child, overrides)
    elif isinstance(manifest, list):
        for entry in manifest:
            add_entrypoint_args_overrides(entry, overrides)
|
| 922 |
+
|
| 923 |
+
|
| 924 |
+
def apply_code_mount_configuration(
    manifest: Union[Dict, list], project: LaunchProject
) -> None:
    """Apply code mount configuration to all containers in a manifest.

    Recursively traverses the manifest and adds the code mount configuration to
    all containers. Containers are identified by the presence of a "spec" key
    with a "containers" key in the value.

    Arguments:
        manifest: The manifest to modify.
        project: The launch project.

    Returns: None.
    """
    # NOTE(review): asserts are stripped under `python -O`; callers must
    # guarantee the source-code PVC is configured before reaching this point.
    assert SOURCE_CODE_PVC_NAME is not None
    source_dir = project.get_image_source_string()
    for pod in yield_pods(manifest):
        for container in yield_containers(pod):
            # Mount the shared source-code volume into every container and
            # make the mounted code directory the working directory.
            if "volumeMounts" not in container:
                container["volumeMounts"] = []
            container["volumeMounts"].append(
                {
                    "name": "wandb-source-code-volume",
                    "mountPath": CODE_MOUNT_DIR,
                    "subPath": source_dir,
                }
            )
            container["workingDir"] = CODE_MOUNT_DIR
        # Each pod must also declare the backing PVC in its volume list.
        spec = pod["spec"]
        if "volumes" not in spec:
            spec["volumes"] = []
        spec["volumes"].append(
            {
                "name": "wandb-source-code-volume",
                "persistentVolumeClaim": {
                    "claimName": SOURCE_CODE_PVC_NAME,
                },
            }
        )
|
parrot/lib/python3.10/site-packages/wandb/sdk/launch/runner/local_container.py
ADDED
|
@@ -0,0 +1,301 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import asyncio
|
| 2 |
+
import logging
|
| 3 |
+
import os
|
| 4 |
+
import shlex
|
| 5 |
+
import subprocess
|
| 6 |
+
import sys
|
| 7 |
+
import threading
|
| 8 |
+
from typing import TYPE_CHECKING, Any, Dict, List, Optional
|
| 9 |
+
|
| 10 |
+
import wandb
|
| 11 |
+
from wandb.sdk.launch.environment.abstract import AbstractEnvironment
|
| 12 |
+
from wandb.sdk.launch.registry.abstract import AbstractRegistry
|
| 13 |
+
|
| 14 |
+
from .._project_spec import LaunchProject
|
| 15 |
+
from ..errors import LaunchError
|
| 16 |
+
from ..utils import (
|
| 17 |
+
CODE_MOUNT_DIR,
|
| 18 |
+
LOG_PREFIX,
|
| 19 |
+
MAX_ENV_LENGTHS,
|
| 20 |
+
PROJECT_SYNCHRONOUS,
|
| 21 |
+
_is_wandb_dev_uri,
|
| 22 |
+
_is_wandb_local_uri,
|
| 23 |
+
docker_image_exists,
|
| 24 |
+
event_loop_thread_exec,
|
| 25 |
+
pull_docker_image,
|
| 26 |
+
sanitize_wandb_api_key,
|
| 27 |
+
)
|
| 28 |
+
from .abstract import AbstractRun, AbstractRunner, Status
|
| 29 |
+
|
| 30 |
+
if TYPE_CHECKING:
|
| 31 |
+
from wandb.apis.internal import Api
|
| 32 |
+
|
| 33 |
+
_logger = logging.getLogger(__name__)
|
| 34 |
+
|
| 35 |
+
|
| 36 |
+
class LocalSubmittedRun(AbstractRun):
    """Instance of ``AbstractRun`` corresponding to a subprocess launched to run an entry point command locally.

    The subprocess itself is created by a separate worker thread (see
    ``_thread_process_runner``); until that thread has started the process,
    ``self._command_proc`` is None and status/wait fall back to thread state.
    """

    def __init__(self) -> None:
        super().__init__()
        # Popen handle for the launched command; set by the worker thread.
        self._command_proc: Optional[subprocess.Popen] = None
        # Accumulated combined stdout/stderr captured by the worker thread.
        self._stdout: Optional[str] = None
        # Set by cancel(); tells the worker thread to terminate (or never
        # start) the subprocess.
        self._terminate_flag: bool = False
        # Thread that spawns the subprocess and pumps its output.
        self._thread: Optional[threading.Thread] = None

    def set_command_proc(self, command_proc: subprocess.Popen) -> None:
        """Record the subprocess handle (called from the worker thread)."""
        self._command_proc = command_proc

    def set_thread(self, thread: threading.Thread) -> None:
        """Record the worker thread that owns the subprocess."""
        self._thread = thread

    @property
    def id(self) -> Optional[str]:
        """Return the subprocess PID as a string, or None if not started."""
        if self._command_proc is None:
            return None
        return str(self._command_proc.pid)

    async def wait(self) -> bool:
        """Block until the subprocess exits; True iff its exit code was 0.

        Returns False if the worker thread died before ever creating the
        subprocess.
        """
        assert self._thread is not None
        # if command proc is not set
        # wait for thread to set it
        if self._command_proc is None:
            while self._thread.is_alive():
                await asyncio.sleep(5)
                # command proc can be updated by another thread
                if self._command_proc is not None:
                    break  # type: ignore # mypy thinks this is unreachable
            else:
                # Loop exhausted: the thread exited without a subprocess.
                return False
        wait = event_loop_thread_exec(self._command_proc.wait)
        return int(await wait()) == 0

    async def get_logs(self) -> Optional[str]:
        """Return the output captured so far (None if never started)."""
        return self._stdout

    async def cancel(self) -> None:
        """Request termination of the run (non-blocking)."""
        # thread is set immediately after starting, should always exist
        assert self._thread is not None

        # cancel called before the thread subprocess has started
        # indicates to thread to not start command proc if not already started
        self._terminate_flag = True

    async def get_status(self) -> Status:
        """Map thread/subprocess state onto a launch ``Status``."""
        assert self._thread is not None, "Failed to get status, self._thread = None"
        if self._command_proc is None:
            # No subprocess yet: report based on the worker thread alone.
            if self._thread.is_alive():
                return Status("running")
            return Status("stopped")
        exit_code = self._command_proc.poll()
        if exit_code is None:
            return Status("running")
        if exit_code == 0:
            return Status("finished")
        return Status("failed")
|
| 96 |
+
|
| 97 |
+
|
| 98 |
+
class LocalContainerRunner(AbstractRunner):
    """Runner class, uses a project to create a LocallySubmittedRun."""

    def __init__(
        self,
        api: "Api",
        backend_config: Dict[str, Any],
        environment: AbstractEnvironment,
        registry: AbstractRegistry,
    ) -> None:
        """Store the environment and registry used to resolve images."""
        super().__init__(api, backend_config)
        self.environment = environment
        self.registry = registry

    def _populate_docker_args(
        self, launch_project: LaunchProject, image_uri: str
    ) -> Dict[str, Any]:
        """Build the ``docker run`` argument dict for this project.

        Starts from the user-supplied ``local-container`` resource args (with
        macros filled in), then adds host-networking flags when talking to a
        local wandb server and a code mount when the job runs on a base image.
        """
        docker_args: Dict[str, Any] = launch_project.fill_macros(image_uri).get(
            "local-container", {}
        )
        if _is_wandb_local_uri(self._api.settings("base_url")):
            if sys.platform == "win32":
                # Docker on Windows takes `--net` rather than `--network`.
                docker_args["net"] = "host"
            else:
                docker_args["network"] = "host"
            if sys.platform == "linux" or sys.platform == "linux2":
                # Make the host reachable as host.docker.internal on Linux.
                docker_args["add-host"] = "host.docker.internal:host-gateway"
        base_image = launch_project.job_base_image
        if base_image is not None:
            # Mount code into the container and set the working directory.
            if "volume" not in docker_args:
                docker_args["volume"] = []
            docker_args["volume"].append(
                f"{launch_project.project_dir}:{CODE_MOUNT_DIR}"
            )
            docker_args["workdir"] = CODE_MOUNT_DIR
        return docker_args

    async def run(
        self,
        launch_project: LaunchProject,
        image_uri: str,
    ) -> Optional[AbstractRun]:
        """Launch ``image_uri`` locally via ``docker run``.

        Pulls the image when the job specifies one, assembles the docker
        command, starts it in a background thread, and (when the backend is
        configured as synchronous) waits for it to finish.

        Returns:
            The submitted run tracking the container process.

        Raises:
            LaunchError: If the image can be neither pulled nor found locally.
        """
        docker_args = self._populate_docker_args(launch_project, image_uri)
        synchronous: bool = self.backend_config[PROJECT_SYNCHRONOUS]

        env_vars = launch_project.get_env_vars_dict(
            self._api, MAX_ENV_LENGTHS[self.__class__.__name__]
        )

        # When running against local port, need to swap to local docker host
        if (
            _is_wandb_local_uri(self._api.settings("base_url"))
            and sys.platform == "darwin"
        ):
            _, _, port = self._api.settings("base_url").split(":")
            env_vars["WANDB_BASE_URL"] = f"http://host.docker.internal:{port}"
        elif _is_wandb_dev_uri(self._api.settings("base_url")):
            env_vars["WANDB_BASE_URL"] = "http://host.docker.internal:9001"

        if launch_project.docker_image or launch_project.job_base_image:
            try:
                pull_docker_image(image_uri)
            except Exception as e:
                # Best-effort pull: a locally cached image is good enough.
                wandb.termwarn(f"Error attempting to pull docker image {image_uri}")
                if not docker_image_exists(image_uri):
                    raise LaunchError(
                        f"Failed to pull docker image {image_uri} with error: {e}"
                    )

        entrypoint = launch_project.get_job_entry_point()
        entry_cmd = None if entrypoint is None else entrypoint.command
        command_str = " ".join(
            get_docker_command(
                image_uri,
                env_vars,
                docker_args=docker_args,
                entry_cmd=entry_cmd,
                additional_args=launch_project.override_args,
            )
        ).strip()
        # Never log the raw command: it embeds the WANDB_API_KEY env var.
        sanitized_cmd_str = sanitize_wandb_api_key(command_str)
        _msg = f"{LOG_PREFIX}Launching run in docker with command: {sanitized_cmd_str}"
        wandb.termlog(_msg)
        run = _run_entry_point(command_str, launch_project.project_dir)
        if synchronous:
            await run.wait()
        return run
|
| 186 |
+
|
| 187 |
+
|
| 188 |
+
def _run_entry_point(command: str, work_dir: Optional[str]) -> AbstractRun:
    """Launch *command* via ``bash -c`` on a background thread.

    Arguments:
        command: Entry point command to run
        work_dir: Working directory in which to run the command (defaults to
            the current working directory when None)

    Returns:
        An instance of `LocalSubmittedRun`
    """
    cwd = os.getcwd() if work_dir is None else work_dir
    environment = os.environ.copy()
    submitted = LocalSubmittedRun()
    worker = threading.Thread(
        target=_thread_process_runner,
        args=(submitted, ["bash", "-c", command], cwd, environment),
    )
    submitted.set_thread(worker)
    worker.start()
    return submitted
|
| 209 |
+
|
| 210 |
+
|
| 211 |
+
def _thread_process_runner(
    run: LocalSubmittedRun, args: List[str], work_dir: str, env: Dict[str, str]
) -> None:
    """Spawn *args* as a subprocess and pump its output into *run*.

    Runs on a dedicated thread created by ``_run_entry_point``. Combined
    stdout/stderr is accumulated on ``run._stdout`` and echoed to this
    process's stdout. Honors ``run._terminate_flag`` set by ``cancel()``.
    """
    # cancel was called before we started the subprocess
    if run._terminate_flag:
        return
    # TODO: Make this async
    process = subprocess.Popen(
        args,
        close_fds=True,
        stdout=subprocess.PIPE,
        # Interleave stderr into the stdout pipe so one reader sees both.
        stderr=subprocess.STDOUT,
        universal_newlines=True,
        bufsize=1,
        cwd=work_dir,
        env=env,
    )
    run.set_command_proc(process)
    run._stdout = ""
    while True:
        # the agent thread could set the terminate flag
        if run._terminate_flag:
            process.terminate()  # type: ignore
        # Read raw bytes from the pipe's fd, bypassing the text wrapper.
        chunk = os.read(process.stdout.fileno(), 4096)  # type: ignore
        if not chunk:
            break
        index = chunk.find(b"\r")
        decoded_chunk = None
        while not decoded_chunk:
            try:
                decoded_chunk = chunk.decode()
            except UnicodeDecodeError:
                # Multi-byte character cut off, try to get the rest of it
                chunk += os.read(process.stdout.fileno(), 1)  # type: ignore
        # NOTE(review): these branches look inverted (a chunk that already
        # contains "\r" is stored verbatim while one without gets "\r"
        # appended) -- confirm the intended carriage-return handling.
        if index != -1:
            run._stdout += decoded_chunk
            print(chunk.decode(), end="")
        else:
            run._stdout += decoded_chunk + "\r"
            print(chunk.decode(), end="\r")
|
| 251 |
+
|
| 252 |
+
|
| 253 |
+
def get_docker_command(
    image: str,
    env_vars: Dict[str, str],
    entry_cmd: Optional[List[str]] = None,
    docker_args: Optional[Dict[str, Any]] = None,
    additional_args: Optional[List[str]] = None,
) -> List[str]:
    """Construct the ``docker run`` argv for *image*.

    Arguments:
        image: a Docker image to be run
        env_vars: a dictionary of environment variables for the command
        entry_cmd: the entry point command to run (first element becomes
            ``--entrypoint``, the rest follow the image name)
        docker_args: a dictionary of additional docker args for the command
        additional_args: extra args appended after the image/entrypoint

    Returns:
        The full command as a list of tokens.
    """
    command: List[Any] = ["docker", "run", "--rm"]

    # hacky handling of env vars, needs to be improved
    for key, value in env_vars.items():
        command.extend(["-e", f"{shlex.quote(key)}={shlex.quote(value)}"])

    if docker_args:
        for flag, setting in docker_args.items():
            # Single-letter flags get "-", longer ones get "--".
            option = ("-" if len(flag) == 1 else "--") + shlex.quote(flag)
            if isinstance(setting, list):
                # Repeatable flags: emit the option once per value.
                for item in setting:
                    command.extend([option, shlex.quote(str(item))])
            elif isinstance(setting, bool) and setting:
                # True booleans are bare switches with no value.
                command.append(option)
            else:
                command.extend([option, shlex.quote(str(setting))])

    if entry_cmd:
        command.extend(["--entrypoint", entry_cmd[0]])
    command.append(shlex.quote(image))
    if entry_cmd and len(entry_cmd) > 1:
        command.extend(entry_cmd[1:])
    if additional_args:
        command.extend(additional_args)
    return command
|
| 297 |
+
|
| 298 |
+
|
| 299 |
+
def join(split_command: List[str]) -> str:
    """Return a shell-escaped string from *split_command*.

    Delegates to :func:`shlex.join` (Python 3.8+), which performs exactly
    this quote-and-space-join operation.
    """
    return shlex.join(split_command)
|
parrot/lib/python3.10/site-packages/wandb/sdk/lib/__init__.py
ADDED
|
@@ -0,0 +1,5 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from . import lazyloader
|
| 2 |
+
from .disabled import RunDisabled, SummaryDisabled
|
| 3 |
+
from .run_moment import RunMoment
|
| 4 |
+
|
| 5 |
+
__all__ = ("lazyloader", "RunDisabled", "SummaryDisabled", "RunMoment")
|
parrot/lib/python3.10/site-packages/wandb/sdk/lib/__pycache__/_settings_toposort_generated.cpython-310.pyc
ADDED
|
Binary file (3.45 kB). View file
|
|
|
parrot/lib/python3.10/site-packages/wandb/sdk/lib/__pycache__/_wburls_generated.cpython-310.pyc
ADDED
|
Binary file (481 Bytes). View file
|
|
|
parrot/lib/python3.10/site-packages/wandb/sdk/lib/__pycache__/config_util.cpython-310.pyc
ADDED
|
Binary file (3.05 kB). View file
|
|
|
parrot/lib/python3.10/site-packages/wandb/sdk/lib/__pycache__/credentials.cpython-310.pyc
ADDED
|
Binary file (4.43 kB). View file
|
|
|
parrot/lib/python3.10/site-packages/wandb/sdk/lib/__pycache__/disabled.cpython-310.pyc
ADDED
|
Binary file (1.46 kB). View file
|
|
|
parrot/lib/python3.10/site-packages/wandb/sdk/lib/__pycache__/lazyloader.cpython-310.pyc
ADDED
|
Binary file (1.58 kB). View file
|
|
|
parrot/lib/python3.10/site-packages/wandb/sdk/lib/__pycache__/paths.cpython-310.pyc
ADDED
|
Binary file (2.71 kB). View file
|
|
|
parrot/lib/python3.10/site-packages/wandb/sdk/lib/__pycache__/proto_util.cpython-310.pyc
ADDED
|
Binary file (2.81 kB). View file
|
|
|
parrot/lib/python3.10/site-packages/wandb/sdk/lib/__pycache__/reporting.cpython-310.pyc
ADDED
|
Binary file (3.39 kB). View file
|
|
|
parrot/lib/python3.10/site-packages/wandb/sdk/lib/__pycache__/sock_client.cpython-310.pyc
ADDED
|
Binary file (8.59 kB). View file
|
|
|
parrot/lib/python3.10/site-packages/wandb/sdk/lib/__pycache__/sparkline.cpython-310.pyc
ADDED
|
Binary file (1.74 kB). View file
|
|
|
parrot/lib/python3.10/site-packages/wandb/sdk/lib/__pycache__/timer.cpython-310.pyc
ADDED
|
Binary file (1.03 kB). View file
|
|
|
parrot/lib/python3.10/site-packages/wandb/sdk/lib/_settings_toposort_generate.py
ADDED
|
@@ -0,0 +1,159 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import inspect
|
| 2 |
+
import sys
|
| 3 |
+
from typing import Dict, List, Set, Tuple
|
| 4 |
+
|
| 5 |
+
from wandb.errors import UsageError
|
| 6 |
+
from wandb.sdk.wandb_settings import Settings
|
| 7 |
+
|
| 8 |
+
if sys.version_info >= (3, 8):
|
| 9 |
+
from typing import get_type_hints
|
| 10 |
+
else:
|
| 11 |
+
from typing_extensions import get_type_hints
|
| 12 |
+
|
| 13 |
+
|
| 14 |
+
template = """
|
| 15 |
+
__all__ = ("SETTINGS_TOPOLOGICALLY_SORTED", "_Setting")
|
| 16 |
+
|
| 17 |
+
import sys
|
| 18 |
+
from typing import Tuple
|
| 19 |
+
|
| 20 |
+
if sys.version_info >= (3, 8):
|
| 21 |
+
from typing import Final, Literal
|
| 22 |
+
else:
|
| 23 |
+
from typing_extensions import Final, Literal
|
| 24 |
+
|
| 25 |
+
|
| 26 |
+
_Setting = Literal[
|
| 27 |
+
$settings_literal_list
|
| 28 |
+
]
|
| 29 |
+
|
| 30 |
+
SETTINGS_TOPOLOGICALLY_SORTED: Final[Tuple[_Setting, ...]] = (
|
| 31 |
+
$settings_topologically_sorted
|
| 32 |
+
)
|
| 33 |
+
"""
|
| 34 |
+
|
| 35 |
+
|
| 36 |
+
class Graph:
    """Unweighted directed graph stored as an adjacency list.

    Used to verify that the wandb settings have no cyclic dependencies and
    to compute the order in which setting modifications must be applied.
    """

    def __init__(self) -> None:
        self.adj_list: Dict[str, Set[str]] = {}

    def add_node(self, node: str) -> None:
        """Register *node*, keeping any existing edges if already present."""
        self.adj_list.setdefault(node, set())

    def add_edge(self, node1: str, node2: str) -> None:
        """Add a directed edge from *node1* to *node2*."""
        self.adj_list[node1].add(node2)

    def get_neighbors(self, node: str) -> Set[str]:
        """Return the set of nodes directly reachable from *node*."""
        return self.adj_list[node]

    def topological_sort_dfs(self) -> List[str]:
        """Return all nodes in topological order (dependencies first).

        Raises:
            UsageError: if the graph contains a cycle.
        """
        # Sorting each adjacency set makes the traversal deterministic.
        ordered_adjacency = {
            node: sorted(edges) for node, edges in self.adj_list.items()
        }

        ordering: List[str] = []
        done: Set[str] = set()
        in_progress: Set[str] = set()

        def visit(node: str) -> None:
            if node in done:
                return
            if node in in_progress:
                # Re-entering a node on the current DFS path means a cycle.
                raise UsageError("Cyclic dependency detected in wandb.Settings")

            in_progress.add(node)
            for neighbor in ordered_adjacency[node]:
                visit(neighbor)
            in_progress.remove(node)

            done.add(node)
            ordering.append(node)

        for node in self.adj_list:
            if node not in done:
                visit(node)

        return ordering
|
| 83 |
+
|
| 84 |
+
|
| 85 |
+
def _get_modification_order(
    settings: Settings,
) -> Tuple[Tuple[str, ...], Tuple[str, ...]]:
    """Return the order in which settings should be modified, based on dependencies.

    Dependencies are discovered by introspecting the Settings class:
    a setting depends on another if its ``_validate_<name>`` method or any
    of its runtime hooks reference that other setting as an unbound name.

    Returns:
        A pair ``(props, modification_order)`` where ``props`` is every
        annotated Settings attribute and ``modification_order`` is the
        topologically sorted subset that has validators or hooks.
    """
    dependency_graph = Graph()

    # All annotated attribute names on Settings; used to filter closure vars.
    props = tuple(get_type_hints(Settings).keys())

    # discover prop dependencies from validator methods and runtime hooks

    prefix = "_validate_"
    symbols = set(dir(settings))
    # Sorted for a deterministic graph (and hence a deterministic output).
    validator_methods = tuple(sorted(m for m in symbols if m.startswith(prefix)))

    # extract dependencies from validator methods
    for m in validator_methods:
        # "_validate_foo" -> setting name "foo"
        setting = m.split(prefix)[1]
        dependency_graph.add_node(setting)
        # if the method is not static, inspect its code to find the attributes it depends on
        if (
            not isinstance(Settings.__dict__[m], staticmethod)
            and not isinstance(Settings.__dict__[m], classmethod)
            and Settings.__dict__[m].__code__.co_argcount > 0
        ):
            # Unbound names referenced by the validator that are also known
            # props are treated as dependencies of this setting.
            unbound_closure_vars = inspect.getclosurevars(Settings.__dict__[m]).unbound
            dependencies = (v for v in unbound_closure_vars if v in props)
            for d in dependencies:
                dependency_graph.add_node(d)
                dependency_graph.add_edge(setting, d)

    # extract dependencies from props' runtime hooks
    default_props = settings._default_props()
    for prop, spec in default_props.items():
        if "hook" not in spec:
            continue

        dependency_graph.add_node(prop)

        hook = spec["hook"]
        # A spec may carry a single callable or a list of them; normalize.
        if callable(hook):
            hook = [hook]

        for h in hook:
            unbound_closure_vars = inspect.getclosurevars(h).unbound
            dependencies = (v for v in unbound_closure_vars if v in props)
            for d in dependencies:
                dependency_graph.add_node(d)
                dependency_graph.add_edge(prop, d)

    # Dependencies come first in the resulting order (post-order DFS).
    modification_order = dependency_graph.topological_sort_dfs()
    return props, tuple(modification_order)
|
| 136 |
+
|
| 137 |
+
|
| 138 |
+
def generate(settings: Settings) -> None:
    """Render the generated-module template for *settings* and print it.

    Fills in the ``$settings_literal_list`` and
    ``$settings_topologically_sorted`` placeholders of ``template`` with
    quoted, comma-separated setting names.
    """
    literal_names, topo_order = _get_modification_order(settings)

    literal_csv = ", ".join(f'"{name}"' for name in literal_names)
    topo_csv = ", ".join(f'"{name}"' for name in topo_order)

    rendered = template.replace("$settings_literal_list", literal_csv)
    rendered = rendered.replace("$settings_topologically_sorted", topo_csv)
    print(rendered)


if __name__ == "__main__":
    generate(Settings())
|
parrot/lib/python3.10/site-packages/wandb/sdk/lib/_settings_toposort_generated.py
ADDED
|
@@ -0,0 +1,249 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# DO NOT EDIT -- GENERATED BY: `generate-tool.py --generate`
|
| 2 |
+
__all__ = ("SETTINGS_TOPOLOGICALLY_SORTED", "_Setting")
|
| 3 |
+
|
| 4 |
+
import sys
|
| 5 |
+
from typing import Tuple
|
| 6 |
+
|
| 7 |
+
if sys.version_info >= (3, 8):
|
| 8 |
+
from typing import Final, Literal
|
| 9 |
+
else:
|
| 10 |
+
from typing_extensions import Final, Literal
|
| 11 |
+
|
| 12 |
+
|
| 13 |
+
_Setting = Literal[
|
| 14 |
+
"_args",
|
| 15 |
+
"_aws_lambda",
|
| 16 |
+
"_cli_only_mode",
|
| 17 |
+
"_code_path_local",
|
| 18 |
+
"_colab",
|
| 19 |
+
"_cuda",
|
| 20 |
+
"_disable_meta",
|
| 21 |
+
"_disable_service",
|
| 22 |
+
"_disable_setproctitle",
|
| 23 |
+
"_disable_stats",
|
| 24 |
+
"_disable_update_check",
|
| 25 |
+
"_disable_viewer",
|
| 26 |
+
"_disable_machine_info",
|
| 27 |
+
"_executable",
|
| 28 |
+
"_extra_http_headers",
|
| 29 |
+
"_file_stream_max_bytes",
|
| 30 |
+
"_file_stream_retry_max",
|
| 31 |
+
"_file_stream_retry_wait_min_seconds",
|
| 32 |
+
"_file_stream_retry_wait_max_seconds",
|
| 33 |
+
"_file_stream_timeout_seconds",
|
| 34 |
+
"_file_transfer_retry_max",
|
| 35 |
+
"_file_transfer_retry_wait_min_seconds",
|
| 36 |
+
"_file_transfer_retry_wait_max_seconds",
|
| 37 |
+
"_file_transfer_timeout_seconds",
|
| 38 |
+
"_flow_control_custom",
|
| 39 |
+
"_flow_control_disabled",
|
| 40 |
+
"_graphql_retry_max",
|
| 41 |
+
"_graphql_retry_wait_min_seconds",
|
| 42 |
+
"_graphql_retry_wait_max_seconds",
|
| 43 |
+
"_graphql_timeout_seconds",
|
| 44 |
+
"_internal_check_process",
|
| 45 |
+
"_internal_queue_timeout",
|
| 46 |
+
"_ipython",
|
| 47 |
+
"_jupyter",
|
| 48 |
+
"_jupyter_name",
|
| 49 |
+
"_jupyter_path",
|
| 50 |
+
"_jupyter_root",
|
| 51 |
+
"_kaggle",
|
| 52 |
+
"_live_policy_rate_limit",
|
| 53 |
+
"_live_policy_wait_time",
|
| 54 |
+
"_log_level",
|
| 55 |
+
"_network_buffer",
|
| 56 |
+
"_noop",
|
| 57 |
+
"_notebook",
|
| 58 |
+
"_offline",
|
| 59 |
+
"_sync",
|
| 60 |
+
"_os",
|
| 61 |
+
"_platform",
|
| 62 |
+
"_proxies",
|
| 63 |
+
"_python",
|
| 64 |
+
"_runqueue_item_id",
|
| 65 |
+
"_require_legacy_service",
|
| 66 |
+
"_save_requirements",
|
| 67 |
+
"_service_transport",
|
| 68 |
+
"_service_wait",
|
| 69 |
+
"_shared",
|
| 70 |
+
"_start_datetime",
|
| 71 |
+
"_start_time",
|
| 72 |
+
"_stats_pid",
|
| 73 |
+
"_stats_sampling_interval",
|
| 74 |
+
"_stats_sample_rate_seconds",
|
| 75 |
+
"_stats_samples_to_average",
|
| 76 |
+
"_stats_join_assets",
|
| 77 |
+
"_stats_neuron_monitor_config_path",
|
| 78 |
+
"_stats_open_metrics_endpoints",
|
| 79 |
+
"_stats_open_metrics_filters",
|
| 80 |
+
"_stats_disk_paths",
|
| 81 |
+
"_stats_buffer_size",
|
| 82 |
+
"_tmp_code_dir",
|
| 83 |
+
"_tracelog",
|
| 84 |
+
"_unsaved_keys",
|
| 85 |
+
"_windows",
|
| 86 |
+
"allow_val_change",
|
| 87 |
+
"anonymous",
|
| 88 |
+
"api_key",
|
| 89 |
+
"azure_account_url_to_access_key",
|
| 90 |
+
"base_url",
|
| 91 |
+
"code_dir",
|
| 92 |
+
"colab_url",
|
| 93 |
+
"config_paths",
|
| 94 |
+
"console",
|
| 95 |
+
"console_multipart",
|
| 96 |
+
"credentials_file",
|
| 97 |
+
"deployment",
|
| 98 |
+
"disable_code",
|
| 99 |
+
"disable_git",
|
| 100 |
+
"disable_hints",
|
| 101 |
+
"disable_job_creation",
|
| 102 |
+
"disabled",
|
| 103 |
+
"docker",
|
| 104 |
+
"email",
|
| 105 |
+
"entity",
|
| 106 |
+
"files_dir",
|
| 107 |
+
"force",
|
| 108 |
+
"fork_from",
|
| 109 |
+
"resume_from",
|
| 110 |
+
"git_commit",
|
| 111 |
+
"git_remote",
|
| 112 |
+
"git_remote_url",
|
| 113 |
+
"git_root",
|
| 114 |
+
"heartbeat_seconds",
|
| 115 |
+
"host",
|
| 116 |
+
"http_proxy",
|
| 117 |
+
"https_proxy",
|
| 118 |
+
"identity_token_file",
|
| 119 |
+
"ignore_globs",
|
| 120 |
+
"init_timeout",
|
| 121 |
+
"is_local",
|
| 122 |
+
"job_name",
|
| 123 |
+
"job_source",
|
| 124 |
+
"label_disable",
|
| 125 |
+
"launch",
|
| 126 |
+
"launch_config_path",
|
| 127 |
+
"log_dir",
|
| 128 |
+
"log_internal",
|
| 129 |
+
"log_symlink_internal",
|
| 130 |
+
"log_symlink_user",
|
| 131 |
+
"log_user",
|
| 132 |
+
"login_timeout",
|
| 133 |
+
"mode",
|
| 134 |
+
"notebook_name",
|
| 135 |
+
"program",
|
| 136 |
+
"program_abspath",
|
| 137 |
+
"program_relpath",
|
| 138 |
+
"project",
|
| 139 |
+
"project_url",
|
| 140 |
+
"quiet",
|
| 141 |
+
"reinit",
|
| 142 |
+
"relogin",
|
| 143 |
+
"resume",
|
| 144 |
+
"resume_fname",
|
| 145 |
+
"resumed",
|
| 146 |
+
"root_dir",
|
| 147 |
+
"run_group",
|
| 148 |
+
"run_id",
|
| 149 |
+
"run_job_type",
|
| 150 |
+
"run_mode",
|
| 151 |
+
"run_name",
|
| 152 |
+
"run_notes",
|
| 153 |
+
"run_tags",
|
| 154 |
+
"run_url",
|
| 155 |
+
"sagemaker_disable",
|
| 156 |
+
"save_code",
|
| 157 |
+
"settings_system",
|
| 158 |
+
"settings_workspace",
|
| 159 |
+
"show_colors",
|
| 160 |
+
"show_emoji",
|
| 161 |
+
"show_errors",
|
| 162 |
+
"show_info",
|
| 163 |
+
"show_warnings",
|
| 164 |
+
"silent",
|
| 165 |
+
"start_method",
|
| 166 |
+
"strict",
|
| 167 |
+
"summary_errors",
|
| 168 |
+
"summary_timeout",
|
| 169 |
+
"summary_warnings",
|
| 170 |
+
"sweep_id",
|
| 171 |
+
"sweep_param_path",
|
| 172 |
+
"sweep_url",
|
| 173 |
+
"symlink",
|
| 174 |
+
"sync_dir",
|
| 175 |
+
"sync_file",
|
| 176 |
+
"sync_symlink_latest",
|
| 177 |
+
"table_raise_on_max_row_limit_exceeded",
|
| 178 |
+
"timespec",
|
| 179 |
+
"tmp_dir",
|
| 180 |
+
"username",
|
| 181 |
+
"wandb_dir",
|
| 182 |
+
]
|
| 183 |
+
|
| 184 |
+
SETTINGS_TOPOLOGICALLY_SORTED: Final[Tuple[_Setting, ...]] = (
|
| 185 |
+
"_service_wait",
|
| 186 |
+
"_stats_sample_rate_seconds",
|
| 187 |
+
"_stats_samples_to_average",
|
| 188 |
+
"_stats_sampling_interval",
|
| 189 |
+
"anonymous",
|
| 190 |
+
"api_key",
|
| 191 |
+
"base_url",
|
| 192 |
+
"console",
|
| 193 |
+
"job_source",
|
| 194 |
+
"mode",
|
| 195 |
+
"project",
|
| 196 |
+
"run_id",
|
| 197 |
+
"start_method",
|
| 198 |
+
"_aws_lambda",
|
| 199 |
+
"program",
|
| 200 |
+
"_code_path_local",
|
| 201 |
+
"_colab",
|
| 202 |
+
"_disable_machine_info",
|
| 203 |
+
"_disable_meta",
|
| 204 |
+
"_disable_stats",
|
| 205 |
+
"_network_buffer",
|
| 206 |
+
"_flow_control_disabled",
|
| 207 |
+
"_flow_control_custom",
|
| 208 |
+
"_ipython",
|
| 209 |
+
"_jupyter",
|
| 210 |
+
"_kaggle",
|
| 211 |
+
"_noop",
|
| 212 |
+
"_notebook",
|
| 213 |
+
"disabled",
|
| 214 |
+
"_offline",
|
| 215 |
+
"_shared",
|
| 216 |
+
"_stats_neuron_monitor_config_path",
|
| 217 |
+
"run_mode",
|
| 218 |
+
"_start_datetime",
|
| 219 |
+
"timespec",
|
| 220 |
+
"root_dir",
|
| 221 |
+
"wandb_dir",
|
| 222 |
+
"tmp_dir",
|
| 223 |
+
"_tmp_code_dir",
|
| 224 |
+
"_windows",
|
| 225 |
+
"colab_url",
|
| 226 |
+
"is_local",
|
| 227 |
+
"deployment",
|
| 228 |
+
"disable_code",
|
| 229 |
+
"disable_git",
|
| 230 |
+
"disable_job_creation",
|
| 231 |
+
"files_dir",
|
| 232 |
+
"_proxies",
|
| 233 |
+
"http_proxy",
|
| 234 |
+
"https_proxy",
|
| 235 |
+
"log_dir",
|
| 236 |
+
"log_internal",
|
| 237 |
+
"log_symlink_internal",
|
| 238 |
+
"log_symlink_user",
|
| 239 |
+
"log_user",
|
| 240 |
+
"project_url",
|
| 241 |
+
"resume_fname",
|
| 242 |
+
"run_url",
|
| 243 |
+
"settings_system",
|
| 244 |
+
"settings_workspace",
|
| 245 |
+
"sweep_url",
|
| 246 |
+
"sync_dir",
|
| 247 |
+
"sync_file",
|
| 248 |
+
"sync_symlink_latest",
|
| 249 |
+
)
|
parrot/lib/python3.10/site-packages/wandb/sdk/lib/_wburls_generate.py
ADDED
|
@@ -0,0 +1,25 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from wburls import wburls # type: ignore
|
| 2 |
+
|
| 3 |
+
template = """
|
| 4 |
+
import sys
|
| 5 |
+
|
| 6 |
+
if sys.version_info >= (3, 8):
|
| 7 |
+
from typing import Literal
|
| 8 |
+
else:
|
| 9 |
+
from typing_extensions import Literal
|
| 10 |
+
|
| 11 |
+
|
| 12 |
+
URLS = Literal[
|
| 13 |
+
$literal_list
|
| 14 |
+
]
|
| 15 |
+
"""
|
| 16 |
+
|
| 17 |
+
|
| 18 |
+
def generate() -> None:
    """Print the wburls Literal module, filling in the current URL keys."""
    url_keys = wburls._get_urls()
    literal_body = ", ".join(repr(key) for key in url_keys)
    print(template.replace("$literal_list", literal_body))


if __name__ == "__main__":
    generate()
|
parrot/lib/python3.10/site-packages/wandb/sdk/lib/apikey.py
ADDED
|
@@ -0,0 +1,273 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""apikey util."""
|
| 2 |
+
|
| 3 |
+
import os
|
| 4 |
+
import platform
|
| 5 |
+
import stat
|
| 6 |
+
import sys
|
| 7 |
+
import textwrap
|
| 8 |
+
from functools import partial
|
| 9 |
+
from typing import TYPE_CHECKING, Callable, Dict, Optional, Union
|
| 10 |
+
from urllib.parse import urlparse
|
| 11 |
+
|
| 12 |
+
# import Literal
|
| 13 |
+
if sys.version_info >= (3, 8):
|
| 14 |
+
from typing import Literal
|
| 15 |
+
else:
|
| 16 |
+
from typing_extensions import Literal
|
| 17 |
+
|
| 18 |
+
import click
|
| 19 |
+
from requests.utils import NETRC_FILES, get_netrc_auth
|
| 20 |
+
|
| 21 |
+
import wandb
|
| 22 |
+
from wandb.apis import InternalApi
|
| 23 |
+
from wandb.errors import term
|
| 24 |
+
from wandb.util import _is_databricks, isatty, prompt_choices
|
| 25 |
+
|
| 26 |
+
from .wburls import wburls
|
| 27 |
+
|
| 28 |
+
LOGIN_CHOICE_ANON = "Private W&B dashboard, no account required"
|
| 29 |
+
LOGIN_CHOICE_NEW = "Create a W&B account"
|
| 30 |
+
LOGIN_CHOICE_EXISTS = "Use an existing W&B account"
|
| 31 |
+
LOGIN_CHOICE_DRYRUN = "Don't visualize my results"
|
| 32 |
+
LOGIN_CHOICE_NOTTY = "Unconfigured"
|
| 33 |
+
LOGIN_CHOICES = [
|
| 34 |
+
LOGIN_CHOICE_ANON,
|
| 35 |
+
LOGIN_CHOICE_NEW,
|
| 36 |
+
LOGIN_CHOICE_EXISTS,
|
| 37 |
+
LOGIN_CHOICE_DRYRUN,
|
| 38 |
+
]
|
| 39 |
+
|
| 40 |
+
Mode = Literal["allow", "must", "never", "false", "true"]
|
| 41 |
+
|
| 42 |
+
if TYPE_CHECKING:
|
| 43 |
+
from wandb.sdk.wandb_settings import Settings
|
| 44 |
+
|
| 45 |
+
|
| 46 |
+
getpass = partial(click.prompt, hide_input=True, err=True)
|
| 47 |
+
|
| 48 |
+
|
| 49 |
+
def _fixup_anon_mode(default: Optional[Mode]) -> Optional[Mode]:
    """Translate legacy anonymous-mode values into the current vocabulary.

    Legacy settings files stored "true"/"false"; map those to
    "allow"/"never" and pass any other (already-valid) value through.
    A missing value defaults to "never".
    """
    mode = default or "never"
    legacy_to_current: Dict[Mode, Mode] = {"true": "allow", "false": "never"}
    return legacy_to_current.get(mode, mode)
|
| 55 |
+
|
| 56 |
+
|
| 57 |
+
def get_netrc_file_path() -> str:
    """Return the path to the netrc file.

    Resolution order:
      1. the ``NETRC`` environment variable, if set;
      2. an existing ``.netrc``/``_netrc`` in the home directory;
      3. the platform default name in the home directory
         (``.netrc`` everywhere except Windows, which uses ``_netrc``).
    """
    # if the NETRC environment variable is set, use that
    netrc_file = os.environ.get("NETRC")
    if netrc_file:
        return os.path.expanduser(netrc_file)

    # if either .netrc or _netrc exists in the home directory, use that
    # (home dir hoisted out of the loop: it is loop-invariant)
    home_dir = os.path.expanduser("~")
    for netrc_file in NETRC_FILES:
        candidate = os.path.join(home_dir, netrc_file)
        if os.path.exists(candidate):
            return candidate

    # otherwise, use .netrc on non-Windows platforms and _netrc on Windows
    netrc_file = ".netrc" if platform.system() != "Windows" else "_netrc"
    return os.path.join(home_dir, netrc_file)
|
| 74 |
+
|
| 75 |
+
|
| 76 |
+
def prompt_api_key(  # noqa: C901
    settings: "Settings",
    api: Optional[InternalApi] = None,
    input_callback: Optional[Callable] = None,
    browser_callback: Optional[Callable] = None,
    no_offline: bool = False,
    no_create: bool = False,
    local: bool = False,
) -> Union[str, bool, None]:
    """Prompt for api key.

    Interactively (or via callbacks) resolves an API key, persisting it via
    write_key when one is obtained.

    Args:
        settings: Active settings; consulted for anonymous mode, jupyter
            detection, login timeout, and local-server detection.
        api: InternalApi to use; constructed from settings when omitted.
        input_callback: Callable used to read the key; defaults to a hidden
            click prompt.
        browser_callback: Optional callable that can obtain a key via the
            browser (signup=True for account creation).
        no_offline: Remove the dry-run choice.
        no_create: Remove the account-creation choice.
        local: Force the "existing account" flow (local/on-prem server).

    Returns:
        str - if key is configured
        None - if dryrun is selected
        False - if unconfigured (notty)
    """
    input_callback = input_callback or getpass
    log_string = term.LOG_STRING
    api = api or InternalApi(settings)
    # Normalize legacy "true"/"false" anonymous values to allow/must/never.
    anon_mode = _fixup_anon_mode(settings.anonymous)  # type: ignore
    jupyter = settings._jupyter or False
    app_url = api.app_url

    # Start from the full menu and trim choices the caller/environment forbids.
    choices = [choice for choice in LOGIN_CHOICES]
    if anon_mode == "never":
        # Omit LOGIN_CHOICE_ANON as a choice if the env var is set to never
        choices.remove(LOGIN_CHOICE_ANON)
    if (jupyter and not settings.login_timeout) or no_offline:
        choices.remove(LOGIN_CHOICE_DRYRUN)
    if (jupyter and not settings.login_timeout) or no_create:
        choices.remove(LOGIN_CHOICE_NEW)

    # Colab gets a dedicated login flow; if it yields a key we are done.
    if jupyter and "google.colab" in sys.modules:
        log_string = term.LOG_STRING_NOCOLOR
        key = wandb.jupyter.attempt_colab_login(app_url)  # type: ignore
        if key is not None:
            write_key(settings, key, api=api)
            return key  # type: ignore

    if anon_mode == "must":
        result = LOGIN_CHOICE_ANON
    # If we're not in an interactive environment, default to dry-run.
    elif (
        not jupyter and (not isatty(sys.stdout) or not isatty(sys.stdin))
    ) or _is_databricks():
        result = LOGIN_CHOICE_NOTTY
    elif local:
        result = LOGIN_CHOICE_EXISTS
    elif len(choices) == 1:
        # Only one option left after trimming: no need to prompt.
        result = choices[0]
    else:
        result = prompt_choices(
            choices, input_timeout=settings.login_timeout, jupyter=jupyter
        )

    api_ask = (
        f"{log_string}: Paste an API key from your profile and hit enter, "
        "or press ctrl+c to quit"
    )
    if result == LOGIN_CHOICE_ANON:
        key = api.create_anonymous_api_key()

        write_key(settings, key, api=api, anonymous=True)
        return key  # type: ignore
    elif result == LOGIN_CHOICE_NEW:
        # Prefer the browser signup flow; fall back to a manual paste.
        key = browser_callback(signup=True) if browser_callback else None

        if not key:
            wandb.termlog(f"Create an account here: {app_url}/authorize?signup=true")
            key = input_callback(api_ask).strip()

        write_key(settings, key, api=api)
        return key  # type: ignore
    elif result == LOGIN_CHOICE_EXISTS:
        key = browser_callback() if browser_callback else None

        if not key:
            if not (settings.is_local or local):
                # Strip the scheme for a friendlier host display.
                host = app_url
                for prefix in ("http://", "https://"):
                    if app_url.startswith(prefix):
                        host = app_url[len(prefix) :]
                wandb.termlog(
                    f"Logging into {host}. (Learn how to deploy a W&B server locally: {wburls.get('wandb_server')})"
                )
            wandb.termlog(
                f"You can find your API key in your browser here: {app_url}/authorize"
            )
            key = input_callback(api_ask).strip()
        write_key(settings, key, api=api)
        return key  # type: ignore
    elif result == LOGIN_CHOICE_NOTTY:
        # TODO: Needs refactor as this needs to be handled by caller
        return False
    elif result == LOGIN_CHOICE_DRYRUN:
        return None
    else:
        # Jupyter environments don't have a tty, but we can still try logging in using
        # the browser callback if one is supplied.
        key, anonymous = (
            browser_callback() if jupyter and browser_callback else (None, False)
        )

        write_key(settings, key, api=api)
        return key  # type: ignore
|
| 181 |
+
|
| 182 |
+
|
| 183 |
+
def write_netrc(host: str, entity: str, key: str) -> Optional[bool]:
    """Add our host and key to .netrc.

    Rewrites the netrc file: any existing entry for this host is dropped and
    a fresh machine/login/password stanza is appended.

    Args:
        host: Server URL; only its hostname (no scheme/port) is recorded.
        entity: Login name to store (typically "user").
        key: API key; its hex portion must be exactly 40 characters.

    Returns:
        True on success, None on validation failure or I/O error.
    """
    # On-prem keys are "<prefix>-<40 hex chars>"; validate the hex portion.
    _, key_suffix = key.split("-", 1) if "-" in key else ("", key)
    if len(key_suffix) != 40:
        wandb.termerror(
            "API-key must be exactly 40 characters long: {} ({} chars)".format(
                key_suffix, len(key_suffix)
            )
        )
        return None
    try:
        normalized_host = urlparse(host).netloc.split(":")[0]
        netrc_path = get_netrc_file_path()
        wandb.termlog(
            f"Appending key for {normalized_host} to your netrc file: {netrc_path}"
        )
        machine_line = f"machine {normalized_host}"
        orig_lines = None
        try:
            with open(netrc_path) as f:
                orig_lines = f.read().strip().split("\n")
        except OSError:
            # No existing netrc file (or unreadable): start from scratch.
            pass
        with open(netrc_path, "w") as f:
            if orig_lines:
                # delete this machine from the file if it's already there.
                # skip counts the remaining lines of the stanza to drop
                # (machine + login + password = 3 lines total).
                skip = 0
                for line in orig_lines:
                    # we fix invalid netrc files with an empty host that we wrote before
                    # verifying host...
                    if line == "machine " or machine_line in line:
                        skip = 2
                    elif skip:
                        skip -= 1
                    else:
                        f.write("{}\n".format(line))
            f.write(
                textwrap.dedent(
                    """\
                    machine {host}
                      login {entity}
                      password {key}
                    """
                ).format(host=normalized_host, entity=entity, key=key)
            )
        # Credentials file: restrict access to the owner (0600).
        os.chmod(netrc_path, stat.S_IRUSR | stat.S_IWUSR)
        return True
    except OSError:
        wandb.termerror(f"Unable to read {netrc_path}")
        return None
|
| 233 |
+
|
| 234 |
+
|
| 235 |
+
def write_key(
    settings: "Settings",
    key: Optional[str],
    api: Optional["InternalApi"] = None,
    anonymous: bool = False,
) -> None:
    """Validate *key* and persist it (netrc entry plus anonymous setting).

    Args:
        settings: Settings providing the base_url to record in netrc.
        key: API key; either a 40-char hex string or "<prefix>-<40 hex chars>"
            for on-prem deployments.
        api: InternalApi used to persist the anonymous setting; constructed
            if omitted.
        anonymous: Whether the key is an anonymous-mode key.

    Raises:
        ValueError: if key is empty or its hex portion is not 40 characters.
    """
    if not key:
        raise ValueError("No API key specified.")

    # TODO(jhr): api shouldn't be optional or it shouldn't be passed, clean up callers
    api = api or InternalApi()

    # Normal API keys are 40-character hex strings. On-prem API keys have a
    # variable-length prefix, a dash, then the 40-char string.
    _, suffix = key.split("-", 1) if "-" in key else ("", key)

    if len(suffix) != 40:
        # Report the length of the hex portion (len(suffix)), not the whole
        # key: for prefixed on-prem keys len(key) would be misleading, and
        # write_netrc already reports the suffix length.
        raise ValueError(
            "API key must be 40 characters long, yours was {}".format(len(suffix))
        )

    if anonymous:
        api.set_setting("anonymous", "true", globally=True, persist=True)
    else:
        api.clear_setting("anonymous", globally=True, persist=True)

    write_netrc(settings.base_url, "user", key)
|
| 262 |
+
|
| 263 |
+
|
| 264 |
+
def api_key(settings: Optional["Settings"] = None) -> Optional[str]:
    """Resolve the API key from settings, falling back to the netrc file."""
    if settings is None:
        settings = wandb.setup().settings  # type: ignore
        assert settings is not None
    if settings.api_key:
        return settings.api_key
    # No key in settings: look for a netrc entry for this server.
    netrc_auth = get_netrc_auth(settings.base_url)
    return netrc_auth[-1] if netrc_auth else None
|
parrot/lib/python3.10/site-packages/wandb/sdk/lib/capped_dict.py
ADDED
|
@@ -0,0 +1,26 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import collections
|
| 2 |
+
from typing import Any, Optional
|
| 3 |
+
|
| 4 |
+
|
| 5 |
+
class CappedDict(collections.OrderedDict):
    """An OrderedDict that evicts its oldest entries to stay within max_size."""

    # Used when no explicit max_size is given at construction.
    default_max_size = 50

    def __init__(self, max_size: Optional[int] = None) -> None:
        self.max_size = max_size if max_size else self.default_max_size
        super().__init__()

    def __setitem__(self, key: str, val: Any) -> None:
        # Make room before inserting a brand-new key so the dict never
        # exceeds max_size after the insertion completes.
        if key not in self:
            self._prune_dict(self.max_size - 1)
        super().__setitem__(key, val)

    def update(self, **kwargs: Any) -> None:  # type: ignore[override]
        # Apply all updates first, then trim back down to max_size.
        super().update(**kwargs)
        self._prune_dict(self.max_size)

    def _prune_dict(self, max_size: int) -> None:
        # Drop the oldest (insertion-order first) entries beyond max_size.
        if len(self) >= max_size:
            overflow = len(self) - max_size
            for stale_key in list(self.keys())[:overflow]:
                del self[stale_key]
|
parrot/lib/python3.10/site-packages/wandb/sdk/lib/config_util.py
ADDED
|
@@ -0,0 +1,101 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import json
|
| 2 |
+
import logging
|
| 3 |
+
import os
|
| 4 |
+
from typing import Any, Dict, Optional
|
| 5 |
+
|
| 6 |
+
import yaml
|
| 7 |
+
|
| 8 |
+
import wandb
|
| 9 |
+
from wandb.errors import Error
|
| 10 |
+
from wandb.util import load_yaml
|
| 11 |
+
|
| 12 |
+
from . import filesystem
|
| 13 |
+
|
| 14 |
+
logger = logging.getLogger("wandb")
|
| 15 |
+
|
| 16 |
+
|
| 17 |
+
class ConfigError(Error):
    """Raised when a wandb config file is missing, unreadable, or malformed."""

    pass
|
| 19 |
+
|
| 20 |
+
|
| 21 |
+
def dict_from_proto_list(obj_list):
    """Convert proto config items into {key: {"desc": None, "value": ...}}.

    Each item carries a JSON-encoded value in value_json; desc is always None.
    """
    return {
        item.key: dict(desc=None, value=json.loads(item.value_json))
        for item in obj_list
    }
|
| 26 |
+
|
| 27 |
+
|
| 28 |
+
def dict_strip_value_dict(config_dict):
    """Flatten a {key: {"value": v, ...}} config dict into {key: v}."""
    return {key: wrapped["value"] for key, wrapped in config_dict.items()}
|
| 33 |
+
|
| 34 |
+
|
| 35 |
+
def dict_no_value_from_proto_list(obj_list):
    """Extract {key: value} from proto items, skipping malformed entries.

    Items whose decoded JSON is not a dict with a "value" key are ignored.
    """
    result = dict()
    for item in obj_list:
        decoded = json.loads(item.value_json)
        if isinstance(decoded, dict) and "value" in decoded:
            result[item.key] = decoded["value"]
    return result
|
| 44 |
+
|
| 45 |
+
|
| 46 |
+
# TODO(jhr): these functions should go away once we merge jobspec PR
|
| 47 |
+
def save_config_file_from_dict(config_filename, config_dict):
    """Serialize config_dict to config_filename in the wandb v1 config YAML format."""
    s = b"wandb_version: 1"
    if config_dict:  # adding an empty dictionary here causes a parse error
        # yaml.dump returns bytes when an encoding is supplied.
        s += b"\n\n" + yaml.dump(
            config_dict,
            Dumper=yaml.SafeDumper,
            default_flow_style=False,
            allow_unicode=True,
            encoding="utf-8",
            sort_keys=False,
        )
    data = s.decode("utf-8")
    # Create parent directories as needed before writing the file.
    filesystem.mkdir_exists_ok(os.path.dirname(config_filename))
    with open(config_filename, "w") as conf_file:
        conf_file.write(data)
|
| 62 |
+
|
| 63 |
+
|
| 64 |
+
def dict_from_config_file(
    filename: str, must_exist: bool = False
) -> Optional[Dict[str, Any]]:
    """Load a wandb config YAML file and return its flattened contents.

    Args:
        filename: Path to the config YAML file.
        must_exist: If True, a missing file raises ConfigError instead of
            returning None.

    Returns:
        The {key: value} dict with the on-disk {"value": ...} wrappers
        stripped, or None if the file is missing (and must_exist is False)
        or empty.

    Raises:
        ConfigError: if the file is required but missing, unreadable,
            contains invalid YAML, or declares an unknown wandb_version.
    """
    if not os.path.exists(filename):
        if must_exist:
            raise ConfigError("config file {} doesn't exist".format(filename))
        logger.debug("no default config file found in {}".format(filename))
        return None
    try:
        conf_file = open(filename)
    except OSError as e:
        raise ConfigError("Couldn't read config file: {}".format(filename)) from e
    try:
        # `with` guarantees the handle is closed; the original leaked it.
        with conf_file:
            loaded = load_yaml(conf_file)
    except yaml.parser.ParserError as e:
        raise ConfigError("Invalid YAML in config yaml") from e
    if loaded is None:
        wandb.termwarn(
            "Found an empty default config file (config-defaults.yaml). Proceeding with no defaults."
        )
        return None
    config_version = loaded.pop("wandb_version", None)
    if config_version is not None and config_version != 1:
        raise ConfigError("Unknown config version")
    # Strip the {"value": ...} wrappers used by the on-disk format.
    data = dict()
    for k, v in loaded.items():
        data[k] = v["value"]
    return data
|
| 92 |
+
|
| 93 |
+
|
| 94 |
+
def merge_dicts(dest: dict, src: dict) -> dict:
    """Recursively merge two dictionaries. Similar to Lodash's _.merge().

    Values from src win on conflicts; nested dicts present on both sides
    are merged in place. Returns dest (mutated).
    """
    for key, incoming in src.items():
        existing = dest.get(key)
        if isinstance(incoming, dict) and key in dest and isinstance(existing, dict):
            merge_dicts(existing, incoming)
        else:
            dest[key] = incoming
    return dest
|
parrot/lib/python3.10/site-packages/wandb/sdk/lib/credentials.py
ADDED
|
@@ -0,0 +1,141 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import json
|
| 2 |
+
import os
|
| 3 |
+
from datetime import datetime, timedelta
|
| 4 |
+
from pathlib import Path
|
| 5 |
+
|
| 6 |
+
import requests.utils
|
| 7 |
+
|
| 8 |
+
from wandb.errors import AuthenticationError
|
| 9 |
+
|
| 10 |
+
# Default on-disk location for cached access tokens (per-user config dir).
DEFAULT_WANDB_CREDENTIALS_FILE = Path(
    os.path.expanduser("~/.config/wandb/credentials.json")
)

# Timestamp format for the "expires_at" field persisted in the credentials
# file (naive UTC, second resolution — written/parsed with strftime/strptime).
_expires_at_fmt = "%Y-%m-%d %H:%M:%S"
|
| 15 |
+
|
| 16 |
+
|
| 17 |
+
def access_token(base_url: str, token_file: Path, credentials_file: Path) -> str:
    """Return an access token for `base_url`, minting one if necessary.

    When no credentials file exists yet, a new access token is obtained by
    exchanging the identity token found in `token_file` and persisted to
    `credentials_file`. Expired tokens are refreshed transparently.

    Args:
        base_url (str): The base URL of the server
        token_file (pathlib.Path): The path to the file containing the
            identity token
        credentials_file (pathlib.Path): The path to file used to save
            temporary access tokens

    Returns:
        str: The access token
    """
    # Bootstrap the credentials store on first use.
    if not credentials_file.exists():
        _write_credentials_file(base_url, token_file, credentials_file)

    creds = _fetch_credentials(base_url, token_file, credentials_file)
    return creds["access_token"]
|
| 38 |
+
|
| 39 |
+
|
| 40 |
+
def _write_credentials_file(base_url: str, token_file: Path, credentials_file: Path):
    """Mint an access token and persist it as a brand-new credentials file.

    Args:
        base_url (str): The base URL of the server
        token_file (pathlib.Path): The path to the file containing the
            identity token
        credentials_file (pathlib.Path): The path to file used to save
            temporary access tokens
    """
    token_data = _create_access_token(base_url, token_file)
    payload = {"credentials": {base_url: token_data}}
    with open(credentials_file, "w") as fp:
        json.dump(payload, fp, indent=4)

    # Restrict the file to owner read/write only: it holds a bearer token.
    os.chmod(credentials_file, 0o600)
|
| 57 |
+
|
| 58 |
+
|
| 59 |
+
def _fetch_credentials(base_url: str, token_file: Path, credentials_file: Path) -> dict:
    """Load the cached access token for `base_url`, refreshing it on expiry.

    A missing or unparseable entry counts as expired, which forces a fresh
    token exchange; the refreshed token is written back to `credentials_file`.

    Args:
        base_url (str): The base URL of the server
        token_file (pathlib.Path): The path to the file containing the
            identity token
        credentials_file (pathlib.Path): The path to file used to save
            temporary access tokens

    Returns:
        dict: The credentials including the access token.
    """
    with open(credentials_file) as fp:
        data = json.load(fp)
    per_server = data.setdefault("credentials", {})
    creds = per_server.get(base_url, {})

    # Treat a missing "expires_at" as already expired so we always refresh.
    expiry = datetime.utcnow()
    if "expires_at" in creds:
        expiry = datetime.strptime(creds["expires_at"], _expires_at_fmt)

    if expiry <= datetime.utcnow():
        creds = _create_access_token(base_url, token_file)
        data["credentials"][base_url] = creds
        with open(credentials_file, "w") as fp:
            json.dump(data, fp, indent=4)

    return creds
|
| 94 |
+
|
| 95 |
+
|
| 96 |
+
def _create_access_token(base_url: str, token_file: Path) -> dict:
    """Exchange an identity token for an access token from the server.

    Args:
        base_url (str): The base URL of the server.
        token_file (pathlib.Path): The path to the file containing the
            identity token

    Returns:
        dict: The access token and its expiration.

    Raises:
        FileNotFoundError: If the token file is not found.
        OSError: If there is an issue reading the token file.
        AuthenticationError: If the server fails to provide an access token.
    """
    try:
        with open(token_file) as file:
            token = file.read().strip()
    except FileNotFoundError as e:
        raise FileNotFoundError(f"Identity token file not found: {token_file}") from e
    except OSError as e:
        raise OSError(
            f"Failed to read the identity token from file: {token_file}"
        ) from e

    # RFC 7523 JWT-bearer grant: trade the identity token for an access token.
    url = f"{base_url}/oidc/token"
    data = {
        "grant_type": "urn:ietf:params:oauth:grant-type:jwt-bearer",
        "assertion": token,
    }
    headers = {"Content-Type": "application/x-www-form-urlencoded"}

    # BUG FIX: requests has no default timeout, so a hung server would block
    # this call forever; bound it so auth failures surface as errors instead.
    response = requests.post(url, data=data, headers=headers, timeout=60)

    if response.status_code != 200:
        raise AuthenticationError(
            f"Failed to retrieve access token: {response.status_code}, {response.text}"
        )

    resp_json = response.json()
    # Convert the relative "expires_in" lifetime into an absolute timestamp so
    # expiry can be checked later without remembering when the token was issued.
    expires_at = datetime.utcnow() + timedelta(seconds=float(resp_json["expires_in"]))
    resp_json["expires_at"] = expires_at.strftime(_expires_at_fmt)
    del resp_json["expires_in"]

    return resp_json
|
parrot/lib/python3.10/site-packages/wandb/sdk/lib/deprecate.py
ADDED
|
@@ -0,0 +1,42 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
__all__ = ["deprecate", "Deprecated"]
|
| 2 |
+
|
| 3 |
+
from typing import TYPE_CHECKING, Optional, Tuple
|
| 4 |
+
|
| 5 |
+
import wandb
|
| 6 |
+
from wandb.proto.wandb_deprecated import DEPRECATED_FEATURES, Deprecated
|
| 7 |
+
from wandb.proto.wandb_telemetry_pb2 import Deprecated as TelemetryDeprecated
|
| 8 |
+
|
| 9 |
+
# avoid cycle, use string type reference
|
| 10 |
+
if TYPE_CHECKING:
|
| 11 |
+
from .. import wandb_run
|
| 12 |
+
|
| 13 |
+
|
| 14 |
+
# Names of all deprecated-feature fields, taken from the public attributes of
# the generated Deprecated container; values are coerced to str for lookup.
deprecated_field_names: Tuple[str, ...] = tuple(
    str(v) for k, v in Deprecated.__dict__.items() if not k.startswith("_")
)
|
| 17 |
+
|
| 18 |
+
|
| 19 |
+
def deprecate(
    field_name: DEPRECATED_FEATURES,
    warning_message: str,
    run: Optional["wandb_run.Run"] = None,
) -> None:
    """Warn the user that a feature has been deprecated.

    Also stores the information about the event in telemetry.

    Args:
        field_name: The name of the feature that has been deprecated.
            Defined in wandb/proto/wandb_telemetry.proto::Deprecated
        warning_message: The message to display to the user.
        run: The run to whose telemetry the event will be added.
    """
    # Validate against the protobuf schema before recording anything.
    known_fields = TelemetryDeprecated.DESCRIPTOR.fields_by_name.keys()
    if field_name not in known_fields:
        raise ValueError(
            f"Unknown field name: {field_name}. Known fields: {known_fields}"
        )
    # Fall back to the global run when no explicit run is supplied.
    target_run = run or wandb.run
    with wandb.wandb_lib.telemetry.context(run=target_run) as tel:  # type: ignore[attr-defined]
        setattr(tel.deprecated, field_name, True)
    wandb.termwarn(warning_message, repeat=False)
|
parrot/lib/python3.10/site-packages/wandb/sdk/lib/disabled.py
ADDED
|
@@ -0,0 +1,29 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from typing import Any
|
| 2 |
+
|
| 3 |
+
from wandb.sdk.lib import deprecate
|
| 4 |
+
|
| 5 |
+
|
| 6 |
+
class SummaryDisabled(dict):
    """Dict-backed stand-in for a run summary when wandb is disabled.

    Supports both attribute-style and item-style access; nested plain dicts
    are promoted to SummaryDisabled on first access so chained attribute
    lookups (e.g. ``summary.a.b``) keep working.
    """

    __setattr__ = dict.__setitem__
    __delattr__ = dict.__delitem__

    def __getattr__(self, key):
        # BUG FIX: __getattr__ must raise AttributeError (not KeyError) for
        # missing names, otherwise hasattr()/copy()/pickle protocol probes
        # propagate an unexpected KeyError.
        try:
            return self[key]
        except KeyError:
            raise AttributeError(key) from None

    def __getitem__(self, key):
        val = dict.__getitem__(self, key)
        if isinstance(val, dict) and not isinstance(val, SummaryDisabled):
            # Promote the nested dict lazily and cache the promoted value.
            val = SummaryDisabled(val)
            self[key] = val
        return val
|
| 19 |
+
|
| 20 |
+
|
| 21 |
+
class RunDisabled:
    """Compatibility class for integrations that explicitly check for wandb.RunDisabled."""

    def __getattr__(self, name: str) -> Any:
        # Any attribute access warns once and implicitly returns None.
        # BUG FIX: corrected message typo ("returns and instance" ->
        # "returns an instance").
        deprecate.deprecate(
            field_name=deprecate.Deprecated.run_disabled,
            warning_message="RunDisabled is deprecated and is a no-op. "
            '`wandb.init(mode="disabled")` now returns an instance of `wandb.sdk.wandb_run.Run`.',
        )
|
parrot/lib/python3.10/site-packages/wandb/sdk/lib/exit_hooks.py
ADDED
|
@@ -0,0 +1,54 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import sys
|
| 2 |
+
import traceback
|
| 3 |
+
from types import TracebackType
|
| 4 |
+
from typing import TYPE_CHECKING, Optional, Type
|
| 5 |
+
|
| 6 |
+
import wandb
|
| 7 |
+
from wandb.errors import Error
|
| 8 |
+
|
| 9 |
+
if TYPE_CHECKING:
|
| 10 |
+
from typing import NoReturn
|
| 11 |
+
|
| 12 |
+
|
| 13 |
+
class ExitHooks:
    """Intercept sys.exit and uncaught exceptions to record how the process ends."""

    # Last uncaught exception seen by exc_handler, if any.
    exception: Optional[BaseException] = None

    def __init__(self) -> None:
        self.exit_code = 0
        self.exception = None

    def hook(self) -> None:
        """Install wrappers around sys.exit and sys.excepthook."""
        self._orig_exit = sys.exit
        sys.exit = self.exit
        # Only chain to a pre-existing excepthook if someone customized it.
        self._orig_excepthook = (
            sys.excepthook
            if sys.excepthook
            != sys.__excepthook__  # respect hooks by other libraries like pdb
            else None
        )
        sys.excepthook = self.exc_handler  # type: ignore

    def exit(self, code: object = 0) -> "NoReturn":
        """Record the exit code, then delegate to the original sys.exit."""
        orig_code = code
        # Normalize for bookkeeping the way the interpreter reports exits:
        # None -> 0, any other non-int object -> 1. The original (unmodified)
        # code is still what gets passed through to the real sys.exit.
        code = code if code is not None else 0
        code = code if isinstance(code, int) else 1
        self.exit_code = code
        self._orig_exit(orig_code)  # type: ignore

    def was_ctrl_c(self) -> bool:
        """Return True if the recorded exception was a KeyboardInterrupt."""
        return isinstance(self.exception, KeyboardInterrupt)

    def exc_handler(
        self, exc_type: Type[BaseException], exc: BaseException, tb: TracebackType
    ) -> None:
        """Replacement sys.excepthook: record the failure, then re-report it."""
        self.exit_code = 1
        self.exception = exc
        # Only wandb's own Error subclasses get the friendly termerror line.
        if issubclass(exc_type, Error):
            wandb.termerror(str(exc), repeat=False)

        if self.was_ctrl_c():
            # Conventional exit status for interrupt-style termination.
            self.exit_code = 255

        traceback.print_exception(exc_type, exc, tb)
        if self._orig_excepthook:
            self._orig_excepthook(exc_type, exc, tb)
|
parrot/lib/python3.10/site-packages/wandb/sdk/lib/filenames.py
ADDED
|
@@ -0,0 +1,64 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import os
|
| 2 |
+
from typing import Callable, Generator, Union
|
| 3 |
+
|
| 4 |
+
# Directory names under which wandb keeps its run files.
WANDB_DIRS = ("wandb", ".wandb")

# Well-known file names written into a run directory.
CONFIG_FNAME = "config.yaml"
OUTPUT_FNAME = "output.log"
DIFF_FNAME = "diff.patch"
SUMMARY_FNAME = "wandb-summary.json"
METADATA_FNAME = "wandb-metadata.json"
REQUIREMENTS_FNAME = "requirements.txt"
HISTORY_FNAME = "wandb-history.jsonl"
EVENTS_FNAME = "wandb-events.jsonl"
JOBSPEC_FNAME = "wandb-jobspec.json"
CONDA_ENVIRONMENTS_FNAME = "conda-environment.yaml"
|
| 16 |
+
|
| 17 |
+
|
| 18 |
+
def is_wandb_file(name: str) -> bool:
    """Return True if `name` is a file wandb itself writes into a run directory."""
    wandb_owned = (
        METADATA_FNAME,
        CONFIG_FNAME,
        REQUIREMENTS_FNAME,
        OUTPUT_FNAME,
        DIFF_FNAME,
        CONDA_ENVIRONMENTS_FNAME,
    )
    return name.startswith("wandb") or name in wandb_owned
|
| 28 |
+
|
| 29 |
+
|
| 30 |
+
def filtered_dir(
    root: str,
    include_fn: Union[Callable[[str, str], bool], Callable[[str], bool]],
    exclude_fn: Union[Callable[[str, str], bool], Callable[[str], bool]],
) -> Generator[str, None, None]:
    """Walk `root`, yielding file paths that pass `include_fn` and fail `exclude_fn`."""
    import inspect

    def _adapt(fn):
        # Compatibility with the old predicate API, which took only the path;
        # new-style predicates also receive root.
        if len(inspect.signature(fn).parameters) == 2:
            return fn
        return lambda path, root: fn(path)  # type: ignore

    include = _adapt(include_fn)
    exclude = _adapt(exclude_fn)

    for current_dir, _, filenames in os.walk(root):
        for name in filenames:
            full_path = os.path.join(current_dir, name)
            if include(full_path, root) and not exclude(full_path, root):
                yield full_path
|
| 58 |
+
|
| 59 |
+
|
| 60 |
+
def exclude_wandb_fn(path: str, root: str) -> bool:
    """Return True if `path` (relative to `root`) lives inside a wandb directory."""
    rel = os.path.relpath(path, root)
    for wandb_dir in WANDB_DIRS:
        if rel.startswith(wandb_dir + os.sep):
            return True
    return False
|
parrot/lib/python3.10/site-packages/wandb/sdk/lib/filesystem.py
ADDED
|
@@ -0,0 +1,372 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import contextlib
|
| 2 |
+
import ctypes
|
| 3 |
+
import errno
|
| 4 |
+
import logging
|
| 5 |
+
import os
|
| 6 |
+
import platform
|
| 7 |
+
import re
|
| 8 |
+
import shutil
|
| 9 |
+
import tempfile
|
| 10 |
+
import threading
|
| 11 |
+
from pathlib import Path
|
| 12 |
+
from typing import IO, Any, BinaryIO, Generator, Optional
|
| 13 |
+
|
| 14 |
+
from wandb.sdk.lib.paths import StrPath
|
| 15 |
+
|
| 16 |
+
# Module-level logger for filesystem helpers.
logger = logging.getLogger(__name__)

# https://en.wikipedia.org/wiki/Filename#Comparison_of_filename_limitations
# Control characters (0-31) plus punctuation that is invalid in file names on
# at least one supported platform.
PROBLEMATIC_PATH_CHARS = "".join(chr(i) for i in range(0, 32)) + ':"*<>?|'
|
| 20 |
+
|
| 21 |
+
|
| 22 |
+
def mkdir_exists_ok(dir_name: StrPath) -> None:
    """Create `dir_name` and any missing parents; an existing directory is fine.

    Raises:
        FileExistsError: if `dir_name` exists and is not a directory.
        PermissionError: if `dir_name` is not writable.
    """
    try:
        os.makedirs(dir_name, exist_ok=True)
    except FileExistsError as exc:
        # exist_ok only tolerates an existing *directory* at this path.
        raise FileExistsError(f"{dir_name!s} exists and is not a directory") from exc
    except PermissionError as exc:
        raise PermissionError(f"{dir_name!s} is not writable") from exc
|
| 35 |
+
|
| 36 |
+
|
| 37 |
+
def path_fallbacks(path: StrPath) -> Generator[str, None, None]:
    """Yield `path`, then variants with problematic characters replaced by '-'.

    Candidates should be checked in order for existence or create-ability.
    Each yielded variant replaces one more suspect character than the last;
    the drive component is never altered.
    """
    drive, rest = os.path.splitdrive(str(path))
    yield os.path.join(drive, rest)
    for bad_char in PROBLEMATIC_PATH_CHARS:
        if bad_char not in rest:
            continue
        rest = rest.replace(bad_char, "-")
        yield os.path.join(drive, rest)
|
| 50 |
+
|
| 51 |
+
|
| 52 |
+
def mkdir_allow_fallback(dir_name: StrPath) -> StrPath:
    """Create `dir_name`, removing invalid path characters if necessary.

    Tries the requested name first, then progressively sanitized variants
    from `path_fallbacks`.

    Returns:
        The path to the created directory, which may not be the original path.

    Raises:
        OSError: if no candidate directory could be created.
    """
    for new_name in path_fallbacks(dir_name):
        try:
            os.makedirs(new_name, exist_ok=True)
            if Path(new_name) != Path(dir_name):
                logger.warning(f"Creating '{new_name}' instead of '{dir_name}'")
            # Preserve the caller's path type (Path in, Path out).
            return Path(new_name) if isinstance(dir_name, Path) else new_name
        except (ValueError, NotADirectoryError):
            # This candidate is malformed; try the next sanitized variant.
            pass
        except OSError as e:
            # EINVAL means the name is invalid on this filesystem: fall through
            # to the next candidate. (Was a magic `22`; use the symbolic
            # constant.) Anything else is a real error.
            if e.errno != errno.EINVAL:
                raise

    raise OSError(f"Unable to create directory '{dir_name}'")
|
| 71 |
+
|
| 72 |
+
|
| 73 |
+
def files_in(path: StrPath) -> Generator[os.DirEntry, None, None]:
    """Recursively yield an os.DirEntry for every file beneath `path`.

    A nonexistent (or non-directory) `path` yields nothing.
    """
    if not os.path.isdir(path):
        return
    for entry in os.scandir(path):
        if not entry.is_dir():
            yield entry
        else:
            yield from files_in(entry.path)
|
| 82 |
+
|
| 83 |
+
|
| 84 |
+
class WriteSerializingFile:
    """Wrapper for a file object that serializes writes across threads."""

    def __init__(self, f: BinaryIO) -> None:
        self.lock = threading.Lock()
        self.f = f

    def write(self, *args, **kargs) -> None:  # type: ignore
        """Write and flush under the lock so concurrent writes never interleave."""
        with self.lock:
            self.f.write(*args, **kargs)
            self.f.flush()

    def close(self) -> None:
        """Close the underlying file, first waiting for any in-flight write."""
        with self.lock:
            self.f.close()
|
| 105 |
+
|
| 106 |
+
|
| 107 |
+
class CRDedupedFile(WriteSerializingFile):
    """WriteSerializingFile that collapses carriage-return progress rewrites.

    Console progress bars repaint a line by emitting a carriage return (CR).
    Persisting every intermediate repaint would bloat the log, so a line that
    begins with a CR replaces the previously buffered line, and only the final
    text of each line is written out, newline-terminated.
    """

    def __init__(self, f: BinaryIO) -> None:
        super().__init__(f=f)
        # Trailing partial (unterminated) line carried over between write() calls.
        self._buff = b""

    def write(self, data) -> None:  # type: ignore
        lines = re.split(b"\r\n|\n", data)
        ret = []  # type: ignore
        for line in lines:
            if line[:1] == b"\r":
                # A leading CR overwrites the most recent pending line,
                # or discards the carried-over partial line if none pending.
                if ret:
                    ret.pop()
                elif self._buff:
                    self._buff = b""
            # Within a line, keep only the text after the last CR.
            line = line.split(b"\r")[-1]
            if line:
                ret.append(line)
        if self._buff:
            ret.insert(0, self._buff)
        if ret:
            # The final piece may be an unterminated partial line; hold it
            # back until the next write() or close().
            self._buff = ret.pop()
        super().write(b"\n".join(ret) + b"\n")

    def close(self) -> None:
        # Flush any held-back partial line before closing.
        if self._buff:
            super().write(self._buff)
        super().close()
|
| 134 |
+
|
| 135 |
+
|
| 136 |
+
def copy_or_overwrite_changed(source_path: StrPath, target_path: StrPath) -> StrPath:
    """Copy source_path to target_path, unless it already exists with the same mtime.

    We liberally add write permissions to deal with the case of multiple users needing
    to share the same cache or run directory.

    Args:
        source_path: The path to the file to copy.
        target_path: The path to copy the file to.

    Returns:
        The path to the copied file (which may be different from target_path).
    """
    return_type = type(target_path)

    # Normalize characters (e.g. ':') that are invalid on the host platform.
    target_path = system_preferred_path(target_path, warn=True)

    need_copy = (
        not os.path.isfile(target_path)
        or os.stat(source_path).st_mtime != os.stat(target_path).st_mtime
    )

    # NOTE(review): despite the name, no write bits are added here -- this is
    # the source file's mode verbatim. Confirm whether `| 0o222` was intended.
    permissions_plus_write = os.stat(source_path).st_mode
    if need_copy:
        dir_name, file_name = os.path.split(target_path)
        target_path = os.path.join(mkdir_allow_fallback(dir_name), file_name)
        try:
            # Use copy2 to preserve file metadata (including modified time).
            shutil.copy2(source_path, target_path)
        except PermissionError:
            # If the file is read-only try to make it writable.
            try:
                os.chmod(target_path, permissions_plus_write)
                shutil.copy2(source_path, target_path)
            except PermissionError as e:
                # BUG FIX: the message was a plain string literal, so the
                # target path was never interpolated into it.
                raise PermissionError(f"Unable to overwrite '{target_path!s}'") from e
        # Prevent future permissions issues by universal write permissions now.
        os.chmod(target_path, permissions_plus_write)

    return return_type(target_path)  # type: ignore # 'os.PathLike' is abstract.
|
| 176 |
+
|
| 177 |
+
|
| 178 |
+
@contextlib.contextmanager
def safe_open(
    path: StrPath, mode: str = "r", *args: Any, **kwargs: Any
) -> Generator[IO, None, None]:
    """Open a file, ensuring any changes only apply atomically after close.

    This context manager ensures that even unsuccessful writes will not leave a "dirty"
    file or overwrite good data, and that all temp data is cleaned up.

    The semantics and behavior are intended to be nearly identical to the built-in
    open() function. Differences:
    - It creates any parent directories that don't exist, rather than raising.
    - In 'x' mode, it checks at the beginning AND end of the write and fails if the
      file exists either time.
    """
    path = Path(path).resolve()
    path.parent.mkdir(parents=True, exist_ok=True)

    if "x" in mode and path.exists():
        raise FileExistsError(f"{path!s} already exists")

    if "r" in mode and "+" not in mode:
        # This is read-only, so we can just open the original file.
        # TODO (hugh): create a reflink and read from that.
        with path.open(mode, *args, **kwargs) as f:
            yield f
        return

    # Stage all writes in a temp file in the same parent directory (hence the
    # same filesystem), then move it into place only after the caller's block
    # completes without raising.
    with tempfile.TemporaryDirectory(dir=path.parent) as tmp_dir:
        tmp_path = Path(tmp_dir) / path.name

        if ("r" in mode or "a" in mode) and path.exists():
            # We need to copy the original file in order to support reads and appends.
            # TODO (hugh): use reflinks to avoid the copy on platforms that support it.
            shutil.copy2(path, tmp_path)

        with tmp_path.open(mode, *args, **kwargs) as f:
            yield f
            # Make sure staged bytes reach the disk before the rename below.
            f.flush()
            os.fsync(f.fileno())

        if "x" in mode:
            # Ensure that if another process has beaten us to writing the file we raise
            # rather than overwrite. os.link() atomically creates a hard link to the
            # target file and will raise FileExistsError if the target already exists.
            os.link(tmp_path, path)
            os.unlink(tmp_path)
        else:
            tmp_path.replace(path)
|
| 227 |
+
|
| 228 |
+
|
| 229 |
+
def safe_copy(source_path: StrPath, target_path: StrPath) -> StrPath:
    """Copy a file atomically and return `target_path`.

    A plain copy is not atomic: on operating systems that allow multiple
    writers to one file, two concurrent copies can interleave and corrupt the
    result. We mitigate this by copying into a temporary file first and then
    renaming it into place -- a rename is atomic, so the destination always
    holds one complete copy or another, never a mixture.
    """
    # TODO (hugh): check that there is enough free space.
    dest = Path(target_path).resolve()
    dest.parent.mkdir(parents=True, exist_ok=True)
    with tempfile.TemporaryDirectory(dir=dest.parent) as staging_dir:
        staged = (Path(staging_dir) / Path(source_path).name).with_suffix(".tmp")
        shutil.copy2(source_path, staged)
        staged.replace(dest)
    return target_path
|
| 249 |
+
|
| 250 |
+
|
| 251 |
+
def _reflink_linux(existing_path: Path, new_path: Path) -> None:
    """Create a reflink to `existing_path` at `new_path` on Linux."""
    import fcntl

    FICLONE = 0x40049409  # magic number from <linux/fs.h> # noqa: N806
    # The ioctl clones the source fd's extents onto the freshly opened target.
    with open(existing_path, "rb") as src, open(new_path, "wb+") as dst:
        fcntl.ioctl(dst.fileno(), FICLONE, src.fileno())
|
| 258 |
+
|
| 259 |
+
|
| 260 |
+
def _reflink_macos(existing_path: Path, new_path: Path) -> None:
    """Create a clone of `existing_path` at `new_path` via clonefile(2) on macOS."""
    try:
        clib = ctypes.CDLL("libc.dylib", use_errno=True)
    except (FileNotFoundError, OSError) as e:
        if ctypes.get_errno() != errno.ENOENT and not isinstance(e, FileNotFoundError):
            raise
        # Before macOS 11 (<Nov 2020) clib was in libSystem.dylib, so we can try there.
        clib = ctypes.CDLL("/usr/lib/libSystem.dylib", use_errno=True)

    try:
        clonefile = clib.clonefile
    except AttributeError:
        raise OSError(errno.ENOTSUP, "'clonefile' is not available on this system")

    # int clonefile(const char *src, const char *dst, int flags)
    clonefile.argtypes = (ctypes.c_char_p, ctypes.c_char_p, ctypes.c_int)
    clonefile.restype = ctypes.c_int

    if clonefile(os.fsencode(existing_path), os.fsencode(new_path), ctypes.c_int(0)):
        # Anything other than 0 is an error.
        err = ctypes.get_errno()
        raise OSError(err, os.strerror(err), existing_path)
|
| 281 |
+
|
| 282 |
+
|
| 283 |
+
def reflink(existing_path: StrPath, new_path: StrPath, overwrite: bool = False) -> None:
    """Create a reflink to `existing_path` at `new_path`.

    A reflink (reflective link) is a copy-on-write reference to a file. Once linked, the
    file and link are both "real" files (not symbolic or hard links) and each can be
    modified independently without affecting the other; however, they share the same
    underlying data blocks on disk so until one is modified they are "zero-cost" copies.

    Reflinks have all the functionality of copies, so we should use them wherever they
    are supported if we would otherwise copy a file. (This is not particularly radical--
    GNU `cp` defaults to `reflink=auto`, using it whenever available) However, support
    for them is limited to a small number of filesystems. They should work on:
    - Linux with a Btrfs or XFS filesystem (NOT ext4)
    - macOS 10.13 or later with an APFS filesystem (called clone files)

    Reflinks are also supported on Solaris and Windows with ReFSv2, but we haven't
    implemented support for them.

    Like hard links, a reflink can only be created on the same filesystem as the target.

    Raises:
        FileExistsError: if `new_path` exists and `overwrite` is False.
        OSError (or a subclass): with a descriptive message for known failure modes.
    """
    # Select the platform-specific implementation up front.
    if platform.system() == "Linux":
        link_fn = _reflink_linux
    elif platform.system() == "Darwin":
        link_fn = _reflink_macos
    else:
        raise OSError(
            errno.ENOTSUP, f"reflinks are not supported on {platform.system()}"
        )

    new_path = Path(new_path).resolve()
    existing_path = Path(existing_path).resolve()
    if new_path.exists():
        if not overwrite:
            raise FileExistsError(f"{new_path} already exists")
        logger.warning(f"Overwriting existing file {new_path}.")
        new_path.unlink()

    # Create any missing parent directories.
    new_path.parent.mkdir(parents=True, exist_ok=True)

    try:
        link_fn(existing_path, new_path)
    except OSError as e:
        # Translate raw errno values into more specific, descriptive exceptions.
        base_msg = f"failed to create reflink from {existing_path} to {new_path}."
        if e.errno in (errno.EPERM, errno.EACCES):
            raise PermissionError(f"Insufficient permissions; {base_msg}") from e
        if e.errno == errno.ENOENT:
            raise FileNotFoundError(f"File not found; {base_msg}") from e
        if e.errno == errno.EXDEV:
            raise ValueError(f"Cannot link across filesystems; {base_msg}") from e
        if e.errno == errno.EISDIR:
            raise IsADirectoryError(f"Cannot reflink a directory; {base_msg}") from e
        if e.errno in (errno.EOPNOTSUPP, errno.ENOTSUP):
            raise OSError(
                errno.ENOTSUP,
                f"Filesystem does not support reflinks; {base_msg}",
            ) from e
        if e.errno == errno.EINVAL:
            raise ValueError(f"Cannot link file ranges; {base_msg}") from e
        raise
|
| 343 |
+
|
| 344 |
+
|
| 345 |
+
def check_exists(path: StrPath) -> Optional[StrPath]:
    """Return the first existing variant of `path`, or None if none exist.

    Historically wandb used ':' in Artifact paths except on Windows, which
    breaks when e.g. a Linux machine reads an NTFS filesystem. This checks
    every sanitized candidate we might have created for the path.
    """
    wants_path_obj = isinstance(path, Path)
    for candidate in path_fallbacks(path):
        if os.path.exists(candidate):
            return Path(candidate) if wants_path_obj else candidate
    return None
|
| 357 |
+
|
| 358 |
+
|
| 359 |
+
def system_preferred_path(path: StrPath, warn: bool = False) -> StrPath:
    """Replace ':' with '-' in the non-drive part of `path` on Windows.

    On every other platform the path is returned untouched.

    Args:
        path: The path to convert.
        warn: Whether to log a warning when a replacement happens.
    """
    if platform.system() != "Windows":
        return path
    drive, rest = os.path.splitdrive(path)
    if warn and ":" in rest:
        logger.warning(f"Replacing ':' in {rest} with '-'")
    converted = drive + rest.replace(":", "-")
    # Preserve the caller's path type (Path in, Path out).
    return Path(converted) if isinstance(path, Path) else converted
|