diff --git a/.gitattributes b/.gitattributes index b2a3bdd2b9a4737e1e26ec22477e187d0a9baa45..511b1905ec7ba6679e5bf351c2256f0c87012da2 100644 --- a/.gitattributes +++ b/.gitattributes @@ -146,3 +146,4 @@ parrot/lib/libreadline.a filter=lfs diff=lfs merge=lfs -text parrot/lib/python3.10/site-packages/wandb/vendor/pygments/lexers/__pycache__/_php_builtins.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text parrot/lib/python3.10/site-packages/aiohttp/_http_writer.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text parrot/lib/libquadmath.so filter=lfs diff=lfs merge=lfs -text +parrot/lib/python3.10/site-packages/pyarrow/_dataset_parquet.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text diff --git a/parrot/lib/python3.10/site-packages/pyarrow/_dataset_parquet.cpython-310-x86_64-linux-gnu.so b/parrot/lib/python3.10/site-packages/pyarrow/_dataset_parquet.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..8be837b4c3cd2970a9ad5a9bcfa263ec16176d9a --- /dev/null +++ b/parrot/lib/python3.10/site-packages/pyarrow/_dataset_parquet.cpython-310-x86_64-linux-gnu.so @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:178e9547694aa62399bc0cf44c530a767396b5da9344416107b76c83bfe05d02 +size 364184 diff --git a/parrot/lib/python3.10/site-packages/wandb/sdk/internal/__pycache__/internal_util.cpython-310.pyc b/parrot/lib/python3.10/site-packages/wandb/sdk/internal/__pycache__/internal_util.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f79d8c43165fd91302636899303aacf7bea30edc Binary files /dev/null and b/parrot/lib/python3.10/site-packages/wandb/sdk/internal/__pycache__/internal_util.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/wandb/sdk/internal/__pycache__/job_builder.cpython-310.pyc b/parrot/lib/python3.10/site-packages/wandb/sdk/internal/__pycache__/job_builder.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2dee2df351c2636467cd3ae1e520651e34cd1aa8 Binary files /dev/null and b/parrot/lib/python3.10/site-packages/wandb/sdk/internal/__pycache__/job_builder.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/wandb/sdk/internal/__pycache__/profiler.cpython-310.pyc b/parrot/lib/python3.10/site-packages/wandb/sdk/internal/__pycache__/profiler.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..44d0e5793054df7dbbc67b557376be9dc281fe01 Binary files /dev/null and b/parrot/lib/python3.10/site-packages/wandb/sdk/internal/__pycache__/profiler.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/wandb/sdk/internal/__pycache__/sender.cpython-310.pyc b/parrot/lib/python3.10/site-packages/wandb/sdk/internal/__pycache__/sender.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..aa3bc8df352490d0a21454f5ae179a47825cd65f Binary files /dev/null and b/parrot/lib/python3.10/site-packages/wandb/sdk/internal/__pycache__/sender.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/wandb/sdk/internal/__pycache__/settings_static.cpython-310.pyc b/parrot/lib/python3.10/site-packages/wandb/sdk/internal/__pycache__/settings_static.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..49b34726f56b3b159b921e7146e656c86a3f1e0f Binary files /dev/null and b/parrot/lib/python3.10/site-packages/wandb/sdk/internal/__pycache__/settings_static.cpython-310.pyc differ diff --git 
a/parrot/lib/python3.10/site-packages/wandb/sdk/internal/__pycache__/writer.cpython-310.pyc b/parrot/lib/python3.10/site-packages/wandb/sdk/internal/__pycache__/writer.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..bf2cfb00213d47383254f317cf0467e6006c3b8e Binary files /dev/null and b/parrot/lib/python3.10/site-packages/wandb/sdk/internal/__pycache__/writer.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/wandb/sdk/launch/builder/__init__.py b/parrot/lib/python3.10/site-packages/wandb/sdk/launch/builder/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/parrot/lib/python3.10/site-packages/wandb/sdk/launch/builder/__pycache__/abstract.cpython-310.pyc b/parrot/lib/python3.10/site-packages/wandb/sdk/launch/builder/__pycache__/abstract.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9230f2bd61dfccbe4f0f899f87148e287d06df23 Binary files /dev/null and b/parrot/lib/python3.10/site-packages/wandb/sdk/launch/builder/__pycache__/abstract.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/wandb/sdk/launch/builder/__pycache__/build.cpython-310.pyc b/parrot/lib/python3.10/site-packages/wandb/sdk/launch/builder/__pycache__/build.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6435c12538c6517981e57a07d07b21266cf165cf Binary files /dev/null and b/parrot/lib/python3.10/site-packages/wandb/sdk/launch/builder/__pycache__/build.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/wandb/sdk/launch/builder/__pycache__/context_manager.cpython-310.pyc b/parrot/lib/python3.10/site-packages/wandb/sdk/launch/builder/__pycache__/context_manager.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..87f7756270fcf2ff711e28bff62a7c42b9fa4512 Binary files /dev/null and b/parrot/lib/python3.10/site-packages/wandb/sdk/launch/builder/__pycache__/context_manager.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/wandb/sdk/launch/builder/__pycache__/kaniko_builder.cpython-310.pyc b/parrot/lib/python3.10/site-packages/wandb/sdk/launch/builder/__pycache__/kaniko_builder.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f9e83613f0b2fc56ba916682c6282a0897c5e0f3 Binary files /dev/null and b/parrot/lib/python3.10/site-packages/wandb/sdk/launch/builder/__pycache__/kaniko_builder.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/wandb/sdk/launch/builder/__pycache__/noop.cpython-310.pyc b/parrot/lib/python3.10/site-packages/wandb/sdk/launch/builder/__pycache__/noop.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..85f308fdefa621b3cad2991df20c5aebabed54a8 Binary files /dev/null and b/parrot/lib/python3.10/site-packages/wandb/sdk/launch/builder/__pycache__/noop.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/wandb/sdk/launch/builder/build.py b/parrot/lib/python3.10/site-packages/wandb/sdk/launch/builder/build.py new file mode 100644 index 0000000000000000000000000000000000000000..ad42414e2e137c49e55a5e56b3d2457b63ab0cb9 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/wandb/sdk/launch/builder/build.py @@ -0,0 +1,297 @@ +import hashlib +import json +import logging +import os +import pathlib +import shlex +from typing import Any, Dict, List, Tuple + +from dockerpycreds.utils import find_executable # type: ignore + 
+import wandb +import wandb.env +from wandb import docker +from wandb.apis.internal import Api +from wandb.sdk.launch.loader import ( + builder_from_config, + environment_from_config, + registry_from_config, +) +from wandb.util import get_module + +from .._project_spec import EntryPoint, LaunchProject +from ..errors import ExecutionError, LaunchError +from ..utils import LOG_PREFIX, event_loop_thread_exec +from .templates.dockerfile import ( + ACCELERATOR_SETUP_TEMPLATE, + ENTRYPOINT_TEMPLATE, + PIP_TEMPLATE, + PYTHON_SETUP_TEMPLATE, + USER_CREATE_TEMPLATE, +) + +_logger = logging.getLogger(__name__) + + +_WANDB_DOCKERFILE_NAME = "Dockerfile.wandb" + + +async def validate_docker_installation() -> None: + """Verify if Docker is installed on host machine.""" + find_exec = event_loop_thread_exec(find_executable) + if not await find_exec("docker"): + raise ExecutionError( + "Could not find Docker executable. " + "Ensure Docker is installed as per the instructions " + "at https://docs.docker.com/install/overview/." + ) + + +def join(split_command: List[str]) -> str: + """Return a shell-escaped string from *split_command*. + + Also remove quotes from double quoted strings. Ex: + "'local container queue'" --> "local container queue" + """ + return " ".join(shlex.quote(arg.replace("'", "")) for arg in split_command) + + +async def build_image_from_project( + launch_project: LaunchProject, + api: Api, + launch_config: Dict[str, Any], +) -> str: + """Construct a docker image from a project and returns the URI of the image. + + Arguments: + launch_project: The project to build an image from. + api: The API object to use for fetching the project. + launch_config: The launch config to use for building the image. + + Returns: + The URI of the built image. + """ + assert launch_project.uri, "To build an image on queue a URI must be set." + launch_config = launch_config or {} + env_config = launch_config.get("environment", {}) + if not isinstance(env_config, dict): + wrong_type = type(env_config).__name__ + raise LaunchError( + f"Invalid environment config: {env_config} of type {wrong_type} " + "loaded from launch config. Expected dict." + ) + environment = environment_from_config(env_config) + + registry_config = launch_config.get("registry", {}) + if not isinstance(registry_config, dict): + wrong_type = type(registry_config).__name__ + raise LaunchError( + f"Invalid registry config: {registry_config} of type {wrong_type}" + " loaded from launch config. Expected dict." + ) + registry = registry_from_config(registry_config, environment) + + builder_config = launch_config.get("builder", {}) + if not isinstance(builder_config, dict): + wrong_type = type(builder_config).__name__ + raise LaunchError( + f"Invalid builder config: {builder_config} of type {wrong_type} " + "loaded from launch config. Expected dict." + ) + builder = builder_from_config(builder_config, environment, registry) + + if not builder: + raise LaunchError("Unable to build image. 
No builder found.") + + launch_project.fetch_and_validate_project() + + entry_point = ( + launch_project.get_job_entry_point() or launch_project.override_entrypoint + ) + assert entry_point is not None + wandb.termlog(f"{LOG_PREFIX}Building docker image from uri source") + image_uri = await builder.build_image(launch_project, entry_point) + if not image_uri: + raise LaunchError("Error building image uri") + else: + return image_uri + + +def image_tag_from_dockerfile_and_source( + launch_project: LaunchProject, dockerfile_contents: str +) -> str: + """Hashes the source and dockerfile contents into a unique tag.""" + image_source_string = launch_project.get_image_source_string() + unique_id_string = image_source_string + dockerfile_contents + image_tag = hashlib.sha256(unique_id_string.encode("utf-8")).hexdigest()[:8] + return image_tag + + +def get_docker_user(launch_project: LaunchProject, runner_type: str) -> Tuple[str, int]: + import getpass + + username = getpass.getuser() + + if runner_type == "sagemaker" and not launch_project.docker_image: + # unless user has provided their own image, sagemaker must run as root but keep the name for workdir etc + return username, 0 + + userid = launch_project.docker_user_id or os.geteuid() + return username, userid + + +def get_base_setup( + launch_project: LaunchProject, py_version: str, py_major: str +) -> str: + """Fill in the Dockerfile templates for stage 2 of build. + + CPU version is built on python, Accelerator version is built on user provided. + """ + minor = int(py_version.split(".")[1]) + if minor < 12: + python_base_image = f"python:{py_version}-buster" + else: + python_base_image = f"python:{py_version}-bookworm" + if launch_project.accelerator_base_image: + _logger.info( + f"Using accelerator base image: {launch_project.accelerator_base_image}" + ) + python_packages = [ + f"python{py_version}", + f"libpython{py_version}", + "python3-pip", + "python3-setuptools", + ] + base_setup = ACCELERATOR_SETUP_TEMPLATE.format( + accelerator_base_image=launch_project.accelerator_base_image, + python_packages=" \\\n".join(python_packages), + py_version=py_version, + ) + else: + python_packages = [ + "python3-dev", + "gcc", + ] # gcc required for python < 3.7 for some reason + base_setup = PYTHON_SETUP_TEMPLATE.format(py_base_image=python_base_image) + return base_setup + + +# Move this into the build context manager. +def get_requirements_section( + launch_project: LaunchProject, build_context_dir: str, builder_type: str +) -> str: + if builder_type == "docker": + buildx_installed = docker.is_buildx_installed() + if not buildx_installed: + wandb.termwarn( + "Docker BuildX is not installed, for faster builds upgrade docker: https://github.com/docker/buildx#installing" + ) + prefix = "RUN WANDB_DISABLE_CACHE=true" + elif builder_type == "kaniko": + prefix = "RUN WANDB_DISABLE_CACHE=true" + buildx_installed = False + + if buildx_installed: + prefix = "RUN --mount=type=cache,mode=0777,target=/root/.cache/pip" + + requirements_files = [] + deps_install_line = None + + base_path = pathlib.Path(build_context_dir) + # If there is a requirements.txt at root of build context, use that. 
+ if (base_path / "src" / "requirements.txt").exists(): + requirements_files += ["src/requirements.txt"] + deps_install_line = "pip install uv && uv pip install -r requirements.txt" + with open(base_path / "src" / "requirements.txt") as f: + requirements = f.readlines() + if not any(["wandb" in r for r in requirements]): + wandb.termwarn(f"{LOG_PREFIX}wandb is not present in requirements.txt.") + return PIP_TEMPLATE.format( + buildx_optional_prefix=prefix, + requirements_files=" ".join(requirements_files), + pip_install=deps_install_line, + ) + + # Elif there is pyproject.toml at build context, convert the dependencies + # section to a requirements.txt and use that. + elif (base_path / "src" / "pyproject.toml").exists(): + tomli = get_module("tomli") + if tomli is None: + wandb.termwarn( + "pyproject.toml found but tomli could not be loaded. To " + "install dependencies from pyproject.toml please run " + "`pip install tomli` and try again." + ) + else: + # First try to read deps from standard pyproject format. + with open(base_path / "src" / "pyproject.toml", "rb") as f: + contents = tomli.load(f) + project_deps = [ + str(d) for d in contents.get("project", {}).get("dependencies", []) + ] + if project_deps: + if not any(["wandb" in d for d in project_deps]): + wandb.termwarn( + f"{LOG_PREFIX}wandb is not present as a dependency in pyproject.toml." + ) + with open(base_path / "src" / "requirements.txt", "w") as f: + f.write("\n".join(project_deps)) + requirements_files += ["src/requirements.txt"] + deps_install_line = ( + "pip install uv && uv pip install -r requirements.txt" + ) + return PIP_TEMPLATE.format( + buildx_optional_prefix=prefix, + requirements_files=" ".join(requirements_files), + pip_install=deps_install_line, + ) + + # Else use frozen requirements from wandb run. + if ( + not deps_install_line + and (base_path / "src" / "requirements.frozen.txt").exists() + ): + requirements_files += [ + "src/requirements.frozen.txt", + "_wandb_bootstrap.py", + ] + deps_install_line = ( + launch_project.parse_existing_requirements() + "python _wandb_bootstrap.py" + ) + + if not deps_install_line: + raise LaunchError(f"No dependency sources found for {launch_project}") + + with open(base_path / "src" / "requirements.frozen.txt") as f: + requirements = f.readlines() + if not any(["wandb" in r for r in requirements]): + wandb.termwarn( + f"{LOG_PREFIX}wandb is not present in requirements.frozen.txt." + ) + + return PIP_TEMPLATE.format( + buildx_optional_prefix=prefix, + requirements_files=" ".join(requirements_files), + pip_install=deps_install_line, + ) + + else: + # this means no deps file was found + requirements_line = "RUN mkdir -p env/" # Docker fails otherwise + wandb.termwarn("No requirements file found. 
No packages will be installed.") + return requirements_line + + +def get_user_setup(username: str, userid: int, runner_type: str) -> str: + if runner_type == "sagemaker": + # sagemaker must run as root + return "USER root" + user_create = USER_CREATE_TEMPLATE.format(uid=userid, user=username) + user_create += f"\nUSER {username}" + return user_create + + +def get_entrypoint_setup( + entry_point: EntryPoint, +) -> str: + return ENTRYPOINT_TEMPLATE.format(entrypoint=json.dumps(entry_point.command)) diff --git a/parrot/lib/python3.10/site-packages/wandb/sdk/launch/builder/docker_builder.py b/parrot/lib/python3.10/site-packages/wandb/sdk/launch/builder/docker_builder.py new file mode 100644 index 0000000000000000000000000000000000000000..d764ba005e38f3346452124816c3b2120ea81fdf --- /dev/null +++ b/parrot/lib/python3.10/site-packages/wandb/sdk/launch/builder/docker_builder.py @@ -0,0 +1,177 @@ +"""Implementation of the docker builder.""" + +import logging +import os +from typing import Any, Dict, Optional + +import wandb +import wandb.docker as docker +from wandb.sdk.launch.agent.job_status_tracker import JobAndRunStatusTracker +from wandb.sdk.launch.builder.abstract import AbstractBuilder, registry_from_uri +from wandb.sdk.launch.environment.abstract import AbstractEnvironment +from wandb.sdk.launch.registry.abstract import AbstractRegistry + +from .._project_spec import EntryPoint, LaunchProject +from ..errors import LaunchDockerError, LaunchError +from ..registry.anon import AnonynmousRegistry +from ..registry.local_registry import LocalRegistry +from ..utils import ( + LOG_PREFIX, + event_loop_thread_exec, + warn_failed_packages_from_build_logs, +) +from .build import _WANDB_DOCKERFILE_NAME, validate_docker_installation +from .context_manager import BuildContextManager + +_logger = logging.getLogger(__name__) + + +class DockerBuilder(AbstractBuilder): + """Builds a docker image for a project. + + Attributes: + builder_config (Dict[str, Any]): The builder config. + + """ + + builder_type = "docker" + target_platform = "linux/amd64" + + def __init__( + self, + environment: AbstractEnvironment, + registry: AbstractRegistry, + config: Dict[str, Any], + ): + """Initialize a DockerBuilder. + + Arguments: + environment (AbstractEnvironment): The environment to use. + registry (AbstractRegistry): The registry to use. + + Raises: + LaunchError: If docker is not installed + """ + self.environment = environment # Docker builder doesn't actually use this. + self.registry = registry + self.config = config + + @classmethod + def from_config( + cls, + config: Dict[str, Any], + environment: AbstractEnvironment, + registry: AbstractRegistry, + ) -> "DockerBuilder": + """Create a DockerBuilder from a config. + + Arguments: + config (Dict[str, Any]): The config. + registry (AbstractRegistry): The registry to use. + verify (bool, optional): Whether to verify the functionality of the builder. + login (bool, optional): Whether to login to the registry. + + Returns: + DockerBuilder: The DockerBuilder. + """ + # If the user provided a destination URI in the builder config + # we use that as the registry. + image_uri = config.get("destination") + if image_uri: + if registry is not None: + wandb.termwarn( + f"{LOG_PREFIX}Overriding registry from registry config" + f" with {image_uri} from builder config." 
+ ) + registry = registry_from_uri(image_uri) + + return cls(environment, registry, config) + + async def verify(self) -> None: + """Verify the builder.""" + await validate_docker_installation() + + async def login(self) -> None: + """Login to the registry.""" + if isinstance(self.registry, LocalRegistry): + _logger.info(f"{LOG_PREFIX}No registry configured, skipping login.") + elif isinstance(self.registry, AnonynmousRegistry): + _logger.info(f"{LOG_PREFIX}Anonymous registry, skipping login.") + else: + username, password = await self.registry.get_username_password() + login = event_loop_thread_exec(docker.login) + await login(username, password, self.registry.uri) + + async def build_image( + self, + launch_project: LaunchProject, + entrypoint: EntryPoint, + job_tracker: Optional[JobAndRunStatusTracker] = None, + ) -> str: + """Build the image for the given project. + + Arguments: + launch_project (LaunchProject): The project to build. + entrypoint (EntryPoint): The entrypoint to use. + """ + await self.verify() + await self.login() + + build_context_manager = BuildContextManager(launch_project=launch_project) + build_ctx_path, image_tag = build_context_manager.create_build_context("docker") + dockerfile = os.path.join(build_ctx_path, _WANDB_DOCKERFILE_NAME) + repository = None if not self.registry else await self.registry.get_repo_uri() + + # if repo is set, use the repo name as the image name + if repository: + image_uri = f"{repository}:{image_tag}" + # otherwise, base the image name off of the source + # which the launch_project checks in image_name + else: + image_uri = f"{launch_project.image_name}:{image_tag}" + + if ( + not launch_project.build_required() + and await self.registry.check_image_exists(image_uri) + ): + return image_uri + + _logger.info( + f"image {image_uri} does not already exist in repository, building." + ) + try: + output = await event_loop_thread_exec(docker.build)( + tags=[image_uri], + file=dockerfile, + context_path=build_ctx_path, + platform=self.config.get("platform"), + ) + + warn_failed_packages_from_build_logs( + output, image_uri, launch_project.api, job_tracker + ) + + except docker.DockerError as e: + if job_tracker: + job_tracker.set_err_stage("build") + raise LaunchDockerError(f"Error communicating with docker client: {e}") + + try: + os.remove(build_ctx_path) + except Exception: + _msg = f"{LOG_PREFIX}Temporary docker context file {build_ctx_path} was not deleted." 
+ _logger.info(_msg) + + if repository: + reg, tag = image_uri.split(":") + wandb.termlog(f"{LOG_PREFIX}Pushing image {image_uri}") + push_resp = await event_loop_thread_exec(docker.push)(reg, tag) + if push_resp is None: + raise LaunchError("Failed to push image to repository") + elif ( + launch_project.resource == "sagemaker" + and f"The push refers to repository [{repository}]" not in push_resp + ): + raise LaunchError(f"Unable to push image to ECR, response: {push_resp}") + + return image_uri diff --git a/parrot/lib/python3.10/site-packages/wandb/sdk/launch/builder/templates/__pycache__/dockerfile.cpython-310.pyc b/parrot/lib/python3.10/site-packages/wandb/sdk/launch/builder/templates/__pycache__/dockerfile.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a31a378e7e041717e784afc224dfd9d3e4745333 Binary files /dev/null and b/parrot/lib/python3.10/site-packages/wandb/sdk/launch/builder/templates/__pycache__/dockerfile.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/wandb/sdk/launch/builder/templates/dockerfile.py b/parrot/lib/python3.10/site-packages/wandb/sdk/launch/builder/templates/dockerfile.py new file mode 100644 index 0000000000000000000000000000000000000000..54ac5bc62201946541541cee6c235391fd9c2d14 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/wandb/sdk/launch/builder/templates/dockerfile.py @@ -0,0 +1,92 @@ +DOCKERFILE_TEMPLATE = """ +# ----- stage 1: build ----- +FROM {py_build_image} as build + +# requirements section depends on pip vs conda, and presence of buildx +ENV PIP_PROGRESS_BAR off +{requirements_section} + +# ----- stage 2: base ----- +{base_setup} + +COPY --from=build /env /env +ENV PATH="/env/bin:$PATH" + +ENV SHELL /bin/bash + +# some resources (eg sagemaker) must run on root +{user_setup} + +WORKDIR {workdir} +RUN chown -R {uid} {workdir} + +# make artifacts cache dir unrelated to build +RUN mkdir -p {workdir}/.cache && chown -R {uid} {workdir}/.cache + +# copy code/etc +COPY --chown={uid} src/ {workdir} + +ENV PYTHONUNBUFFERED=1 + +{entrypoint_section} +""" + +# this goes into base_setup in TEMPLATE +PYTHON_SETUP_TEMPLATE = """ +FROM {py_base_image} as base +""" + +# this goes into base_setup in TEMPLATE +ACCELERATOR_SETUP_TEMPLATE = """ +FROM {accelerator_base_image} as base + +# make non-interactive so build doesn't block on questions +ENV DEBIAN_FRONTEND=noninteractive + +# install python +RUN apt-get update -qq && apt-get install --no-install-recommends -y \ + {python_packages} \ + && apt-get -qq purge && apt-get -qq clean \ + && rm -rf /var/lib/apt/lists/* + +# make sure `python` points at the right version +RUN update-alternatives --install /usr/bin/python python /usr/bin/python{py_version} 1 \ + && update-alternatives --install /usr/local/bin/python python /usr/bin/python{py_version} 1 +""" + +# this goes into requirements_section in TEMPLATE +PIP_TEMPLATE = """ +RUN python -m venv /env +# make sure we install into the env +ENV PATH="/env/bin:$PATH" + +COPY {requirements_files} ./ +{buildx_optional_prefix} {pip_install} +""" + +# this goes into requirements_section in TEMPLATE +CONDA_TEMPLATE = """ +COPY src/environment.yml . 
+{buildx_optional_prefix} conda env create -f environment.yml -n env + +# pack the environment so that we can transfer to the base image +RUN conda install -c conda-forge conda-pack +RUN conda pack -n env -o /tmp/env.tar && \ + mkdir /env && cd /env && tar xf /tmp/env.tar && \ + rm /tmp/env.tar +RUN /env/bin/conda-unpack +""" + +USER_CREATE_TEMPLATE = """ +RUN useradd \ + --create-home \ + --no-log-init \ + --shell /bin/bash \ + --gid 0 \ + --uid {uid} \ + {user} || echo "" +""" + +ENTRYPOINT_TEMPLATE = """ +ENTRYPOINT {entrypoint} +""" diff --git a/parrot/lib/python3.10/site-packages/wandb/sdk/launch/inputs/__pycache__/files.cpython-310.pyc b/parrot/lib/python3.10/site-packages/wandb/sdk/launch/inputs/__pycache__/files.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f774cc80021b198b2c416dfeeb9cc6e98a8a529c Binary files /dev/null and b/parrot/lib/python3.10/site-packages/wandb/sdk/launch/inputs/__pycache__/files.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/wandb/sdk/launch/inputs/__pycache__/internal.cpython-310.pyc b/parrot/lib/python3.10/site-packages/wandb/sdk/launch/inputs/__pycache__/internal.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..963d4675fe486420d323a3cc56137aaa49cc7230 Binary files /dev/null and b/parrot/lib/python3.10/site-packages/wandb/sdk/launch/inputs/__pycache__/internal.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/wandb/sdk/launch/runner/__init__.py b/parrot/lib/python3.10/site-packages/wandb/sdk/launch/runner/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/parrot/lib/python3.10/site-packages/wandb/sdk/launch/runner/abstract.py b/parrot/lib/python3.10/site-packages/wandb/sdk/launch/runner/abstract.py new file mode 100644 index 0000000000000000000000000000000000000000..861c9dffb837ac514799c8a179f4d225a428c5ae --- /dev/null +++ b/parrot/lib/python3.10/site-packages/wandb/sdk/launch/runner/abstract.py @@ -0,0 +1,195 @@ +"""Implementation of the abstract runner class. + +This class defines the interface that the W&B launch runner uses to manage the lifecycle +of runs launched in different environments (e.g. runs launched locally or in a cluster). +""" + +import logging +import os +import subprocess +import sys +from abc import ABC, abstractmethod +from typing import Any, Dict, List, Optional, Union + +from dockerpycreds.utils import find_executable # type: ignore + +import wandb +from wandb.apis.internal import Api +from wandb.sdk.lib import runid + +from .._project_spec import LaunchProject + +_logger = logging.getLogger(__name__) + + +if sys.version_info >= (3, 8): + from typing import Literal +else: + from typing_extensions import Literal + +State = Literal[ + "unknown", + "starting", + "running", + "failed", + "finished", + "stopping", + "stopped", + "preempted", +] + + +class Status: + def __init__(self, state: "State" = "unknown", messages: List[str] = None): # type: ignore + self.state = state + self.messages = messages or [] + + def __repr__(self) -> "State": + return self.state + + def __str__(self) -> str: + return self.state + + def __eq__(self, __value: object) -> bool: + if isinstance(__value, Status): + return self.state == __value.state + else: + return self.state == __value + + def __hash__(self) -> int: + return hash(self.state) + + +class AbstractRun(ABC): + """Wrapper around a W&B launch run. 
+ + A launched run is a subprocess running an entry point + command, that exposes methods for waiting on and cancelling the run. + This class defines the interface that the W&B launch runner uses to manage the lifecycle + of runs launched in different environments (e.g. runs launched locally or in a cluster). + ``AbstractRun`` is not thread-safe. That is, concurrent calls to wait() / cancel() + from multiple threads may inadvertently kill resources (e.g. local processes) unrelated to the + run. + """ + + def __init__(self) -> None: + self._status = Status() + + @property + def status(self) -> Status: + return self._status + + @abstractmethod + async def get_logs(self) -> Optional[str]: + """Return the logs associated with the run.""" + pass + + def _run_cmd( + self, cmd: List[str], output_only: Optional[bool] = False + ) -> Optional[Union["subprocess.Popen[bytes]", bytes]]: + """Run the command and returns a popen object or the stdout of the command. + + Arguments: + cmd: The command to run + output_only: If true just return the stdout bytes + """ + try: + env = os.environ + popen = subprocess.Popen(cmd, env=env, stdout=subprocess.PIPE) + if output_only: + popen.wait() + if popen.stdout is not None: + return popen.stdout.read() + return popen + except subprocess.CalledProcessError as e: + wandb.termerror(f"Command failed: {e}") + return None + + @abstractmethod + async def wait(self) -> bool: + """Wait for the run to finish, returning True if the run succeeded and false otherwise. + + Note that in some cases, we may wait until the remote job completes rather than until the W&B run completes. + """ + pass + + @abstractmethod + async def get_status(self) -> Status: + """Get status of the run.""" + pass + + @abstractmethod + async def cancel(self) -> None: + """Cancel the run (interrupts the command subprocess, cancels the run, etc). + + Cancels the run and waits for it to terminate. The W&B run status may not be + set correctly upon run cancellation. + """ + pass + + @property + @abstractmethod + def id(self) -> Optional[str]: + pass + + +class AbstractRunner(ABC): + """Abstract plugin class defining the interface needed to execute W&B Launches. + + You can define subclasses of ``AbstractRunner`` and expose them as third-party + plugins to enable running W&B projects against custom execution backends + (e.g. to run projects against your team's in-house cluster or job scheduler). + """ + + _type: str + + def __init__( + self, + api: Api, + backend_config: Dict[str, Any], + ) -> None: + self._api = api + self.backend_config = backend_config + self._cwd = os.getcwd() + self._namespace = runid.generate_id() + + def find_executable( + self, cmd: str + ) -> Any: # should return a string, but mypy doesn't trust find_executable + """Cross platform utility for checking if a program is available.""" + return find_executable(cmd) + + @property + def api_key(self) -> Any: + return self._api.api_key + + def verify(self) -> bool: + """This is called on first boot to verify the needed commands, and permissions are available. + + For now just call `wandb.termerror` and `sys.exit(1)` + """ + if self._api.api_key is None: + wandb.termerror( + "Couldn't find W&B api key, run wandb login or set WANDB_API_KEY" + ) + sys.exit(1) + return True + + @abstractmethod + async def run( + self, + launch_project: LaunchProject, + image_uri: str, + ) -> Optional[AbstractRun]: + """Submit an LaunchProject to be run. 
+ + Returns a SubmittedRun object to track the execution + Arguments: + launch_project: Object of _project_spec.LaunchProject class representing a wandb launch project + + Returns: + A :py:class:`wandb.sdk.launch.runners.SubmittedRun`. This function is expected to run + the project asynchronously, i.e. it should trigger project execution and then + immediately return a `SubmittedRun` to track execution status. + """ + pass diff --git a/parrot/lib/python3.10/site-packages/wandb/sdk/launch/runner/kubernetes_monitor.py b/parrot/lib/python3.10/site-packages/wandb/sdk/launch/runner/kubernetes_monitor.py new file mode 100644 index 0000000000000000000000000000000000000000..6c5abb1cb3525cacc1e3f19bd92a4fa31ed2e036 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/wandb/sdk/launch/runner/kubernetes_monitor.py @@ -0,0 +1,474 @@ +"""Monitors kubernetes resources managed by the launch agent.""" + +import asyncio +import logging +import sys +import traceback +from typing import Any, Dict, List, Optional, Tuple, Union + +import kubernetes_asyncio # type: ignore # noqa: F401 +import urllib3 +from kubernetes_asyncio import watch +from kubernetes_asyncio.client import ( # type: ignore # noqa: F401 + ApiException, + BatchV1Api, + CoreV1Api, + CustomObjectsApi, + V1Pod, + V1PodStatus, +) + +import wandb +from wandb.sdk.launch.agent import LaunchAgent +from wandb.sdk.launch.errors import LaunchError +from wandb.sdk.launch.runner.abstract import State, Status +from wandb.sdk.launch.utils import get_kube_context_and_api_client + +WANDB_K8S_LABEL_NAMESPACE = "wandb.ai" +WANDB_K8S_RUN_ID = f"{WANDB_K8S_LABEL_NAMESPACE}/run-id" +WANDB_K8S_LABEL_AGENT = f"{WANDB_K8S_LABEL_NAMESPACE}/agent" +WANDB_K8S_LABEL_MONITOR = f"{WANDB_K8S_LABEL_NAMESPACE}/monitor" + + +class Resources: + JOBS = "jobs" + PODS = "pods" + + +class CustomResource: + """Class for custom resources.""" + + def __init__(self, group: str, version: str, plural: str) -> None: + """Initialize the CustomResource.""" + self.group = group + self.version = version + self.plural = plural + + def __str__(self) -> str: + """Return a string representation of the CustomResource.""" + return f"{self.group}/{self.version}/{self.plural}" + + def __hash__(self) -> int: + """Return a hash of the CustomResource.""" + return hash(str(self)) + + +# Maps phases and conditions of custom objects to agent's internal run states. 
+CRD_STATE_DICT: Dict[str, State] = { + "created": "starting", + "pending": "starting", + "running": "running", + "completing": "running", + "succeeded": "finished", + "completed": "finished", + "failed": "failed", + "aborted": "failed", + "timeout": "failed", + "terminated": "failed", + "terminating": "stopping", +} + +_logger = logging.getLogger(__name__) + + +def create_named_task(name: str, coro: Any, *args: Any, **kwargs: Any) -> asyncio.Task: + """Create a named task.""" + task = asyncio.create_task(coro(*args, **kwargs)) + if sys.version_info >= (3, 8): + task.set_name(name) + task.add_done_callback(_log_err_task_callback) + return task + + +def _log_err_task_callback(task: asyncio.Task) -> None: + """Callback to log exceptions from tasks.""" + exec = task.exception() + if exec is not None: + if isinstance(exec, asyncio.CancelledError): + wandb.termlog(f"Task {task.get_name()} was cancelled") + return + name = str(task) if sys.version_info < (3, 8) else task.get_name() + wandb.termerror(f"Exception in task {name}") + tb = exec.__traceback__ + tb_str = "".join(traceback.format_tb(tb)) + wandb.termerror(tb_str) + + +def _is_preempted(status: "V1PodStatus") -> bool: + """Check if this pod has been preempted.""" + if hasattr(status, "conditions") and status.conditions is not None: + for condition in status.conditions: + if condition.type == "DisruptionTarget" and condition.reason in [ + "EvictionByEvictionAPI", + "PreemptionByScheduler", + "TerminationByKubelet", + ]: + return True + return False + + +def _is_container_creating(status: "V1PodStatus") -> bool: + """Check if this pod has started creating containers.""" + for container_status in status.container_statuses or []: + if ( + container_status.state + and container_status.state.waiting + and container_status.state.waiting.reason == "ContainerCreating" + ): + return True + return False + + +def _is_pod_unschedulable(status: "V1PodStatus") -> Tuple[bool, str]: + """Return whether the pod is unschedulable along with the reason message.""" + if not status.conditions: + return False, "" + for condition in status.conditions: + if ( + condition.type == "PodScheduled" + and condition.status == "False" + and condition.reason == "Unschedulable" + ): + return True, condition.message + return False, "" + + +def _get_crd_job_name(object: "V1Pod") -> Optional[str]: + refs = object.metadata.owner_references + if refs: + return refs[0].name + return None + + +def _state_from_conditions(conditions: List[Dict[str, Any]]) -> Optional[State]: + """Get the status from the pod conditions.""" + true_conditions = [ + c.get("type", "").lower() for c in conditions if c.get("status") == "True" + ] + detected_states = { + CRD_STATE_DICT[c] for c in true_conditions if c in CRD_STATE_DICT + } + # The list below is ordered so that returning the first state detected + # will accurately reflect the state of the job. + states_in_order: List[State] = [ + "finished", + "failed", + "stopping", + "running", + "starting", + ] + for state in states_in_order: + if state in detected_states: + return state + return None + + +def _state_from_replicated_status(status_dict: Dict[str, int]) -> Optional[State]: + """Infer overall job status from replicated job status for jobsets. + + More info on jobset: + https://github.com/kubernetes-sigs/jobset/blob/main/docs/concepts/README.md + + This is useful for detecting when jobsets are starting. 
+ """ + pods_ready = status_dict.get("ready", 0) + pods_active = status_dict.get("active", 0) + if pods_ready >= 1: + return "running" + elif pods_active >= 1: + return "starting" + return None + + +class LaunchKubernetesMonitor: + """Monitors kubernetes resources managed by the launch agent. + + Note: this class is forced to be a singleton in order to prevent multiple + threads from being created that monitor the same kubernetes resources. + """ + + _instance = None # This is used to ensure only one instance is created. + + def __new__(cls, *args: Any, **kwargs: Any) -> "LaunchKubernetesMonitor": + """Create a new instance of the LaunchKubernetesMonitor. + + This method ensures that only one instance of the LaunchKubernetesMonitor + is created. This is done to prevent multiple threads from being created + that monitor the same kubernetes resources. + """ + if cls._instance is None: + cls._instance = super().__new__(cls) + return cls._instance + + def __init__( + self, + core_api: CoreV1Api, + batch_api: BatchV1Api, + custom_api: CustomObjectsApi, + label_selector: str, + ): + """Initialize the LaunchKubernetesMonitor.""" + self._core_api: CoreV1Api = core_api + self._batch_api: BatchV1Api = batch_api + self._custom_api: CustomObjectsApi = custom_api + + self._label_selector: str = label_selector + + # Dict mapping a tuple of (namespace, resource_type) to an + # asyncio.Task that is monitoring that resource type in that namespace. + self._monitor_tasks: Dict[ + Tuple[str, Union[str, CustomResource]], asyncio.Task + ] = dict() + + # Map from job name to job state. + self._job_states: Dict[str, Status] = dict() + + @classmethod + async def ensure_initialized( + cls, + ) -> None: + """Initialize the LaunchKubernetesMonitor.""" + if cls._instance is None: + _, api_client = await get_kube_context_and_api_client( + kubernetes_asyncio, {} + ) + core_api = CoreV1Api(api_client) + batch_api = BatchV1Api(api_client) + custom_api = CustomObjectsApi(api_client) + label_selector = f"{WANDB_K8S_LABEL_MONITOR}=true" + if LaunchAgent.initialized(): + label_selector += f",{WANDB_K8S_LABEL_AGENT}={LaunchAgent.name()}" + cls( + core_api=core_api, + batch_api=batch_api, + custom_api=custom_api, + label_selector=label_selector, + ) + + @classmethod + def monitor_namespace( + cls, namespace: str, custom_resource: Optional[CustomResource] = None + ) -> None: + """Start monitoring a namespaces for resources.""" + if cls._instance is None: + raise LaunchError( + "LaunchKubernetesMonitor not initialized, cannot monitor namespace." + ) + cls._instance.__monitor_namespace(namespace, custom_resource=custom_resource) + + @classmethod + def get_status(cls, job_name: str) -> Status: + """Get the status of a job.""" + if cls._instance is None: + raise LaunchError( + "LaunchKubernetesMonitor not initialized, cannot get status." + ) + return cls._instance.__get_status(job_name) + + @classmethod + def status_count(cls) -> Dict[State, int]: + """Get a dictionary mapping statuses to the # monitored jobs with each status.""" + if cls._instance is None: + raise ValueError( + "LaunchKubernetesMonitor not initialized, cannot get status counts." 
+ ) + return cls._instance.__status_count() + + def __monitor_namespace( + self, namespace: str, custom_resource: Optional[CustomResource] = None + ) -> None: + """Start monitoring a namespaces for resources.""" + if (namespace, Resources.PODS) not in self._monitor_tasks: + self._monitor_tasks[(namespace, Resources.PODS)] = create_named_task( + f"monitor_pods_{namespace}", + self._monitor_pods, + namespace, + ) + # If a custom resource is specified then we will start monitoring + # that resource type in the namespace instead of jobs. + if custom_resource is not None: + if (namespace, custom_resource) not in self._monitor_tasks: + self._monitor_tasks[(namespace, custom_resource)] = create_named_task( + f"monitor_{custom_resource}_{namespace}", + self._monitor_crd, + namespace, + custom_resource=custom_resource, + ) + else: + if (namespace, Resources.JOBS) not in self._monitor_tasks: + self._monitor_tasks[(namespace, Resources.JOBS)] = create_named_task( + f"monitor_jobs_{namespace}", + self._monitor_jobs, + namespace, + ) + + def __get_status(self, job_name: str) -> Status: + """Get the status of a job.""" + if job_name not in self._job_states: + return Status("unknown") + state = self._job_states[job_name] + return state + + def __status_count(self) -> Dict[State, int]: + """Get a dictionary mapping statuses to the # monitored jobs with each status.""" + counts = dict() + for _, status in self._job_states.items(): + state = status.state + if state not in counts: + counts[state] = 1 + else: + counts[state] += 1 + return counts + + def _set_status_state(self, job_name: str, state: State) -> None: + """Set the status of the run.""" + if job_name not in self._job_states: + self._job_states[job_name] = Status(state) + elif self._job_states[job_name].state != state: + self._job_states[job_name].state = state + + def _add_status_message(self, job_name: str, message: str) -> None: + if job_name not in self._job_states: + self._job_states[job_name] = Status("unknown") + wandb.termwarn(f"Warning from Kubernetes for job {job_name}: {message}") + self._job_states[job_name].messages.append(message) + + async def _monitor_pods(self, namespace: str) -> None: + """Monitor a namespace for changes.""" + watcher = SafeWatch(watch.Watch()) + async for event in watcher.stream( + self._core_api.list_namespaced_pod, + namespace=namespace, + label_selector=self._label_selector, + ): + obj = event.get("object") + job_name = obj.metadata.labels.get("job-name") or _get_crd_job_name(obj) + if job_name is None or not hasattr(obj, "status"): + continue + if self.__get_status(job_name) in ["finished", "failed"]: + continue + + is_unschedulable, reason = _is_pod_unschedulable(obj.status) + if is_unschedulable: + self._add_status_message(job_name, reason) + if obj.status.phase == "Running" or _is_container_creating(obj.status): + self._set_status_state(job_name, "running") + elif _is_preempted(obj.status): + self._set_status_state(job_name, "preempted") + + async def _monitor_jobs(self, namespace: str) -> None: + """Monitor a namespace for changes.""" + watcher = SafeWatch(watch.Watch()) + async for event in watcher.stream( + self._batch_api.list_namespaced_job, + namespace=namespace, + label_selector=self._label_selector, + ): + obj = event.get("object") + job_name = obj.metadata.name + + if obj.status.succeeded == 1: + self._set_status_state(job_name, "finished") + elif obj.status.failed is not None and obj.status.failed >= 1: + self._set_status_state(job_name, "failed") + + # If the job is deleted and we haven't seen a 
terminal state + # then we will consider the job failed. + if event.get("type") == "DELETED": + if self._job_states.get(job_name) != Status("finished"): + self._set_status_state(job_name, "failed") + + async def _monitor_crd( + self, namespace: str, custom_resource: CustomResource + ) -> None: + """Monitor a namespace for changes.""" + watcher = SafeWatch(watch.Watch()) + async for event in watcher.stream( + self._custom_api.list_namespaced_custom_object, + namespace=namespace, + plural=custom_resource.plural, + group=custom_resource.group, + version=custom_resource.version, + label_selector=self._label_selector, + ): + object = event.get("object") + name = object.get("metadata", dict()).get("name") + status = object.get("status") + state = None + if status is None: + continue + replicated_jobs_status = status.get("ReplicatedJobsStatus") + if isinstance(replicated_jobs_status, dict): + state = _state_from_replicated_status(replicated_jobs_status) + state_dict = status.get("state") + if isinstance(state_dict, dict): + phase = state_dict.get("phase") + if phase: + state = CRD_STATE_DICT.get(phase.lower()) + else: + conditions = status.get("conditions") + if isinstance(conditions, list): + state = _state_from_conditions(conditions) + else: + # This should never happen. + _logger.warning( + f"Unexpected conditions type {type(conditions)} " + f"for CRD watcher in {namespace}" + ) + if state is None: + continue + self._set_status_state(name, state) + + +class SafeWatch: + """Wrapper for the kubernetes watch class that can recover in more situations.""" + + def __init__(self, watcher: watch.Watch) -> None: + """Initialize the SafeWatch.""" + self._watcher = watcher + self._last_seen_resource_version: Optional[str] = None + self._stopped = False + + async def stream(self, func: Any, *args: Any, **kwargs: Any) -> Any: + """Stream the watcher. + + This method will automatically resume the stream if it breaks. It will + also save the resource version so that the stream can be resumed from + the last seen resource version. + """ + while True: + try: + async for event in self._watcher.stream( + func, *args, **kwargs, timeout_seconds=30 + ): + if self._stopped: + break + # Save the resource version so that we can resume the stream + # if it breaks. + object = event.get("object") + if isinstance(object, dict): + self._last_seen_resource_version = object.get( + "metadata", dict() + ).get("resourceVersion") + else: + self._last_seen_resource_version = ( + object.metadata.resource_version + ) + kwargs["resource_version"] = self._last_seen_resource_version + yield event + # If stream ends after stop just break + if self._stopped: + break + except urllib3.exceptions.ProtocolError as e: + wandb.termwarn(f"Broken event stream: {e}, attempting to recover") + except ApiException as e: + if e.status == 410: + # If resource version is too old we need to start over. + del kwargs["resource_version"] + self._last_seen_resource_version = None + except Exception as E: + exc_type = type(E).__name__ + stack_trace = traceback.format_exc() + wandb.termerror( + f"Unknown exception in event stream of type {exc_type}: {E}, attempting to recover. 
Stack trace: {stack_trace}" + ) diff --git a/parrot/lib/python3.10/site-packages/wandb/sdk/launch/runner/kubernetes_runner.py b/parrot/lib/python3.10/site-packages/wandb/sdk/launch/runner/kubernetes_runner.py new file mode 100644 index 0000000000000000000000000000000000000000..8cb62f739ef213bc5d8b50d44e73df3eb70a0b50 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/wandb/sdk/launch/runner/kubernetes_runner.py @@ -0,0 +1,963 @@ +"""Implementation of KubernetesRunner class for wandb launch.""" + +import asyncio +import base64 +import datetime +import json +import logging +import os +from typing import Any, Dict, Iterator, List, Optional, Tuple, Union + +import yaml + +import wandb +from wandb.apis.internal import Api +from wandb.sdk.launch.agent.agent import LaunchAgent +from wandb.sdk.launch.environment.abstract import AbstractEnvironment +from wandb.sdk.launch.registry.abstract import AbstractRegistry +from wandb.sdk.launch.registry.azure_container_registry import AzureContainerRegistry +from wandb.sdk.launch.registry.local_registry import LocalRegistry +from wandb.sdk.launch.runner.abstract import Status +from wandb.sdk.launch.runner.kubernetes_monitor import ( + WANDB_K8S_LABEL_AGENT, + WANDB_K8S_LABEL_MONITOR, + WANDB_K8S_RUN_ID, + CustomResource, + LaunchKubernetesMonitor, +) +from wandb.sdk.lib.retry import ExponentialBackoff, retry_async +from wandb.util import get_module + +from .._project_spec import EntryPoint, LaunchProject +from ..errors import LaunchError +from ..utils import ( + CODE_MOUNT_DIR, + LOG_PREFIX, + MAX_ENV_LENGTHS, + PROJECT_SYNCHRONOUS, + get_kube_context_and_api_client, + make_name_dns_safe, +) +from .abstract import AbstractRun, AbstractRunner + +get_module( + "kubernetes_asyncio", + required="Kubernetes runner requires the kubernetes package. Please install it with `pip install wandb[launch]`.", +) + +import kubernetes_asyncio # type: ignore # noqa: E402 +from kubernetes_asyncio import client # noqa: E402 +from kubernetes_asyncio.client.api.batch_v1_api import ( # type: ignore # noqa: E402 + BatchV1Api, +) +from kubernetes_asyncio.client.api.core_v1_api import ( # type: ignore # noqa: E402 + CoreV1Api, +) +from kubernetes_asyncio.client.api.custom_objects_api import ( # type: ignore # noqa: E402 + CustomObjectsApi, +) +from kubernetes_asyncio.client.models.v1_secret import ( # type: ignore # noqa: E402 + V1Secret, +) +from kubernetes_asyncio.client.rest import ApiException # type: ignore # noqa: E402 + +TIMEOUT = 5 +API_KEY_SECRET_MAX_RETRIES = 5 + +_logger = logging.getLogger(__name__) + + +SOURCE_CODE_PVC_MOUNT_PATH = os.environ.get("WANDB_LAUNCH_CODE_PVC_MOUNT_PATH") +SOURCE_CODE_PVC_NAME = os.environ.get("WANDB_LAUNCH_CODE_PVC_NAME") + + +class KubernetesSubmittedRun(AbstractRun): + """Wrapper for a launched run on Kubernetes.""" + + def __init__( + self, + batch_api: "BatchV1Api", + core_api: "CoreV1Api", + name: str, + namespace: Optional[str] = "default", + secret: Optional["V1Secret"] = None, + ) -> None: + """Initialize a KubernetesSubmittedRun. + + Other implementations of the AbstractRun interface poll on the run + when `get_status` is called, but KubernetesSubmittedRun uses + Kubernetes watch streams to update the run status. One thread handles + events from the job object and another thread handles events from the + rank 0 pod. These threads updated the `_status` attributed of the + KubernetesSubmittedRun object. When `get_status` is called, the + `_status` attribute is returned. + + Arguments: + batch_api: Kubernetes BatchV1Api object. 
+ core_api: Kubernetes CoreV1Api object. + name: Name of the job. + namespace: Kubernetes namespace. + secret: Kubernetes secret. + + Returns: + None. + """ + self.batch_api = batch_api + self.core_api = core_api + self.name = name + self.namespace = namespace + self._fail_count = 0 + self.secret = secret + + @property + def id(self) -> str: + """Return the run id.""" + return self.name + + async def get_logs(self) -> Optional[str]: + try: + pods = await self.core_api.list_namespaced_pod( + label_selector=f"job-name={self.name}", namespace=self.namespace + ) + pod_names = [pi.metadata.name for pi in pods.items] + if not pod_names: + wandb.termwarn(f"Found no pods for kubernetes job: {self.name}") + return None + logs = await self.core_api.read_namespaced_pod_log( + name=pod_names[0], namespace=self.namespace + ) + if logs: + return str(logs) + else: + wandb.termwarn(f"No logs for kubernetes pod(s): {pod_names}") + return None + except Exception as e: + wandb.termerror(f"{LOG_PREFIX}Failed to get pod logs: {e}") + return None + + async def wait(self) -> bool: + """Wait for the run to finish. + + Returns: + True if the run finished successfully, False otherwise. + """ + while True: + status = await self.get_status() + wandb.termlog(f"{LOG_PREFIX}Job {self.name} status: {status.state}") + if status.state in ["finished", "failed", "preempted"]: + break + await asyncio.sleep(5) + + await self._delete_secret() + return ( + status.state == "finished" + ) # todo: not sure if this (copied from aws runner) is the right approach? should we return false on failure + + async def get_status(self) -> Status: + status = LaunchKubernetesMonitor.get_status(self.name) + if status in ["stopped", "failed", "finished", "preempted"]: + await self._delete_secret() + return status + + async def cancel(self) -> None: + """Cancel the run.""" + try: + await self.batch_api.delete_namespaced_job( + namespace=self.namespace, + name=self.name, + ) + await self._delete_secret() + except ApiException as e: + raise LaunchError( + f"Failed to delete Kubernetes Job {self.name} in namespace {self.namespace}: {str(e)}" + ) from e + + async def _delete_secret(self) -> None: + # Cleanup secret if not running in a helm-managed context + if not os.environ.get("WANDB_RELEASE_NAME") and self.secret: + await self.core_api.delete_namespaced_secret( + name=self.secret.metadata.name, + namespace=self.secret.metadata.namespace, + ) + self.secret = None + + +class CrdSubmittedRun(AbstractRun): + """Run submitted to a CRD backend, e.g. Volcano.""" + + def __init__( + self, + group: str, + version: str, + plural: str, + name: str, + namespace: str, + core_api: CoreV1Api, + custom_api: CustomObjectsApi, + ) -> None: + """Create a run object for tracking the progress of a CRD. + + Arguments: + group: The API group of the CRD. + version: The API version of the CRD. + plural: The plural name of the CRD. + name: The name of the CRD instance. + namespace: The namespace of the CRD instance. + core_api: The Kubernetes core API client. + custom_api: The Kubernetes custom object API client. + + Raises: + LaunchError: If the CRD instance does not exist. 
+ """ + self.group = group + self.version = version + self.plural = plural + self.name = name + self.namespace = namespace + self.core_api = core_api + self.custom_api = custom_api + self._fail_count = 0 + + @property + def id(self) -> str: + """Get the name of the custom object.""" + return self.name + + async def get_logs(self) -> Optional[str]: + """Get logs for custom object.""" + # TODO: test more carefully once we release multi-node support + logs: Dict[str, Optional[str]] = {} + try: + pods = await self.core_api.list_namespaced_pod( + label_selector=f"wandb/run-id={self.name}", namespace=self.namespace + ) + pod_names = [pi.metadata.name for pi in pods.items] + for pod_name in pod_names: + logs[pod_name] = await self.core_api.read_namespaced_pod_log( + name=pod_name, namespace=self.namespace + ) + except ApiException as e: + wandb.termwarn(f"Failed to get logs for {self.name}: {str(e)}") + return None + if not logs: + return None + logs_as_array = [f"Pod {pod_name}:\n{log}" for pod_name, log in logs.items()] + return "\n".join(logs_as_array) + + async def get_status(self) -> Status: + """Get status of custom object.""" + return LaunchKubernetesMonitor.get_status(self.name) + + async def cancel(self) -> None: + """Cancel the custom object.""" + try: + await self.custom_api.delete_namespaced_custom_object( + group=self.group, + version=self.version, + namespace=self.namespace, + plural=self.plural, + name=self.name, + ) + except ApiException as e: + raise LaunchError( + f"Failed to delete CRD {self.name} in namespace {self.namespace}: {str(e)}" + ) from e + + async def wait(self) -> bool: + """Wait for this custom object to finish running.""" + while True: + status = await self.get_status() + wandb.termlog(f"{LOG_PREFIX}Job {self.name} status: {status}") + if status.state in ["finished", "failed", "preempted"]: + return status.state == "finished" + await asyncio.sleep(5) + + +class KubernetesRunner(AbstractRunner): + """Launches runs onto kubernetes.""" + + def __init__( + self, + api: Api, + backend_config: Dict[str, Any], + environment: AbstractEnvironment, + registry: AbstractRegistry, + ) -> None: + """Create a Kubernetes runner. + + Arguments: + api: The API client object. + backend_config: The backend configuration. + environment: The environment to launch runs into. + + Raises: + LaunchError: If the Kubernetes configuration is invalid. + """ + super().__init__(api, backend_config) + self.environment = environment + self.registry = registry + + def get_namespace( + self, resource_args: Dict[str, Any], context: Dict[str, Any] + ) -> str: + """Get the namespace to launch into. + + Arguments: + resource_args: The resource args to launch. + context: The k8s config context. + + Returns: + The namespace to launch into. + """ + default_namespace = ( + context["context"].get("namespace", "default") if context else "default" + ) + return ( # type: ignore[no-any-return] + resource_args.get("metadata", {}).get("namespace") + or resource_args.get( + "namespace" + ) # continue support for malformed namespace + or self.backend_config.get("runner", {}).get("namespace") + or default_namespace + ) + + async def _inject_defaults( + self, + resource_args: Dict[str, Any], + launch_project: LaunchProject, + image_uri: str, + namespace: str, + core_api: "CoreV1Api", + ) -> Tuple[Dict[str, Any], Optional["V1Secret"]]: + """Apply our default values, return job dict and api key secret. + + Arguments: + resource_args (Dict[str, Any]): The resource args to launch. 
+ launch_project (LaunchProject): The launch project. + builder (Optional[AbstractBuilder]): The builder. + namespace (str): The namespace. + core_api (CoreV1Api): The core api. + + Returns: + Tuple[Dict[str, Any], Optional["V1Secret"]]: The resource args and api key secret. + """ + job: Dict[str, Any] = { + "apiVersion": "batch/v1", + "kind": "Job", + } + job.update(resource_args) + + job_metadata: Dict[str, Any] = job.get("metadata", {}) + job_spec: Dict[str, Any] = {"backoffLimit": 0, "ttlSecondsAfterFinished": 60} + job_spec.update(job.get("spec", {})) + pod_template: Dict[str, Any] = job_spec.get("template", {}) + pod_spec: Dict[str, Any] = {"restartPolicy": "Never"} + pod_spec.update(pod_template.get("spec", {})) + containers: List[Dict[str, Any]] = pod_spec.get("containers", [{}]) + + # Add labels to job metadata + job_metadata.setdefault("labels", {}) + job_metadata["labels"][WANDB_K8S_RUN_ID] = launch_project.run_id + job_metadata["labels"][WANDB_K8S_LABEL_MONITOR] = "true" + if LaunchAgent.initialized(): + job_metadata["labels"][WANDB_K8S_LABEL_AGENT] = LaunchAgent.name() + # name precedence: name in spec > generated name + if not job_metadata.get("name"): + job_metadata["generateName"] = make_name_dns_safe( + f"launch-{launch_project.target_entity}-{launch_project.target_project}-" + ) + + for i, cont in enumerate(containers): + if "name" not in cont: + cont["name"] = cont.get("name", "launch" + str(i)) + if "securityContext" not in cont: + cont["securityContext"] = { + "allowPrivilegeEscalation": False, + "capabilities": {"drop": ["ALL"]}, + "seccompProfile": {"type": "RuntimeDefault"}, + } + + entry_point = ( + launch_project.override_entrypoint or launch_project.get_job_entry_point() + ) + if launch_project.docker_image: + # dont specify run id if user provided image, could have multiple runs + containers[0]["image"] = image_uri + # TODO: handle secret pulling image from registry + elif not any(["image" in cont for cont in containers]): + assert entry_point is not None + # in the non instance case we need to make an imagePullSecret + # so the new job can pull the image + containers[0]["image"] = image_uri + secret = await maybe_create_imagepull_secret( + core_api, self.registry, launch_project.run_id, namespace + ) + if secret is not None: + pod_spec["imagePullSecrets"] = [ + {"name": f"regcred-{launch_project.run_id}"} + ] + + inject_entrypoint_and_args( + containers, + entry_point, + launch_project.override_args, + launch_project.override_entrypoint is not None, + ) + + env_vars = launch_project.get_env_vars_dict( + self._api, MAX_ENV_LENGTHS[self.__class__.__name__] + ) + api_key_secret = None + for cont in containers: + # Add our env vars to user supplied env vars + env = cont.get("env") or [] + for key, value in env_vars.items(): + if ( + key == "WANDB_API_KEY" + and value + and ( + LaunchAgent.initialized() + or self.backend_config[PROJECT_SYNCHRONOUS] + ) + ): + # Override API key with secret. TODO: Do the same for other runners + release_name = os.environ.get("WANDB_RELEASE_NAME") + secret_name = "wandb-api-key" + if release_name: + secret_name += f"-{release_name}" + else: + secret_name += f"-{launch_project.run_id}" + + def handle_exception(e): + wandb.termwarn( + f"Exception when ensuring Kubernetes API key secret: {e}. Retrying..." 
+ ) + + api_key_secret = await retry_async( + backoff=ExponentialBackoff( + initial_sleep=datetime.timedelta(seconds=1), + max_sleep=datetime.timedelta(minutes=1), + max_retries=API_KEY_SECRET_MAX_RETRIES, + ), + fn=ensure_api_key_secret, + on_exc=handle_exception, + core_api=core_api, + secret_name=secret_name, + namespace=namespace, + api_key=value, + ) + env.append( + { + "name": key, + "valueFrom": { + "secretKeyRef": { + "name": secret_name, + "key": "password", + } + }, + } + ) + else: + env.append({"name": key, "value": value}) + cont["env"] = env + + pod_spec["containers"] = containers + pod_template["spec"] = pod_spec + job_spec["template"] = pod_template + job["spec"] = job_spec + job["metadata"] = job_metadata + + add_label_to_pods( + job, + WANDB_K8S_LABEL_MONITOR, + "true", + ) + + if launch_project.job_base_image: + apply_code_mount_configuration( + job, + launch_project, + ) + + # Add wandb.ai/agent: current agent label on all pods + if LaunchAgent.initialized(): + add_label_to_pods( + job, + WANDB_K8S_LABEL_AGENT, + LaunchAgent.name(), + ) + + return job, api_key_secret + + async def run( + self, launch_project: LaunchProject, image_uri: str + ) -> Optional[AbstractRun]: # noqa: C901 + """Execute a launch project on Kubernetes. + + Arguments: + launch_project: The launch project to execute. + builder: The builder to use to build the image. + + Returns: + The run object if the run was successful, otherwise None. + """ + await LaunchKubernetesMonitor.ensure_initialized() + resource_args = launch_project.fill_macros(image_uri).get("kubernetes", {}) + if not resource_args: + wandb.termlog( + f"{LOG_PREFIX}Note: no resource args specified. Add a " + "Kubernetes yaml spec or other options in a json file " + "with --resource-args ." + ) + _logger.info(f"Running Kubernetes job with resource args: {resource_args}") + + context, api_client = await get_kube_context_and_api_client( + kubernetes_asyncio, resource_args + ) + + # If using pvc for code mount, move code there. + if launch_project.job_base_image is not None: + if SOURCE_CODE_PVC_NAME is None or SOURCE_CODE_PVC_MOUNT_PATH is None: + raise LaunchError( + "WANDB_LAUNCH_SOURCE_CODE_PVC_ environment variables not set. " + "Unable to mount source code PVC into base image. " + "Use the `codeMountPvcName` variable in the agent helm chart " + "to enable base image jobs for this agent. See " + "https://github.com/wandb/helm-charts/tree/main/charts/launch-agent " + "for more information." + ) + code_subdir = launch_project.get_image_source_string() + launch_project.change_project_dir( + os.path.join(SOURCE_CODE_PVC_MOUNT_PATH, code_subdir) + ) + + # If the user specified an alternate api, we need will execute this + # run by creating a custom object. + api_version = resource_args.get("apiVersion", "batch/v1") + + if api_version not in ["batch/v1", "batch/v1beta1"]: + env_vars = launch_project.get_env_vars_dict( + self._api, MAX_ENV_LENGTHS[self.__class__.__name__] + ) + # Crawl the resource args and add our env vars to the containers. + add_wandb_env(resource_args, env_vars) + + # Add our labels to the resource args. This is necessary for the + # agent to find the custom object later on. + resource_args["metadata"] = resource_args.get("metadata", {}) + resource_args["metadata"]["labels"] = resource_args["metadata"].get( + "labels", {} + ) + resource_args["metadata"]["labels"][WANDB_K8S_LABEL_MONITOR] = "true" + + # Crawl the resource arsg and add our labels to the pods. This is + # necessary for the agent to find the pods later on. 
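+            # add_label_to_pods (defined below in this module) walks the manifest
+            # recursively, so the label reaches every nested pod template inside
+            # the custom resource spec, whatever shape the CRD uses.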
+ add_label_to_pods( + resource_args, + WANDB_K8S_LABEL_MONITOR, + "true", + ) + + # Add wandb.ai/agent: current agent label on all pods + if LaunchAgent.initialized(): + add_label_to_pods( + resource_args, + WANDB_K8S_LABEL_AGENT, + LaunchAgent.name(), + ) + resource_args["metadata"]["labels"][WANDB_K8S_LABEL_AGENT] = ( + LaunchAgent.name() + ) + + if launch_project.job_base_image: + apply_code_mount_configuration(resource_args, launch_project) + + overrides = {} + if launch_project.override_args: + overrides["args"] = launch_project.override_args + if launch_project.override_entrypoint: + overrides["command"] = launch_project.override_entrypoint.command + add_entrypoint_args_overrides( + resource_args, + overrides, + ) + api = client.CustomObjectsApi(api_client) + # Infer the attributes of a custom object from the apiVersion and/or + # a kind: attribute in the resource args. + namespace = self.get_namespace(resource_args, context) + group, version, *_ = api_version.split("/") + group = resource_args.get("group", group) + version = resource_args.get("version", version) + kind = resource_args.get("kind", version) + plural = f"{kind.lower()}s" + custom_resource = CustomResource( + group=group, + version=version, + plural=plural, + ) + LaunchKubernetesMonitor.monitor_namespace( + namespace, custom_resource=custom_resource + ) + + try: + response = await api.create_namespaced_custom_object( + group=group, + version=version, + namespace=namespace, + plural=plural, + body=resource_args, + ) + except ApiException as e: + body = json.loads(e.body) + body_yaml = yaml.dump(body) + raise LaunchError( + f"Error creating CRD of kind {kind}: {e.status} {e.reason}\n{body_yaml}" + ) from e + name = response.get("metadata", {}).get("name") + _logger.info(f"Created {kind} {response['metadata']['name']}") + submitted_run = CrdSubmittedRun( + name=name, + group=group, + version=version, + namespace=namespace, + plural=plural, + core_api=client.CoreV1Api(api_client), + custom_api=api, + ) + if self.backend_config[PROJECT_SYNCHRONOUS]: + await submitted_run.wait() + return submitted_run + + batch_api = kubernetes_asyncio.client.BatchV1Api(api_client) + core_api = kubernetes_asyncio.client.CoreV1Api(api_client) + namespace = self.get_namespace(resource_args, context) + job, secret = await self._inject_defaults( + resource_args, launch_project, image_uri, namespace, core_api + ) + msg = "Creating Kubernetes job" + if "name" in resource_args: + msg += f": {resource_args['name']}" + _logger.info(msg) + try: + response = await kubernetes_asyncio.utils.create_from_dict( + api_client, job, namespace=namespace + ) + except kubernetes_asyncio.utils.FailToCreateError as e: + for exc in e.api_exceptions: + resp = json.loads(exc.body) + msg = resp.get("message") + code = resp.get("code") + raise LaunchError( + f"Failed to create Kubernetes job for run {launch_project.run_id} ({code} {exc.reason}): {msg}" + ) + except Exception as e: + raise LaunchError( + f"Unexpected exception when creating Kubernetes job: {str(e)}\n" + ) + job_response = response[0] + job_name = job_response.metadata.name + LaunchKubernetesMonitor.monitor_namespace(namespace) + submitted_job = KubernetesSubmittedRun( + batch_api, core_api, job_name, namespace, secret + ) + if self.backend_config[PROJECT_SYNCHRONOUS]: + await submitted_job.wait() + + return submitted_job + + +def inject_entrypoint_and_args( + containers: List[dict], + entry_point: Optional[EntryPoint], + override_args: List[str], + should_override_entrypoint: bool, +) -> None: + 
"""Inject the entrypoint and args into the containers. + + Arguments: + containers: The containers to inject the entrypoint and args into. + entry_point: The entrypoint to inject. + override_args: The args to inject. + should_override_entrypoint: Whether to override the entrypoint. + + Returns: + None + """ + for i in range(len(containers)): + if override_args: + containers[i]["args"] = override_args + if entry_point and ( + not containers[i].get("command") or should_override_entrypoint + ): + containers[i]["command"] = entry_point.command + + +async def ensure_api_key_secret( + core_api: "CoreV1Api", + secret_name: str, + namespace: str, + api_key: str, +) -> "V1Secret": + """Create a secret containing a user's wandb API key. + + Arguments: + core_api: The Kubernetes CoreV1Api object. + secret_name: The name to use for the secret. + namespace: The namespace to create the secret in. + api_key: The user's wandb API key + + Returns: + The created secret + """ + secret_data = {"password": base64.b64encode(api_key.encode()).decode()} + labels = {"wandb.ai/created-by": "launch-agent"} + secret = client.V1Secret( + data=secret_data, + metadata=client.V1ObjectMeta( + name=secret_name, namespace=namespace, labels=labels + ), + kind="Secret", + type="kubernetes.io/basic-auth", + ) + + try: + try: + return await core_api.create_namespaced_secret(namespace, secret) + except ApiException as e: + # 409 = conflict = secret already exists + if e.status == 409: + existing_secret = await core_api.read_namespaced_secret( + name=secret_name, namespace=namespace + ) + if existing_secret.data != secret_data: + # If it's a previous secret made by launch agent, clean it up + if ( + existing_secret.metadata.labels.get("wandb.ai/created-by") + == "launch-agent" + ): + await core_api.delete_namespaced_secret( + name=secret_name, namespace=namespace + ) + return await core_api.create_namespaced_secret( + namespace, secret + ) + else: + raise LaunchError( + f"Kubernetes secret already exists in namespace {namespace} with incorrect data: {secret_name}" + ) + return existing_secret + raise + except Exception as e: + raise LaunchError( + f"Exception when ensuring Kubernetes API key secret: {str(e)}\n" + ) + + +async def maybe_create_imagepull_secret( + core_api: "CoreV1Api", + registry: AbstractRegistry, + run_id: str, + namespace: str, +) -> Optional["V1Secret"]: + """Create a secret for pulling images from a private registry. + + Arguments: + core_api: The Kubernetes CoreV1Api object. + registry: The registry to pull from. + run_id: The run id. + namespace: The namespace to create the secret in. + + Returns: + A secret if one was created, otherwise None. 
+ """ + secret = None + if isinstance(registry, LocalRegistry) or isinstance( + registry, AzureContainerRegistry + ): + # Secret not required + return None + uname, token = await registry.get_username_password() + creds_info = { + "auths": { + registry.uri: { + "auth": base64.b64encode(f"{uname}:{token}".encode()).decode(), + # need an email but the use is deprecated + "email": "deprecated@wandblaunch.com", + } + } + } + secret_data = { + ".dockerconfigjson": base64.b64encode(json.dumps(creds_info).encode()).decode() + } + secret = client.V1Secret( + data=secret_data, + metadata=client.V1ObjectMeta(name=f"regcred-{run_id}", namespace=namespace), + kind="Secret", + type="kubernetes.io/dockerconfigjson", + ) + try: + try: + return await core_api.create_namespaced_secret(namespace, secret) + except ApiException as e: + # 409 = conflict = secret already exists + if e.status == 409: + return await core_api.read_namespaced_secret( + name=f"regcred-{run_id}", namespace=namespace + ) + raise + except Exception as e: + raise LaunchError(f"Exception when creating Kubernetes secret: {str(e)}\n") + + +def yield_containers(root: Any) -> Iterator[dict]: + """Yield all container specs in a manifest. + + Recursively traverses the manifest and yields all container specs. Container + specs are identified by the presence of a "containers" key in the value. + """ + if isinstance(root, dict): + for k, v in root.items(): + if k == "containers": + if isinstance(v, list): + yield from v + elif isinstance(v, (dict, list)): + yield from yield_containers(v) + elif isinstance(root, list): + for item in root: + yield from yield_containers(item) + + +def add_wandb_env(root: Union[dict, list], env_vars: Dict[str, str]) -> None: + """Injects wandb environment variables into specs. + + Recursively walks the spec and injects the environment variables into + every container spec. Containers are identified by the "containers" key. + + This function treats the WANDB_RUN_ID and WANDB_GROUP_ID environment variables + specially. If they are present in the spec, they will be overwritten. If a setting + for WANDB_RUN_ID is provided in env_vars, then that environment variable will only be + set in the first container modified by this function. + + Arguments: + root: The spec to modify. + env_vars: The environment variables to inject. + + Returns: None. + """ + for cont in yield_containers(root): + env = cont.setdefault("env", []) + env.extend([{"name": key, "value": value} for key, value in env_vars.items()]) + cont["env"] = env + # After we have set WANDB_RUN_ID once, we don't want to set it again + if "WANDB_RUN_ID" in env_vars: + env_vars.pop("WANDB_RUN_ID") + + +def yield_pods(manifest: Any) -> Iterator[dict]: + """Yield all pod specs in a manifest. + + Recursively traverses the manifest and yields all pod specs. Pod specs are + identified by the presence of a "spec" key with a "containers" key in the + value. + """ + if isinstance(manifest, list): + for item in manifest: + yield from yield_pods(item) + elif isinstance(manifest, dict): + if "spec" in manifest and "containers" in manifest["spec"]: + yield manifest + for value in manifest.values(): + if isinstance(value, (dict, list)): + yield from yield_pods(value) + + +def add_label_to_pods( + manifest: Union[dict, list], label_key: str, label_value: str +) -> None: + """Add a label to all pod specs in a manifest. + + Recursively traverses the manifest and adds the label to all pod specs. 
+ Pod specs are identified by the presence of a "spec" key with a "containers" + key in the value. + + Arguments: + manifest: The manifest to modify. + label_key: The label key to add. + label_value: The label value to add. + + Returns: None. + """ + for pod in yield_pods(manifest): + metadata = pod.setdefault("metadata", {}) + labels = metadata.setdefault("labels", {}) + labels[label_key] = label_value + + +def add_entrypoint_args_overrides(manifest: Union[dict, list], overrides: dict) -> None: + """Add entrypoint and args overrides to all containers in a manifest. + + Recursively traverses the manifest and adds the entrypoint and args overrides + to all containers. Containers are identified by the presence of a "spec" key + with a "containers" key in the value. + + Arguments: + manifest: The manifest to modify. + overrides: Dictionary with args and entrypoint keys. + + Returns: None. + """ + if isinstance(manifest, list): + for item in manifest: + add_entrypoint_args_overrides(item, overrides) + elif isinstance(manifest, dict): + if "spec" in manifest and "containers" in manifest["spec"]: + containers = manifest["spec"]["containers"] + for container in containers: + if "command" in overrides: + container["command"] = overrides["command"] + if "args" in overrides: + container["args"] = overrides["args"] + for value in manifest.values(): + add_entrypoint_args_overrides(value, overrides) + + +def apply_code_mount_configuration( + manifest: Union[Dict, list], project: LaunchProject +) -> None: + """Apply code mount configuration to all containers in a manifest. + + Recursively traverses the manifest and adds the code mount configuration to + all containers. Containers are identified by the presence of a "spec" key + with a "containers" key in the value. + + Arguments: + manifest: The manifest to modify. + project: The launch project. + + Returns: None. 
+ """ + assert SOURCE_CODE_PVC_NAME is not None + source_dir = project.get_image_source_string() + for pod in yield_pods(manifest): + for container in yield_containers(pod): + if "volumeMounts" not in container: + container["volumeMounts"] = [] + container["volumeMounts"].append( + { + "name": "wandb-source-code-volume", + "mountPath": CODE_MOUNT_DIR, + "subPath": source_dir, + } + ) + container["workingDir"] = CODE_MOUNT_DIR + spec = pod["spec"] + if "volumes" not in spec: + spec["volumes"] = [] + spec["volumes"].append( + { + "name": "wandb-source-code-volume", + "persistentVolumeClaim": { + "claimName": SOURCE_CODE_PVC_NAME, + }, + } + ) diff --git a/parrot/lib/python3.10/site-packages/wandb/sdk/launch/runner/local_container.py b/parrot/lib/python3.10/site-packages/wandb/sdk/launch/runner/local_container.py new file mode 100644 index 0000000000000000000000000000000000000000..e831467a5c1967d0258231cfbe305d32936e29e5 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/wandb/sdk/launch/runner/local_container.py @@ -0,0 +1,301 @@ +import asyncio +import logging +import os +import shlex +import subprocess +import sys +import threading +from typing import TYPE_CHECKING, Any, Dict, List, Optional + +import wandb +from wandb.sdk.launch.environment.abstract import AbstractEnvironment +from wandb.sdk.launch.registry.abstract import AbstractRegistry + +from .._project_spec import LaunchProject +from ..errors import LaunchError +from ..utils import ( + CODE_MOUNT_DIR, + LOG_PREFIX, + MAX_ENV_LENGTHS, + PROJECT_SYNCHRONOUS, + _is_wandb_dev_uri, + _is_wandb_local_uri, + docker_image_exists, + event_loop_thread_exec, + pull_docker_image, + sanitize_wandb_api_key, +) +from .abstract import AbstractRun, AbstractRunner, Status + +if TYPE_CHECKING: + from wandb.apis.internal import Api + +_logger = logging.getLogger(__name__) + + +class LocalSubmittedRun(AbstractRun): + """Instance of ``AbstractRun`` corresponding to a subprocess launched to run an entry point command locally.""" + + def __init__(self) -> None: + super().__init__() + self._command_proc: Optional[subprocess.Popen] = None + self._stdout: Optional[str] = None + self._terminate_flag: bool = False + self._thread: Optional[threading.Thread] = None + + def set_command_proc(self, command_proc: subprocess.Popen) -> None: + self._command_proc = command_proc + + def set_thread(self, thread: threading.Thread) -> None: + self._thread = thread + + @property + def id(self) -> Optional[str]: + if self._command_proc is None: + return None + return str(self._command_proc.pid) + + async def wait(self) -> bool: + assert self._thread is not None + # if command proc is not set + # wait for thread to set it + if self._command_proc is None: + while self._thread.is_alive(): + await asyncio.sleep(5) + # command proc can be updated by another thread + if self._command_proc is not None: + break # type: ignore # mypy thinks this is unreachable + else: + return False + wait = event_loop_thread_exec(self._command_proc.wait) + return int(await wait()) == 0 + + async def get_logs(self) -> Optional[str]: + return self._stdout + + async def cancel(self) -> None: + # thread is set immediately after starting, should always exist + assert self._thread is not None + + # cancel called before the thread subprocess has started + # indicates to thread to not start command proc if not already started + self._terminate_flag = True + + async def get_status(self) -> Status: + assert self._thread is not None, "Failed to get status, self._thread = None" + if self._command_proc is 
None: + if self._thread.is_alive(): + return Status("running") + return Status("stopped") + exit_code = self._command_proc.poll() + if exit_code is None: + return Status("running") + if exit_code == 0: + return Status("finished") + return Status("failed") + + +class LocalContainerRunner(AbstractRunner): + """Runner class, uses a project to create a LocallySubmittedRun.""" + + def __init__( + self, + api: "Api", + backend_config: Dict[str, Any], + environment: AbstractEnvironment, + registry: AbstractRegistry, + ) -> None: + super().__init__(api, backend_config) + self.environment = environment + self.registry = registry + + def _populate_docker_args( + self, launch_project: LaunchProject, image_uri: str + ) -> Dict[str, Any]: + docker_args: Dict[str, Any] = launch_project.fill_macros(image_uri).get( + "local-container", {} + ) + if _is_wandb_local_uri(self._api.settings("base_url")): + if sys.platform == "win32": + docker_args["net"] = "host" + else: + docker_args["network"] = "host" + if sys.platform == "linux" or sys.platform == "linux2": + docker_args["add-host"] = "host.docker.internal:host-gateway" + base_image = launch_project.job_base_image + if base_image is not None: + # Mount code into the container and set the working directory. + if "volume" not in docker_args: + docker_args["volume"] = [] + docker_args["volume"].append( + f"{launch_project.project_dir}:{CODE_MOUNT_DIR}" + ) + docker_args["workdir"] = CODE_MOUNT_DIR + return docker_args + + async def run( + self, + launch_project: LaunchProject, + image_uri: str, + ) -> Optional[AbstractRun]: + docker_args = self._populate_docker_args(launch_project, image_uri) + synchronous: bool = self.backend_config[PROJECT_SYNCHRONOUS] + + env_vars = launch_project.get_env_vars_dict( + self._api, MAX_ENV_LENGTHS[self.__class__.__name__] + ) + + # When running against local port, need to swap to local docker host + if ( + _is_wandb_local_uri(self._api.settings("base_url")) + and sys.platform == "darwin" + ): + _, _, port = self._api.settings("base_url").split(":") + env_vars["WANDB_BASE_URL"] = f"http://host.docker.internal:{port}" + elif _is_wandb_dev_uri(self._api.settings("base_url")): + env_vars["WANDB_BASE_URL"] = "http://host.docker.internal:9001" + + if launch_project.docker_image or launch_project.job_base_image: + try: + pull_docker_image(image_uri) + except Exception as e: + wandb.termwarn(f"Error attempting to pull docker image {image_uri}") + if not docker_image_exists(image_uri): + raise LaunchError( + f"Failed to pull docker image {image_uri} with error: {e}" + ) + + entrypoint = launch_project.get_job_entry_point() + entry_cmd = None if entrypoint is None else entrypoint.command + command_str = " ".join( + get_docker_command( + image_uri, + env_vars, + docker_args=docker_args, + entry_cmd=entry_cmd, + additional_args=launch_project.override_args, + ) + ).strip() + sanitized_cmd_str = sanitize_wandb_api_key(command_str) + _msg = f"{LOG_PREFIX}Launching run in docker with command: {sanitized_cmd_str}" + wandb.termlog(_msg) + run = _run_entry_point(command_str, launch_project.project_dir) + if synchronous: + await run.wait() + return run + + +def _run_entry_point(command: str, work_dir: Optional[str]) -> AbstractRun: + """Run an entry point command in a subprocess. 
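+
+    The command is run on a background thread via _thread_process_runner, so the
+    returned LocalSubmittedRun can be polled for status and logs asynchronously.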
+ + Arguments: + command: Entry point command to run + work_dir: Working directory in which to run the command + + Returns: + An instance of `LocalSubmittedRun` + """ + if work_dir is None: + work_dir = os.getcwd() + env = os.environ.copy() + run = LocalSubmittedRun() + thread = threading.Thread( + target=_thread_process_runner, + args=(run, ["bash", "-c", command], work_dir, env), + ) + run.set_thread(thread) + thread.start() + return run + + +def _thread_process_runner( + run: LocalSubmittedRun, args: List[str], work_dir: str, env: Dict[str, str] +) -> None: + # cancel was called before we started the subprocess + if run._terminate_flag: + return + # TODO: Make this async + process = subprocess.Popen( + args, + close_fds=True, + stdout=subprocess.PIPE, + stderr=subprocess.STDOUT, + universal_newlines=True, + bufsize=1, + cwd=work_dir, + env=env, + ) + run.set_command_proc(process) + run._stdout = "" + while True: + # the agent thread could set the terminate flag + if run._terminate_flag: + process.terminate() # type: ignore + chunk = os.read(process.stdout.fileno(), 4096) # type: ignore + if not chunk: + break + index = chunk.find(b"\r") + decoded_chunk = None + while not decoded_chunk: + try: + decoded_chunk = chunk.decode() + except UnicodeDecodeError: + # Multi-byte character cut off, try to get the rest of it + chunk += os.read(process.stdout.fileno(), 1) # type: ignore + if index != -1: + run._stdout += decoded_chunk + print(chunk.decode(), end="") + else: + run._stdout += decoded_chunk + "\r" + print(chunk.decode(), end="\r") + + +def get_docker_command( + image: str, + env_vars: Dict[str, str], + entry_cmd: Optional[List[str]] = None, + docker_args: Optional[Dict[str, Any]] = None, + additional_args: Optional[List[str]] = None, +) -> List[str]: + """Construct the docker command using the image and docker args. + + Arguments: + image: a Docker image to be run + env_vars: a dictionary of environment variables for the command + entry_cmd: the entry point command to run + docker_args: a dictionary of additional docker args for the command + """ + docker_path = "docker" + cmd: List[Any] = [docker_path, "run", "--rm"] + + # hacky handling of env vars, needs to be improved + for env_key, env_value in env_vars.items(): + cmd += ["-e", f"{shlex.quote(env_key)}={shlex.quote(env_value)}"] + + if docker_args: + for name, value in docker_args.items(): + if len(name) == 1: + prefix = "-" + shlex.quote(name) + else: + prefix = "--" + shlex.quote(name) + if isinstance(value, list): + for v in value: + cmd += [prefix, shlex.quote(str(v))] + elif isinstance(value, bool) and value: + cmd += [prefix] + else: + cmd += [prefix, shlex.quote(str(value))] + + if entry_cmd: + cmd += ["--entrypoint", entry_cmd[0]] + cmd += [shlex.quote(image)] + if entry_cmd and len(entry_cmd) > 1: + cmd += entry_cmd[1:] + if additional_args: + cmd += additional_args + return cmd + + +def join(split_command: List[str]) -> str: + """Return a shell-escaped string from *split_command*.""" + return " ".join(shlex.quote(arg) for arg in split_command) diff --git a/parrot/lib/python3.10/site-packages/wandb/sdk/lib/__init__.py b/parrot/lib/python3.10/site-packages/wandb/sdk/lib/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..4289765c6c7c4fda2dbe42dddc1f5f7d69da0c45 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/wandb/sdk/lib/__init__.py @@ -0,0 +1,5 @@ +from . 
import lazyloader +from .disabled import RunDisabled, SummaryDisabled +from .run_moment import RunMoment + +__all__ = ("lazyloader", "RunDisabled", "SummaryDisabled", "RunMoment") diff --git a/parrot/lib/python3.10/site-packages/wandb/sdk/lib/__pycache__/_settings_toposort_generated.cpython-310.pyc b/parrot/lib/python3.10/site-packages/wandb/sdk/lib/__pycache__/_settings_toposort_generated.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0498623a14284a6a4bfbd4b69a8b7e28e94288f3 Binary files /dev/null and b/parrot/lib/python3.10/site-packages/wandb/sdk/lib/__pycache__/_settings_toposort_generated.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/wandb/sdk/lib/__pycache__/_wburls_generated.cpython-310.pyc b/parrot/lib/python3.10/site-packages/wandb/sdk/lib/__pycache__/_wburls_generated.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0f37aafb1612fd8bd45e07e788ab04b790a6b931 Binary files /dev/null and b/parrot/lib/python3.10/site-packages/wandb/sdk/lib/__pycache__/_wburls_generated.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/wandb/sdk/lib/__pycache__/config_util.cpython-310.pyc b/parrot/lib/python3.10/site-packages/wandb/sdk/lib/__pycache__/config_util.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0ce8579f92a1f94eaa905a3091fcbe2a4d228cd4 Binary files /dev/null and b/parrot/lib/python3.10/site-packages/wandb/sdk/lib/__pycache__/config_util.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/wandb/sdk/lib/__pycache__/credentials.cpython-310.pyc b/parrot/lib/python3.10/site-packages/wandb/sdk/lib/__pycache__/credentials.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..58ca6bf9ccd8be90e98c8d295b5d9ec9d3fa3e08 Binary files /dev/null and b/parrot/lib/python3.10/site-packages/wandb/sdk/lib/__pycache__/credentials.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/wandb/sdk/lib/__pycache__/disabled.cpython-310.pyc b/parrot/lib/python3.10/site-packages/wandb/sdk/lib/__pycache__/disabled.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d2b63c5c9f975e58c16ea69301320ffbb2f08bdf Binary files /dev/null and b/parrot/lib/python3.10/site-packages/wandb/sdk/lib/__pycache__/disabled.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/wandb/sdk/lib/__pycache__/lazyloader.cpython-310.pyc b/parrot/lib/python3.10/site-packages/wandb/sdk/lib/__pycache__/lazyloader.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f4fbbd4d931da817332122c0b58ed9963b890298 Binary files /dev/null and b/parrot/lib/python3.10/site-packages/wandb/sdk/lib/__pycache__/lazyloader.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/wandb/sdk/lib/__pycache__/paths.cpython-310.pyc b/parrot/lib/python3.10/site-packages/wandb/sdk/lib/__pycache__/paths.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b61e3514eed6783380d1b73975614f3397eda8e9 Binary files /dev/null and b/parrot/lib/python3.10/site-packages/wandb/sdk/lib/__pycache__/paths.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/wandb/sdk/lib/__pycache__/proto_util.cpython-310.pyc b/parrot/lib/python3.10/site-packages/wandb/sdk/lib/__pycache__/proto_util.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8031153b98781258be7af1cf456ca198e10136b7 Binary files 
/dev/null and b/parrot/lib/python3.10/site-packages/wandb/sdk/lib/__pycache__/proto_util.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/wandb/sdk/lib/__pycache__/reporting.cpython-310.pyc b/parrot/lib/python3.10/site-packages/wandb/sdk/lib/__pycache__/reporting.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..36b51884e221c7dfc15e68f691278679d8f4eae3 Binary files /dev/null and b/parrot/lib/python3.10/site-packages/wandb/sdk/lib/__pycache__/reporting.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/wandb/sdk/lib/__pycache__/sock_client.cpython-310.pyc b/parrot/lib/python3.10/site-packages/wandb/sdk/lib/__pycache__/sock_client.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..535b0a2e257c1d3944c4e29fc458293dd2b6bc79 Binary files /dev/null and b/parrot/lib/python3.10/site-packages/wandb/sdk/lib/__pycache__/sock_client.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/wandb/sdk/lib/__pycache__/sparkline.cpython-310.pyc b/parrot/lib/python3.10/site-packages/wandb/sdk/lib/__pycache__/sparkline.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..90bd26efdac4812d95fa533ac6a45ff411d3d152 Binary files /dev/null and b/parrot/lib/python3.10/site-packages/wandb/sdk/lib/__pycache__/sparkline.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/wandb/sdk/lib/__pycache__/timer.cpython-310.pyc b/parrot/lib/python3.10/site-packages/wandb/sdk/lib/__pycache__/timer.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b445a29f24f2ecc0b408f5e32748de9c71dd1ee2 Binary files /dev/null and b/parrot/lib/python3.10/site-packages/wandb/sdk/lib/__pycache__/timer.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/wandb/sdk/lib/_settings_toposort_generate.py b/parrot/lib/python3.10/site-packages/wandb/sdk/lib/_settings_toposort_generate.py new file mode 100644 index 0000000000000000000000000000000000000000..9afe707d673fd282a1d1af44d74dae1b272b7d5f --- /dev/null +++ b/parrot/lib/python3.10/site-packages/wandb/sdk/lib/_settings_toposort_generate.py @@ -0,0 +1,159 @@ +import inspect +import sys +from typing import Dict, List, Set, Tuple + +from wandb.errors import UsageError +from wandb.sdk.wandb_settings import Settings + +if sys.version_info >= (3, 8): + from typing import get_type_hints +else: + from typing_extensions import get_type_hints + + +template = """ +__all__ = ("SETTINGS_TOPOLOGICALLY_SORTED", "_Setting") + +import sys +from typing import Tuple + +if sys.version_info >= (3, 8): + from typing import Final, Literal +else: + from typing_extensions import Final, Literal + + +_Setting = Literal[ + $settings_literal_list +] + +SETTINGS_TOPOLOGICALLY_SORTED: Final[Tuple[_Setting, ...]] = ( + $settings_topologically_sorted +) +""" + + +class Graph: + # A simple class representing an unweighted directed graph + # that uses an adjacency list representation. + # We use to ensure that we don't have cyclic dependencies in the settings + # and that modifications to the settings are applied in the correct order. 
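+    # Nodes are setting names; an edge a -> b records that setting a depends on
+    # setting b, so the topological sort lists b before a in the modification order.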
+ def __init__(self) -> None: + self.adj_list: Dict[str, Set[str]] = {} + + def add_node(self, node: str) -> None: + if node not in self.adj_list: + self.adj_list[node] = set() + + def add_edge(self, node1: str, node2: str) -> None: + self.adj_list[node1].add(node2) + + def get_neighbors(self, node: str) -> Set[str]: + return self.adj_list[node] + + # return a list of nodes sorted in topological order + def topological_sort_dfs(self) -> List[str]: + sorted_copy = {k: sorted(v) for k, v in self.adj_list.items()} + + sorted_nodes: List[str] = [] + visited_nodes: Set[str] = set() + current_nodes: Set[str] = set() + + def visit(n: str) -> None: + if n in visited_nodes: + return None + if n in current_nodes: + raise UsageError("Cyclic dependency detected in wandb.Settings") + + current_nodes.add(n) + for neighbor in sorted_copy[n]: + visit(neighbor) + + current_nodes.remove(n) + visited_nodes.add(n) + sorted_nodes.append(n) + + return None + + for node in self.adj_list: + if node not in visited_nodes: + visit(node) + + return sorted_nodes + + +def _get_modification_order( + settings: Settings, +) -> Tuple[Tuple[str, ...], Tuple[str, ...]]: + """Return the order in which settings should be modified, based on dependencies.""" + dependency_graph = Graph() + + props = tuple(get_type_hints(Settings).keys()) + + # discover prop dependencies from validator methods and runtime hooks + + prefix = "_validate_" + symbols = set(dir(settings)) + validator_methods = tuple(sorted(m for m in symbols if m.startswith(prefix))) + + # extract dependencies from validator methods + for m in validator_methods: + setting = m.split(prefix)[1] + dependency_graph.add_node(setting) + # if the method is not static, inspect its code to find the attributes it depends on + if ( + not isinstance(Settings.__dict__[m], staticmethod) + and not isinstance(Settings.__dict__[m], classmethod) + and Settings.__dict__[m].__code__.co_argcount > 0 + ): + unbound_closure_vars = inspect.getclosurevars(Settings.__dict__[m]).unbound + dependencies = (v for v in unbound_closure_vars if v in props) + for d in dependencies: + dependency_graph.add_node(d) + dependency_graph.add_edge(setting, d) + + # extract dependencies from props' runtime hooks + default_props = settings._default_props() + for prop, spec in default_props.items(): + if "hook" not in spec: + continue + + dependency_graph.add_node(prop) + + hook = spec["hook"] + if callable(hook): + hook = [hook] + + for h in hook: + unbound_closure_vars = inspect.getclosurevars(h).unbound + dependencies = (v for v in unbound_closure_vars if v in props) + for d in dependencies: + dependency_graph.add_node(d) + dependency_graph.add_edge(prop, d) + + modification_order = dependency_graph.topological_sort_dfs() + return props, tuple(modification_order) + + +def generate(settings: Settings) -> None: + _settings_literal_list, _settings_topologically_sorted = _get_modification_order( + settings + ) + settings_literal_list = ", ".join(f'"{s}"' for s in _settings_literal_list) + settings_topologically_sorted = ", ".join( + f'"{s}"' for s in _settings_topologically_sorted + ) + + print( + template.replace( + "$settings_literal_list", + settings_literal_list, + ).replace( + "$settings_topologically_sorted", + settings_topologically_sorted, + ) + ) + + +if __name__ == "__main__": + generate(Settings()) diff --git a/parrot/lib/python3.10/site-packages/wandb/sdk/lib/_settings_toposort_generated.py b/parrot/lib/python3.10/site-packages/wandb/sdk/lib/_settings_toposort_generated.py new file mode 100644 index 
0000000000000000000000000000000000000000..fc80f609d489ef630fde591083ef9464ca5ff8af --- /dev/null +++ b/parrot/lib/python3.10/site-packages/wandb/sdk/lib/_settings_toposort_generated.py @@ -0,0 +1,249 @@ +# DO NOT EDIT -- GENERATED BY: `generate-tool.py --generate` +__all__ = ("SETTINGS_TOPOLOGICALLY_SORTED", "_Setting") + +import sys +from typing import Tuple + +if sys.version_info >= (3, 8): + from typing import Final, Literal +else: + from typing_extensions import Final, Literal + + +_Setting = Literal[ + "_args", + "_aws_lambda", + "_cli_only_mode", + "_code_path_local", + "_colab", + "_cuda", + "_disable_meta", + "_disable_service", + "_disable_setproctitle", + "_disable_stats", + "_disable_update_check", + "_disable_viewer", + "_disable_machine_info", + "_executable", + "_extra_http_headers", + "_file_stream_max_bytes", + "_file_stream_retry_max", + "_file_stream_retry_wait_min_seconds", + "_file_stream_retry_wait_max_seconds", + "_file_stream_timeout_seconds", + "_file_transfer_retry_max", + "_file_transfer_retry_wait_min_seconds", + "_file_transfer_retry_wait_max_seconds", + "_file_transfer_timeout_seconds", + "_flow_control_custom", + "_flow_control_disabled", + "_graphql_retry_max", + "_graphql_retry_wait_min_seconds", + "_graphql_retry_wait_max_seconds", + "_graphql_timeout_seconds", + "_internal_check_process", + "_internal_queue_timeout", + "_ipython", + "_jupyter", + "_jupyter_name", + "_jupyter_path", + "_jupyter_root", + "_kaggle", + "_live_policy_rate_limit", + "_live_policy_wait_time", + "_log_level", + "_network_buffer", + "_noop", + "_notebook", + "_offline", + "_sync", + "_os", + "_platform", + "_proxies", + "_python", + "_runqueue_item_id", + "_require_legacy_service", + "_save_requirements", + "_service_transport", + "_service_wait", + "_shared", + "_start_datetime", + "_start_time", + "_stats_pid", + "_stats_sampling_interval", + "_stats_sample_rate_seconds", + "_stats_samples_to_average", + "_stats_join_assets", + "_stats_neuron_monitor_config_path", + "_stats_open_metrics_endpoints", + "_stats_open_metrics_filters", + "_stats_disk_paths", + "_stats_buffer_size", + "_tmp_code_dir", + "_tracelog", + "_unsaved_keys", + "_windows", + "allow_val_change", + "anonymous", + "api_key", + "azure_account_url_to_access_key", + "base_url", + "code_dir", + "colab_url", + "config_paths", + "console", + "console_multipart", + "credentials_file", + "deployment", + "disable_code", + "disable_git", + "disable_hints", + "disable_job_creation", + "disabled", + "docker", + "email", + "entity", + "files_dir", + "force", + "fork_from", + "resume_from", + "git_commit", + "git_remote", + "git_remote_url", + "git_root", + "heartbeat_seconds", + "host", + "http_proxy", + "https_proxy", + "identity_token_file", + "ignore_globs", + "init_timeout", + "is_local", + "job_name", + "job_source", + "label_disable", + "launch", + "launch_config_path", + "log_dir", + "log_internal", + "log_symlink_internal", + "log_symlink_user", + "log_user", + "login_timeout", + "mode", + "notebook_name", + "program", + "program_abspath", + "program_relpath", + "project", + "project_url", + "quiet", + "reinit", + "relogin", + "resume", + "resume_fname", + "resumed", + "root_dir", + "run_group", + "run_id", + "run_job_type", + "run_mode", + "run_name", + "run_notes", + "run_tags", + "run_url", + "sagemaker_disable", + "save_code", + "settings_system", + "settings_workspace", + "show_colors", + "show_emoji", + "show_errors", + "show_info", + "show_warnings", + "silent", + "start_method", + "strict", + "summary_errors", 
+ "summary_timeout", + "summary_warnings", + "sweep_id", + "sweep_param_path", + "sweep_url", + "symlink", + "sync_dir", + "sync_file", + "sync_symlink_latest", + "table_raise_on_max_row_limit_exceeded", + "timespec", + "tmp_dir", + "username", + "wandb_dir", +] + +SETTINGS_TOPOLOGICALLY_SORTED: Final[Tuple[_Setting, ...]] = ( + "_service_wait", + "_stats_sample_rate_seconds", + "_stats_samples_to_average", + "_stats_sampling_interval", + "anonymous", + "api_key", + "base_url", + "console", + "job_source", + "mode", + "project", + "run_id", + "start_method", + "_aws_lambda", + "program", + "_code_path_local", + "_colab", + "_disable_machine_info", + "_disable_meta", + "_disable_stats", + "_network_buffer", + "_flow_control_disabled", + "_flow_control_custom", + "_ipython", + "_jupyter", + "_kaggle", + "_noop", + "_notebook", + "disabled", + "_offline", + "_shared", + "_stats_neuron_monitor_config_path", + "run_mode", + "_start_datetime", + "timespec", + "root_dir", + "wandb_dir", + "tmp_dir", + "_tmp_code_dir", + "_windows", + "colab_url", + "is_local", + "deployment", + "disable_code", + "disable_git", + "disable_job_creation", + "files_dir", + "_proxies", + "http_proxy", + "https_proxy", + "log_dir", + "log_internal", + "log_symlink_internal", + "log_symlink_user", + "log_user", + "project_url", + "resume_fname", + "run_url", + "settings_system", + "settings_workspace", + "sweep_url", + "sync_dir", + "sync_file", + "sync_symlink_latest", +) diff --git a/parrot/lib/python3.10/site-packages/wandb/sdk/lib/_wburls_generate.py b/parrot/lib/python3.10/site-packages/wandb/sdk/lib/_wburls_generate.py new file mode 100644 index 0000000000000000000000000000000000000000..2d246ef0ad85eef152e16cc7bffa59116a7e698c --- /dev/null +++ b/parrot/lib/python3.10/site-packages/wandb/sdk/lib/_wburls_generate.py @@ -0,0 +1,25 @@ +from wburls import wburls # type: ignore + +template = """ +import sys + +if sys.version_info >= (3, 8): + from typing import Literal +else: + from typing_extensions import Literal + + +URLS = Literal[ + $literal_list +] +""" + + +def generate() -> None: + urls = wburls._get_urls() + literal_list = ", ".join([f"{key!r}" for key in urls]) + print(template.replace("$literal_list", literal_list)) + + +if __name__ == "__main__": + generate() diff --git a/parrot/lib/python3.10/site-packages/wandb/sdk/lib/apikey.py b/parrot/lib/python3.10/site-packages/wandb/sdk/lib/apikey.py new file mode 100644 index 0000000000000000000000000000000000000000..b2d7b34e3e609da491452d95f8e641959b6827c3 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/wandb/sdk/lib/apikey.py @@ -0,0 +1,273 @@ +"""apikey util.""" + +import os +import platform +import stat +import sys +import textwrap +from functools import partial +from typing import TYPE_CHECKING, Callable, Dict, Optional, Union +from urllib.parse import urlparse + +# import Literal +if sys.version_info >= (3, 8): + from typing import Literal +else: + from typing_extensions import Literal + +import click +from requests.utils import NETRC_FILES, get_netrc_auth + +import wandb +from wandb.apis import InternalApi +from wandb.errors import term +from wandb.util import _is_databricks, isatty, prompt_choices + +from .wburls import wburls + +LOGIN_CHOICE_ANON = "Private W&B dashboard, no account required" +LOGIN_CHOICE_NEW = "Create a W&B account" +LOGIN_CHOICE_EXISTS = "Use an existing W&B account" +LOGIN_CHOICE_DRYRUN = "Don't visualize my results" +LOGIN_CHOICE_NOTTY = "Unconfigured" +LOGIN_CHOICES = [ + LOGIN_CHOICE_ANON, + LOGIN_CHOICE_NEW, + 
LOGIN_CHOICE_EXISTS, + LOGIN_CHOICE_DRYRUN, +] + +Mode = Literal["allow", "must", "never", "false", "true"] + +if TYPE_CHECKING: + from wandb.sdk.wandb_settings import Settings + + +getpass = partial(click.prompt, hide_input=True, err=True) + + +def _fixup_anon_mode(default: Optional[Mode]) -> Optional[Mode]: + # Convert weird anonymode values from legacy settings files + # into one of our expected values. + anon_mode = default or "never" + mapping: Dict[Mode, Mode] = {"true": "allow", "false": "never"} + return mapping.get(anon_mode, anon_mode) + + +def get_netrc_file_path() -> str: + """Return the path to the netrc file.""" + # if the NETRC environment variable is set, use that + netrc_file = os.environ.get("NETRC") + if netrc_file: + return os.path.expanduser(netrc_file) + + # if either .netrc or _netrc exists in the home directory, use that + for netrc_file in NETRC_FILES: + home_dir = os.path.expanduser("~") + if os.path.exists(os.path.join(home_dir, netrc_file)): + return os.path.join(home_dir, netrc_file) + + # otherwise, use .netrc on non-Windows platforms and _netrc on Windows + netrc_file = ".netrc" if platform.system() != "Windows" else "_netrc" + + return os.path.join(os.path.expanduser("~"), netrc_file) + + +def prompt_api_key( # noqa: C901 + settings: "Settings", + api: Optional[InternalApi] = None, + input_callback: Optional[Callable] = None, + browser_callback: Optional[Callable] = None, + no_offline: bool = False, + no_create: bool = False, + local: bool = False, +) -> Union[str, bool, None]: + """Prompt for api key. + + Returns: + str - if key is configured + None - if dryrun is selected + False - if unconfigured (notty) + """ + input_callback = input_callback or getpass + log_string = term.LOG_STRING + api = api or InternalApi(settings) + anon_mode = _fixup_anon_mode(settings.anonymous) # type: ignore + jupyter = settings._jupyter or False + app_url = api.app_url + + choices = [choice for choice in LOGIN_CHOICES] + if anon_mode == "never": + # Omit LOGIN_CHOICE_ANON as a choice if the env var is set to never + choices.remove(LOGIN_CHOICE_ANON) + if (jupyter and not settings.login_timeout) or no_offline: + choices.remove(LOGIN_CHOICE_DRYRUN) + if (jupyter and not settings.login_timeout) or no_create: + choices.remove(LOGIN_CHOICE_NEW) + + if jupyter and "google.colab" in sys.modules: + log_string = term.LOG_STRING_NOCOLOR + key = wandb.jupyter.attempt_colab_login(app_url) # type: ignore + if key is not None: + write_key(settings, key, api=api) + return key # type: ignore + + if anon_mode == "must": + result = LOGIN_CHOICE_ANON + # If we're not in an interactive environment, default to dry-run. 
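+    # LOGIN_CHOICE_NOTTY resolves to returning False below, i.e. leave the API
+    # key unconfigured rather than prompting in a non-interactive session.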
+ elif ( + not jupyter and (not isatty(sys.stdout) or not isatty(sys.stdin)) + ) or _is_databricks(): + result = LOGIN_CHOICE_NOTTY + elif local: + result = LOGIN_CHOICE_EXISTS + elif len(choices) == 1: + result = choices[0] + else: + result = prompt_choices( + choices, input_timeout=settings.login_timeout, jupyter=jupyter + ) + + api_ask = ( + f"{log_string}: Paste an API key from your profile and hit enter, " + "or press ctrl+c to quit" + ) + if result == LOGIN_CHOICE_ANON: + key = api.create_anonymous_api_key() + + write_key(settings, key, api=api, anonymous=True) + return key # type: ignore + elif result == LOGIN_CHOICE_NEW: + key = browser_callback(signup=True) if browser_callback else None + + if not key: + wandb.termlog(f"Create an account here: {app_url}/authorize?signup=true") + key = input_callback(api_ask).strip() + + write_key(settings, key, api=api) + return key # type: ignore + elif result == LOGIN_CHOICE_EXISTS: + key = browser_callback() if browser_callback else None + + if not key: + if not (settings.is_local or local): + host = app_url + for prefix in ("http://", "https://"): + if app_url.startswith(prefix): + host = app_url[len(prefix) :] + wandb.termlog( + f"Logging into {host}. (Learn how to deploy a W&B server locally: {wburls.get('wandb_server')})" + ) + wandb.termlog( + f"You can find your API key in your browser here: {app_url}/authorize" + ) + key = input_callback(api_ask).strip() + write_key(settings, key, api=api) + return key # type: ignore + elif result == LOGIN_CHOICE_NOTTY: + # TODO: Needs refactor as this needs to be handled by caller + return False + elif result == LOGIN_CHOICE_DRYRUN: + return None + else: + # Jupyter environments don't have a tty, but we can still try logging in using + # the browser callback if one is supplied. + key, anonymous = ( + browser_callback() if jupyter and browser_callback else (None, False) + ) + + write_key(settings, key, api=api) + return key # type: ignore + + +def write_netrc(host: str, entity: str, key: str) -> Optional[bool]: + """Add our host and key to .netrc.""" + _, key_suffix = key.split("-", 1) if "-" in key else ("", key) + if len(key_suffix) != 40: + wandb.termerror( + "API-key must be exactly 40 characters long: {} ({} chars)".format( + key_suffix, len(key_suffix) + ) + ) + return None + try: + normalized_host = urlparse(host).netloc.split(":")[0] + netrc_path = get_netrc_file_path() + wandb.termlog( + f"Appending key for {normalized_host} to your netrc file: {netrc_path}" + ) + machine_line = f"machine {normalized_host}" + orig_lines = None + try: + with open(netrc_path) as f: + orig_lines = f.read().strip().split("\n") + except OSError: + pass + with open(netrc_path, "w") as f: + if orig_lines: + # delete this machine from the file if it's already there. + skip = 0 + for line in orig_lines: + # we fix invalid netrc files with an empty host that we wrote before + # verifying host... 
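+                    # Setting skip = 2 drops the matched machine line and the two
+                    # credential lines (login/password) that follow it.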
+ if line == "machine " or machine_line in line: + skip = 2 + elif skip: + skip -= 1 + else: + f.write("{}\n".format(line)) + f.write( + textwrap.dedent( + """\ + machine {host} + login {entity} + password {key} + """ + ).format(host=normalized_host, entity=entity, key=key) + ) + os.chmod(netrc_path, stat.S_IRUSR | stat.S_IWUSR) + return True + except OSError: + wandb.termerror(f"Unable to read {netrc_path}") + return None + + +def write_key( + settings: "Settings", + key: Optional[str], + api: Optional["InternalApi"] = None, + anonymous: bool = False, +) -> None: + if not key: + raise ValueError("No API key specified.") + + # TODO(jhr): api shouldn't be optional or it shouldn't be passed, clean up callers + api = api or InternalApi() + + # Normal API keys are 40-character hex strings. On-prem API keys have a + # variable-length prefix, a dash, then the 40-char string. + _, suffix = key.split("-", 1) if "-" in key else ("", key) + + if len(suffix) != 40: + raise ValueError( + "API key must be 40 characters long, yours was {}".format(len(key)) + ) + + if anonymous: + api.set_setting("anonymous", "true", globally=True, persist=True) + else: + api.clear_setting("anonymous", globally=True, persist=True) + + write_netrc(settings.base_url, "user", key) + + +def api_key(settings: Optional["Settings"] = None) -> Optional[str]: + if settings is None: + settings = wandb.setup().settings # type: ignore + assert settings is not None + if settings.api_key: + return settings.api_key + auth = get_netrc_auth(settings.base_url) + if auth: + return auth[-1] + return None diff --git a/parrot/lib/python3.10/site-packages/wandb/sdk/lib/capped_dict.py b/parrot/lib/python3.10/site-packages/wandb/sdk/lib/capped_dict.py new file mode 100644 index 0000000000000000000000000000000000000000..5e162109926cf7c115bf835323de4d292f8725e8 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/wandb/sdk/lib/capped_dict.py @@ -0,0 +1,26 @@ +import collections +from typing import Any, Optional + + +class CappedDict(collections.OrderedDict): + default_max_size = 50 + + def __init__(self, max_size: Optional[int] = None) -> None: + self.max_size = max_size or self.default_max_size + super().__init__() + + def __setitem__(self, key: str, val: Any) -> None: + if key not in self: + max_size = self.max_size - 1 + self._prune_dict(max_size) + super().__setitem__(key, val) + + def update(self, **kwargs: Any) -> None: # type: ignore[override] + super().update(**kwargs) + self._prune_dict(self.max_size) + + def _prune_dict(self, max_size: int) -> None: + if len(self) >= max_size: + diff = len(self) - max_size + for k in list(self.keys())[:diff]: + del self[k] diff --git a/parrot/lib/python3.10/site-packages/wandb/sdk/lib/config_util.py b/parrot/lib/python3.10/site-packages/wandb/sdk/lib/config_util.py new file mode 100644 index 0000000000000000000000000000000000000000..92bb098f71ca99cb9a14977661a8d505ee2439d1 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/wandb/sdk/lib/config_util.py @@ -0,0 +1,101 @@ +import json +import logging +import os +from typing import Any, Dict, Optional + +import yaml + +import wandb +from wandb.errors import Error +from wandb.util import load_yaml + +from . 
import filesystem + +logger = logging.getLogger("wandb") + + +class ConfigError(Error): + pass + + +def dict_from_proto_list(obj_list): + d = dict() + for item in obj_list: + d[item.key] = dict(desc=None, value=json.loads(item.value_json)) + return d + + +def dict_strip_value_dict(config_dict): + d = dict() + for k, v in config_dict.items(): + d[k] = v["value"] + return d + + +def dict_no_value_from_proto_list(obj_list): + d = dict() + for item in obj_list: + possible_dict = json.loads(item.value_json) + if not isinstance(possible_dict, dict) or "value" not in possible_dict: + continue + d[item.key] = possible_dict["value"] + + return d + + +# TODO(jhr): these functions should go away once we merge jobspec PR +def save_config_file_from_dict(config_filename, config_dict): + s = b"wandb_version: 1" + if config_dict: # adding an empty dictionary here causes a parse error + s += b"\n\n" + yaml.dump( + config_dict, + Dumper=yaml.SafeDumper, + default_flow_style=False, + allow_unicode=True, + encoding="utf-8", + sort_keys=False, + ) + data = s.decode("utf-8") + filesystem.mkdir_exists_ok(os.path.dirname(config_filename)) + with open(config_filename, "w") as conf_file: + conf_file.write(data) + + +def dict_from_config_file( + filename: str, must_exist: bool = False +) -> Optional[Dict[str, Any]]: + if not os.path.exists(filename): + if must_exist: + raise ConfigError("config file {} doesn't exist".format(filename)) + logger.debug("no default config file found in {}".format(filename)) + return None + try: + conf_file = open(filename) + except OSError: + raise ConfigError("Couldn't read config file: {}".format(filename)) + try: + loaded = load_yaml(conf_file) + except yaml.parser.ParserError: + raise ConfigError("Invalid YAML in config yaml") + if loaded is None: + wandb.termwarn( + "Found an empty default config file (config-defaults.yaml). Proceeding with no defaults." + ) + return None + config_version = loaded.pop("wandb_version", None) + if config_version is not None and config_version != 1: + raise ConfigError("Unknown config version") + data = dict() + for k, v in loaded.items(): + data[k] = v["value"] + return data + + +def merge_dicts(dest: dict, src: dict) -> dict: + """Recursively merge two dictionaries. Similar to Lodash's _.merge().""" + for key, value in src.items(): + if isinstance(value, dict) and key in dest and isinstance(dest[key], dict): + merge_dicts(dest[key], value) + else: + dest[key] = value + return dest diff --git a/parrot/lib/python3.10/site-packages/wandb/sdk/lib/credentials.py b/parrot/lib/python3.10/site-packages/wandb/sdk/lib/credentials.py new file mode 100644 index 0000000000000000000000000000000000000000..422cf0efe6e219c9edd38f8edd7a8d23b9c89d9d --- /dev/null +++ b/parrot/lib/python3.10/site-packages/wandb/sdk/lib/credentials.py @@ -0,0 +1,141 @@ +import json +import os +from datetime import datetime, timedelta +from pathlib import Path + +import requests.utils + +from wandb.errors import AuthenticationError + +DEFAULT_WANDB_CREDENTIALS_FILE = Path( + os.path.expanduser("~/.config/wandb/credentials.json") +) + +_expires_at_fmt = "%Y-%m-%d %H:%M:%S" + + +def access_token(base_url: str, token_file: Path, credentials_file: Path) -> str: + """Retrieve an access token from the credentials file. + + If no access token exists, create a new one by exchanging the identity + token from the token file, and save it to the credentials file. 
+ + Args: + base_url (str): The base URL of the server + token_file (pathlib.Path): The path to the file containing the + identity token + credentials_file (pathlib.Path): The path to file used to save + temporary access tokens + + Returns: + str: The access token + """ + if not credentials_file.exists(): + _write_credentials_file(base_url, token_file, credentials_file) + + data = _fetch_credentials(base_url, token_file, credentials_file) + return data["access_token"] + + +def _write_credentials_file(base_url: str, token_file: Path, credentials_file: Path): + """Obtain an access token from the server and write it to the credentials file. + + Args: + base_url (str): The base URL of the server + token_file (pathlib.Path): The path to the file containing the + identity token + credentials_file (pathlib.Path): The path to file used to save + temporary access tokens + """ + credentials = _create_access_token(base_url, token_file) + data = {"credentials": {base_url: credentials}} + with open(credentials_file, "w") as file: + json.dump(data, file, indent=4) + + # Set file permissions to be read/write by the owner only + os.chmod(credentials_file, 0o600) + + +def _fetch_credentials(base_url: str, token_file: Path, credentials_file: Path) -> dict: + """Fetch the access token from the credentials file. + + If the access token has expired, fetch a new one from the server and save it + to the credentials file. + + Args: + base_url (str): The base URL of the server + token_file (pathlib.Path): The path to the file containing the + identity token + credentials_file (pathlib.Path): The path to file used to save + temporary access tokens + + Returns: + dict: The credentials including the access token. + """ + creds = {} + with open(credentials_file) as file: + data = json.load(file) + if "credentials" not in data: + data["credentials"] = {} + if base_url in data["credentials"]: + creds = data["credentials"][base_url] + + expires_at = datetime.utcnow() + if "expires_at" in creds: + expires_at = datetime.strptime(creds["expires_at"], _expires_at_fmt) + + if expires_at <= datetime.utcnow(): + creds = _create_access_token(base_url, token_file) + with open(credentials_file, "w") as file: + data["credentials"][base_url] = creds + json.dump(data, file, indent=4) + + return creds + + +def _create_access_token(base_url: str, token_file: Path) -> dict: + """Exchange an identity token for an access token from the server. + + Args: + base_url (str): The base URL of the server. + token_file (pathlib.Path): The path to the file containing the + identity token + + Returns: + dict: The access token and its expiration. + + Raises: + FileNotFoundError: If the token file is not found. + OSError: If there is an issue reading the token file. + AuthenticationError: If the server fails to provide an access token. 
+ """ + try: + with open(token_file) as file: + token = file.read().strip() + except FileNotFoundError as e: + raise FileNotFoundError(f"Identity token file not found: {token_file}") from e + except OSError as e: + raise OSError( + f"Failed to read the identity token from file: {token_file}" + ) from e + + url = f"{base_url}/oidc/token" + data = { + "grant_type": "urn:ietf:params:oauth:grant-type:jwt-bearer", + "assertion": token, + } + headers = {"Content-Type": "application/x-www-form-urlencoded"} + + response = requests.post(url, data=data, headers=headers) + + if response.status_code != 200: + raise AuthenticationError( + f"Failed to retrieve access token: {response.status_code}, {response.text}" + ) + + resp_json = response.json() + expires_at = datetime.utcnow() + timedelta(seconds=float(resp_json["expires_in"])) + resp_json["expires_at"] = expires_at.strftime(_expires_at_fmt) + del resp_json["expires_in"] + + return resp_json diff --git a/parrot/lib/python3.10/site-packages/wandb/sdk/lib/deprecate.py b/parrot/lib/python3.10/site-packages/wandb/sdk/lib/deprecate.py new file mode 100644 index 0000000000000000000000000000000000000000..14afb06206855784d0b00e0aa78ca5945d9915bb --- /dev/null +++ b/parrot/lib/python3.10/site-packages/wandb/sdk/lib/deprecate.py @@ -0,0 +1,42 @@ +__all__ = ["deprecate", "Deprecated"] + +from typing import TYPE_CHECKING, Optional, Tuple + +import wandb +from wandb.proto.wandb_deprecated import DEPRECATED_FEATURES, Deprecated +from wandb.proto.wandb_telemetry_pb2 import Deprecated as TelemetryDeprecated + +# avoid cycle, use string type reference +if TYPE_CHECKING: + from .. import wandb_run + + +deprecated_field_names: Tuple[str, ...] = tuple( + str(v) for k, v in Deprecated.__dict__.items() if not k.startswith("_") +) + + +def deprecate( + field_name: DEPRECATED_FEATURES, + warning_message: str, + run: Optional["wandb_run.Run"] = None, +) -> None: + """Warn the user that a feature has been deprecated. + + Also stores the information about the event in telemetry. + + Args: + field_name: The name of the feature that has been deprecated. + Defined in wandb/proto/wandb_telemetry.proto::Deprecated + warning_message: The message to display to the user. + run: The run to whose telemetry the event will be added. + """ + known_fields = TelemetryDeprecated.DESCRIPTOR.fields_by_name.keys() + if field_name not in known_fields: + raise ValueError( + f"Unknown field name: {field_name}. 
Known fields: {known_fields}" + ) + _run = run or wandb.run + with wandb.wandb_lib.telemetry.context(run=_run) as tel: # type: ignore[attr-defined] + setattr(tel.deprecated, field_name, True) + wandb.termwarn(warning_message, repeat=False) diff --git a/parrot/lib/python3.10/site-packages/wandb/sdk/lib/disabled.py b/parrot/lib/python3.10/site-packages/wandb/sdk/lib/disabled.py new file mode 100644 index 0000000000000000000000000000000000000000..b995ce0170c51834fdc9cc187393ba32facba69a --- /dev/null +++ b/parrot/lib/python3.10/site-packages/wandb/sdk/lib/disabled.py @@ -0,0 +1,29 @@ +from typing import Any + +from wandb.sdk.lib import deprecate + + +class SummaryDisabled(dict): + __setattr__ = dict.__setitem__ + __delattr__ = dict.__delitem__ + + def __getattr__(self, key): + return self[key] + + def __getitem__(self, key): + val = dict.__getitem__(self, key) + if isinstance(val, dict) and not isinstance(val, SummaryDisabled): + val = SummaryDisabled(val) + self[key] = val + return val + + +class RunDisabled: + """Compatibility class for integrations that explicitly check for wandb.RunDisabled.""" + + def __getattr__(self, name: str) -> Any: + deprecate.deprecate( + field_name=deprecate.Deprecated.run_disabled, + warning_message="RunDisabled is deprecated and is a no-op. " + '`wandb.init(mode="disabled")` now returns and instance of `wandb.sdk.wandb_run.Run`.', + ) diff --git a/parrot/lib/python3.10/site-packages/wandb/sdk/lib/exit_hooks.py b/parrot/lib/python3.10/site-packages/wandb/sdk/lib/exit_hooks.py new file mode 100644 index 0000000000000000000000000000000000000000..aa747495299a646041f7deaeb72c1a9bcc050660 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/wandb/sdk/lib/exit_hooks.py @@ -0,0 +1,54 @@ +import sys +import traceback +from types import TracebackType +from typing import TYPE_CHECKING, Optional, Type + +import wandb +from wandb.errors import Error + +if TYPE_CHECKING: + from typing import NoReturn + + +class ExitHooks: + exception: Optional[BaseException] = None + + def __init__(self) -> None: + self.exit_code = 0 + self.exception = None + + def hook(self) -> None: + self._orig_exit = sys.exit + sys.exit = self.exit + self._orig_excepthook = ( + sys.excepthook + if sys.excepthook + != sys.__excepthook__ # respect hooks by other libraries like pdb + else None + ) + sys.excepthook = self.exc_handler # type: ignore + + def exit(self, code: object = 0) -> "NoReturn": + orig_code = code + code = code if code is not None else 0 + code = code if isinstance(code, int) else 1 + self.exit_code = code + self._orig_exit(orig_code) # type: ignore + + def was_ctrl_c(self) -> bool: + return isinstance(self.exception, KeyboardInterrupt) + + def exc_handler( + self, exc_type: Type[BaseException], exc: BaseException, tb: TracebackType + ) -> None: + self.exit_code = 1 + self.exception = exc + if issubclass(exc_type, Error): + wandb.termerror(str(exc), repeat=False) + + if self.was_ctrl_c(): + self.exit_code = 255 + + traceback.print_exception(exc_type, exc, tb) + if self._orig_excepthook: + self._orig_excepthook(exc_type, exc, tb) diff --git a/parrot/lib/python3.10/site-packages/wandb/sdk/lib/filenames.py b/parrot/lib/python3.10/site-packages/wandb/sdk/lib/filenames.py new file mode 100644 index 0000000000000000000000000000000000000000..c272b7a2c3337ddb6d91ce03ed800e84affcc5ff --- /dev/null +++ b/parrot/lib/python3.10/site-packages/wandb/sdk/lib/filenames.py @@ -0,0 +1,64 @@ +import os +from typing import Callable, Generator, Union + +WANDB_DIRS = ("wandb", ".wandb") + +CONFIG_FNAME = 
"config.yaml" +OUTPUT_FNAME = "output.log" +DIFF_FNAME = "diff.patch" +SUMMARY_FNAME = "wandb-summary.json" +METADATA_FNAME = "wandb-metadata.json" +REQUIREMENTS_FNAME = "requirements.txt" +HISTORY_FNAME = "wandb-history.jsonl" +EVENTS_FNAME = "wandb-events.jsonl" +JOBSPEC_FNAME = "wandb-jobspec.json" +CONDA_ENVIRONMENTS_FNAME = "conda-environment.yaml" + + +def is_wandb_file(name: str) -> bool: + return ( + name.startswith("wandb") + or name == METADATA_FNAME + or name == CONFIG_FNAME + or name == REQUIREMENTS_FNAME + or name == OUTPUT_FNAME + or name == DIFF_FNAME + or name == CONDA_ENVIRONMENTS_FNAME + ) + + +def filtered_dir( + root: str, + include_fn: Union[Callable[[str, str], bool], Callable[[str], bool]], + exclude_fn: Union[Callable[[str, str], bool], Callable[[str], bool]], +) -> Generator[str, None, None]: + """Simple generator to walk a directory.""" + import inspect + + # compatibility with old API, which didn't pass root + def _include_fn(path: str, root: str) -> bool: + return ( + include_fn(path, root) # type: ignore + if len(inspect.signature(include_fn).parameters) == 2 + else include_fn(path) # type: ignore + ) + + def _exclude_fn(path: str, root: str) -> bool: + return ( + exclude_fn(path, root) # type: ignore + if len(inspect.signature(exclude_fn).parameters) == 2 + else exclude_fn(path) # type: ignore + ) + + for dirpath, _, files in os.walk(root): + for fname in files: + file_path = os.path.join(dirpath, fname) + if _include_fn(file_path, root) and not _exclude_fn(file_path, root): + yield file_path + + +def exclude_wandb_fn(path: str, root: str) -> bool: + return any( + os.path.relpath(path, root).startswith(wandb_dir + os.sep) + for wandb_dir in WANDB_DIRS + ) diff --git a/parrot/lib/python3.10/site-packages/wandb/sdk/lib/filesystem.py b/parrot/lib/python3.10/site-packages/wandb/sdk/lib/filesystem.py new file mode 100644 index 0000000000000000000000000000000000000000..7c78e32349a78d0c8c40f4eeb72cb311bd531150 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/wandb/sdk/lib/filesystem.py @@ -0,0 +1,372 @@ +import contextlib +import ctypes +import errno +import logging +import os +import platform +import re +import shutil +import tempfile +import threading +from pathlib import Path +from typing import IO, Any, BinaryIO, Generator, Optional + +from wandb.sdk.lib.paths import StrPath + +logger = logging.getLogger(__name__) + +# https://en.wikipedia.org/wiki/Filename#Comparison_of_filename_limitations +PROBLEMATIC_PATH_CHARS = "".join(chr(i) for i in range(0, 32)) + ':"*<>?|' + + +def mkdir_exists_ok(dir_name: StrPath) -> None: + """Create `dir_name` and any parent directories if they don't exist. + + Raises: + FileExistsError: if `dir_name` exists and is not a directory. + PermissionError: if `dir_name` is not writable. + """ + try: + os.makedirs(dir_name, exist_ok=True) + except FileExistsError as e: + raise FileExistsError(f"{dir_name!s} exists and is not a directory") from e + except PermissionError as e: + raise PermissionError(f"{dir_name!s} is not writable") from e + + +def path_fallbacks(path: StrPath) -> Generator[str, None, None]: + """Yield variations of `path` that may exist on the filesystem. + + Return a sequence of paths that should be checked in order for existence or + create-ability. Essentially, keep replacing "suspect" characters until we run out. 
+ """ + path = str(path) + root, tail = os.path.splitdrive(path) + yield os.path.join(root, tail) + for char in PROBLEMATIC_PATH_CHARS: + if char in tail: + tail = tail.replace(char, "-") + yield os.path.join(root, tail) + + +def mkdir_allow_fallback(dir_name: StrPath) -> StrPath: + """Create `dir_name`, removing invalid path characters if necessary. + + Returns: + The path to the created directory, which may not be the original path. + """ + for new_name in path_fallbacks(dir_name): + try: + os.makedirs(new_name, exist_ok=True) + if Path(new_name) != Path(dir_name): + logger.warning(f"Creating '{new_name}' instead of '{dir_name}'") + return Path(new_name) if isinstance(dir_name, Path) else new_name + except (ValueError, NotADirectoryError): + pass + except OSError as e: + if e.errno != 22: + raise + + raise OSError(f"Unable to create directory '{dir_name}'") + + +def files_in(path: StrPath) -> Generator[os.DirEntry, None, None]: + """Yield a directory entry for each file under a given path (recursive).""" + if not os.path.isdir(path): + return + for entry in os.scandir(path): + if entry.is_dir(): + yield from files_in(entry.path) + else: + yield entry + + +class WriteSerializingFile: + """Wrapper for a file object that serializes writes.""" + + def __init__(self, f: BinaryIO) -> None: + self.lock = threading.Lock() + self.f = f + + def write(self, *args, **kargs) -> None: # type: ignore + self.lock.acquire() + try: + self.f.write(*args, **kargs) + self.f.flush() + finally: + self.lock.release() + + def close(self) -> None: + self.lock.acquire() # wait for pending writes + try: + self.f.close() + finally: + self.lock.release() + + +class CRDedupedFile(WriteSerializingFile): + def __init__(self, f: BinaryIO) -> None: + super().__init__(f=f) + self._buff = b"" + + def write(self, data) -> None: # type: ignore + lines = re.split(b"\r\n|\n", data) + ret = [] # type: ignore + for line in lines: + if line[:1] == b"\r": + if ret: + ret.pop() + elif self._buff: + self._buff = b"" + line = line.split(b"\r")[-1] + if line: + ret.append(line) + if self._buff: + ret.insert(0, self._buff) + if ret: + self._buff = ret.pop() + super().write(b"\n".join(ret) + b"\n") + + def close(self) -> None: + if self._buff: + super().write(self._buff) + super().close() + + +def copy_or_overwrite_changed(source_path: StrPath, target_path: StrPath) -> StrPath: + """Copy source_path to target_path, unless it already exists with the same mtime. + + We liberally add write permissions to deal with the case of multiple users needing + to share the same cache or run directory. + + Args: + source_path: The path to the file to copy. + target_path: The path to copy the file to. + + Returns: + The path to the copied file (which may be different from target_path). + """ + return_type = type(target_path) + + target_path = system_preferred_path(target_path, warn=True) + + need_copy = ( + not os.path.isfile(target_path) + or os.stat(source_path).st_mtime != os.stat(target_path).st_mtime + ) + + permissions_plus_write = os.stat(source_path).st_mode + if need_copy: + dir_name, file_name = os.path.split(target_path) + target_path = os.path.join(mkdir_allow_fallback(dir_name), file_name) + try: + # Use copy2 to preserve file metadata (including modified time). + shutil.copy2(source_path, target_path) + except PermissionError: + # If the file is read-only try to make it writable. 
+ try: + os.chmod(target_path, permissions_plus_write) + shutil.copy2(source_path, target_path) + except PermissionError as e: + raise PermissionError("Unable to overwrite '{target_path!s}'") from e + # Prevent future permissions issues by universal write permissions now. + os.chmod(target_path, permissions_plus_write) + + return return_type(target_path) # type: ignore # 'os.PathLike' is abstract. + + +@contextlib.contextmanager +def safe_open( + path: StrPath, mode: str = "r", *args: Any, **kwargs: Any +) -> Generator[IO, None, None]: + """Open a file, ensuring any changes only apply atomically after close. + + This context manager ensures that even unsuccessful writes will not leave a "dirty" + file or overwrite good data, and that all temp data is cleaned up. + + The semantics and behavior are intended to be nearly identical to the built-in + open() function. Differences: + - It creates any parent directories that don't exist, rather than raising. + - In 'x' mode, it checks at the beginning AND end of the write and fails if the + file exists either time. + """ + path = Path(path).resolve() + path.parent.mkdir(parents=True, exist_ok=True) + + if "x" in mode and path.exists(): + raise FileExistsError(f"{path!s} already exists") + + if "r" in mode and "+" not in mode: + # This is read-only, so we can just open the original file. + # TODO (hugh): create a reflink and read from that. + with path.open(mode, *args, **kwargs) as f: + yield f + return + + with tempfile.TemporaryDirectory(dir=path.parent) as tmp_dir: + tmp_path = Path(tmp_dir) / path.name + + if ("r" in mode or "a" in mode) and path.exists(): + # We need to copy the original file in order to support reads and appends. + # TODO (hugh): use reflinks to avoid the copy on platforms that support it. + shutil.copy2(path, tmp_path) + + with tmp_path.open(mode, *args, **kwargs) as f: + yield f + f.flush() + os.fsync(f.fileno()) + + if "x" in mode: + # Ensure that if another process has beaten us to writing the file we raise + # rather than overwrite. os.link() atomically creates a hard link to the + # target file and will raise FileExistsError if the target already exists. + os.link(tmp_path, path) + os.unlink(tmp_path) + else: + tmp_path.replace(path) + + +def safe_copy(source_path: StrPath, target_path: StrPath) -> StrPath: + """Copy a file atomically. + + Copying is not usually atomic, and on operating systems that allow multiple + writers to the same file, the result can get corrupted. If two writers copy + to the same file, the contents can become interleaved. + + We mitigate the issue somewhat by copying to a temporary file first and + then renaming. Renaming is atomic: if process 1 renames file A to X and + process 2 renames file B to X, then X will either contain the contents + of A or the contents of B, not some mixture of both. + """ + # TODO (hugh): check that there is enough free space. 
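+ # Stage the copy in a temporary directory next to the target, then atomically + # rename it into place so readers never observe a partially written file.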
+ output_path = Path(target_path).resolve() + output_path.parent.mkdir(parents=True, exist_ok=True) + with tempfile.TemporaryDirectory(dir=output_path.parent) as tmp_dir: + tmp_path = (Path(tmp_dir) / Path(source_path).name).with_suffix(".tmp") + shutil.copy2(source_path, tmp_path) + tmp_path.replace(output_path) + return target_path + + +def _reflink_linux(existing_path: Path, new_path: Path) -> None: + """Create a reflink to `existing_path` at `new_path` on Linux.""" + import fcntl + + FICLONE = 0x40049409 # magic number from # noqa: N806 + with open(existing_path, "rb") as t_f, open(new_path, "wb+") as l_f: + fcntl.ioctl(l_f.fileno(), FICLONE, t_f.fileno()) + + +def _reflink_macos(existing_path: Path, new_path: Path) -> None: + try: + clib = ctypes.CDLL("libc.dylib", use_errno=True) + except (FileNotFoundError, OSError) as e: + if ctypes.get_errno() != errno.ENOENT and not isinstance(e, FileNotFoundError): + raise + # Before macOS 11 ( None: + """Create a reflink to `existing_path` at `new_path`. + + A reflink (reflective link) is a copy-on-write reference to a file. Once linked, the + file and link are both "real" files (not symbolic or hard links) and each can be + modified independently without affecting the other; however, they share the same + underlying data blocks on disk so until one is modified they are "zero-cost" copies. + + Reflinks have all the functionality of copies, so we should use them wherever they + are supported if we would otherwise copy a file. (This is not particularly radical-- + GNU `cp` defaults to `reflink=auto`, using it whenever available) However, support + for them is limited to a small number of filesystems. They should work on: + - Linux with a Btrfs or XFS filesystem (NOT ext4) + - macOS 10.13 or later with an APFS filesystem (called clone files) + + Reflinks are also supported on Solaris and Windows with ReFSv2, but we haven't + implemented support for them. + + Like hard links, a reflink can only be created on the same filesystem as the target. + """ + if platform.system() == "Linux": + link_fn = _reflink_linux + elif platform.system() == "Darwin": + link_fn = _reflink_macos + else: + raise OSError( + errno.ENOTSUP, f"reflinks are not supported on {platform.system()}" + ) + + new_path = Path(new_path).resolve() + existing_path = Path(existing_path).resolve() + if new_path.exists(): + if not overwrite: + raise FileExistsError(f"{new_path} already exists") + logger.warning(f"Overwriting existing file {new_path}.") + new_path.unlink() + + # Create any missing parent directories. + new_path.parent.mkdir(parents=True, exist_ok=True) + + try: + link_fn(existing_path, new_path) + except OSError as e: + base_msg = f"failed to create reflink from {existing_path} to {new_path}." + if e.errno in (errno.EPERM, errno.EACCES): + raise PermissionError(f"Insufficient permissions; {base_msg}") from e + if e.errno == errno.ENOENT: + raise FileNotFoundError(f"File not found; {base_msg}") from e + if e.errno == errno.EXDEV: + raise ValueError(f"Cannot link across filesystems; {base_msg}") from e + if e.errno == errno.EISDIR: + raise IsADirectoryError(f"Cannot reflink a directory; {base_msg}") from e + if e.errno in (errno.EOPNOTSUPP, errno.ENOTSUP): + raise OSError( + errno.ENOTSUP, + f"Filesystem does not support reflinks; {base_msg}", + ) from e + if e.errno == errno.EINVAL: + raise ValueError(f"Cannot link file ranges; {base_msg}") from e + raise + + +def check_exists(path: StrPath) -> Optional[StrPath]: + """Look for variations of `path` and return the first found. 
+ + This exists to support former behavior around system-dependent paths; we used to use + ':' in Artifact paths unless we were on Windows, but this has issues when e.g. a + Linux machine is accessing an NTFS filesystem; we might need to look for the + alternate path. This checks all the possible directories we would consider creating. + """ + for dest in path_fallbacks(path): + if os.path.exists(dest): + return Path(dest) if isinstance(path, Path) else dest + return None + + +def system_preferred_path(path: StrPath, warn: bool = False) -> StrPath: + """Replace ':' with '-' in paths on Windows. + + Args: + path: The path to convert. + warn: Whether to warn if ':' is replaced. + """ + if platform.system() != "Windows": + return path + head, tail = os.path.splitdrive(path) + if warn and ":" in tail: + logger.warning(f"Replacing ':' in {tail} with '-'") + new_path = head + tail.replace(":", "-") + return Path(new_path) if isinstance(path, Path) else new_path diff --git a/parrot/lib/python3.10/site-packages/wandb/sdk/lib/fsm.py b/parrot/lib/python3.10/site-packages/wandb/sdk/lib/fsm.py new file mode 100644 index 0000000000000000000000000000000000000000..88f6bc1f8b898131c14b72a4e2b8852b02fa3ba0 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/wandb/sdk/lib/fsm.py @@ -0,0 +1,174 @@ +#!/usr/bin/env python +"""Finite state machine. + +Simple FSM implementation. + +Usage: + ```python + class A: + def on_output(self, inputs) -> None: + pass + + + class B: + def on_output(self, inputs) -> None: + pass + + + def to_b(inputs) -> bool: + return True + + + def to_a(inputs) -> bool: + return True + + + f = Fsm(states=[A(), B()], table={A: [(to_b, B)], B: [(to_a, A)]}) + f.run({"input1": 1, "input2": 2}) + ``` +""" + +import sys +from abc import abstractmethod +from dataclasses import dataclass +from typing import Callable, Dict, Generic, Optional, Sequence, Type, TypeVar, Union + +if sys.version_info >= (3, 8): + from typing import Protocol, runtime_checkable +else: + from typing_extensions import Protocol, runtime_checkable + +if sys.version_info >= (3, 10): + from typing import TypeAlias +else: + from typing_extensions import TypeAlias + +T_FsmInputs = TypeVar("T_FsmInputs", contravariant=True) +T_FsmContext = TypeVar("T_FsmContext") +T_FsmContext_cov = TypeVar("T_FsmContext_cov", covariant=True) +T_FsmContext_contra = TypeVar("T_FsmContext_contra", contravariant=True) + + +@runtime_checkable +class FsmStateCheck(Protocol[T_FsmInputs]): + @abstractmethod + def on_check(self, inputs: T_FsmInputs) -> None: ... # pragma: no cover + + +@runtime_checkable +class FsmStateOutput(Protocol[T_FsmInputs]): + @abstractmethod + def on_state(self, inputs: T_FsmInputs) -> None: ... # pragma: no cover + + +@runtime_checkable +class FsmStateEnter(Protocol[T_FsmInputs]): + @abstractmethod + def on_enter(self, inputs: T_FsmInputs) -> None: ... # pragma: no cover + + +@runtime_checkable +class FsmStateEnterWithContext(Protocol[T_FsmInputs, T_FsmContext_contra]): + @abstractmethod + def on_enter( + self, inputs: T_FsmInputs, context: T_FsmContext_contra + ) -> None: ... # pragma: no cover + + +@runtime_checkable +class FsmStateStay(Protocol[T_FsmInputs]): + @abstractmethod + def on_stay(self, inputs: T_FsmInputs) -> None: ... # pragma: no cover + + +@runtime_checkable +class FsmStateExit(Protocol[T_FsmInputs, T_FsmContext_cov]): + @abstractmethod + def on_exit(self, inputs: T_FsmInputs) -> T_FsmContext_cov: ... 
# pragma: no cover + + +# It would be nice if python provided optional protocol members, but it doesnt as described here: +# https://peps.python.org/pep-0544/#support-optional-protocol-members +# Until then, we can only enforce that a state at least supports one protocol interface. This +# unfortunately will not check the signature of other potential protocols. +FsmState: TypeAlias = Union[ + FsmStateCheck[T_FsmInputs], + FsmStateOutput[T_FsmInputs], + FsmStateEnter[T_FsmInputs], + FsmStateEnterWithContext[T_FsmInputs, T_FsmContext], + FsmStateStay[T_FsmInputs], + FsmStateExit[T_FsmInputs, T_FsmContext], +] + + +@dataclass +class FsmEntry(Generic[T_FsmInputs, T_FsmContext]): + condition: Callable[[T_FsmInputs], bool] + target_state: Type[FsmState[T_FsmInputs, T_FsmContext]] + action: Optional[Callable[[T_FsmInputs], None]] = None + + +FsmTableWithContext: TypeAlias = Dict[ + Type[FsmState[T_FsmInputs, T_FsmContext]], + Sequence[FsmEntry[T_FsmInputs, T_FsmContext]], +] + + +FsmTable: TypeAlias = FsmTableWithContext[T_FsmInputs, None] + + +class FsmWithContext(Generic[T_FsmInputs, T_FsmContext]): + _state_dict: Dict[Type[FsmState], FsmState] + _table: FsmTableWithContext[T_FsmInputs, T_FsmContext] + _state: FsmState[T_FsmInputs, T_FsmContext] + _states: Sequence[FsmState] + + def __init__( + self, + states: Sequence[FsmState], + table: FsmTableWithContext[T_FsmInputs, T_FsmContext], + ) -> None: + self._states = states + self._table = table + self._state_dict = {type(s): s for s in states} + self._state = self._state_dict[type(states[0])] + + def _transition( + self, + inputs: T_FsmInputs, + new_state: Type[FsmState[T_FsmInputs, T_FsmContext]], + action: Optional[Callable[[T_FsmInputs], None]], + ) -> None: + if action: + action(inputs) + + context = None + if isinstance(self._state, FsmStateExit): + context = self._state.on_exit(inputs) + + prev_state = type(self._state) + if prev_state == new_state: + if isinstance(self._state, FsmStateStay): + self._state.on_stay(inputs) + else: + self._state = self._state_dict[new_state] + if context and isinstance(self._state, FsmStateEnterWithContext): + self._state.on_enter(inputs, context=context) + elif isinstance(self._state, FsmStateEnter): + self._state.on_enter(inputs) + + def _check_transitions(self, inputs: T_FsmInputs) -> None: + for entry in self._table[type(self._state)]: + if entry.condition(inputs): + self._transition(inputs, entry.target_state, entry.action) + return + + def input(self, inputs: T_FsmInputs) -> None: + if isinstance(self._state, FsmStateCheck): + self._state.on_check(inputs) + self._check_transitions(inputs) + if isinstance(self._state, FsmStateOutput): + self._state.on_state(inputs) + + +Fsm: TypeAlias = FsmWithContext[T_FsmInputs, None] diff --git a/parrot/lib/python3.10/site-packages/wandb/sdk/lib/gitlib.py b/parrot/lib/python3.10/site-packages/wandb/sdk/lib/gitlib.py new file mode 100644 index 0000000000000000000000000000000000000000..953f67083884adb018a250bd4ed54fab42857139 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/wandb/sdk/lib/gitlib.py @@ -0,0 +1,239 @@ +import configparser +import logging +import os +from typing import TYPE_CHECKING, Any, Optional +from urllib.parse import urlparse, urlunparse + +import wandb + +try: + from git import ( # type: ignore + GitCommandError, + InvalidGitRepositoryError, + NoSuchPathError, + Repo, + ) +except ImportError: + Repo = None # type: ignore + +if TYPE_CHECKING: + from git import Repo + + +logger = logging.getLogger(__name__) + + +class GitRepo: + def __init__( + 
self, + root: Optional[str] = None, + remote: str = "origin", + lazy: bool = True, + remote_url: Optional[str] = None, + commit: Optional[str] = None, + ) -> None: + self.remote_name = remote if remote_url is None else None + self._root = root + self._remote_url = remote_url + self._commit = commit + self._repo = None + self._repo_initialized = False + if not lazy: + self._repo = self._init_repo() + + def _init_repo(self) -> Optional[Repo]: + self._repo_initialized = True + if Repo is None: + return None + if self.remote_name is None: + return None + try: + return Repo(self._root or os.getcwd(), search_parent_directories=True) + except FileNotFoundError: + wandb.termwarn("current working directory has been invalidated") + logger.warn("current working directory has been invalidated") + except InvalidGitRepositoryError: + logger.debug("git repository is invalid") + except NoSuchPathError: + wandb.termwarn(f"git root {self._root} does not exist") + logger.warn(f"git root {self._root} does not exist") + return None + + @property + def repo(self) -> Optional[Repo]: + if not self._repo_initialized: + self._repo = self._init_repo() + return self._repo + + @property + def auto(self) -> bool: + return self._remote_url is None + + def is_untracked(self, file_name: str) -> Optional[bool]: + if not self.repo: + return True + try: + return file_name in self.repo.untracked_files + except GitCommandError: + return None + + @property + def enabled(self) -> bool: + return bool(self.repo) + + @property + def root(self) -> Any: + if not self.repo: + return None + try: + return self.repo.git.rev_parse("--show-toplevel") + except GitCommandError as e: + # todo: collect telemetry on this + logger.error(f"git root error: {e}") + return None + + @property + def dirty(self) -> Any: + if not self.repo: + return False + try: + return self.repo.is_dirty() + except GitCommandError: + return False + + @property + def email(self) -> Optional[str]: + if not self.repo: + return None + try: + return self.repo.config_reader().get_value("user", "email") # type: ignore + except configparser.Error: + return None + + @property + def last_commit(self) -> Any: + if self._commit: + return self._commit + if not self.repo: + return None + if not self.repo.head or not self.repo.head.is_valid(): + return None + # TODO: Saw a user getting a Unicode decode error when parsing refs, + # more details on implementing a real fix in [WB-4064] + try: + if len(self.repo.refs) > 0: # type: ignore[arg-type] + return self.repo.head.commit.hexsha + else: + return self.repo.git.show_ref("--head").split(" ")[0] + except Exception: + logger.exception("Unable to find most recent commit in git") + return None + + @property + def branch(self) -> Any: + if not self.repo: + return None + return self.repo.head.ref.name + + @property + def remote(self) -> Any: + if not self.repo: + return None + try: + return self.repo.remotes[self.remote_name] # type: ignore[index] + except IndexError: + return None + + # the --submodule=diff option doesn't exist in pre-2.11 versions of git (november 2016) + # https://stackoverflow.com/questions/10757091/git-list-of-all-changed-files-including-those-in-submodules + @property + def has_submodule_diff(self) -> bool: + if not self.repo: + return False + return bool(self.repo.git.version_info >= (2, 11, 0)) + + @property + def remote_url(self) -> Any: + if self._remote_url: + return self._remote_url + if not self.remote: + return None + parsed = urlparse(self.remote.url) + hostname = parsed.hostname + if parsed.port is not 
None: + hostname = f"{hostname}:{parsed.port}" + if parsed.password is not None: + return urlunparse(parsed._replace(netloc=f"{parsed.username}:@{hostname}")) + return urlunparse(parsed._replace(netloc=hostname)) + + @property + def root_dir(self) -> Any: + if not self.repo: + return None + try: + return self.repo.git.rev_parse("--show-toplevel") + except GitCommandError: + return None + + def get_upstream_fork_point(self) -> Any: + """Get the most recent ancestor of HEAD that occurs on an upstream branch. + + First looks at the current branch's tracking branch, if applicable. If + that doesn't work, looks at every other branch to find the most recent + ancestor of HEAD that occurs on a tracking branch. + + Returns: + git.Commit object or None + """ + possible_relatives = [] + try: + if not self.repo: + return None + try: + active_branch = self.repo.active_branch + except (TypeError, ValueError): + logger.debug("git is in a detached head state") + return None # detached head + else: + tracking_branch = active_branch.tracking_branch() + if tracking_branch: + possible_relatives.append(tracking_branch.commit) + + if not possible_relatives: + for branch in self.repo.branches: # type: ignore[attr-defined] + tracking_branch = branch.tracking_branch() + if tracking_branch is not None: + possible_relatives.append(tracking_branch.commit) + + head = self.repo.head + most_recent_ancestor = None + for possible_relative in possible_relatives: + # at most one: + for ancestor in self.repo.merge_base(head, possible_relative): + if most_recent_ancestor is None: + most_recent_ancestor = ancestor + elif self.repo.is_ancestor(most_recent_ancestor, ancestor): # type: ignore + most_recent_ancestor = ancestor + return most_recent_ancestor + except GitCommandError as e: + logger.debug("git remote upstream fork point could not be found") + logger.debug(str(e)) + return None + + def tag(self, name: str, message: Optional[str]) -> Any: + if not self.repo: + return None + try: + return self.repo.create_tag(f"wandb/{name}", message=message, force=True) + except GitCommandError: + print("Failed to tag repository.") + return None + + def push(self, name: str) -> Any: + if not self.remote: + return None + try: + return self.remote.push(f"wandb/{name}", force=True) + except GitCommandError: + logger.debug("failed to push git") + return None diff --git a/parrot/lib/python3.10/site-packages/wandb/sdk/lib/gql_request.py b/parrot/lib/python3.10/site-packages/wandb/sdk/lib/gql_request.py new file mode 100644 index 0000000000000000000000000000000000000000..381097447c805593f7710887b765e475f3f4a904 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/wandb/sdk/lib/gql_request.py @@ -0,0 +1,65 @@ +"""A simple GraphQL client for sending queries and mutations. + +Note: This was originally wandb/vendor/gql-0.2.0/wandb_gql/transport/requests.py +The only substantial change is to re-use a requests.Session object. 
+""" + +from typing import Any, Callable, Dict, Optional, Tuple, Union + +import requests +from wandb_gql.transport.http import HTTPTransport +from wandb_graphql.execution import ExecutionResult +from wandb_graphql.language import ast +from wandb_graphql.language.printer import print_ast + + +class GraphQLSession(HTTPTransport): + def __init__( + self, + url: str, + auth: Optional[Union[Tuple[str, str], Callable]] = None, + use_json: bool = False, + timeout: Optional[Union[int, float]] = None, + proxies: Optional[Dict[str, str]] = None, + **kwargs: Any, + ) -> None: + """Setup a session for sending GraphQL queries and mutations. + + Args: + url (str): The GraphQL URL + auth (tuple or callable): Auth tuple or callable for Basic/Digest/Custom HTTP Auth + use_json (bool): Send request body as JSON instead of form-urlencoded + timeout (int, float): Specifies a default timeout for requests (Default: None) + """ + super().__init__(url, **kwargs) + self.session = requests.Session() + if proxies: + self.session.proxies.update(proxies) + self.session.auth = auth + self.default_timeout = timeout + self.use_json = use_json + + def execute( + self, + document: ast.Node, + variable_values: Optional[Dict] = None, + timeout: Optional[Union[int, float]] = None, + ) -> ExecutionResult: + query_str = print_ast(document) + payload = {"query": query_str, "variables": variable_values or {}} + + data_key = "json" if self.use_json else "data" + post_args = { + "headers": self.headers, + "cookies": self.cookies, + "timeout": timeout or self.default_timeout, + data_key: payload, + } + request = self.session.post(self.url, **post_args) + request.raise_for_status() + + result = request.json() + data, errors = result.get("data"), result.get("errors") + if data is None and errors is None: + raise RuntimeError(f"Received non-compatible response: {result}") + return ExecutionResult(data=data, errors=errors) diff --git a/parrot/lib/python3.10/site-packages/wandb/sdk/lib/handler_util.py b/parrot/lib/python3.10/site-packages/wandb/sdk/lib/handler_util.py new file mode 100644 index 0000000000000000000000000000000000000000..b4efd8d571baebab80f9a7702efdd962f0c9028d --- /dev/null +++ b/parrot/lib/python3.10/site-packages/wandb/sdk/lib/handler_util.py @@ -0,0 +1,21 @@ +import wandb.data_types as data_types + + +def get_types(): + classes = map(data_types.__dict__.get, data_types.__all__) + types = [] + for cls in classes: + if hasattr(cls, "_log_type") and cls._log_type is not None: + types.append(cls._log_type) + # add table-file type because this is a special case + # that does not have a matching _log_type for artifacts + # and files + types.append("table-file") + return types + + +WANDB_TYPES = get_types() + + +def metric_is_wandb_dict(metric): + return "_type" in list(metric.keys()) and metric["_type"] in WANDB_TYPES diff --git a/parrot/lib/python3.10/site-packages/wandb/sdk/lib/hashutil.py b/parrot/lib/python3.10/site-packages/wandb/sdk/lib/hashutil.py new file mode 100644 index 0000000000000000000000000000000000000000..579c86c9116207e8bfd7e02307ed2da4460822d3 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/wandb/sdk/lib/hashutil.py @@ -0,0 +1,62 @@ +import base64 +import hashlib +import mmap +import os +import sys +from pathlib import Path +from typing import NewType, Union + +from wandb.sdk.lib.paths import StrPath + +ETag = NewType("ETag", str) +HexMD5 = NewType("HexMD5", str) +B64MD5 = NewType("B64MD5", str) + + +def _md5(data: bytes = b"") -> "hashlib._Hash": + """Allow FIPS-compliant md5 hash when 
supported.""" + if sys.version_info >= (3, 9): + return hashlib.md5(data, usedforsecurity=False) + else: + return hashlib.md5(data) + + +def md5_string(string: str) -> B64MD5: + return _b64_from_hasher(_md5(string.encode("utf-8"))) + + +def _b64_from_hasher(hasher: "hashlib._Hash") -> B64MD5: + return B64MD5(base64.b64encode(hasher.digest()).decode("ascii")) + + +def b64_to_hex_id(string: B64MD5) -> HexMD5: + return HexMD5(base64.standard_b64decode(string).hex()) + + +def hex_to_b64_id(encoded_string: Union[str, bytes]) -> B64MD5: + if isinstance(encoded_string, bytes): + encoded_string = encoded_string.decode("utf-8") + as_str = bytes.fromhex(encoded_string) + return B64MD5(base64.standard_b64encode(as_str).decode("utf-8")) + + +def md5_file_b64(*paths: StrPath) -> B64MD5: + return _b64_from_hasher(_md5_file_hasher(*paths)) + + +def md5_file_hex(*paths: StrPath) -> HexMD5: + return HexMD5(_md5_file_hasher(*paths).hexdigest()) + + +def _md5_file_hasher(*paths: StrPath) -> "hashlib._Hash": + md5_hash = _md5() + + for path in sorted(Path(p) for p in paths): + with path.open("rb") as f: + if os.stat(f.fileno()).st_size <= 1024 * 1024: + md5_hash.update(f.read()) + else: + with mmap.mmap(f.fileno(), length=0, access=mmap.ACCESS_READ) as mview: + md5_hash.update(mview) + + return md5_hash diff --git a/parrot/lib/python3.10/site-packages/wandb/sdk/lib/import_hooks.py b/parrot/lib/python3.10/site-packages/wandb/sdk/lib/import_hooks.py new file mode 100644 index 0000000000000000000000000000000000000000..0ef7bb3f4384797fb491650acd0446ef0d1cb35b --- /dev/null +++ b/parrot/lib/python3.10/site-packages/wandb/sdk/lib/import_hooks.py @@ -0,0 +1,275 @@ +"""Implements a post-import hook mechanism. + +Styled as per PEP-369. Note that it doesn't cope with modules being reloaded. + +Note: This file is based on +https://github.com/GrahamDumpleton/wrapt/blob/1.12.1/src/wrapt/importer.py +and manual backports of later patches up to 1.15.0 in the wrapt repository +(with slight modifications). +""" + +import sys +import threading +from importlib.util import find_spec +from typing import Any, Callable, Dict, Optional, Union + +# The dictionary registering any post import hooks to be triggered once +# the target module has been imported. Once a module has been imported +# and the hooks fired, the list of hooks recorded against the target +# module will be truncated but the list left in the dictionary. This +# acts as a flag to indicate that the module had already been imported. + +_post_import_hooks: Dict = {} +_post_import_hooks_init: bool = False +_post_import_hooks_lock = threading.RLock() + +# Register a new post import hook for the target module name. This +# differs from the PEP-369 implementation in that it also allows the +# hook function to be specified as a string consisting of the name of +# the callback in the form 'module:function'. This will result in a +# proxy callback being registered which will defer loading of the +# specified module containing the callback function until required. 
+ + +def _create_import_hook_from_string(name: str) -> Callable: + def import_hook(module: Any) -> Callable: + module_name, function = name.split(":") + attrs = function.split(".") + __import__(module_name) + callback = sys.modules[module_name] + for attr in attrs: + callback = getattr(callback, attr) + return callback(module) # type: ignore + + return import_hook + + +def register_post_import_hook( + hook: Union[str, Callable], hook_id: str, name: str +) -> None: + # Create a deferred import hook if hook is a string name rather than + # a callable function. + + if isinstance(hook, (str,)): + hook = _create_import_hook_from_string(hook) + + # Automatically install the import hook finder if it has not already + # been installed. + + with _post_import_hooks_lock: + global _post_import_hooks_init + + if not _post_import_hooks_init: + _post_import_hooks_init = True + sys.meta_path.insert(0, ImportHookFinder()) # type: ignore + + # Check if the module is already imported. If not, register the hook + # to be called after import. + + module = sys.modules.get(name, None) + + if module is None: + _post_import_hooks.setdefault(name, {}).update({hook_id: hook}) + + # If the module is already imported, we fire the hook right away. Note that + # the hook is called outside of the lock to avoid deadlocks if code run as a + # consequence of calling the module import hook in turn triggers a separate + # thread which tries to register an import hook. + + if module is not None: + hook(module) + + +def unregister_post_import_hook(name: str, hook_id: Optional[str]) -> None: + # Remove the import hook if it has been registered. + with _post_import_hooks_lock: + hooks = _post_import_hooks.get(name) + + if hooks is not None: + if hook_id is not None: + hooks.pop(hook_id, None) + + if not hooks: + del _post_import_hooks[name] + else: + del _post_import_hooks[name] + + +def unregister_all_post_import_hooks() -> None: + with _post_import_hooks_lock: + _post_import_hooks.clear() + + +# Indicate that a module has been loaded. Any post import hooks which +# were registered against the target module will be invoked. If an +# exception is raised in any of the post import hooks, that will cause +# the import of the target module to fail. + + +def notify_module_loaded(module: Any) -> None: + name = getattr(module, "__name__", None) + + with _post_import_hooks_lock: + hooks = _post_import_hooks.pop(name, {}) + + # Note that the hook is called outside of the lock to avoid deadlocks if + # code run as a consequence of calling the module import hook in turn + # triggers a separate thread which tries to register an import hook. + for hook in hooks.values(): + if hook: + hook(module) + + +# A custom module import finder. This intercepts attempts to import +# modules and watches out for attempts to import target modules of +# interest. When a module of interest is imported, then any post import +# hooks which are registered will be invoked. + + +class _ImportHookChainedLoader: + def __init__(self, loader: Any) -> None: + self.loader = loader + + if hasattr(loader, "load_module"): + self.load_module = self._load_module + if hasattr(loader, "create_module"): + self.create_module = self._create_module + if hasattr(loader, "exec_module"): + self.exec_module = self._exec_module + + def _set_loader(self, module: Any) -> None: + # Set module's loader to self.loader unless it's already set to + # something else. Import machinery will set it to spec.loader if it is + # None, so handle None as well. 
The module may not support attribute + # assignment, in which case we simply skip it. Note that we also deal + # with __loader__ not existing at all. This is to future proof things + # due to proposal to remove the attribute as described in the GitHub + # issue at https://github.com/python/cpython/issues/77458. Also prior + # to Python 3.3, the __loader__ attribute was only set if a custom + # module loader was used. It isn't clear whether the attribute still + # existed in that case or was set to None. + + class UNDEFINED: + pass + + if getattr(module, "__loader__", UNDEFINED) in (None, self): + try: + module.__loader__ = self.loader + except AttributeError: + pass + + if ( + getattr(module, "__spec__", None) is not None + and getattr(module.__spec__, "loader", None) is self + ): + module.__spec__.loader = self.loader + + def _load_module(self, fullname: str) -> Any: + module = self.loader.load_module(fullname) + self._set_loader(module) + notify_module_loaded(module) + + return module + + # Python 3.4 introduced create_module() and exec_module() instead of + # load_module() alone. Splitting the two steps. + + def _create_module(self, spec: Any) -> Any: + return self.loader.create_module(spec) + + def _exec_module(self, module: Any) -> None: + self._set_loader(module) + self.loader.exec_module(module) + notify_module_loaded(module) + + +class ImportHookFinder: + def __init__(self) -> None: + self.in_progress: Dict = {} + + def find_module( # type: ignore + self, + fullname: str, + path: Optional[str] = None, + ) -> Optional["_ImportHookChainedLoader"]: + # If the module being imported is not one we have registered + # post import hooks for, we can return immediately. We will + # take no further part in the importing of this module. + + with _post_import_hooks_lock: + if fullname not in _post_import_hooks: + return None + + # When we are interested in a specific module, we will call back + # into the import system a second time to defer to the import + # finder that is supposed to handle the importing of the module. + # We set an in progress flag for the target module so that on + # the second time through we don't trigger another call back + # into the import system and cause a infinite loop. + + if fullname in self.in_progress: + return None + + self.in_progress[fullname] = True + + # Now call back into the import system again. + + try: + # For Python 3 we need to use find_spec().loader + # from the importlib.util module. It doesn't actually + # import the target module and only finds the + # loader. If a loader is found, we need to return + # our own loader which will then in turn call the + # real loader to import the module and invoke the + # post import hooks. + loader = getattr(find_spec(fullname), "loader", None) + + if loader and not isinstance(loader, _ImportHookChainedLoader): + return _ImportHookChainedLoader(loader) + + finally: + del self.in_progress[fullname] + + def find_spec( + self, fullname: str, path: Optional[str] = None, target: Any = None + ) -> Any: + # Since Python 3.4, you are meant to implement find_spec() method + # instead of find_module() and since Python 3.10 you get deprecation + # warnings if you don't define find_spec(). + + # If the module being imported is not one we have registered + # post import hooks for, we can return immediately. We will + # take no further part in the importing of this module. 
+ + with _post_import_hooks_lock: + if fullname not in _post_import_hooks: + return None + + # When we are interested in a specific module, we will call back + # into the import system a second time to defer to the import + # finder that is supposed to handle the importing of the module. + # We set an in progress flag for the target module so that on + # the second time through we don't trigger another call back + # into the import system and cause a infinite loop. + + if fullname in self.in_progress: + return None + + self.in_progress[fullname] = True + + # Now call back into the import system again. + + try: + # This should only be Python 3 so find_spec() should always + # exist so don't need to check. + spec = find_spec(fullname) + loader = getattr(spec, "loader", None) + + if loader and not isinstance(loader, _ImportHookChainedLoader): + assert spec is not None + spec.loader = _ImportHookChainedLoader(loader) # type: ignore + + return spec + + finally: + del self.in_progress[fullname] diff --git a/parrot/lib/python3.10/site-packages/wandb/sdk/lib/ipython.py b/parrot/lib/python3.10/site-packages/wandb/sdk/lib/ipython.py new file mode 100644 index 0000000000000000000000000000000000000000..19d5cd9067b4b6294fed5119ddde2d56b97c7d3f --- /dev/null +++ b/parrot/lib/python3.10/site-packages/wandb/sdk/lib/ipython.py @@ -0,0 +1,146 @@ +import logging +import sys +import warnings +from typing import Optional + +if sys.version_info >= (3, 8): + from typing import Literal +else: + from typing_extensions import Literal + +import wandb + +PythonType = Literal["python", "ipython", "jupyter"] + +logger = logging.getLogger(__name__) + + +TABLE_STYLES = """ +""" + + +def toggle_button(what="run"): + return f"" + + +def _get_python_type() -> PythonType: + if "IPython" not in sys.modules: + return "python" + + try: + from IPython import get_ipython # type: ignore + + # Calling get_ipython can cause an ImportError + if get_ipython() is None: + return "python" + except ImportError: + return "python" + + # jupyter-based environments (e.g. 
jupyter itself, colab, kaggle, etc) have a connection file + ip_kernel_app_connection_file = ( + (get_ipython().config.get("IPKernelApp", {}) or {}) + .get("connection_file", "") + .lower() + ) or ( + (get_ipython().config.get("ColabKernelApp", {}) or {}) + .get("connection_file", "") + .lower() + ) + + if ( + ("terminal" in get_ipython().__module__) + or ("jupyter" not in ip_kernel_app_connection_file) + or ("spyder" in sys.modules) + ): + return "ipython" + else: + return "jupyter" + + +def in_jupyter() -> bool: + return _get_python_type() == "jupyter" + + +def in_notebook() -> bool: + return _get_python_type() != "python" + + +def display_html(html: str): # type: ignore + """Display HTML in notebooks, is a noop outside a jupyter context.""" + if wandb.run and wandb.run._settings.silent: + return + try: + from IPython.core.display import HTML, display # type: ignore + except ImportError: + wandb.termwarn("Unable to render HTML, can't import display from ipython.core") + return False + return display(HTML(html)) + + +def display_widget(widget): + """Display ipywidgets in notebooks, is a noop outside of a jupyter context.""" + if wandb.run and wandb.run._settings.silent: + return + try: + from IPython.core.display import display + except ImportError: + wandb.termwarn( + "Unable to render Widget, can't import display from ipython.core" + ) + return False + return display(widget) + + +class ProgressWidget: + """A simple wrapper to render a nice progress bar with a label.""" + + def __init__(self, widgets, min, max): + self.widgets = widgets + self._progress = widgets.FloatProgress(min=min, max=max) + self._label = widgets.Label() + self._widget = self.widgets.VBox([self._label, self._progress]) + self._displayed = False + self._disabled = False + + def update(self, value: float, label: str) -> None: + if self._disabled: + return + try: + self._progress.value = value + self._label.value = label + if not self._displayed: + self._displayed = True + display_widget(self._widget) + except Exception as e: + self._disabled = True + logger.exception(e) + wandb.termwarn( + "Unable to render progress bar, see the user log for details" + ) + + def close(self) -> None: + if self._disabled or not self._displayed: + return + self._widget.close() + + +def jupyter_progress_bar(min: float = 0, max: float = 1.0) -> Optional[ProgressWidget]: + """Return an ipywidget progress bar or None if we can't import it.""" + widgets = wandb.util.get_module("ipywidgets") + try: + if widgets is None: + # TODO: this currently works in iPython but it's deprecated since 4.0 + with warnings.catch_warnings(): + warnings.simplefilter("ignore") + from IPython.html import widgets # type: ignore + + assert hasattr(widgets, "VBox") + assert hasattr(widgets, "Label") + assert hasattr(widgets, "FloatProgress") + return ProgressWidget(widgets, min=min, max=max) + except (ImportError, AssertionError): + return None diff --git a/parrot/lib/python3.10/site-packages/wandb/sdk/lib/json_util.py b/parrot/lib/python3.10/site-packages/wandb/sdk/lib/json_util.py new file mode 100644 index 0000000000000000000000000000000000000000..6407319221118b822d7b047f1599f7cfe3b954d0 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/wandb/sdk/lib/json_util.py @@ -0,0 +1,80 @@ +import json +import logging +import os +from typing import Any, Union + +logger = logging.getLogger(__name__) + + +try: + import orjson # type: ignore + + # todo: orjson complies with the json standard and does not support + # NaN, Infinity, and -Infinity. 
Should be fixed in the future. + + # additional safeguard for now + if os.environ.get("_WANDB_ORJSON"): + + def dumps(obj: Any, **kwargs: Any) -> str: + """Wrapper for .dumps.""" + cls = kwargs.pop("cls", None) + try: + _kwargs = kwargs.copy() + if cls: + _kwargs["default"] = cls.default + encoded = orjson.dumps( + obj, option=orjson.OPT_NON_STR_KEYS, **_kwargs + ).decode() + except Exception as e: + logger.exception(f"Error using orjson.dumps: {e}") + if cls: + kwargs["cls"] = cls + encoded = json.dumps(obj, **kwargs) + + return encoded # type: ignore[no-any-return] + + def dump(obj: Any, fp: Any, **kwargs: Any) -> None: + """Wrapper for .dump.""" + cls = kwargs.pop("cls", None) + try: + _kwargs = kwargs.copy() + if cls: + _kwargs["default"] = cls.default + encoded = orjson.dumps(obj, option=orjson.OPT_NON_STR_KEYS, **_kwargs) + fp.write(encoded) + except Exception as e: + logger.exception(f"Error using orjson.dump: {e}") + if cls: + kwargs["cls"] = cls + json.dump(obj, fp, **kwargs) + + def loads(obj: Union[str, bytes]) -> Any: + """Wrapper for orjson.loads.""" + try: + decoded = orjson.loads(obj) + except Exception as e: + logger.exception(f"Error using orjson.loads: {e}") + decoded = json.loads(obj) + + return decoded + + def load(fp: Any) -> Any: + """Wrapper for orjson.load.""" + try: + decoded = orjson.loads(fp.read()) + except Exception as e: + logger.exception(f"Error using orjson.load: {e}") + decoded = json.load(fp) + + return decoded + + else: + from json import ( # type: ignore[assignment] # noqa: F401 + dump, + dumps, + load, + loads, + ) + +except ImportError: + from json import dump, dumps, load, loads # type: ignore[assignment] # noqa: F401 diff --git a/parrot/lib/python3.10/site-packages/wandb/sdk/lib/lazyloader.py b/parrot/lib/python3.10/site-packages/wandb/sdk/lib/lazyloader.py new file mode 100644 index 0000000000000000000000000000000000000000..b54cab8cc80d15fbe489a84231b2d1f06297bbcf --- /dev/null +++ b/parrot/lib/python3.10/site-packages/wandb/sdk/lib/lazyloader.py @@ -0,0 +1,63 @@ +"""module lazyloader.""" + +import importlib +import sys +import types + + +class LazyLoader(types.ModuleType): + """Lazily import a module, mainly to avoid pulling in large dependencies. + + We use this for tensorflow and other optional libraries primarily at the + top module level. + """ + + # The lint error here is incorrect. + def __init__( + self, + local_name, # pylint: disable=super-on-old-class + parent_module_globals, + name, + warning=None, + ): + self._local_name = local_name + self._parent_module_globals = parent_module_globals + self._warning = warning + + super().__init__(str(name)) + + def _load(self): + """Load the module and insert it into the parent's globals.""" + # Import the target module and insert it into the parent's namespace + module = importlib.import_module(self.__name__) + self._parent_module_globals[self._local_name] = module + # print("import", self.__name__) + # print("Set global", self._local_name) + # print("mod", module) + sys.modules[self._local_name] = module + + # Emit a warning if one was specified + if self._warning: + print(self._warning) + # Make sure to only warn once. + self._warning = None + + # Update this object's dict so that if someone keeps a reference to the + # LazyLoader, lookups are efficient (__getattr__ is only called on lookups + # that fail). 
+ self.__dict__.update(module.__dict__) + + return module + + # def __getattribute__(self, item): + # print("getattribute", item) + + def __getattr__(self, item): + # print("getattr", item) + module = self._load() + return getattr(module, item) + + def __dir__(self): + # print("dir") + module = self._load() + return dir(module) diff --git a/parrot/lib/python3.10/site-packages/wandb/sdk/lib/mailbox.py b/parrot/lib/python3.10/site-packages/wandb/sdk/lib/mailbox.py new file mode 100644 index 0000000000000000000000000000000000000000..e0dbb49c4b7567825b3b43fcb71fef6f0f926039 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/wandb/sdk/lib/mailbox.py @@ -0,0 +1,460 @@ +"""mailbox.""" + +import secrets +import string +import threading +import time +from typing import TYPE_CHECKING, Callable, Dict, List, Optional, Tuple + +from wandb.errors import Error +from wandb.proto import wandb_internal_pb2 as pb + +if TYPE_CHECKING: + from wandb.sdk.interface.interface_shared import InterfaceShared + + +def _generate_address(length: int = 12) -> str: + address = "".join( + secrets.choice(string.ascii_lowercase + string.digits) for i in range(length) + ) + return address + + +class MailboxError(Error): + """Generic Mailbox Exception.""" + + pass + + +class ContextCancelledError(MailboxError): + """Context cancelled Exception.""" + + pass + + +class _MailboxWaitAll: + _event: threading.Event + _lock: threading.Lock + _handles: List["MailboxHandle"] + _failed_handles: int + + def __init__(self) -> None: + self._event = threading.Event() + self._lock = threading.Lock() + self._handles = [] + self._failed_handles = 0 + + def notify(self) -> None: + with self._lock: + self._event.set() + + def _add_handle(self, handle: "MailboxHandle") -> None: + handle._slot._set_wait_all(self) + self._handles.append(handle) + + # set wait_all event if an event has already been set before added to wait_all + if handle._slot._event.is_set(): + self._event.set() + + @property + def active_handles(self) -> List["MailboxHandle"]: + return [h for h in self._handles if not h._is_failed] + + @property + def active_handles_count(self) -> int: + return len(self.active_handles) + + @property + def failed_handles_count(self) -> int: + return self._failed_handles + + def _mark_handle_failed(self, handle: "MailboxHandle") -> None: + handle._mark_failed() + self._failed_handles += 1 + + def clear_handles(self) -> None: + for handle in self._handles: + handle._slot._clear_wait_all() + self._handles = [] + + def _wait(self, timeout: float) -> bool: + return self._event.wait(timeout=timeout) + + def _get_and_clear(self, timeout: float) -> List["MailboxHandle"]: + found: List[MailboxHandle] = [] + if self._wait(timeout=timeout): + with self._lock: + remove_handles = [] + + # Look through handles for triggered events + for handle in self._handles: + if handle._slot._event.is_set(): + found.append(handle) + remove_handles.append(handle) + + for handle in remove_handles: + self._handles.remove(handle) + + self._event.clear() + return found + + +class _MailboxSlot: + _result: Optional[pb.Result] + _event: threading.Event + _lock: threading.Lock + _wait_all: Optional[_MailboxWaitAll] + _address: str + _abandoned: bool + + def __init__(self, address: str) -> None: + self._result = None + self._event = threading.Event() + self._lock = threading.Lock() + self._address = address + self._wait_all = None + self._abandoned = False + + def _set_wait_all(self, wait_all: _MailboxWaitAll) -> None: + assert not self._wait_all, "Only one caller can wait_all 
for a slot at a time" + self._wait_all = wait_all + + def _clear_wait_all(self) -> None: + self._wait_all = None + + def _wait(self, timeout: float) -> bool: + return self._event.wait(timeout=timeout) + + def _get_and_clear(self, timeout: float) -> Tuple[Optional[pb.Result], bool]: + found = None + if self._wait(timeout=timeout): + with self._lock: + found = self._result + self._event.clear() + abandoned = self._abandoned + return found, abandoned + + def _deliver(self, result: pb.Result) -> None: + with self._lock: + self._result = result + self._event.set() + + if self._wait_all: + self._wait_all.notify() + + def _notify_abandon(self) -> None: + self._abandoned = True + with self._lock: + self._event.set() + + if self._wait_all: + self._wait_all.notify() + + +class MailboxProbe: + _result: Optional[pb.Result] + _handle: Optional["MailboxHandle"] + + def __init__(self) -> None: + self._handle = None + self._result = None + + def set_probe_result(self, result: pb.Result) -> None: + self._result = result + + def get_probe_result(self) -> Optional[pb.Result]: + return self._result + + def get_mailbox_handle(self) -> Optional["MailboxHandle"]: + return self._handle + + def set_mailbox_handle(self, handle: "MailboxHandle") -> None: + self._handle = handle + + +class MailboxProgress: + _percent_done: float + _handle: "MailboxHandle" + _probe_handles: List[MailboxProbe] + _stopped: bool + + def __init__(self, _handle: "MailboxHandle") -> None: + self._handle = _handle + self._percent_done = 0.0 + self._probe_handles = [] + self._stopped = False + + @property + def percent_done(self) -> float: + return self._percent_done + + def set_percent_done(self, percent_done: float) -> None: + self._percent_done = percent_done + + def add_probe_handle(self, probe_handle: MailboxProbe) -> None: + self._probe_handles.append(probe_handle) + + def get_probe_handles(self) -> List[MailboxProbe]: + return self._probe_handles + + def wait_stop(self) -> None: + self._stopped = True + + @property + def _is_stopped(self) -> bool: + return self._stopped + + +class MailboxProgressAll: + _progress_handles: List[MailboxProgress] + + def __init__(self) -> None: + self._progress_handles = [] + + def add_progress_handle(self, progress_handle: MailboxProgress) -> None: + self._progress_handles.append(progress_handle) + + def get_progress_handles(self) -> List[MailboxProgress]: + # only return progress handles for not failed handles + return [ph for ph in self._progress_handles if not ph._handle._is_failed] + + +class MailboxHandle: + _mailbox: "Mailbox" + _slot: _MailboxSlot + _on_probe: Optional[Callable[[MailboxProbe], None]] + _on_progress: Optional[Callable[[MailboxProgress], None]] + _interface: Optional["InterfaceShared"] + _keepalive: bool + _failed: bool + + def __init__(self, mailbox: "Mailbox", slot: _MailboxSlot) -> None: + self._mailbox = mailbox + self._slot = slot + self._on_probe = None + self._on_progress = None + self._interface = None + self._keepalive = False + self._failed = False + + def add_probe(self, on_probe: Callable[[MailboxProbe], None]) -> None: + self._on_probe = on_probe + + def add_progress(self, on_progress: Callable[[MailboxProgress], None]) -> None: + self._on_progress = on_progress + + def _time(self) -> float: + return time.monotonic() + + def wait( # noqa: C901 + self, + *, + timeout: float, + on_probe: Optional[Callable[[MailboxProbe], None]] = None, + on_progress: Optional[Callable[[MailboxProgress], None]] = None, + release: bool = True, + cancel: bool = False, + ) -> 
Optional[pb.Result]: + probe_handle: Optional[MailboxProbe] = None + progress_handle: Optional[MailboxProgress] = None + found: Optional[pb.Result] = None + start_time = self._time() + percent_done = 0.0 + progress_sent = False + wait_timeout = 1.0 + if timeout >= 0: + wait_timeout = min(timeout, wait_timeout) + + on_progress = on_progress or self._on_progress + if on_progress: + progress_handle = MailboxProgress(_handle=self) + + on_probe = on_probe or self._on_probe + if on_probe: + probe_handle = MailboxProbe() + if progress_handle: + progress_handle.add_probe_handle(probe_handle) + + while True: + if self._keepalive and self._interface: + if self._interface._transport_keepalive_failed(): + raise MailboxError("transport failed") + + found, abandoned = self._slot._get_and_clear(timeout=wait_timeout) + if found: + # Always update progress to 100% when done + if on_progress and progress_handle and progress_sent: + progress_handle.set_percent_done(100) + on_progress(progress_handle) + break + if abandoned: + break + now = self._time() + if timeout >= 0: + if now >= start_time + timeout: + # todo: communicate that we timed out + break + if on_probe and probe_handle: + on_probe(probe_handle) + if on_progress and progress_handle: + if timeout > 0: + percent_done = min((now - start_time) / timeout, 1.0) + progress_handle.set_percent_done(percent_done) + on_progress(progress_handle) + if progress_handle._is_stopped: + break + progress_sent = True + if not found and cancel: + self._cancel() + if release: + self._release() + return found + + def _cancel(self) -> None: + mailbox_slot = self.address + if self._interface: + self._interface.publish_cancel(mailbox_slot) + + def _release(self) -> None: + self._mailbox._release_slot(self.address) + + def abandon(self) -> None: + self._slot._notify_abandon() + self._release() + + @property + def _is_failed(self) -> bool: + return self._failed + + def _mark_failed(self) -> None: + self._failed = True + + @property + def address(self) -> str: + return self._slot._address + + +class Mailbox: + _slots: Dict[str, _MailboxSlot] + _keepalive: bool + + def __init__(self) -> None: + self._slots = {} + self._keepalive = False + + def enable_keepalive(self) -> None: + self._keepalive = True + + def wait( + self, + handle: MailboxHandle, + *, + timeout: float, + on_progress: Optional[Callable[[MailboxProgress], None]] = None, + cancel: bool = False, + ) -> Optional[pb.Result]: + return handle.wait(timeout=timeout, on_progress=on_progress, cancel=cancel) + + def _time(self) -> float: + return time.monotonic() + + def wait_all( + self, + handles: List[MailboxHandle], + *, + timeout: float, + on_progress_all: Optional[Callable[[MailboxProgressAll], None]] = None, + ) -> bool: + progress_all_handle: Optional[MailboxProgressAll] = None + + if on_progress_all: + progress_all_handle = MailboxProgressAll() + + wait_all = _MailboxWaitAll() + for handle in handles: + wait_all._add_handle(handle) + if progress_all_handle and handle._on_progress: + progress_handle = MailboxProgress(_handle=handle) + if handle._on_probe: + probe_handle = MailboxProbe() + progress_handle.add_probe_handle(probe_handle) + progress_all_handle.add_progress_handle(progress_handle) + + start_time = self._time() + + while wait_all.active_handles_count > 0: + # Make sure underlying interfaces are still up + if self._keepalive: + for handle in wait_all.active_handles: + if not handle._interface: + continue + if handle._interface._transport_keepalive_failed(): + wait_all._mark_handle_failed(handle) + + # if 
there are no valid handles left, either break or raise exception + if not wait_all.active_handles_count: + if wait_all.failed_handles_count: + wait_all.clear_handles() + raise MailboxError("transport failed") + break + + # wait for next event + wait_all._get_and_clear(timeout=1) + + # TODO: we can do more careful timekeeping and not run probes and progress + # indications until a full second elapses in the case where we found a wait_all + # event. Extra probes should be ok for now. + + if progress_all_handle and on_progress_all: + # Run all probe handles + for progress_handle in progress_all_handle.get_progress_handles(): + for probe_handle in progress_handle.get_probe_handles(): + if ( + progress_handle._handle + and progress_handle._handle._on_probe + ): + progress_handle._handle._on_probe(probe_handle) + + on_progress_all(progress_all_handle) + + now = self._time() + if timeout >= 0 and now >= start_time + timeout: + break + + return wait_all.active_handles_count == 0 + + def deliver(self, result: pb.Result) -> None: + mailbox = result.control.mailbox_slot + slot = self._slots.get(mailbox) + if not slot: + return + slot._deliver(result) + + def _allocate_slot(self) -> _MailboxSlot: + address = _generate_address() + slot = _MailboxSlot(address=address) + self._slots[address] = slot + return slot + + def _release_slot(self, address: str) -> None: + self._slots.pop(address, None) + + def get_handle(self) -> MailboxHandle: + slot = self._allocate_slot() + handle = MailboxHandle(mailbox=self, slot=slot) + return handle + + def _deliver_record( + self, record: pb.Record, interface: "InterfaceShared" + ) -> MailboxHandle: + handle = self.get_handle() + handle._interface = interface + handle._keepalive = self._keepalive + record.control.mailbox_slot = handle.address + try: + interface._publish(record) + except Exception: + interface._transport_mark_failed() + raise + interface._transport_mark_success() + return handle diff --git a/parrot/lib/python3.10/site-packages/wandb/sdk/lib/module.py b/parrot/lib/python3.10/site-packages/wandb/sdk/lib/module.py new file mode 100644 index 0000000000000000000000000000000000000000..3958b82775444bd35fc8bbf60892bd915ee1510b --- /dev/null +++ b/parrot/lib/python3.10/site-packages/wandb/sdk/lib/module.py @@ -0,0 +1,69 @@ +# +import wandb + +from . 
import preinit + + +def set_global( + run=None, + config=None, + log=None, + summary=None, + save=None, + use_artifact=None, + log_artifact=None, + define_metric=None, + alert=None, + plot_table=None, + mark_preempting=None, + log_model=None, + use_model=None, + link_model=None, +): + if run: + wandb.run = run + if config is not None: + wandb.config = config + if log: + wandb.log = log + if summary is not None: + wandb.summary = summary + if save: + wandb.save = save + if use_artifact: + wandb.use_artifact = use_artifact + if log_artifact: + wandb.log_artifact = log_artifact + if define_metric: + wandb.define_metric = define_metric + if plot_table: + wandb.plot_table = plot_table + if alert: + wandb.alert = alert + if mark_preempting: + wandb.mark_preempting = mark_preempting + if log_model: + wandb.log_model = log_model + if use_model: + wandb.use_model = use_model + if link_model: + wandb.link_model = link_model + + +def unset_globals(): + wandb.run = None + wandb.config = preinit.PreInitObject("wandb.config") + wandb.summary = preinit.PreInitObject("wandb.summary") + wandb.log = preinit.PreInitCallable("wandb.log", wandb.wandb_sdk.wandb_run.Run.log) + wandb.save = preinit.PreInitCallable( + "wandb.save", wandb.wandb_sdk.wandb_run.Run.save + ) + wandb.use_artifact = preinit.PreInitCallable( + "wandb.use_artifact", wandb.wandb_sdk.wandb_run.Run.use_artifact + ) + wandb.log_artifact = preinit.PreInitCallable( + "wandb.log_artifact", wandb.wandb_sdk.wandb_run.Run.log_artifact + ) + wandb.define_metric = preinit.PreInitCallable( + "wandb.define_metric", wandb.wandb_sdk.wandb_run.Run.define_metric + ) diff --git a/parrot/lib/python3.10/site-packages/wandb/sdk/lib/paths.py b/parrot/lib/python3.10/site-packages/wandb/sdk/lib/paths.py new file mode 100644 index 0000000000000000000000000000000000000000..1fae7110ec5ffe83d46c18c4263b0745a44264c9 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/wandb/sdk/lib/paths.py @@ -0,0 +1,106 @@ +import os +import platform +from functools import wraps +from pathlib import PurePath, PurePosixPath +from typing import Any, NewType, Union + +# Path _inputs_ should generally accept any kind of path. This is named the same and +# modeled after the hint defined in the Python standard library's `typeshed`: +# https://github.com/python/typeshed/blob/0b1cd5989669544866213807afa833a88f649ee7/stdlib/_typeshed/__init__.pyi#L56-L65 +StrPath = Union[str, "os.PathLike[str]"] + +# A native path to a file on a local filesystem. +FilePathStr = NewType("FilePathStr", str) + +URIStr = NewType("URIStr", str) + + +class LogicalPath(str): + """A string that represents a path relative to an artifact or run. + + The format of the string is always as a POSIX path, e.g. "foo/bar.txt". + + A neat trick is that you can use this class as if it were a PurePosixPath. E.g.: + ``` + >>> path = LogicalPath("foo/bar.txt") + >>> path.parts + ('foo', 'bar.txt') + >>> path.parent / "baz.txt" + 'foo/baz.txt' + >>> type(path.relative_to("foo")) + LogicalPath + ``` + """ + + # It should probably always be a relative path, but that would be a behavior change. 
+ # + # These strings used to be the output of `to_forward_slash_path`, which only works + # with strings and whose behavior is pretty simple: + # ``` + # if platform.system() == "Windows": + # path = path.replace("\\", "/") + # ``` + # + # This results in some weird things, such as backslashes being allowed from + # non-Windows platforms (which would probably break if such an artifact was used + # from Windows) and anchors or absolute paths being allowed. E.g., the Windows path + # "C:\foo\bar.txt" becomes "C:/foo/bar.txt", which then would mount as + # "./artifacts/artifact_name:v0/C:/foo/bar.txt" on MacOS and as + # "./artifacts/artifact_name-v0/C-/foo/bar.txt" on Windows. + # + # This implementation preserves behavior for strings but attempts to sanitize other + # formerly unsupported inputs more aggressively. It uses the `.as_posix()` form of + # pathlib objects rather than the `str()` form to reduce how often identical inputs + # will result in different outputs on different platforms; however, it doesn't alter + # absolute paths or check for prohibited characters etc. + + def __new__(cls, path: StrPath) -> "LogicalPath": + if isinstance(path, LogicalPath): + return super().__new__(cls, path) + if hasattr(path, "as_posix"): + path = PurePosixPath(path.as_posix()) + return super().__new__(cls, str(path)) + if hasattr(path, "__fspath__"): + path = path.__fspath__() # Can be str or bytes. + if isinstance(path, bytes): + path = os.fsdecode(path) + # For historical reasons we have to convert backslashes to forward slashes, but + # only on Windows, and need to do it before any pathlib operations. + if platform.system() == "Windows": + path = path.replace("\\", "/") + # This weird contortion and the one above are because in some unusual cases + # PurePosixPath(path.as_posix()).as_posix() != path.as_posix(). + path = PurePath(path).as_posix() + return super().__new__(cls, str(PurePosixPath(path))) + + def to_path(self) -> PurePosixPath: + """Convert this path to a PurePosixPath.""" + return PurePosixPath(self) + + def __getattr__(self, attr: str) -> Any: + """Act like a subclass of PurePosixPath for all methods not defined on str.""" + try: + result = getattr(self.to_path(), attr) + except AttributeError as e: + raise AttributeError(f"LogicalPath has no attribute {attr!r}") from e + + if isinstance(result, PurePosixPath): + return LogicalPath(result) + + # If the result is a callable (a method), wrap it so that it has the same + # behavior: if the call result returns a PurePosixPath, return a LogicalPath. 
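+        # Assumed illustration: LogicalPath("foo/bar.txt").with_suffix(".md") resolves
+        # `with_suffix` here, and the wrapper below converts the PurePosixPath result
+        # back into LogicalPath("foo/bar.md").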
+ if callable(result): + + @wraps(result) + def wrapper(*args: Any, **kwargs: Any) -> Any: + inner_result = result(*args, **kwargs) + if isinstance(inner_result, PurePosixPath): + return LogicalPath(inner_result) + return inner_result + + return wrapper + return result + + def __truediv__(self, other: StrPath) -> "LogicalPath": + """Act like a PurePosixPath for the / operator, but return a LogicalPath.""" + return LogicalPath(self.to_path() / LogicalPath(other)) diff --git a/parrot/lib/python3.10/site-packages/wandb/sdk/lib/preinit.py b/parrot/lib/python3.10/site-packages/wandb/sdk/lib/preinit.py new file mode 100644 index 0000000000000000000000000000000000000000..624528198b1cab580f95d29eb91b0531378041e8 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/wandb/sdk/lib/preinit.py @@ -0,0 +1,42 @@ +from typing import Any, Callable, Optional + +import wandb + + +class PreInitObject: + def __init__(self, name: str, destination: Optional[Any] = None) -> None: + self._name = name + + if destination is not None: + self.__doc__ = destination.__doc__ + + def __getitem__(self, key: str) -> None: + raise wandb.Error(f"You must call wandb.init() before {self._name}[{key!r}]") + + def __setitem__(self, key: str, value: Any) -> Any: + raise wandb.Error(f"You must call wandb.init() before {self._name}[{key!r}]") + + def __setattr__(self, key: str, value: Any) -> Any: + if not key.startswith("_"): + raise wandb.Error(f"You must call wandb.init() before {self._name}.{key}") + else: + return object.__setattr__(self, key, value) + + def __getattr__(self, key: str) -> Any: + if not key.startswith("_"): + raise wandb.Error(f"You must call wandb.init() before {self._name}.{key}") + else: + raise AttributeError + + +def PreInitCallable( # noqa: N802 + name: str, destination: Optional[Any] = None +) -> Callable: + def preinit_wrapper(*args: Any, **kwargs: Any) -> Any: + raise wandb.Error(f"You must call wandb.init() before {name}()") + + preinit_wrapper.__name__ = str(name) + if destination: + preinit_wrapper.__wrapped__ = destination # type: ignore + preinit_wrapper.__doc__ = destination.__doc__ + return preinit_wrapper diff --git a/parrot/lib/python3.10/site-packages/wandb/sdk/lib/printer.py b/parrot/lib/python3.10/site-packages/wandb/sdk/lib/printer.py new file mode 100644 index 0000000000000000000000000000000000000000..0aeddc126d2a1ff232036aab275e38aa267cdc25 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/wandb/sdk/lib/printer.py @@ -0,0 +1,313 @@ +# Note: this is a helper printer class, this file might go away once we switch to rich console printing + +import itertools +import platform +import sys +from abc import abstractmethod +from typing import Callable, List, Optional, Tuple, Union + +import click + +import wandb + +from . 
import ipython, sparkline + +# Follow the same logic as the python logging module +CRITICAL = 50 +FATAL = CRITICAL +ERROR = 40 +WARNING = 30 +WARN = WARNING +INFO = 20 +DEBUG = 10 +NOTSET = 0 + +_level_to_name = { + CRITICAL: "CRITICAL", + ERROR: "ERROR", + WARNING: "WARNING", + INFO: "INFO", + DEBUG: "DEBUG", + NOTSET: "NOTSET", +} + +_name_to_level = { + "CRITICAL": CRITICAL, + "FATAL": FATAL, + "ERROR": ERROR, + "WARN": WARNING, + "WARNING": WARNING, + "INFO": INFO, + "DEBUG": DEBUG, + "NOTSET": NOTSET, +} + + +class _Printer: + def sparklines(self, series: List[Union[int, float]]) -> Optional[str]: + # Only print sparklines if the terminal is utf-8 + if wandb.util.is_unicode_safe(sys.stdout): + return sparkline.sparkify(series) + return None + + def abort( + self, + ) -> str: + return "Control-C" if platform.system() != "Windows" else "Ctrl-C" + + def display( + self, + text: Union[str, List[str], Tuple[str]], + *, + level: Optional[Union[str, int]] = None, + off: Optional[bool] = None, + default_text: Optional[Union[str, List[str], Tuple[str]]] = None, + ) -> None: + if off: + return + self._display(text, level=level, default_text=default_text) + + @abstractmethod + def _display( + self, + text: Union[str, List[str], Tuple[str]], + *, + level: Optional[Union[str, int]] = None, + default_text: Optional[Union[str, List[str], Tuple[str]]] = None, + ) -> None: + raise NotImplementedError + + @staticmethod + def _sanitize_level(name_or_level: Optional[Union[str, int]]) -> int: + if isinstance(name_or_level, str): + try: + return _name_to_level[name_or_level.upper()] + except KeyError: + raise ValueError( + f"Unknown level name: {name_or_level}, supported levels: {_name_to_level.keys()}" + ) + + if isinstance(name_or_level, int): + return name_or_level + + if name_or_level is None: + return INFO + + raise ValueError(f"Unknown status level {name_or_level}") + + @abstractmethod + def code(self, text: str) -> str: + raise NotImplementedError + + @abstractmethod + def name(self, text: str) -> str: + raise NotImplementedError + + @abstractmethod + def link(self, link: str, text: Optional[str] = None) -> str: + raise NotImplementedError + + @abstractmethod + def emoji(self, name: str) -> str: + raise NotImplementedError + + @abstractmethod + def status(self, text: str, failure: Optional[bool] = None) -> str: + raise NotImplementedError + + @abstractmethod + def files(self, text: str) -> str: + raise NotImplementedError + + @abstractmethod + def grid(self, rows: List[List[str]], title: Optional[str] = None) -> str: + raise NotImplementedError + + @abstractmethod + def panel(self, columns: List[str]) -> str: + raise NotImplementedError + + +class PrinterTerm(_Printer): + def __init__(self) -> None: + super().__init__() + self._html = False + self._progress = itertools.cycle(["-", "\\", "|", "/"]) + + def _display( + self, + text: Union[str, List[str], Tuple[str]], + *, + level: Optional[Union[str, int]] = None, + default_text: Optional[Union[str, List[str], Tuple[str]]] = None, + ) -> None: + text = "\n".join(text) if isinstance(text, (list, tuple)) else text + if default_text is not None: + default_text = ( + "\n".join(default_text) + if isinstance(default_text, (list, tuple)) + else default_text + ) + text = text or default_text + self._display_fn_mapping(level)(text) + + @staticmethod + def _display_fn_mapping(level: Optional[Union[str, int]]) -> Callable[[str], None]: + level = _Printer._sanitize_level(level) + + if level >= CRITICAL: + return wandb.termerror + elif ERROR <= level < CRITICAL: + 
return wandb.termerror + elif WARNING <= level < ERROR: + return wandb.termwarn + elif INFO <= level < WARNING: + return wandb.termlog + elif DEBUG <= level < INFO: + return wandb.termlog + else: + return wandb.termlog + + def progress_update(self, text: str, percent_done: Optional[float] = None) -> None: + wandb.termlog(f"{next(self._progress)} {text}", newline=False) + + def progress_close(self, text: Optional[str] = None) -> None: + text = text or " " * 79 + wandb.termlog(text) + + def code(self, text: str) -> str: + ret: str = click.style(text, bold=True) + return ret + + def name(self, text: str) -> str: + ret: str = click.style(text, fg="yellow") + return ret + + def link(self, link: str, text: Optional[str] = None) -> str: + ret: str = click.style(link, fg="blue", underline=True) + # ret = f"\x1b[m{text or link}\x1b[0m" + # ret = f"\x1b]8;;{link}\x1b\\{ret}\x1b]8;;\x1b\\" + return ret + + def emoji(self, name: str) -> str: + emojis = dict() + if platform.system() != "Windows" and wandb.util.is_unicode_safe(sys.stdout): + emojis = dict( + star="⭐️", + broom="🧹", + rocket="🚀", + gorilla="🦍", + turtle="🐢", + lightning="️⚡", + ) + + return emojis.get(name, "") + + def status(self, text: str, failure: Optional[bool] = None) -> str: + color = "red" if failure else "green" + ret: str = click.style(text, fg=color) + return ret + + def files(self, text: str) -> str: + ret: str = click.style(text, fg="magenta", bold=True) + return ret + + def grid(self, rows: List[List[str]], title: Optional[str] = None) -> str: + max_len = max(len(row[0]) for row in rows) + format_row = " ".join(["{:>{max_len}}", "{}" * (len(rows[0]) - 1)]) + grid = "\n".join([format_row.format(*row, max_len=max_len) for row in rows]) + if title: + return f"{title}\n{grid}\n" + return f"{grid}\n" + + def panel(self, columns: List[str]) -> str: + return "\n" + "\n".join(columns) + + +class PrinterJupyter(_Printer): + def __init__(self) -> None: + super().__init__() + self._html = True + self._progress = ipython.jupyter_progress_bar() + + def _display( + self, + text: Union[str, List[str], Tuple[str]], + *, + level: Optional[Union[str, int]] = None, + default_text: Optional[Union[str, List[str], Tuple[str]]] = None, + ) -> None: + text = "
".join(text) if isinstance(text, (list, tuple)) else text + if default_text is not None: + default_text = ( + "
".join(default_text) + if isinstance(default_text, (list, tuple)) + else default_text + ) + text = text or default_text + self._display_fn_mapping(level)(text) + + @staticmethod + def _display_fn_mapping(level: Optional[Union[str, int]]) -> Callable[[str], None]: + level = _Printer._sanitize_level(level) + + if level >= CRITICAL: + return ipython.display_html + elif ERROR <= level < CRITICAL: + return ipython.display_html + elif WARNING <= level < ERROR: + return ipython.display_html + elif INFO <= level < WARNING: + return ipython.display_html + elif DEBUG <= level < INFO: + return ipython.display_html + else: + return ipython.display_html + + def code(self, text: str) -> str: + return f"{text}" + + def name(self, text: str) -> str: + return f'{text}' + + def link(self, link: str, text: Optional[str] = None) -> str: + return f'{text or link}' + + def emoji(self, name: str) -> str: + return "" + + def status(self, text: str, failure: Optional[bool] = None) -> str: + color = "red" if failure else "green" + return f'{text}' + + def files(self, text: str) -> str: + return f"{text}" + + def progress_update(self, text: str, percent_done: float) -> None: + if self._progress: + self._progress.update(percent_done, text) + + def progress_close(self, _: Optional[str] = None) -> None: + if self._progress: + self._progress.close() + + def grid(self, rows: List[List[str]], title: Optional[str] = None) -> str: + format_row = "".join(["", "{}" * len(rows[0]), ""]) + grid = "".join([format_row.format(*row) for row in rows]) + grid = f'{grid}
'
+        if title:
+            return f"<h3>{title}</h3><br/>{grid}<br/>"
+        return f"{grid}<br/>"
+
+    def panel(self, columns: List[str]) -> str:
+        row = "".join([f'<div class="wandb-col">{col}</div>' for col in columns])
+        return f'{ipython.TABLE_STYLES}<div class="wandb-row">{row}</div>
' + + +Printer = Union[PrinterTerm, PrinterJupyter] + + +def get_printer(_jupyter: bool) -> Printer: + if _jupyter: + return PrinterJupyter() + return PrinterTerm() diff --git a/parrot/lib/python3.10/site-packages/wandb/sdk/lib/proto_util.py b/parrot/lib/python3.10/site-packages/wandb/sdk/lib/proto_util.py new file mode 100644 index 0000000000000000000000000000000000000000..f3a24fc33ed51e15959178692324c5bcd23096d1 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/wandb/sdk/lib/proto_util.py @@ -0,0 +1,90 @@ +# +import json +from typing import TYPE_CHECKING, Any, Dict, Union + +from wandb.proto import wandb_internal_pb2 as pb + +if TYPE_CHECKING: # pragma: no cover + from google.protobuf.internal.containers import RepeatedCompositeFieldContainer + from google.protobuf.message import Message + + from wandb.proto import wandb_telemetry_pb2 as tpb + + +def dict_from_proto_list(obj_list: "RepeatedCompositeFieldContainer") -> Dict[str, Any]: + result: Dict[str, Any] = {} + + for item in obj_list: + # Start from the root of the result dict + current_level = result + + if len(item.nested_key) > 0: + keys = list(item.nested_key) + else: + keys = [item.key] + + for key in keys[:-1]: + if key not in current_level: + current_level[key] = {} + # Move the reference deeper into the nested dictionary + current_level = current_level[key] + + # Set the value at the final key location, parsing JSON from the value_json field + final_key = keys[-1] + current_level[final_key] = json.loads(item.value_json) + + return result + + +def _result_from_record(record: "pb.Record") -> "pb.Result": + result = pb.Result(uuid=record.uuid, control=record.control) + return result + + +def _assign_record_num(record: "pb.Record", record_num: int) -> None: + record.num = record_num + + +def _assign_end_offset(record: "pb.Record", end_offset: int) -> None: + record.control.end_offset = end_offset + + +def proto_encode_to_dict( + pb_obj: Union["tpb.TelemetryRecord", "pb.MetricRecord"], +) -> Dict[int, Any]: + data: Dict[int, Any] = dict() + fields = pb_obj.ListFields() + for desc, value in fields: + if desc.name.startswith("_"): + continue + if desc.type == desc.TYPE_STRING: + data[desc.number] = value + elif desc.type == desc.TYPE_INT32: + data[desc.number] = value + elif desc.type == desc.TYPE_ENUM: + data[desc.number] = value + elif desc.type == desc.TYPE_MESSAGE: + nested = value.ListFields() + bool_msg = all(d.type == d.TYPE_BOOL for d, _ in nested) + if bool_msg: + items = [d.number for d, v in nested if v] + if items: + data[desc.number] = items + else: + # TODO: for now this code only handles sub-messages with strings + md = {} + for d, v in nested: + if not v or d.type != d.TYPE_STRING: + continue + md[d.number] = v + data[desc.number] = md + return data + + +def message_to_dict( + message: "Message", +) -> Dict[str, Any]: + """Convert a protobuf message into a dictionary.""" + from google.protobuf.json_format import MessageToDict + + return MessageToDict(message, preserving_proto_field_name=True) diff --git a/parrot/lib/python3.10/site-packages/wandb/sdk/lib/redirect.py b/parrot/lib/python3.10/site-packages/wandb/sdk/lib/redirect.py new file mode 100644 index 0000000000000000000000000000000000000000..bfe232d617d7ed046c44236d6c99230ce9d7e29e --- /dev/null +++ b/parrot/lib/python3.10/site-packages/wandb/sdk/lib/redirect.py @@ -0,0 +1,845 @@ +try: + import fcntl + import pty + import termios + import tty +except ImportError: # windows + pty = tty = termios = fcntl = None # type: ignore + +import itertools +import 
logging +import os +import queue +import re +import signal +import struct +import sys +import threading +import time +from collections import defaultdict + +import wandb + + +class _Numpy: # fallback in case numpy is not available + def where(self, x): + return ([i for i in range(len(x)) if x[i]],) + + def diff(self, x): + return [x[i + 1] - x[i] for i in range(len(x) - 1)] + + def arange(self, x): + class Arr(list): + def __getitem__(self, s): + if isinstance(s, slice): + self._start = s.start + return self + return super().__getitem__(s) + + def __getslice__(self, i, j): + self._start = i + return self + + def __iadd__(self, i): # type: ignore + for j in range(self._start, len(self)): + self[j] += i + + return Arr(range(x)) + + +try: + import numpy as np # type: ignore +except ImportError: + np = _Numpy() # type: ignore + + +logger = logging.getLogger("wandb") + +_redirects = {"stdout": None, "stderr": None} + + +ANSI_CSI_RE = re.compile("\001?\033\\[((?:\\d|;)*)([a-zA-Z])\002?") +ANSI_OSC_RE = re.compile("\001?\033\\]([^\a]*)(\a)\002?") + +_LAST_WRITE_TOKEN = b"L@stWr!t3T0k3n" + +SEP_RE = re.compile( + "\r|\n|" + # Unprintable ascii characters: + + "|".join([chr(i) for i in range(2**8) if repr(chr(i)).startswith("'\\x")]) +) + +ANSI_FG = list(map(str, itertools.chain(range(30, 40), range(90, 98)))) +ANSI_BG = list(map(str, itertools.chain(range(40, 50), range(100, 108)))) + +ANSI_FG_DEFAULT = "39" +ANSI_BG_DEFAULT = "49" + +ANSI_RESET = "0" + +ANSI_STYLES = { + "1": "bold", + "2": "/bold", + "3": "italics", + "4": "underscore", + "5": "blink", + "7": "reverse", + "9": "strikethrough", + "22": "/bold", + "23": "/italics", + "24": "/underscore", + "25": "/blink", + "27": "/reverse", + "29": "/strikethrough", +} + +ANSI_STYLES_REV = {v: k for k, v in ANSI_STYLES.items()} + + +CSI = "\033[" + + +def _get_char(code): + return "\033[" + str(code) + "m" + + +class Char: + """Class encapsulating a single character, its foreground, background and style attributes.""" + + __slots__ = ( + "data", + "fg", + "bg", + "bold", + "italics", + "underscore", + "blink", + "strikethrough", + "reverse", + ) + + def __init__( + self, + data=" ", + fg=ANSI_FG_DEFAULT, + bg=ANSI_BG_DEFAULT, + bold=False, + italics=False, + underscore=False, + blink=False, + strikethrough=False, + reverse=False, + ): + self.data = data + self.fg = fg + self.bg = bg + self.bold = bold + self.italics = italics + self.underscore = underscore + self.blink = blink + self.strikethrough = strikethrough + self.reverse = reverse + + def reset(self): + # Reset everything other than data to defaults + default = self.__class__() + for k in self.__slots__[1:]: + self[k] = default[k] + + def __getitem__(self, k): + return getattr(self, k) + + def __setitem__(self, k, v): + setattr(self, k, v) + + def copy(self, **kwargs): + attrs = {} + for k in self.__slots__: + if k in kwargs: + attrs[k] = kwargs[k] + else: + attrs[k] = self[k] + return self.__class__(**attrs) + + def __eq__(self, other): + for k in self.__slots__: + if self[k] != other[k]: + return False + return True + + +_defchar = Char() + + +class Cursor: + """A 2D cursor. + + Attributes: + x: x-coordinate. + y: y-coordinate. + char: the character to inherit colors and styles from. + """ + + __slots__ = ("x", "y", "char") + + def __init__(self, x=0, y=0, char=None): + if char is None: + char = Char() + self.x = x + self.y = y + self.char = char + + +class TerminalEmulator: + """An FSM emulating a terminal. + + Characters are stored in a 2D matrix (buffer) indexed by the cursor. 
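+
+    For example, writing "ab", then a carriage return, then "c" leaves the first
+    buffer line as "cb": the carriage return moves the cursor back to column 0 and
+    the "c" overwrites the "a" in place.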
+ """ + + _MAX_LINES = 100 + + def __init__(self): + self.buffer = defaultdict(lambda: defaultdict(lambda: _defchar)) + self.cursor = Cursor() + self._num_lines = None # Cache + + # For diffing: + self._prev_num_lines = None + self._prev_last_line = None + + def cursor_up(self, n=1): + n = min(n, self.cursor.y) + self.cursor.y -= n + + def cursor_down(self, n=1): + self.cursor.y += n + + def cursor_left(self, n=1): + n = min(n, self.cursor.x) + self.cursor.x -= n + + def cursor_right(self, n=1): + self.cursor.x += n + + def carriage_return(self): + self.cursor.x = 0 + + def cursor_position(self, line, column): + self.cursor.x = min(column, 1) - 1 + self.cursor.y = min(line, 1) - 1 + + def cursor_column(self, column): + self.cursor.x = min(column, 1) - 1 + + def cursor_line(self, line): + self.cursor.y = min(line, 1) - 1 + + def linefeed(self): + self.cursor_down() + self.carriage_return() + + def _get_line_len(self, n): + if n not in self.buffer: + return 0 + line = self.buffer[n] + if not line: + return 0 + n = max(line.keys()) + for i in range(n, -1, -1): + if line[i] != _defchar: + return i + 1 + return 0 + + @property + def num_lines(self): + if self._num_lines is not None: + return self._num_lines + ret = 0 + if self.buffer: + n = max(self.buffer.keys()) + for i in range(n, -1, -1): + if self._get_line_len(i): + ret = i + 1 + break + self._num_lines = ret + return ret + + def display(self): + return [ + [self.buffer[i][j].data for j in range(self._get_line_len(i))] + for i in range(self.num_lines) + ] + + def erase_screen(self, mode=0): + if mode == 0: + for i in range(self.cursor.y + 1, self.num_lines): + if i in self.buffer: + del self.buffer[i] + self.erase_line(mode) + if mode == 1: + for i in range(self.cursor.y): + if i in self.buffer: + del self.buffer[i] + self.erase_line(mode) + elif mode == 2 or mode == 3: + self.buffer.clear() + + def erase_line(self, mode=0): + curr_line = self.buffer[self.cursor.y] + if mode == 0: + for i in range(self.cursor.x, self._get_line_len(self.cursor.y)): + if i in curr_line: + del curr_line[i] + elif mode == 1: + for i in range(self.cursor.x + 1): + if i in curr_line: + del curr_line[i] + else: + curr_line.clear() + + def insert_lines(self, n=1): + for i in range(self.num_lines - 1, self.cursor.y, -1): + self.buffer[i + n] = self.buffer[i] + for i in range(self.cursor.y + 1, self.cursor.y + 1 + n): + if i in self.buffer: + del self.buffer[i] + + def _write_plain_text(self, plain_text): + self.buffer[self.cursor.y].update( + [ + (self.cursor.x + i, self.cursor.char.copy(data=c)) + for i, c in enumerate(plain_text) + ] + ) + self.cursor.x += len(plain_text) + + def _write_text(self, text): + prev_end = 0 + for match in SEP_RE.finditer(text): + start, end = match.span() + self._write_plain_text(text[prev_end:start]) + prev_end = end + c = match.group() + if c == "\n": + self.linefeed() + elif c == "\r": + self.carriage_return() + elif c == "\b": + self.cursor_left() + else: + continue + self._write_plain_text(text[prev_end:]) + + def _remove_osc(self, text): + return re.sub(ANSI_OSC_RE, "", text) + + def write(self, data): + self._num_lines = None # invalidate cache + data = self._remove_osc(data) + prev_end = 0 + for match in ANSI_CSI_RE.finditer(data): + start, end = match.span() + text = data[prev_end:start] + csi = data[start:end] + prev_end = end + self._write_text(text) + self._handle_csi(csi, *match.groups()) + self._write_text(data[prev_end:]) + + def _handle_csi(self, csi, params, command): + try: + if command == "m": + p = 
params.split(";")[0] + if not p: + p = "0" + if p in ANSI_FG: + self.cursor.char.fg = p + elif p in ANSI_BG: + self.cursor.char.bg = p + elif p == ANSI_RESET: + self.cursor.char.reset() + elif p in ANSI_STYLES: + style = ANSI_STYLES[p] + off = style.startswith("/") + if off: + style = style[1:] + self.cursor.char[style] = not off + else: + abcd = { + "A": "cursor_up", + "B": "cursor_down", + "C": "cursor_right", + "D": "cursor_left", + } + cursor_fn = abcd.get(command) + if cursor_fn: + getattr(self, cursor_fn)(int(params) if params else 1) + elif command == "J": + p = params.split(";")[0] + p = int(p) if p else 0 + self.erase_screen(p) + elif command == "K": + p = params.split(";")[0] + p = int(p) if p else 0 + self.erase_line(p) + elif command == "L": + p = int(params) if params else 1 + self.insert_lines(p) + elif command in "Hf": + p = params.split(";") + if len(p) == 2: + p = (int(p[0]), int(p[1])) + elif len(p) == 1: + p = (int(p[0]), 1) + else: + p = (1, 1) + self.cursor_position(*p) + except Exception: + pass + + def _get_line(self, n): + line = self.buffer[n] + line_len = self._get_line_len(n) + # We have to loop through each character in the line and check if foreground, + # background and other attributes (italics, bold, underline, etc) of the ith + # character are different from those of the (i-1)th character. If different, the + # appropriate ascii character for switching the color/attribute should be + # appended to the output string before appending the actual character. This loop + # and subsequent checks can be expensive, especially because 99% of terminal + # output use default colors and formatting. Even in outputs that do contain + # colors and styles, its unlikely that they will change on a per character + # basis. + + # So instead we create a character list without any ascii codes (`out`), and a + # list of all the foregrounds in the line (`fgs`) on which we call np.diff() and + # np.where() to find the indices where the foreground change, and insert the + # ascii characters in the output list (`out`) on those indices. All of this is + # the done only if there are more than 1 foreground color in the line in the + # first place (`if len(set(fgs)) > 1 else None`). Same logic is repeated for + # background colors and other attributes. 
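+        #
+        # Small illustrative sketch (values assumed): with the default foreground
+        # prepended, fgs might be [39, 31, 31, 39]; np.diff(fgs) is [-8, 0, 8], so
+        # np.where(np.diff(fgs))[0] is [0, 2] and escape codes only get inserted
+        # before characters 0 and 2.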
+ + out = [line[i].data for i in range(line_len)] + + # for dynamic insert using original indices + idxs = np.arange(line_len) + insert = lambda i, c: (out.insert(idxs[i], c), idxs[i:].__iadd__(1)) # noqa + + fgs = [int(_defchar.fg)] + [int(line[i].fg) for i in range(line_len)] + [ + insert(i, _get_char(line[int(i)].fg)) for i in np.where(np.diff(fgs))[0] + ] if len(set(fgs)) > 1 else None + bgs = [int(_defchar.bg)] + [int(line[i].bg) for i in range(line_len)] + [ + insert(i, _get_char(line[int(i)].bg)) for i in np.where(np.diff(bgs))[0] + ] if len(set(bgs)) > 1 else None + attrs = { + k: [False] + [line[i][k] for i in range(line_len)] + for k in Char.__slots__[3:] + } + [ + [ + insert(i, _get_char(ANSI_STYLES_REV[k if line[int(i)][k] else "/" + k])) + for i in np.where(np.diff(v))[0] + ] + for k, v in attrs.items() + if any(v) + ] + return "".join(out) + + def read(self): + num_lines = self.num_lines + if self._prev_num_lines is None: + ret = os.linesep.join(map(self._get_line, range(num_lines))) + if ret: + ret += os.linesep + else: + return ret + else: + curr_line = self._get_line(self._prev_num_lines - 1) + if curr_line == self._prev_last_line: + if num_lines == self._prev_num_lines: + return "" + ret = ( + os.linesep.join( + map(self._get_line, range(self._prev_num_lines, num_lines)) + ) + + os.linesep + ) + else: + ret = ( + "\r" + + os.linesep.join( + map(self._get_line, range(self._prev_num_lines - 1, num_lines)) + ) + + os.linesep + ) + if num_lines > self._MAX_LINES: + shift = num_lines - self._MAX_LINES + for i in range(shift, num_lines): + self.buffer[i - shift] = self.buffer[i] + for i in range(self._MAX_LINES, max(self.buffer.keys())): + if i in self.buffer: + del self.buffer[i] + self.cursor.y -= min(self.cursor.y, shift) + self._num_lines = num_lines = self._MAX_LINES + self._prev_num_lines = num_lines + self._prev_last_line = self._get_line(num_lines - 1) + return ret + + +_MIN_CALLBACK_INTERVAL = 2 # seconds + + +class RedirectBase: + def __init__(self, src, cbs=()): + """# Arguments. + + `src`: Source stream to be redirected. "stdout" or "stderr". + `cbs`: tuple/list of callbacks. Each callback should take exactly 1 argument (bytes). + + """ + assert hasattr(sys, src) + self.src = src + self.cbs = cbs + + @property + def src_stream(self): + return getattr(sys, "__{}__".format(self.src)) + + @property + def src_fd(self): + return self.src_stream.fileno() + + @property + def src_wrapped_stream(self): + return getattr(sys, self.src) + + def save(self): + pass + + def install(self): + curr_redirect = _redirects.get(self.src) + if curr_redirect and curr_redirect != self: + curr_redirect.uninstall() + _redirects[self.src] = self + + def uninstall(self): + if _redirects[self.src] != self: + return + _redirects[self.src] = None + + +class StreamWrapper(RedirectBase): + """Patches the write method of current sys.stdout/sys.stderr.""" + + def __init__(self, src, cbs=()): + super().__init__(src=src, cbs=cbs) + self._installed = False + self._emulator = TerminalEmulator() + + def _emulator_write(self): + while True: + if self._queue.empty(): + if self._stopped.is_set(): + return + time.sleep(0.5) + continue + data = [] + while not self._queue.empty(): + data.append(self._queue.get()) + if self._stopped.is_set() and sum(map(len, data)) > 100000: + wandb.termlog("Terminal output too large. 
Logging without processing.") + self.flush() + [self.flush(line.encode("utf-8")) for line in data] + return + try: + self._emulator.write("".join(data)) + except Exception: + pass + + def _callback(self): + while not (self._stopped.is_set() and self._queue.empty()): + self.flush() + time.sleep(_MIN_CALLBACK_INTERVAL) + + def install(self): + super().install() + if self._installed: + return + stream = self.src_wrapped_stream + old_write = stream.write + self._prev_callback_timestamp = time.time() + self._old_write = old_write + + def write(data): + self._old_write(data) + self._queue.put(data) + + stream.write = write + + self._queue = queue.Queue() + self._stopped = threading.Event() + self._emulator_write_thread = threading.Thread(target=self._emulator_write) + self._emulator_write_thread.daemon = True + self._emulator_write_thread.start() + + if not wandb.run or wandb.run._settings.mode == "online": + self._callback_thread = threading.Thread(target=self._callback) + self._callback_thread.daemon = True + self._callback_thread.start() + + self._installed = True + + def flush(self, data=None): + if data is None: + try: + data = self._emulator.read().encode("utf-8") + except Exception: + pass + if data: + for cb in self.cbs: + try: + cb(data) + except Exception: + pass # TODO(frz) + + def uninstall(self): + if not self._installed: + return + self.src_wrapped_stream.write = self._old_write + + self._stopped.set() + self._emulator_write_thread.join(timeout=5) + if self._emulator_write_thread.is_alive(): + wandb.termlog(f"Processing terminal output ({self.src})...") + self._emulator_write_thread.join() + wandb.termlog("Done.") + self.flush() + + self._installed = False + super().uninstall() + + +class StreamRawWrapper(RedirectBase): + """Patches the write method of current sys.stdout/sys.stderr. + + Captures data in a raw form rather than using the emulator + """ + + def __init__(self, src, cbs=()): + super().__init__(src=src, cbs=cbs) + self._installed = False + + def save(self): + stream = self.src_wrapped_stream + self._old_write = stream.write + + def install(self): + super().install() + if self._installed: + return + stream = self.src_wrapped_stream + self._prev_callback_timestamp = time.time() + + def write(data): + self._old_write(data) + for cb in self.cbs: + try: + cb(data) + except Exception: + # TODO: Figure out why this was needed and log or error out appropriately + # it might have been strange terminals? maybe shutdown cases? 
+ pass + + stream.write = write + self._installed = True + + def uninstall(self): + if not self._installed: + return + self.src_wrapped_stream.write = self._old_write + self._installed = False + super().uninstall() + + +class _WindowSizeChangeHandler: + def __init__(self): + self._fds = set() + + def _register(self): + old_handler = signal.signal(signal.SIGWINCH, lambda *_: None) + + def handler(signum, frame): + if callable(old_handler): + old_handler(signum, frame) + self.handle_window_size_change() + + signal.signal(signal.SIGWINCH, handler) + self._old_handler = old_handler + + def _unregister(self): + signal.signal(signal.SIGWINCH, self._old_handler) + + def add_fd(self, fd): + if not self._fds: + self._register() + self._fds.add(fd) + self.handle_window_size_change() + + def remove_fd(self, fd): + if fd in self._fds: + self._fds.remove(fd) + if not self._fds: + self._unregister() + + def handle_window_size_change(self): + try: + win_size = fcntl.ioctl(0, termios.TIOCGWINSZ, "\0" * 8) + rows, cols, xpix, ypix = struct.unpack("HHHH", win_size) + # Note: IOError not subclass of OSError in python 2.x + except OSError: # eg. in MPI we can't do this. + return + if cols == 0: + return + win_size = struct.pack("HHHH", rows, cols, xpix, ypix) + for fd in self._fds: + fcntl.ioctl(fd, termios.TIOCSWINSZ, win_size) + + +_WSCH = _WindowSizeChangeHandler() + + +class Redirect(RedirectBase): + """Redirect low level file descriptors.""" + + def __init__(self, src, cbs=()): + super().__init__(src=src, cbs=cbs) + self._installed = False + self._emulator = TerminalEmulator() + + def _pipe(self): + if pty: + r, w = pty.openpty() + else: + r, w = os.pipe() + return r, w + + def install(self): + super().install() + if self._installed: + return + self._pipe_read_fd, self._pipe_write_fd = self._pipe() + if os.isatty(self._pipe_read_fd): + _WSCH.add_fd(self._pipe_read_fd) + self._orig_src_fd = os.dup(self.src_fd) + self._orig_src = os.fdopen(self._orig_src_fd, "wb", 0) + os.dup2(self._pipe_write_fd, self.src_fd) + self._installed = True + self._queue = queue.Queue() + self._stopped = threading.Event() + self._pipe_relay_thread = threading.Thread(target=self._pipe_relay) + self._pipe_relay_thread.daemon = True + self._pipe_relay_thread.start() + self._emulator_write_thread = threading.Thread(target=self._emulator_write) + self._emulator_write_thread.daemon = True + self._emulator_write_thread.start() + if not wandb.run or wandb.run._settings.mode == "online": + self._callback_thread = threading.Thread(target=self._callback) + self._callback_thread.daemon = True + self._callback_thread.start() + + def uninstall(self): + if not self._installed: + return + self._installed = False + # If the user printed a very long string (millions of chars) right before wandb.finish(), + # it will take a while for it to reach pipe relay. 1 second is enough time for ~5 million chars. + time.sleep(1) + self._stopped.set() + os.dup2(self._orig_src_fd, self.src_fd) + os.write(self._pipe_write_fd, _LAST_WRITE_TOKEN) + self._pipe_relay_thread.join() + os.close(self._pipe_read_fd) + os.close(self._pipe_write_fd) + + t = threading.Thread( + target=self.src_wrapped_stream.flush + ) # Calling flush() from the current thread does not flush the buffer instantly. 
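+        # (Assumed rationale: doing the flush in a helper thread with join(timeout=10)
+        # below bounds how long shutdown can block if the wrapped stream never flushes.)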
+ t.start() + t.join(timeout=10) + + self._emulator_write_thread.join(timeout=5) + if self._emulator_write_thread.is_alive(): + wandb.termlog(f"Processing terminal output ({self.src})...") + self._emulator_write_thread.join() + wandb.termlog("Done.") + self.flush() + + _WSCH.remove_fd(self._pipe_read_fd) + super().uninstall() + + def flush(self, data=None): + if data is None: + try: + data = self._emulator.read().encode("utf-8") + except Exception: + pass + if data: + for cb in self.cbs: + try: + cb(data) + except Exception: + pass # TODO(frz) + + def _callback(self): + while not self._stopped.is_set(): + self.flush() + time.sleep(_MIN_CALLBACK_INTERVAL) + + def _pipe_relay(self): + while True: + try: + brk = False + data = os.read(self._pipe_read_fd, 4096) + if self._stopped.is_set(): + if _LAST_WRITE_TOKEN not in data: + # _LAST_WRITE_TOKEN could have gotten split up at the 4096 border + n = len(_LAST_WRITE_TOKEN) + while n and data[-n:] != _LAST_WRITE_TOKEN[:n]: + n -= 1 + if n: + data += os.read( + self._pipe_read_fd, len(_LAST_WRITE_TOKEN) - n + ) + if _LAST_WRITE_TOKEN in data: + data = data.replace(_LAST_WRITE_TOKEN, b"") + brk = True + i = self._orig_src.write(data) + if i is not None: # python 3 w/ unbuffered i/o: we need to keep writing + while i < len(data): + i += self._orig_src.write(data[i:]) + self._queue.put(data) + if brk: + return + except OSError: + return + + def _emulator_write(self): + while True: + if self._queue.empty(): + if self._stopped.is_set(): + return + time.sleep(0.5) + continue + data = [] + while not self._queue.empty(): + data.append(self._queue.get()) + if self._stopped.is_set() and sum(map(len, data)) > 100000: + wandb.termlog("Terminal output too large. Logging without processing.") + self.flush() + [self.flush(line) for line in data] + return + try: + self._emulator.write(b"".join(data).decode("utf-8")) + except Exception: + pass diff --git a/parrot/lib/python3.10/site-packages/wandb/sdk/lib/reporting.py b/parrot/lib/python3.10/site-packages/wandb/sdk/lib/reporting.py new file mode 100644 index 0000000000000000000000000000000000000000..a9e6cad9d2a328986d6eca79d2600f98e4613ef5 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/wandb/sdk/lib/reporting.py @@ -0,0 +1,99 @@ +"""reporting.""" + +import logging + +logger = logging.getLogger("wandb") + + +class _Reporter: + def __init__(self, settings): + self._settings = settings + self._errors = [] + self._warnings = [] + self._num_errors = 0 + self._num_warnings = 0 + self._context = dict() + + def error(self, __s, *args): + pass + + def warning(self, __s, *args): + show = self._settings.show_warnings + summary = self._settings.summary_warnings + if show is not None or summary is not None: + s = __s % args + self._num_warnings += 1 + if show is not None: + if self._num_warnings <= show or show == 0: + print("[WARNING]", s) + if self._num_warnings == show: + print("not showing any more warnings") + if summary is not None: + if self._num_warnings <= summary or summary == 0: + self._warnings.append(s) + + def info(self, __s, *args): + if self._settings.show_info: + print(("[INFO]" + __s) % args) + + def internal(self, __s, *args): + pass + + def problem(self, bool, __s=None, *args): + pass + + def set_context(self, __d=None, **kwargs): + if __d: + self._context.update(__d) + self._context.update(**kwargs) + + def clear_context(self, keys=None): + if keys is None: + self._context = dict() + return + for k in keys: + self._context.pop(k, None) + + @property + def warning_count(self): + return 
self._num_warnings + + @property + def error_count(self): + return self._num_errors + + @property + def warning_lines(self): + return self._warnings + + @property + def error_lines(self): + return self._errors + + +class Reporter: + _instance = None + + def __init__(self, settings=None): + if Reporter._instance is not None: + return + if settings is None: + logging.error("internal issue: reporter not setup") + + Reporter._instance = _Reporter(settings) + + def __getattr__(self, name): + return getattr(self._instance, name) + + +def setup_reporter(settings): + # fixme: why? + # if not settings.is_frozen(): + # logging.error("internal issue: settings not frozen") + r = Reporter(settings=settings) + return r + + +def get_reporter(): + r = Reporter() + return r diff --git a/parrot/lib/python3.10/site-packages/wandb/sdk/lib/retry.py b/parrot/lib/python3.10/site-packages/wandb/sdk/lib/retry.py new file mode 100644 index 0000000000000000000000000000000000000000..58df2874d2a73a03620b1588fb5efc54a9fb2fdf --- /dev/null +++ b/parrot/lib/python3.10/site-packages/wandb/sdk/lib/retry.py @@ -0,0 +1,289 @@ +import abc +import asyncio +import datetime +import functools +import logging +import os +import random +import threading +import time +from typing import Any, Awaitable, Callable, Generic, Optional, Tuple, Type, TypeVar + +from requests import HTTPError + +import wandb +from wandb.util import CheckRetryFnType + +from .mailbox import ContextCancelledError + +logger = logging.getLogger(__name__) + + +# To let tests mock out the retry logic's now()/sleep() funcs, this file +# should only use these variables, not call the stdlib funcs directly. +NOW_FN = datetime.datetime.now +SLEEP_FN = time.sleep +SLEEP_ASYNC_FN = asyncio.sleep + + +class TransientError(Exception): + """Exception type designated for errors that may only be temporary. + + Can have its own message and/or wrap another exception. + """ + + def __init__( + self, msg: Optional[str] = None, exc: Optional[BaseException] = None + ) -> None: + super().__init__(msg) + self.message = msg + self.exception = exc + + +_R = TypeVar("_R") + + +class Retry(Generic[_R]): + """Create a retryable version of a function. + + Calling this will call the passed function, retrying if any exceptions in + retryable_exceptions are caught, with exponential backoff. 
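+
+    Illustrative usage (assumed caller code, not part of this module):
+
+        get_with_retries = Retry(session.get, retryable_exceptions=(TransientError,))
+        response = get_with_retries(url, retry_timedelta=datetime.timedelta(minutes=5))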
+ """ + + MAX_SLEEP_SECONDS = 5 * 60 + + def __init__( + self, + call_fn: Callable[..., _R], + retry_timedelta: Optional[datetime.timedelta] = None, + retry_cancel_event: Optional[threading.Event] = None, + num_retries: Optional[int] = None, + check_retry_fn: CheckRetryFnType = lambda e: True, + retryable_exceptions: Optional[Tuple[Type[Exception], ...]] = None, + error_prefix: str = "Network error", + retry_callback: Optional[Callable[[int, str], Any]] = None, + ) -> None: + self._call_fn = call_fn + self._check_retry_fn = check_retry_fn + self._error_prefix = error_prefix + self._last_print = datetime.datetime.now() - datetime.timedelta(minutes=1) + self._retry_timedelta = retry_timedelta + self._retry_cancel_event = retry_cancel_event + self._num_retries = num_retries + if retryable_exceptions is not None: + self._retryable_exceptions = retryable_exceptions + else: + self._retryable_exceptions = (TransientError,) + self._index = 0 + self.retry_callback = retry_callback + + def _sleep_check_cancelled( + self, wait_seconds: float, cancel_event: Optional[threading.Event] + ) -> bool: + if not cancel_event: + SLEEP_FN(wait_seconds) + return False + cancelled = cancel_event.wait(wait_seconds) + return cancelled + + @property + def num_iters(self) -> int: + """The number of iterations the previous __call__ retried.""" + return self._num_iter + + def __call__(self, *args: Any, **kwargs: Any) -> _R: # noqa: C901 + """Call the wrapped function, with retries. + + Arguments: + retry_timedelta (kwarg): amount of time to retry before giving up. + sleep_base (kwarg): amount of time to sleep upon first failure, all other sleeps + are derived from this one. + """ + retry_timedelta = kwargs.pop("retry_timedelta", self._retry_timedelta) + if retry_timedelta is None: + retry_timedelta = datetime.timedelta(days=365) + + retry_cancel_event = kwargs.pop("retry_cancel_event", self._retry_cancel_event) + + num_retries = kwargs.pop("num_retries", self._num_retries) + if num_retries is None: + num_retries = 1000000 + + if os.environ.get("WANDB_TEST"): + num_retries = 0 + + sleep_base: float = kwargs.pop("retry_sleep_base", 1) + + # an extra function to allow performing more logic on the filtered exception + check_retry_fn: CheckRetryFnType = kwargs.pop( + "check_retry_fn", self._check_retry_fn + ) + + sleep = sleep_base + now = NOW_FN() + start_time = now + start_time_triggered = None + + self._num_iter = 0 + + while True: + try: + result = self._call_fn(*args, **kwargs) + # Only print resolved attempts once every minute + if self._num_iter > 2 and now - self._last_print > datetime.timedelta( + minutes=1 + ): + self._last_print = NOW_FN() + if self.retry_callback: + self.retry_callback( + 200, + "{} resolved after {}, resuming normal operation.".format( + self._error_prefix, NOW_FN() - start_time + ), + ) + return result + except self._retryable_exceptions as e: + # if the secondary check fails, re-raise + retry_timedelta_triggered = check_retry_fn(e) + if not retry_timedelta_triggered: + raise + + # always enforce num_retries no matter which type of exception was seen + if self._num_iter >= num_retries: + raise + + now = NOW_FN() + + # handle a triggered secondary check which could have a shortened timeout + if isinstance(retry_timedelta_triggered, datetime.timedelta): + # save the time of the first secondary trigger + if not start_time_triggered: + start_time_triggered = now + + # make sure that we haven't run out of time from secondary trigger + if now - start_time_triggered >= retry_timedelta_triggered: + 
raise + + # always enforce the default timeout from start of retries + if now - start_time >= retry_timedelta: + raise + + if self._num_iter == 2: + logger.info("Retry attempt failed:", exc_info=e) + if ( + isinstance(e, HTTPError) + and e.response is not None + and self.retry_callback is not None + ): + self.retry_callback(e.response.status_code, e.response.text) + else: + # todo: would like to catch other errors, eg wandb.errors.Error, ConnectionError etc + # but some of these can be raised before the retry handler thread (RunStatusChecker) is + # spawned in wandb_init + wandb.termlog( + "{} ({}), entering retry loop.".format( + self._error_prefix, e.__class__.__name__ + ) + ) + # if wandb.env.is_debug(): + # traceback.print_exc() + cancelled = self._sleep_check_cancelled( + sleep + random.random() * 0.25 * sleep, cancel_event=retry_cancel_event + ) + if cancelled: + raise ContextCancelledError("retry timeout") + sleep *= 2 + if sleep > self.MAX_SLEEP_SECONDS: + sleep = self.MAX_SLEEP_SECONDS + now = NOW_FN() + + self._num_iter += 1 + + +_F = TypeVar("_F", bound=Callable) + + +def retriable(*args: Any, **kargs: Any) -> Callable[[_F], _F]: + def decorator(fn: _F) -> _F: + retrier: Retry[Any] = Retry(fn, *args, **kargs) + + @functools.wraps(fn) + def wrapped_fn(*args: Any, **kargs: Any) -> Any: + return retrier(*args, **kargs) + + return wrapped_fn # type: ignore + + return decorator + + +class Backoff(abc.ABC): + """A backoff strategy: decides whether to sleep or give up when an exception is raised.""" + + @abc.abstractmethod + def next_sleep_or_reraise(self, exc: Exception) -> datetime.timedelta: + raise NotImplementedError # pragma: no cover + + +class ExponentialBackoff(Backoff): + """Jittered exponential backoff: sleep times increase ~exponentially up to some limit.""" + + def __init__( + self, + initial_sleep: datetime.timedelta, + max_sleep: datetime.timedelta, + max_retries: Optional[int] = None, + timeout_at: Optional[datetime.datetime] = None, + ) -> None: + self._next_sleep = min(max_sleep, initial_sleep) + self._max_sleep = max_sleep + self._remaining_retries = max_retries + self._timeout_at = timeout_at + + def next_sleep_or_reraise(self, exc: Exception) -> datetime.timedelta: + if self._remaining_retries is not None: + if self._remaining_retries <= 0: + raise exc + self._remaining_retries -= 1 + + if self._timeout_at is not None and NOW_FN() > self._timeout_at: + raise exc + + result, self._next_sleep = ( + self._next_sleep, + min(self._max_sleep, self._next_sleep * (1 + random.random())), + ) + + return result + + +class FilteredBackoff(Backoff): + """Re-raise any exceptions that fail a predicate; delegate others to another Backoff.""" + + def __init__(self, filter: Callable[[Exception], bool], wrapped: Backoff) -> None: + self._filter = filter + self._wrapped = wrapped + + def next_sleep_or_reraise(self, exc: Exception) -> datetime.timedelta: + if not self._filter(exc): + raise exc + return self._wrapped.next_sleep_or_reraise(exc) + + +async def retry_async( + backoff: Backoff, + fn: Callable[..., Awaitable[_R]], + *args: Any, + on_exc: Optional[Callable[[Exception], None]] = None, + **kwargs: Any, +) -> _R: + """Call `fn` repeatedly until either it succeeds, or `backoff` decides we should give up. + + Each time `fn` fails, `on_exc` is called with the exception. 
+ """ + while True: + try: + return await fn(*args, **kwargs) + except Exception as e: + if on_exc is not None: + on_exc(e) + await SLEEP_ASYNC_FN(backoff.next_sleep_or_reraise(e).total_seconds()) diff --git a/parrot/lib/python3.10/site-packages/wandb/sdk/lib/run_moment.py b/parrot/lib/python3.10/site-packages/wandb/sdk/lib/run_moment.py new file mode 100644 index 0000000000000000000000000000000000000000..b3ba9cdc14e7c3c3dc94b6968396aac182c247c9 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/wandb/sdk/lib/run_moment.py @@ -0,0 +1,78 @@ +import sys +from dataclasses import dataclass +from typing import Union, cast +from urllib import parse + +if sys.version_info >= (3, 8): + from typing import Literal +else: + from typing_extensions import Literal + +_STEP = Literal["_step"] + + +@dataclass +class RunMoment: + """A moment in a run.""" + + run: str # run name + + # currently, the _step value to fork from. in future, this will be optional + value: Union[int, float] + + # only step for now, in future this will be relaxed to be any metric + metric: _STEP = "_step" + + def __post_init__(self): + if self.metric != "_step": + raise ValueError( + f"Only the metric '_step' is supported, got '{self.metric}'." + ) + if not isinstance(self.value, (int, float)): + raise ValueError( + f"Only int or float values are supported, got '{self.value}'." + ) + if not isinstance(self.run, str): + raise ValueError(f"Only string run names are supported, got '{self.run}'.") + + @classmethod + def from_uri(cls, uri: str) -> "RunMoment": + parsable = "runmoment://" + uri + parse_err = ValueError( + f"Could not parse passed run moment string '{uri}', " + f"expected format '?='. " + f"Currently, only the metric '_step' is supported. " + f"Example: 'ans3bsax?_step=123'." + ) + + try: + parsed = parse.urlparse(parsable) + except ValueError as e: + raise parse_err from e + + if parsed.scheme != "runmoment": + raise parse_err + + # extract run, metric, value from parsed + if not parsed.netloc: + raise parse_err + + run = parsed.netloc + + if parsed.path or parsed.params or parsed.fragment: + raise parse_err + + query = parse.parse_qs(parsed.query) + if len(query) != 1: + raise parse_err + + metric = list(query.keys())[0] + if metric != "_step": + raise parse_err + value: str = query[metric][0] + try: + num_value = int(value) if value.isdigit() else float(value) + except ValueError as e: + raise parse_err from e + + return cls(run=run, metric=cast(_STEP, metric), value=num_value) diff --git a/parrot/lib/python3.10/site-packages/wandb/sdk/lib/runid.py b/parrot/lib/python3.10/site-packages/wandb/sdk/lib/runid.py new file mode 100644 index 0000000000000000000000000000000000000000..355d24df01a2fc9b5a6174a343e84ed083696277 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/wandb/sdk/lib/runid.py @@ -0,0 +1,12 @@ +"""runid util.""" + +import secrets +import string + + +def generate_id(length: int = 8) -> str: + """Generate a random base-36 string of `length` digits.""" + # There are ~2.8T base-36 8-digit strings. If we generate 210k ids, + # we'll have a ~1% chance of collision. 
+ alphabet = string.ascii_lowercase + string.digits + return "".join(secrets.choice(alphabet) for _ in range(length)) diff --git a/parrot/lib/python3.10/site-packages/wandb/sdk/lib/sock_client.py b/parrot/lib/python3.10/site-packages/wandb/sdk/lib/sock_client.py new file mode 100644 index 0000000000000000000000000000000000000000..188f87646bafc710b2d90e93ceb9c037243fc987 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/wandb/sdk/lib/sock_client.py @@ -0,0 +1,291 @@ +import socket +import struct +import threading +import time +import uuid +from typing import TYPE_CHECKING, Any, List, Optional + +from wandb.proto import wandb_server_pb2 as spb + +from . import tracelog + +if TYPE_CHECKING: + from wandb.proto import wandb_internal_pb2 as pb + + +class SockClientClosedError(Exception): + """Socket has been closed.""" + + pass + + +class SockBuffer: + _buf_list: List[bytes] + _buf_lengths: List[int] + _buf_total: int + + def __init__(self) -> None: + self._buf_list = [] + self._buf_lengths = [] + self._buf_total = 0 + + @property + def length(self) -> int: + return self._buf_total + + def _get(self, start: int, end: int, peek: bool = False) -> bytes: + index: Optional[int] = None + buffers = [] + need = end + + # compute buffers needed + for i, (buf_len, buf_data) in enumerate(zip(self._buf_lengths, self._buf_list)): + buffers.append(buf_data[:need] if need < buf_len else buf_data) + if need <= buf_len: + index = i + break + need -= buf_len + + # buffer not large enough, caller should have made sure there was enough data + if index is None: + raise IndexError("SockBuffer index out of range") + + # advance buffer internals if we are not peeking into the data + if not peek: + self._buf_total -= end + if need < buf_len: + # update partially used buffer list + self._buf_list = self._buf_list[index:] + self._buf_lengths = self._buf_lengths[index:] + self._buf_list[0] = self._buf_list[0][need:] + self._buf_lengths[0] -= need + else: + # update fully used buffer list + self._buf_list = self._buf_list[index + 1 :] + self._buf_lengths = self._buf_lengths[index + 1 :] + + return b"".join(buffers)[start:end] + + def get(self, start: int, end: int) -> bytes: + return self._get(start, end) + + def peek(self, start: int, end: int) -> bytes: + return self._get(start, end, peek=True) + + def put(self, data: bytes, data_len: int) -> None: + self._buf_list.append(data) + self._buf_lengths.append(data_len) + self._buf_total += data_len + + +class SockClient: + _sock: socket.socket + _sockid: str + _retry_delay: float + _lock: "threading.Lock" + _bufsize: int + _buffer: SockBuffer + + # current header is magic byte "W" followed by 4 byte length of the message + HEADLEN = 1 + 4 + + def __init__(self) -> None: + # TODO: use safe uuid's (python3.7+) or emulate this + self._sockid = uuid.uuid4().hex + self._retry_delay = 0.1 + self._lock = threading.Lock() + self._bufsize = 4096 + self._buffer = SockBuffer() + + def connect(self, port: int) -> None: + s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) + s.connect(("localhost", port)) + self._sock = s + self._detect_bufsize() + + def _detect_bufsize(self) -> None: + sndbuf_size = self._sock.getsockopt(socket.SOL_SOCKET, socket.SO_SNDBUF) + rcvbuf_size = self._sock.getsockopt(socket.SOL_SOCKET, socket.SO_RCVBUF) + self._bufsize = min(sndbuf_size, rcvbuf_size, 65536) + + def close(self) -> None: + self._sock.close() + + def shutdown(self, val: int) -> None: + self._sock.shutdown(val) + + def set_socket(self, sock: socket.socket) -> None: + self._sock = sock + 
self._detect_bufsize() + + def _sendall_with_error_handle(self, data: bytes) -> None: + # This is a helper function for sending data in a retry fashion. + # Similar to the sendall() function in the socket module, but with + # an error handling in case of timeout. + total_sent = 0 + total_data = len(data) + while total_sent < total_data: + start_time = time.monotonic() + try: + sent = self._sock.send(data) + # sent equal to 0 indicates a closed socket + if sent == 0: + raise SockClientClosedError("socket connection broken") + total_sent += sent + # truncate our data to save memory + data = data[sent:] + # we handle the timeout case for the cases when timeout is set + # on a system level by another application + except socket.timeout: + # adding sleep to avoid tight loop + delta_time = time.monotonic() - start_time + if delta_time < self._retry_delay: + time.sleep(self._retry_delay - delta_time) + + def _send_message(self, msg: Any) -> None: + tracelog.log_message_send(msg, self._sockid) + raw_size = msg.ByteSize() + data = msg.SerializeToString() + assert len(data) == raw_size, "invalid serialization" + header = struct.pack(" None: + self._send_message(msg) + + def send_server_response(self, msg: Any) -> None: + try: + self._send_message(msg) + except BrokenPipeError: + # TODO(jhr): user thread might no longer be around to receive responses to + # things like network status poll loop, there might be a better way to quiesce + pass + + def send_and_recv( + self, + *, + inform_init: Optional[spb.ServerInformInitRequest] = None, + inform_start: Optional[spb.ServerInformStartRequest] = None, + inform_attach: Optional[spb.ServerInformAttachRequest] = None, + inform_finish: Optional[spb.ServerInformFinishRequest] = None, + inform_teardown: Optional[spb.ServerInformTeardownRequest] = None, + ) -> spb.ServerResponse: + self.send( + inform_init=inform_init, + inform_start=inform_start, + inform_attach=inform_attach, + inform_finish=inform_finish, + inform_teardown=inform_teardown, + ) + # TODO: this solution is fragile, but for checking attach + # it should be relatively stable. 
+ # This pass would be solved as part of the fix in https://wandb.atlassian.net/browse/WB-8709 + response = self.read_server_response(timeout=1) + if response is None: + raise Exception("No response") + return response + + def send( + self, + *, + inform_init: Optional[spb.ServerInformInitRequest] = None, + inform_start: Optional[spb.ServerInformStartRequest] = None, + inform_attach: Optional[spb.ServerInformAttachRequest] = None, + inform_finish: Optional[spb.ServerInformFinishRequest] = None, + inform_teardown: Optional[spb.ServerInformTeardownRequest] = None, + ) -> None: + server_req = spb.ServerRequest() + if inform_init: + server_req.inform_init.CopyFrom(inform_init) + elif inform_start: + server_req.inform_start.CopyFrom(inform_start) + elif inform_attach: + server_req.inform_attach.CopyFrom(inform_attach) + elif inform_finish: + server_req.inform_finish.CopyFrom(inform_finish) + elif inform_teardown: + server_req.inform_teardown.CopyFrom(inform_teardown) + else: + raise Exception("unmatched") + self.send_server_request(server_req) + + def send_record_communicate(self, record: "pb.Record") -> None: + server_req = spb.ServerRequest() + server_req.record_communicate.CopyFrom(record) + self.send_server_request(server_req) + + def send_record_publish(self, record: "pb.Record") -> None: + server_req = spb.ServerRequest() + server_req.record_publish.CopyFrom(record) + self.send_server_request(server_req) + + def _extract_packet_bytes(self) -> Optional[bytes]: + # Do we have enough data to read the header? + start_offset = self.HEADLEN + if self._buffer.length >= start_offset: + header = self._buffer.peek(0, start_offset) + fields = struct.unpack("= end_offset: + rec_data = self._buffer.get(start_offset, end_offset) + return rec_data + return None + + def _read_packet_bytes(self, timeout: Optional[int] = None) -> Optional[bytes]: + """Read full message from socket. + + Args: + timeout: number of seconds to wait on socket data. + + Raises: + SockClientClosedError: socket has been closed. 
+ """ + while True: + rec = self._extract_packet_bytes() + if rec: + return rec + + if timeout: + self._sock.settimeout(timeout) + try: + data = self._sock.recv(self._bufsize) + except socket.timeout: + break + except ConnectionResetError: + raise SockClientClosedError + except OSError: + raise SockClientClosedError + finally: + if timeout: + self._sock.settimeout(None) + data_len = len(data) + if data_len == 0: + # socket.recv() will return 0 bytes if socket was shutdown + # caller will handle this condition like other connection problems + raise SockClientClosedError + self._buffer.put(data, data_len) + return None + + def read_server_request(self) -> Optional[spb.ServerRequest]: + data = self._read_packet_bytes() + if not data: + return None + rec = spb.ServerRequest() + rec.ParseFromString(data) + tracelog.log_message_recv(rec, self._sockid) + return rec + + def read_server_response( + self, timeout: Optional[int] = None + ) -> Optional[spb.ServerResponse]: + data = self._read_packet_bytes(timeout=timeout) + if not data: + return None + rec = spb.ServerResponse() + rec.ParseFromString(data) + tracelog.log_message_recv(rec, self._sockid) + return rec diff --git a/parrot/lib/python3.10/site-packages/wandb/sdk/lib/sparkline.py b/parrot/lib/python3.10/site-packages/wandb/sdk/lib/sparkline.py new file mode 100644 index 0000000000000000000000000000000000000000..8f4e131b619482ee84860c6a25457dc7d3307d50 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/wandb/sdk/lib/sparkline.py @@ -0,0 +1,45 @@ +# +# From pysparklines (BSD License): https://pypi.python.org/pypi/pysparklines + +import math +from typing import List, Union + +spark_chars = "▁▂▃▄▅▆▇█" + + +# math.isfinite doesn't exist in python2, so provider our own +def isfinite(f): + return not (math.isinf(f) or math.isnan(f)) + + +def sparkify(series: List[Union[float, int]]) -> str: + """Convert to a sparkline string. + + Example: + >>> sparkify([ 0.5, 1.2, 3.5, 7.3, 8.0, 12.5, 13.2, 15.0, 14.2, 11.8, 6.1, + ... 1.9 ]) + u'▁▁▂▄▅▇▇██▆▄▂' + + >>> sparkify([1, 1, -2, 3, -5, 8, -13]) + u'▆▆▅▆▄█▁' + + Raises ValueError if input data cannot be converted to float. + Raises TypeError if series is not an iterable. + """ + series = [float(i) for i in series] + finite_series = [x for x in series if isfinite(x)] + if not finite_series: + return "" + minimum = min(finite_series) + maximum = max(finite_series) + data_range = maximum - minimum + if data_range == 0.0: + # Graph a baseline if every input value is equal. + return "".join([spark_chars[0] if isfinite(x) else " " for x in series]) + coefficient = (len(spark_chars) - 1.0) / data_range + return "".join( + [ + spark_chars[int(round((x - minimum) * coefficient))] if isfinite(x) else " " + for x in series + ] + ) diff --git a/parrot/lib/python3.10/site-packages/wandb/sdk/lib/telemetry.py b/parrot/lib/python3.10/site-packages/wandb/sdk/lib/telemetry.py new file mode 100644 index 0000000000000000000000000000000000000000..12c844fe4f1e2854d929606b13970aecefa1964e --- /dev/null +++ b/parrot/lib/python3.10/site-packages/wandb/sdk/lib/telemetry.py @@ -0,0 +1,100 @@ +import re +import sys +from types import TracebackType +from typing import TYPE_CHECKING, ContextManager, Dict, List, Optional, Set, Type + +import wandb +from wandb.proto.wandb_telemetry_pb2 import Imports as TelemetryImports +from wandb.proto.wandb_telemetry_pb2 import TelemetryRecord + +# avoid cycle, use string type reference + +if TYPE_CHECKING: + from .. 
import wandb_run + + +_LABEL_TOKEN: str = "@wandbcode{" + + +class _TelemetryObject: + _run: Optional["wandb_run.Run"] + _obj: TelemetryRecord + + def __init__( + self, + run: Optional["wandb_run.Run"] = None, + obj: Optional[TelemetryRecord] = None, + ) -> None: + self._run = run or wandb.run + self._obj = obj or TelemetryRecord() + + def __enter__(self) -> TelemetryRecord: + return self._obj + + def __exit__( + self, + exctype: Optional[Type[BaseException]], + excinst: Optional[BaseException], + exctb: Optional[TracebackType], + ) -> None: + if not self._run: + return + self._run._telemetry_callback(self._obj) + + +def context( + run: Optional["wandb_run.Run"] = None, obj: Optional[TelemetryRecord] = None +) -> ContextManager[TelemetryRecord]: + return _TelemetryObject(run=run, obj=obj) + + +MATCH_RE = re.compile(r"(?P[a-zA-Z0-9_-]+)[,}](?P.*)") + + +def _parse_label_lines(lines: List[str]) -> Dict[str, str]: + seen = False + ret = {} + for line in lines: + idx = line.find(_LABEL_TOKEN) + if idx < 0: + # Stop parsing on first non token line after match + if seen: + break + continue + seen = True + label_str = line[idx + len(_LABEL_TOKEN) :] + + # match identifier (first token without key=value syntax (optional) + # Note: Parse is fairly permissive as it doesnt enforce strict syntax + r = MATCH_RE.match(label_str) + if r: + ret["code"] = r.group("code").replace("-", "_") + label_str = r.group("rest") + + # match rest of tokens on one line + tokens = re.findall( + r'([a-zA-Z0-9_]+)\s*=\s*("[a-zA-Z0-9_-]*"|[a-zA-Z0-9_-]*)[,}]', label_str + ) + for k, v in tokens: + ret[k] = v.strip('"').replace("-", "_") + return ret + + +def list_telemetry_imports(only_imported: bool = False) -> Set[str]: + import_telemetry_set = { + desc.name + for desc in TelemetryImports.DESCRIPTOR.fields + if desc.type == desc.TYPE_BOOL + } + if only_imported: + imported_modules_set = set(sys.modules) + return imported_modules_set.intersection(import_telemetry_set) + return import_telemetry_set + + +__all__ = [ + "TelemetryImports", + "TelemetryRecord", + "context", + "list_telemetry_imports", +] diff --git a/parrot/lib/python3.10/site-packages/wandb/sdk/lib/timed_input.py b/parrot/lib/python3.10/site-packages/wandb/sdk/lib/timed_input.py new file mode 100644 index 0000000000000000000000000000000000000000..6d963ae0521263453ac7f68ed2906934dd7c0f95 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/wandb/sdk/lib/timed_input.py @@ -0,0 +1,133 @@ +"""timed_input: add a timeout to standard input. 
+ +Approach was inspired by: https://github.com/johejo/inputimeout +""" + +import sys +import threading + +import wandb + +SP = " " +CR = "\r" +LF = "\n" +CRLF = CR + LF + + +def _echo(prompt: str) -> None: + sys.stdout.write(prompt) + sys.stdout.flush() + + +def _posix_timed_input(prompt: str, timeout: float) -> str: + _echo(prompt) + sel = selectors.DefaultSelector() + sel.register(sys.stdin, selectors.EVENT_READ, data=sys.stdin.readline) + events = sel.select(timeout=timeout) + + for key, _ in events: + input_callback = key.data + input_data: str = input_callback() + if not input_data: # end-of-file - treat as timeout + raise TimeoutError + return input_data.rstrip(LF) + + _echo(LF) + termios.tcflush(sys.stdin, termios.TCIFLUSH) + raise TimeoutError + + +def _windows_timed_input(prompt: str, timeout: float) -> str: + interval = 0.1 + + _echo(prompt) + begin = time.monotonic() + end = begin + timeout + line = "" + + while time.monotonic() < end: + if msvcrt.kbhit(): # type: ignore[attr-defined] + c = msvcrt.getwche() # type: ignore[attr-defined] + if c in (CR, LF): + _echo(CRLF) + return line + if c == "\003": + raise KeyboardInterrupt + if c == "\b": + line = line[:-1] + cover = SP * len(prompt + line + SP) + _echo("".join([CR, cover, CR, prompt, line])) + else: + line += c + time.sleep(interval) + + _echo(CRLF) + raise TimeoutError + + +def _jupyter_timed_input(prompt: str, timeout: float) -> str: + clear = True + try: + from IPython.core.display import clear_output # type: ignore + except ImportError: + clear = False + wandb.termwarn( + "Unable to clear output, can't import clear_output from ipython.core" + ) + + _echo(prompt) + + user_inp = None + event = threading.Event() + + def get_input() -> None: + nonlocal user_inp + raw = input() + if event.is_set(): + return + user_inp = raw + + t = threading.Thread(target=get_input) + t.start() + t.join(timeout) + event.set() + if user_inp: + return user_inp + if clear: + clear_output() + raise TimeoutError + + +def timed_input( + prompt: str, timeout: float, show_timeout: bool = True, jupyter: bool = False +) -> str: + """Behaves like builtin `input()` but adds timeout. + + Args: + prompt (str): Prompt to output to stdout. + timeout (float): Timeout to wait for input. + show_timeout (bool): Show timeout in prompt + jupyter (bool): If True, use jupyter specific code. + + Raises: + TimeoutError: exception raised if timeout occurred. 
+ """ + if show_timeout: + prompt = f"{prompt}({timeout:.0f} second timeout) " + if jupyter: + return _jupyter_timed_input(prompt=prompt, timeout=timeout) + + return _timed_input(prompt=prompt, timeout=timeout) + + +try: + import msvcrt +except ImportError: + import selectors + import termios + + _timed_input = _posix_timed_input +else: + import time + + _timed_input = _windows_timed_input diff --git a/parrot/lib/python3.10/site-packages/wandb/sdk/lib/timer.py b/parrot/lib/python3.10/site-packages/wandb/sdk/lib/timer.py new file mode 100644 index 0000000000000000000000000000000000000000..0ff2be95dc9dcfd5a307558ad936d1b22242f882 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/wandb/sdk/lib/timer.py @@ -0,0 +1,19 @@ +import time +from typing import Any + + +class Timer: + def __init__(self) -> None: + self.start_time: float = time.time() + self.start: float = time.perf_counter() + self.stop: float = self.start + + def __enter__(self) -> "Timer": + return self + + def __exit__(self, *args: Any) -> None: + self.stop = time.perf_counter() + + @property + def elapsed(self) -> float: + return self.stop - self.start diff --git a/parrot/lib/python3.10/site-packages/wandb/sdk/lib/tracelog.py b/parrot/lib/python3.10/site-packages/wandb/sdk/lib/tracelog.py new file mode 100644 index 0000000000000000000000000000000000000000..b32656e35184d01bff213ec857d5be4d8473b6e7 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/wandb/sdk/lib/tracelog.py @@ -0,0 +1,255 @@ +"""tracelog. + +Functions: + log_message_queue - message put() to queue + log_message_dequeue - message get() from queue + log_message_send - message sent to socket + log_message_recv - message received from socket + log_message_process - message processed by thread + log_message_link - message linked to another message + log_message_assert - message encountered problem + +""" + +import datetime +import logging +import secrets +import sys +import threading +from typing import TYPE_CHECKING, Optional, cast + +if TYPE_CHECKING: + import multiprocessing + import queue + import socket + from typing import Union + + from wandb.proto import wandb_internal_pb2 as pb + from wandb.proto import wandb_server_pb2 as spb + + MessageQueueType = Union[pb.Record, pb.Result] + MessageType = Union[pb.Record, pb.Result, spb.ServerRequest, spb.ServerResponse] + QueueType = Union[multiprocessing.Queue, queue.Queue] + TransportType = Union[socket.socket, str] + + +# Supported modes: +# logger - tracelog output goes to python logging (default) +# stdout - tracelog output goes to stdout +# stderr - tracelog output goes to stderr +tracelog_mode: Optional[str] = "logger" + +logger = logging.getLogger(__name__) + + +ANNOTATE_QUEUE_NAME = "_DEBUGLOG_QUEUE_NAME" + +# capture stdout and stderr before anyone messes with them +stdout_write = sys.__stdout__.write # type: ignore +stderr_write = sys.__stderr__.write # type: ignore + + +def _log( + msg_type: str, + log_type: str, + is_response: bool = False, + record: Optional["pb.Record"] = None, + result: Optional["pb.Result"] = None, + resource: Optional[str] = None, +) -> None: + prefix = "TRACELOG(1)" + tname = threading.current_thread().name + now = datetime.datetime.now() + ts = now.strftime("%H%M%S.%f") + arrow = "<-" if is_response else "->" + resource = resource or "unknown" + uuid = "" + data = record or result + record_id = "" + if data: + uuid = data.uuid or uuid + record_id = data._info._tracelog_id + uuid = uuid or "-" + record_id = record_id or "-" + relay = "" + if data and data.control and 
data.control.relay_id: + relay = data.control.relay_id + relay = relay or "-" + line = f"{prefix} {arrow} {ts} {record_id:16} {log_type:7} {resource:8} {tname:16} {msg_type:32} {uuid:32} {relay:32}" + if tracelog_mode == "stdout": + stdout_write(f"{line}\n") + elif tracelog_mode == "stderr": + stderr_write(f"{line}\n") + elif tracelog_mode == "logger": + logger.info(line) + + +def _record_msg_type(record: "pb.Record") -> str: + msg_type = str(record.WhichOneof("record_type")) + if msg_type == "request": + request = record.request + msg_type = str(request.WhichOneof("request_type")) + return msg_type + + +def _result_msg_type(result: "pb.Result") -> str: + msg_type = str(result.WhichOneof("result_type")) + if msg_type == "response": + response = result.response + msg_type = str(response.WhichOneof("response_type")) + return msg_type + + +def _log_message( + msg: "MessageType", log_type: str, resource: Optional[str] = None +) -> None: + record: Optional[pb.Record] = None + result: Optional[pb.Result] = None + is_response = False + msg_type: str + # Note: using strings to avoid an import + message_type_str = type(msg).__name__ + if message_type_str == "Record": + record = cast("pb.Record", msg) + msg_type = _record_msg_type(record) + elif message_type_str == "Result": + is_response = True + result = cast("pb.Result", msg) + msg_type = _result_msg_type(result) + elif message_type_str == "ServerRequest": + server_request = cast("spb.ServerRequest", msg) + msg_type = str(server_request.WhichOneof("server_request_type")) + if msg_type == "record_publish": + record = server_request.record_publish + sub_msg_type = _record_msg_type(record) + msg_type = f"pub-{sub_msg_type}" + elif msg_type == "record_communicate": + record = server_request.record_communicate + sub_msg_type = _record_msg_type(record) + msg_type = f"comm-{sub_msg_type}" + # print("SRV", server_request) + elif message_type_str == "ServerResponse": + is_response = True + server_response = cast("spb.ServerResponse", msg) + msg_type = str(server_response.WhichOneof("server_response_type")) + if msg_type == "result_communicate": + result = server_response.result_communicate + sub_msg_type = _result_msg_type(result) + msg_type = f"comm-{sub_msg_type}" + else: + raise AssertionError(f"Unknown message type {message_type_str}") + _log( + msg_type, + is_response=is_response, + record=record, + result=result, + log_type=log_type, + resource=resource, + ) + + +def _log_message_queue(msg: "MessageQueueType", q: "QueueType") -> None: + _annotate_message(msg) + resource = getattr(q, ANNOTATE_QUEUE_NAME, None) + _log_message(msg, "queue", resource=resource) + + +def _log_message_dequeue(msg: "MessageQueueType", q: "QueueType") -> None: + resource = getattr(q, ANNOTATE_QUEUE_NAME, None) + _log_message(msg, "dequeue", resource=resource) + + +def _log_message_send(msg: "MessageType", t: "TransportType") -> None: + _log_message(msg, "send") + + +def _log_message_recv(msg: "MessageType", t: "TransportType") -> None: + _log_message(msg, "recv") + + +def _log_message_process(msg: "MessageType") -> None: + _log_message(msg, "process") + + +def _log_message_link(src: "MessageType", dest: "MessageType") -> None: + _log_message(src, "source") + _log_message(dest, "dest") + + +def _log_message_assert(msg: "MessageType") -> None: + _log_message(msg, "assert") + + +def _annotate_queue(q: "QueueType", name: str) -> None: + setattr(q, ANNOTATE_QUEUE_NAME, name) + + +def _annotate_message(msg: "MessageQueueType") -> None: + record_id = secrets.token_hex(8) + 
msg._info._tracelog_id = record_id + + +# +# Default functions when logging is disabled +# + + +def log_message_queue(msg: "MessageQueueType", q: "QueueType") -> None: + return None + + +def log_message_dequeue(msg: "MessageQueueType", q: "QueueType") -> None: + return None + + +def log_message_send(msg: "MessageType", t: "TransportType") -> None: + return None + + +def log_message_recv(msg: "MessageType", t: "TransportType") -> None: + return None + + +def log_message_process(msg: "MessageType") -> None: + return None + + +def log_message_link(src: "MessageType", dest: "MessageType") -> None: + return None + + +def log_message_assert(msg: "MessageType") -> None: + return None + + +def annotate_queue(q: "QueueType", name: str) -> None: + return None + + +def annotate_message(msg: "MessageQueueType") -> None: + return None + + +def enable(log_mode: Optional[str] = None) -> None: + global tracelog_mode + if log_mode: + tracelog_mode = log_mode + + global log_message_queue + global log_message_dequeue + global log_message_send + global log_message_recv + global log_message_process + global log_message_link + global log_message_assert + global annotate_queue + global annotate_message + log_message_queue = _log_message_queue + log_message_dequeue = _log_message_dequeue + log_message_send = _log_message_send + log_message_recv = _log_message_recv + log_message_process = _log_message_process + log_message_link = _log_message_link + log_message_assert = _log_message_assert + annotate_queue = _annotate_queue + annotate_message = _annotate_message diff --git a/parrot/lib/python3.10/site-packages/wandb/sdk/lib/viz.py b/parrot/lib/python3.10/site-packages/wandb/sdk/lib/viz.py new file mode 100644 index 0000000000000000000000000000000000000000..866666beac919d20fa17d0fd85ba48be56217618 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/wandb/sdk/lib/viz.py @@ -0,0 +1,123 @@ +from typing import Any, Dict, Optional, Tuple + +from wandb.data_types import Table +from wandb.errors import Error + + +class Visualize: + def __init__(self, id: str, data: Table) -> None: + self._id = id + self._data = data + + def get_config_value(self, key: str) -> Dict[str, Any]: + return { + "id": self._id, + "historyFieldSettings": {"x-axis": "_step", "key": key}, + } + + @staticmethod + def get_config_key(key: str) -> Tuple[str, str, str]: + return "_wandb", "viz", key + + @property + def value(self) -> Table: + return self._data + + +class CustomChart: + def __init__( + self, + id: str, + data: Table, + fields: Dict[str, Any], + string_fields: Dict[str, Any], + split_table: Optional[bool] = False, + ) -> None: + self._id = id + self._data = data + self._fields = fields + self._string_fields = string_fields + self._split_table = split_table + + def get_config_value( + self, + panel_type: str, + query: Dict[str, Any], + ) -> Dict[str, Any]: + return { + "panel_type": panel_type, + "panel_config": { + "panelDefId": self._id, + "fieldSettings": self._fields, + "stringSettings": self._string_fields, + "transform": {"name": "tableWithLeafColNames"}, + "userQuery": query, + }, + } + + @staticmethod + def get_config_key(key: str) -> Tuple[str, str, str]: + return "_wandb", "visualize", key + + @staticmethod + def user_query(table_key: str) -> Dict[str, Any]: + return { + "queryFields": [ + { + "name": "runSets", + "args": [{"name": "runSets", "value": "${runSets}"}], + "fields": [ + {"name": "id", "fields": []}, + {"name": "name", "fields": []}, + {"name": "_defaultColorIndex", "fields": []}, + { + "name": "summaryTable", + 
"args": [{"name": "tableKey", "value": table_key}], + "fields": [], + }, + ], + } + ], + } + + @property + def table(self) -> Table: + return self._data + + @property + def fields(self) -> Dict[str, Any]: + return self._fields + + @property + def string_fields(self) -> Dict[str, Any]: + return self._string_fields + + +def custom_chart( + vega_spec_name: str, + data_table: Table, + fields: Dict[str, Any], + string_fields: Optional[Dict[str, Any]] = None, + split_table: Optional[bool] = False, +) -> CustomChart: + if string_fields is None: + string_fields = {} + if not isinstance(data_table, Table): + raise Error( + f"Expected `data_table` to be `wandb.Table` type, instead got {type(data_table).__name__}" + ) + return CustomChart( + id=vega_spec_name, + data=data_table, + fields=fields, + string_fields=string_fields, + split_table=split_table, + ) + + +def visualize(id: str, value: Table) -> Visualize: + if not isinstance(value, Table): + raise Error( + f"Expected `value` to be `wandb.Table` type, instead got {type(value).__name__}" + ) + return Visualize(id=id, data=value)