diff --git a/parrot/lib/python3.10/site-packages/wandb/sdk/data_types/__pycache__/__init__.cpython-310.pyc b/parrot/lib/python3.10/site-packages/wandb/sdk/data_types/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5e8a1d190e9a77b772ae52e55d013b9da04ca056 Binary files /dev/null and b/parrot/lib/python3.10/site-packages/wandb/sdk/data_types/__pycache__/__init__.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/wandb/sdk/data_types/__pycache__/_dtypes.cpython-310.pyc b/parrot/lib/python3.10/site-packages/wandb/sdk/data_types/__pycache__/_dtypes.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..acba4a773dcc6eebb39a4e3fec973e3df2480be7 Binary files /dev/null and b/parrot/lib/python3.10/site-packages/wandb/sdk/data_types/__pycache__/_dtypes.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/wandb/sdk/data_types/__pycache__/_private.cpython-310.pyc b/parrot/lib/python3.10/site-packages/wandb/sdk/data_types/__pycache__/_private.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2528e67a07c26b8657438b492bbe2653a13c896f Binary files /dev/null and b/parrot/lib/python3.10/site-packages/wandb/sdk/data_types/__pycache__/_private.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/wandb/sdk/data_types/__pycache__/audio.cpython-310.pyc b/parrot/lib/python3.10/site-packages/wandb/sdk/data_types/__pycache__/audio.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a42daa318f62b55e55b334c6fdcfb13a3bec1c13 Binary files /dev/null and b/parrot/lib/python3.10/site-packages/wandb/sdk/data_types/__pycache__/audio.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/wandb/sdk/data_types/__pycache__/bokeh.cpython-310.pyc b/parrot/lib/python3.10/site-packages/wandb/sdk/data_types/__pycache__/bokeh.cpython-310.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..e7edaea08d631299a5e242e11c0e8c7430d174bb Binary files /dev/null and b/parrot/lib/python3.10/site-packages/wandb/sdk/data_types/__pycache__/bokeh.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/wandb/sdk/data_types/__pycache__/graph.cpython-310.pyc b/parrot/lib/python3.10/site-packages/wandb/sdk/data_types/__pycache__/graph.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3f6fba53cd39fb222b027c3312d1b5d320433031 Binary files /dev/null and b/parrot/lib/python3.10/site-packages/wandb/sdk/data_types/__pycache__/graph.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/wandb/sdk/data_types/__pycache__/histogram.cpython-310.pyc b/parrot/lib/python3.10/site-packages/wandb/sdk/data_types/__pycache__/histogram.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6f21135cf5082877b2deb0b17bedb330eab519da Binary files /dev/null and b/parrot/lib/python3.10/site-packages/wandb/sdk/data_types/__pycache__/histogram.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/wandb/sdk/data_types/__pycache__/html.cpython-310.pyc b/parrot/lib/python3.10/site-packages/wandb/sdk/data_types/__pycache__/html.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..08728cfa1c812db5d15a4fc36526c6964423aa8c Binary files /dev/null and b/parrot/lib/python3.10/site-packages/wandb/sdk/data_types/__pycache__/html.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/wandb/sdk/data_types/__pycache__/image.cpython-310.pyc b/parrot/lib/python3.10/site-packages/wandb/sdk/data_types/__pycache__/image.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..24ebf6de4478da5f5e7bfbd2580fd4075913d757 Binary files /dev/null and b/parrot/lib/python3.10/site-packages/wandb/sdk/data_types/__pycache__/image.cpython-310.pyc differ diff --git 
a/parrot/lib/python3.10/site-packages/wandb/sdk/data_types/__pycache__/molecule.cpython-310.pyc b/parrot/lib/python3.10/site-packages/wandb/sdk/data_types/__pycache__/molecule.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..48a44f6f8cc364fa3c23bd7e2e3b4dde769c05a5 Binary files /dev/null and b/parrot/lib/python3.10/site-packages/wandb/sdk/data_types/__pycache__/molecule.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/wandb/sdk/data_types/__pycache__/object_3d.cpython-310.pyc b/parrot/lib/python3.10/site-packages/wandb/sdk/data_types/__pycache__/object_3d.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9bd26e18350b3d09969479263a554df634f7c9d0 Binary files /dev/null and b/parrot/lib/python3.10/site-packages/wandb/sdk/data_types/__pycache__/object_3d.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/wandb/sdk/data_types/__pycache__/plotly.cpython-310.pyc b/parrot/lib/python3.10/site-packages/wandb/sdk/data_types/__pycache__/plotly.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0834a096df01a4766307a08244f0f70a40ca384e Binary files /dev/null and b/parrot/lib/python3.10/site-packages/wandb/sdk/data_types/__pycache__/plotly.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/wandb/sdk/data_types/__pycache__/saved_model.cpython-310.pyc b/parrot/lib/python3.10/site-packages/wandb/sdk/data_types/__pycache__/saved_model.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..90b0260e759797cfc786a80a1e92c53cb97e6e41 Binary files /dev/null and b/parrot/lib/python3.10/site-packages/wandb/sdk/data_types/__pycache__/saved_model.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/wandb/sdk/data_types/__pycache__/table.cpython-310.pyc b/parrot/lib/python3.10/site-packages/wandb/sdk/data_types/__pycache__/table.cpython-310.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..6b76e6265b43531dc4b085351088739219570331 Binary files /dev/null and b/parrot/lib/python3.10/site-packages/wandb/sdk/data_types/__pycache__/table.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/wandb/sdk/data_types/__pycache__/trace_tree.cpython-310.pyc b/parrot/lib/python3.10/site-packages/wandb/sdk/data_types/__pycache__/trace_tree.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0993877399e06c56c2dfabb36e83716ceb8d5486 Binary files /dev/null and b/parrot/lib/python3.10/site-packages/wandb/sdk/data_types/__pycache__/trace_tree.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/wandb/sdk/data_types/__pycache__/utils.cpython-310.pyc b/parrot/lib/python3.10/site-packages/wandb/sdk/data_types/__pycache__/utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e4a836be9d3aae9cf9d8d1addc71702c4977a390 Binary files /dev/null and b/parrot/lib/python3.10/site-packages/wandb/sdk/data_types/__pycache__/utils.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/wandb/sdk/data_types/__pycache__/video.cpython-310.pyc b/parrot/lib/python3.10/site-packages/wandb/sdk/data_types/__pycache__/video.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..54b35c6c9c948ea105326c87f1a6636b1178f33b Binary files /dev/null and b/parrot/lib/python3.10/site-packages/wandb/sdk/data_types/__pycache__/video.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/wandb/sdk/data_types/base_types/__init__.py b/parrot/lib/python3.10/site-packages/wandb/sdk/data_types/base_types/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/parrot/lib/python3.10/site-packages/wandb/sdk/data_types/base_types/__pycache__/__init__.cpython-310.pyc 
b/parrot/lib/python3.10/site-packages/wandb/sdk/data_types/base_types/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5c8a36d43686c62ac22b761f4573ba930eacf27e Binary files /dev/null and b/parrot/lib/python3.10/site-packages/wandb/sdk/data_types/base_types/__pycache__/__init__.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/wandb/sdk/data_types/base_types/__pycache__/json_metadata.cpython-310.pyc b/parrot/lib/python3.10/site-packages/wandb/sdk/data_types/base_types/__pycache__/json_metadata.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b563b1f16334f10ef38d25852fb6831d742508b5 Binary files /dev/null and b/parrot/lib/python3.10/site-packages/wandb/sdk/data_types/base_types/__pycache__/json_metadata.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/wandb/sdk/data_types/base_types/__pycache__/media.cpython-310.pyc b/parrot/lib/python3.10/site-packages/wandb/sdk/data_types/base_types/__pycache__/media.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ed411eb08e62efadfd29b156faadbe0131bfdd61 Binary files /dev/null and b/parrot/lib/python3.10/site-packages/wandb/sdk/data_types/base_types/__pycache__/media.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/wandb/sdk/data_types/base_types/__pycache__/wb_value.cpython-310.pyc b/parrot/lib/python3.10/site-packages/wandb/sdk/data_types/base_types/__pycache__/wb_value.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..36daaba91f6185184e55693f5e53195f6dcd7169 Binary files /dev/null and b/parrot/lib/python3.10/site-packages/wandb/sdk/data_types/base_types/__pycache__/wb_value.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/wandb/sdk/data_types/base_types/json_metadata.py b/parrot/lib/python3.10/site-packages/wandb/sdk/data_types/base_types/json_metadata.py new file mode 100644 
index 0000000000000000000000000000000000000000..b644584abcf1a640c35a1c98e7b7083999f3b446 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/wandb/sdk/data_types/base_types/json_metadata.py @@ -0,0 +1,55 @@ +import codecs +import os +from typing import TYPE_CHECKING, Type, Union + +from wandb import util +from wandb.sdk.lib import runid + +from .._private import MEDIA_TMP +from .media import Media + +if TYPE_CHECKING: # pragma: no cover + from wandb.sdk.artifacts.artifact import Artifact + + from ...wandb_run import Run as LocalRun + + +# Allows encoding of arbitrary JSON structures +# as a file +# +# This class should be used as an abstract class +# extended to have validation methods + + +class JSONMetadata(Media): + """JSONMetadata is a type for encoding arbitrary metadata as files.""" + + def __init__(self, val: dict) -> None: + super().__init__() + + self.validate(val) + self._val = val + + ext = "." + self.type_name() + ".json" + tmp_path = os.path.join(MEDIA_TMP.name, runid.generate_id() + ext) + with codecs.open(tmp_path, "w", encoding="utf-8") as fp: + util.json_dump_uncompressed(self._val, fp) + self._set_file(tmp_path, is_tmp=True, extension=ext) + + @classmethod + def get_media_subdir(cls: Type["JSONMetadata"]) -> str: + return os.path.join("media", "metadata", cls.type_name()) + + def to_json(self, run_or_artifact: Union["LocalRun", "Artifact"]) -> dict: + json_dict = super().to_json(run_or_artifact) + json_dict["_type"] = self.type_name() + + return json_dict + + # These methods should be overridden in the child class + @classmethod + def type_name(cls) -> str: + return "metadata" + + def validate(self, val: dict) -> bool: + return True diff --git a/parrot/lib/python3.10/site-packages/wandb/sdk/data_types/base_types/media.py b/parrot/lib/python3.10/site-packages/wandb/sdk/data_types/base_types/media.py new file mode 100644 index 0000000000000000000000000000000000000000..89dbd13e843a9e05422805814f40fa2db8b75394 --- /dev/null +++ 
b/parrot/lib/python3.10/site-packages/wandb/sdk/data_types/base_types/media.py @@ -0,0 +1,315 @@ +import hashlib +import os +import platform +import re +import shutil +from typing import TYPE_CHECKING, Optional, Sequence, Type, Union, cast + +import wandb +from wandb import util +from wandb._globals import _datatypes_callback +from wandb.sdk.lib import filesystem +from wandb.sdk.lib.paths import LogicalPath + +from .wb_value import WBValue + +if TYPE_CHECKING: # pragma: no cover + import numpy as np + + from wandb.sdk.artifacts.artifact import Artifact + + from ...wandb_run import Run as LocalRun + + +SYS_PLATFORM = platform.system() + + +def _wb_filename( + key: Union[str, int], step: Union[str, int], id: Union[str, int], extension: str +) -> str: + return f"{str(key)}_{str(step)}_{str(id)}{extension}" + + +class Media(WBValue): + """A WBValue stored as a file outside JSON that can be rendered in a media panel. + + If necessary, we move or copy the file into the Run's media directory so that it + gets uploaded. + """ + + _path: Optional[str] + _run: Optional["LocalRun"] + _caption: Optional[str] + _is_tmp: Optional[bool] + _extension: Optional[str] + _sha256: Optional[str] + _size: Optional[int] + + def __init__(self, caption: Optional[str] = None) -> None: + super().__init__() + self._path = None + # The run under which this object is bound, if any. + self._run = None + self._caption = caption + + def _set_file( + self, path: str, is_tmp: bool = False, extension: Optional[str] = None + ) -> None: + self._path = path + self._is_tmp = is_tmp + self._extension = extension + assert extension is None or path.endswith( + extension + ), f'Media file extension "{extension}" must occur at the end of path "{path}".' 
+ + with open(self._path, "rb") as f: + self._sha256 = hashlib.sha256(f.read()).hexdigest() + self._size = os.path.getsize(self._path) + + @classmethod + def get_media_subdir(cls: Type["Media"]) -> str: + raise NotImplementedError + + @staticmethod + def captions( + media_items: Sequence["Media"], + ) -> Union[bool, Sequence[Optional[str]]]: + if media_items[0]._caption is not None: + return [m._caption for m in media_items] + else: + return False + + def is_bound(self) -> bool: + return self._run is not None + + def file_is_set(self) -> bool: + return self._path is not None and self._sha256 is not None + + def bind_to_run( + self, + run: "LocalRun", + key: Union[int, str], + step: Union[int, str], + id_: Optional[Union[int, str]] = None, + ignore_copy_err: Optional[bool] = None, + ) -> None: + """Bind this object to a particular Run. + + Calling this function is necessary so that we have somewhere specific to put the + file associated with this object, from which other Runs can refer to it. + """ + assert self.file_is_set(), "bind_to_run called before _set_file" + + if SYS_PLATFORM == "Windows" and not util.check_windows_valid_filename(key): + raise ValueError( + f"Media {key} is invalid. Please remove invalid filename characters" + ) + + # The following two assertions are guaranteed to pass + # by definition file_is_set, but are needed for + # mypy to understand that these are strings below. + assert isinstance(self._path, str) + assert isinstance(self._sha256, str) + + assert run is not None, 'Argument "run" must not be None.' 
+ self._run = run + + if self._extension is None: + _, extension = os.path.splitext(os.path.basename(self._path)) + else: + extension = self._extension + + if id_ is None: + id_ = self._sha256[:20] + + file_path = _wb_filename(key, step, id_, extension) + media_path = os.path.join(self.get_media_subdir(), file_path) + new_path = os.path.join(self._run.dir, media_path) + filesystem.mkdir_exists_ok(os.path.dirname(new_path)) + + if self._is_tmp: + shutil.move(self._path, new_path) + self._path = new_path + self._is_tmp = False + _datatypes_callback(media_path) + else: + try: + shutil.copy(self._path, new_path) + except shutil.SameFileError as e: + if not ignore_copy_err: + raise e + self._path = new_path + _datatypes_callback(media_path) + + def to_json(self, run: Union["LocalRun", "Artifact"]) -> dict: + """Serialize the object into a JSON blob. + + Uses run or artifact to store additional data. If `run_or_artifact` is a + wandb.Run then `self.bind_to_run()` must have been previously been called. + + Args: + run_or_artifact (wandb.Run | wandb.Artifact): the Run or Artifact for which + this object should be generating JSON for - this is useful to store + additional data if needed. + + Returns: + dict: JSON representation + """ + # NOTE: uses of Audio in this class are a temporary hack -- when Ref support moves up + # into Media itself we should get rid of them + from wandb import Image + from wandb.data_types import Audio + from wandb.sdk.wandb_run import Run + + json_obj = {} + + if isinstance(run, Run): + json_obj.update( + { + "_type": "file", # TODO(adrian): This isn't (yet) a real media type we support on the frontend. 
+ "sha256": self._sha256, + "size": self._size, + } + ) + artifact_entry_url = self._get_artifact_entry_ref_url() + if artifact_entry_url is not None: + json_obj["artifact_path"] = artifact_entry_url + artifact_entry_latest_url = self._get_artifact_entry_latest_ref_url() + if artifact_entry_latest_url is not None: + json_obj["_latest_artifact_path"] = artifact_entry_latest_url + + if artifact_entry_url is None or self.is_bound(): + assert self.is_bound(), "Value of type {} must be bound to a run with bind_to_run() before being serialized to JSON.".format( + type(self).__name__ + ) + + assert ( + self._run is run + ), "We don't support referring to media files across runs." + + # The following two assertions are guaranteed to pass + # by definition is_bound, but are needed for + # mypy to understand that these are strings below. + assert isinstance(self._path, str) + json_obj["path"] = LogicalPath( + os.path.relpath(self._path, self._run.dir) + ) + + elif isinstance(run, wandb.Artifact): + if self.file_is_set(): + # The following two assertions are guaranteed to pass + # by definition of the call above, but are needed for + # mypy to understand that these are strings below. + assert isinstance(self._path, str) + assert isinstance(self._sha256, str) + artifact = run # Checks if the concrete image has already been added to this artifact + name = artifact.get_added_local_path_name(self._path) + if name is None: + if self._is_tmp: + name = os.path.join( + self.get_media_subdir(), os.path.basename(self._path) + ) + else: + # If the files is not temporary, include the first 8 characters of the file's SHA256 to + # avoid name collisions. This way, if there are two images `dir1/img.png` and `dir2/img.png` + # we end up with a unique path for each. 
+ name = os.path.join( + self.get_media_subdir(), + self._sha256[:20], + os.path.basename(self._path), + ) + + # if not, check to see if there is a source artifact for this object + if ( + self._artifact_source is not None + # and self._artifact_source.artifact != artifact + ): + default_root = self._artifact_source.artifact._default_root() + # if there is, get the name of the entry (this might make sense to move to a helper off artifact) + if self._path.startswith(default_root): + name = self._path[len(default_root) :] + name = name.lstrip(os.sep) + + # Add this image as a reference + path = self._artifact_source.artifact.get_entry(name) + artifact.add_reference(path.ref_url(), name=name) + elif ( + isinstance(self, Audio) or isinstance(self, Image) + ) and self.path_is_reference(self._path): + artifact.add_reference(self._path, name=name) + else: + entry = artifact.add_file( + self._path, name=name, is_tmp=self._is_tmp + ) + name = entry.path + + json_obj["path"] = name + json_obj["sha256"] = self._sha256 + json_obj["_type"] = self._log_type + return json_obj + + @classmethod + def from_json( + cls: Type["Media"], json_obj: dict, source_artifact: "Artifact" + ) -> "Media": + """Likely will need to override for any more complicated media objects.""" + return cls(source_artifact.get_entry(json_obj["path"]).download()) + + def __eq__(self, other: object) -> bool: + """Likely will need to override for any more complicated media objects.""" + return ( + isinstance(other, self.__class__) + and hasattr(self, "_sha256") + and hasattr(other, "_sha256") + and self._sha256 == other._sha256 + ) + + @staticmethod + def path_is_reference(path: Optional[str]) -> bool: + return bool(path and re.match(r"^(gs|s3|https?)://", path)) + + +class BatchableMedia(Media): + """Media that is treated in batches. + + E.g. images and thumbnails. Apart from images, we just use these batches to help + organize files by name in the media directory. 
+ """ + + def __init__(self) -> None: + super().__init__() + + @classmethod + def seq_to_json( + cls: Type["BatchableMedia"], + seq: Sequence["BatchableMedia"], + run: "LocalRun", + key: str, + step: Union[int, str], + ) -> dict: + raise NotImplementedError + + +def _numpy_arrays_to_lists( + payload: Union[dict, Sequence, "np.ndarray"], +) -> Union[Sequence, dict, str, int, float, bool]: + # Casts all numpy arrays to lists so we don't convert them to histograms, primarily for Plotly + + if isinstance(payload, dict): + res = {} + for key, val in payload.items(): + res[key] = _numpy_arrays_to_lists(val) + return res + elif isinstance(payload, Sequence) and not isinstance(payload, str): + return [_numpy_arrays_to_lists(v) for v in payload] + elif util.is_numpy_array(payload): + if TYPE_CHECKING: + payload = cast("np.ndarray", payload) + return [ + _numpy_arrays_to_lists(v) + for v in (payload.tolist() if payload.ndim > 0 else [payload.tolist()]) + ] + # Protects against logging non serializable objects + elif isinstance(payload, Media): + return str(payload.__class__.__name__) + return payload # type: ignore diff --git a/parrot/lib/python3.10/site-packages/wandb/sdk/data_types/base_types/wb_value.py b/parrot/lib/python3.10/site-packages/wandb/sdk/data_types/base_types/wb_value.py new file mode 100644 index 0000000000000000000000000000000000000000..48f4658176cbc3b3ac3888d18ad30a56066ba5ef --- /dev/null +++ b/parrot/lib/python3.10/site-packages/wandb/sdk/data_types/base_types/wb_value.py @@ -0,0 +1,274 @@ +from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Type, Union + +from wandb import util + +if TYPE_CHECKING: # pragma: no cover + from wandb.sdk.artifacts.artifact import Artifact + + from ...wandb_run import Run as LocalRun + + TypeMappingType = Dict[str, Type["WBValue"]] + + +def _server_accepts_client_ids() -> bool: + from wandb.util import parse_version + + # First, if we are offline, assume the backend server cannot + # accept client IDs. 
Unfortunately, this is the best we can do + # until we are sure that all local versions are > "0.11.0" max_cli_version. + # The practical implication is that tables logged in offline mode + # will not show up in the workspace (but will still show up in artifacts). This + # means we never lose data, and we can still view using weave. If we decided + # to use client ids in offline mode, then the manifests and artifact data + # would never be resolvable and would lead to failed uploads. Our position + # is to never lose data - and instead take the tradeoff in the UI. + if util._is_offline(): + return False + + # If the script is online, request the max_cli_version and ensure the server + # is of a high enough version. + max_cli_version = util._get_max_cli_version() + if max_cli_version is None: + return False + accepts_client_ids: bool = parse_version("0.11.0") <= parse_version(max_cli_version) + return accepts_client_ids + + +class _WBValueArtifactSource: + artifact: "Artifact" + name: Optional[str] + + def __init__(self, artifact: "Artifact", name: Optional[str] = None) -> None: + self.artifact = artifact + self.name = name + + +class _WBValueArtifactTarget: + artifact: "Artifact" + name: Optional[str] + + def __init__(self, artifact: "Artifact", name: Optional[str] = None) -> None: + self.artifact = artifact + self.name = name + + +class WBValue: + """Typed objects that can be logged with `wandb.log()` and visualized by wandb. + + The objects will be serialized as JSON and always have a _type attribute that + indicates how to interpret the other fields. 
+ """ + + # Class Attributes + _type_mapping: ClassVar[Optional["TypeMappingType"]] = None + # override _log_type to indicate the type which the subclass deserializes + _log_type: ClassVar[Optional[str]] = None + + # Instance Attributes + _artifact_source: Optional[_WBValueArtifactSource] + _artifact_target: Optional[_WBValueArtifactTarget] + + def __init__(self) -> None: + self._artifact_source = None + self._artifact_target = None + + def to_json(self, run_or_artifact: Union["LocalRun", "Artifact"]) -> dict: + """Serialize the object into a JSON blob. + + Uses current run or artifact to store additional data. + + Args: + run_or_artifact (wandb.Run | wandb.Artifact): the Run or Artifact for which + this object should be generating JSON for - this is useful to to store + additional data if needed. + + Returns: + dict: JSON representation + """ + raise NotImplementedError + + @classmethod + def from_json( + cls: Type["WBValue"], json_obj: dict, source_artifact: "Artifact" + ) -> "WBValue": + """Deserialize a `json_obj` into it's class representation. + + If additional resources were stored in the `run_or_artifact` artifact during the + `to_json` call, then those resources should be in the `source_artifact`. + + Args: + json_obj (dict): A JSON dictionary to deserialize source_artifact + (wandb.Artifact): An artifact which will hold any additional + resources which were stored during the `to_json` function. + """ + raise NotImplementedError + + @classmethod + def with_suffix(cls: Type["WBValue"], name: str, filetype: str = "json") -> str: + """Get the name with the appropriate suffix. + + Args: + name (str): the name of the file + filetype (str, optional): the filetype to use. Defaults to "json". + + Returns: + str: a filename which is suffixed with it's `_log_type` followed by the + filetype. + """ + if cls._log_type is not None: + suffix = cls._log_type + "." + filetype + else: + suffix = filetype + if not name.endswith(suffix): + return name + "." 
+ suffix + return name + + @staticmethod + def init_from_json( + json_obj: dict, source_artifact: "Artifact" + ) -> Optional["WBValue"]: + """Initialize a `WBValue` from a JSON blob based on the class that creatd it. + + Looks through all subclasses and tries to match the json obj with the class + which created it. It will then call that subclass' `from_json` method. + Importantly, this function will set the return object's `source_artifact` + attribute to the passed in source artifact. This is critical for artifact + bookkeeping. If you choose to create a wandb.Value via it's `from_json` method, + make sure to properly set this `artifact_source` to avoid data duplication. + + Args: + json_obj (dict): A JSON dictionary to deserialize. It must contain a `_type` + key. This is used to lookup the correct subclass to use. + source_artifact (wandb.Artifact): An artifact which will hold any additional + resources which were stored during the `to_json` function. + + Returns: + wandb.Value: a newly created instance of a subclass of wandb.Value + """ + class_option = WBValue.type_mapping().get(json_obj["_type"]) + if class_option is not None: + obj = class_option.from_json(json_obj, source_artifact) + obj._set_artifact_source(source_artifact) + return obj + + return None + + @staticmethod + def type_mapping() -> "TypeMappingType": + """Return a map from `_log_type` to subclass. Used to lookup correct types for deserialization. 
+ + Returns: + dict: dictionary of str:class + """ + if WBValue._type_mapping is None: + WBValue._type_mapping = {} + frontier = [WBValue] + explored = set() + while len(frontier) > 0: + class_option = frontier.pop() + explored.add(class_option) + if class_option._log_type is not None: + WBValue._type_mapping[class_option._log_type] = class_option + for subclass in class_option.__subclasses__(): + if subclass not in explored: + frontier.append(subclass) + return WBValue._type_mapping + + def __eq__(self, other: object) -> bool: + return id(self) == id(other) + + def __ne__(self, other: object) -> bool: + return not self.__eq__(other) + + def to_data_array(self) -> List[Any]: + """Convert the object to a list of primitives representing the underlying data.""" + raise NotImplementedError + + def _set_artifact_source( + self, artifact: "Artifact", name: Optional[str] = None + ) -> None: + assert ( + self._artifact_source is None + ), "Cannot update artifact_source. Existing source: {}/{}".format( + self._artifact_source.artifact, self._artifact_source.name + ) + self._artifact_source = _WBValueArtifactSource(artifact, name) + + def _set_artifact_target( + self, artifact: "Artifact", name: Optional[str] = None + ) -> None: + assert ( + self._artifact_target is None + ), "Cannot update artifact_target. 
Existing target: {}/{}".format( + self._artifact_target.artifact, self._artifact_target.name + ) + self._artifact_target = _WBValueArtifactTarget(artifact, name) + + def _get_artifact_entry_ref_url(self) -> Optional[str]: + # If the object is coming from another artifact + if self._artifact_source and self._artifact_source.name: + ref_entry = self._artifact_source.artifact.get_entry( + type(self).with_suffix(self._artifact_source.name) + ) + return str(ref_entry.ref_url()) + # Else, if the object is destined for another artifact and we support client IDs + elif ( + self._artifact_target + and self._artifact_target.name + and self._artifact_target.artifact._client_id is not None + and self._artifact_target.artifact._final + and _server_accepts_client_ids() + ): + return "wandb-client-artifact://{}/{}".format( + self._artifact_target.artifact._client_id, + type(self).with_suffix(self._artifact_target.name), + ) + # Else if we do not support client IDs, but online, then block on upload + # Note: this is old behavior just to stay backwards compatible + # with older server versions. This code path should be removed + # once those versions are no longer supported. This path uses a .wait + # which blocks the user process on artifact upload. 
+ elif ( + self._artifact_target + and self._artifact_target.name + and self._artifact_target.artifact._is_draft_save_started() + and not util._is_offline() + and not _server_accepts_client_ids() + ): + self._artifact_target.artifact.wait() + ref_entry = self._artifact_target.artifact.get_entry( + type(self).with_suffix(self._artifact_target.name) + ) + return str(ref_entry.ref_url()) + return None + + def _get_artifact_entry_latest_ref_url(self) -> Optional[str]: + if ( + self._artifact_target + and self._artifact_target.name + and self._artifact_target.artifact._client_id is not None + and self._artifact_target.artifact._final + and _server_accepts_client_ids() + ): + return "wandb-client-artifact://{}:latest/{}".format( + self._artifact_target.artifact._sequence_client_id, + type(self).with_suffix(self._artifact_target.name), + ) + # Else if we do not support client IDs, then block on upload + # Note: this is old behavior just to stay backwards compatible + # with older server versions. This code path should be removed + # once those versions are no longer supported. This path uses a .wait + # which blocks the user process on artifact upload. 
+ elif ( + self._artifact_target + and self._artifact_target.name + and self._artifact_target.artifact._is_draft_save_started() + and not util._is_offline() + and not _server_accepts_client_ids() + ): + self._artifact_target.artifact.wait() + ref_entry = self._artifact_target.artifact.get_entry( + type(self).with_suffix(self._artifact_target.name) + ) + return str(ref_entry.ref_url()) + return None diff --git a/parrot/lib/python3.10/site-packages/wandb/sdk/data_types/helper_types/__pycache__/__init__.cpython-310.pyc b/parrot/lib/python3.10/site-packages/wandb/sdk/data_types/helper_types/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..78d8180378595d5e43d3df95b45b7fa37a7260f1 Binary files /dev/null and b/parrot/lib/python3.10/site-packages/wandb/sdk/data_types/helper_types/__pycache__/__init__.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/wandb/sdk/data_types/helper_types/__pycache__/classes.cpython-310.pyc b/parrot/lib/python3.10/site-packages/wandb/sdk/data_types/helper_types/__pycache__/classes.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..26641c7a294bba9f6672ca48231eff24db5b57dd Binary files /dev/null and b/parrot/lib/python3.10/site-packages/wandb/sdk/data_types/helper_types/__pycache__/classes.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/wandb/sdk/data_types/helper_types/__pycache__/image_mask.cpython-310.pyc b/parrot/lib/python3.10/site-packages/wandb/sdk/data_types/helper_types/__pycache__/image_mask.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9adb2eafae2d4ca2d94cb1b6bd5cc72902aaec73 Binary files /dev/null and b/parrot/lib/python3.10/site-packages/wandb/sdk/data_types/helper_types/__pycache__/image_mask.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/wandb/sdk/data_types/helper_types/classes.py 
b/parrot/lib/python3.10/site-packages/wandb/sdk/data_types/helper_types/classes.py new file mode 100644 index 0000000000000000000000000000000000000000..54a22054aa44ad58de5b46a72b1123d73bc8836d --- /dev/null +++ b/parrot/lib/python3.10/site-packages/wandb/sdk/data_types/helper_types/classes.py @@ -0,0 +1,159 @@ +import os +from typing import TYPE_CHECKING, Any, Dict, Optional, Sequence, Type, Union + +from .. import _dtypes +from ..base_types.media import Media + +if TYPE_CHECKING: # pragma: no cover + from wandb.sdk.artifacts.artifact import Artifact + + from ...wandb_run import Run as LocalRun + + +class Classes(Media): + _log_type = "classes" + + _class_set: Sequence[dict] + + def __init__(self, class_set: Sequence[dict]) -> None: + """Classes is holds class metadata intended to be used in concert with other objects when visualizing artifacts. + + Args: + class_set (list): list of dicts in the form of {"id":int|str, "name":str} + """ + super().__init__() + for class_obj in class_set: + assert "id" in class_obj and "name" in class_obj + self._class_set = class_set + + @classmethod + def from_json( + cls: Type["Classes"], + json_obj: dict, + source_artifact: Optional["Artifact"], + ) -> "Classes": + return cls(json_obj.get("class_set")) # type: ignore + + def to_json(self, run_or_artifact: Optional[Union["LocalRun", "Artifact"]]) -> dict: + json_obj = {} + # This is a bit of a hack to allow _ClassesIdType to + # be able to operate fully without an artifact in play. + # In all other cases, artifact should be a true artifact. 
+ if run_or_artifact is not None: + json_obj = super().to_json(run_or_artifact) + json_obj["_type"] = Classes._log_type + json_obj["class_set"] = self._class_set + return json_obj + + def get_type(self) -> "_ClassesIdType": + return _ClassesIdType(self) + + def __ne__(self, other: object) -> bool: + return not self.__eq__(other) + + def __eq__(self, other: object) -> bool: + if isinstance(other, Classes): + return self._class_set == other._class_set + else: + return False + + +class _ClassesIdType(_dtypes.Type): + name = "classesId" + legacy_names = ["wandb.Classes_id"] + types = [Classes] + + def __init__( + self, + classes_obj: Optional[Classes] = None, + valid_ids: Optional["_dtypes.UnionType"] = None, + ): + if valid_ids is None: + valid_ids = _dtypes.UnionType() + elif isinstance(valid_ids, list): + valid_ids = _dtypes.UnionType( + [_dtypes.ConstType(item) for item in valid_ids] + ) + elif isinstance(valid_ids, _dtypes.UnionType): + valid_ids = valid_ids + else: + raise TypeError("valid_ids must be None, list, or UnionType") + + if classes_obj is None: + classes_obj = Classes( + [ + {"id": _id.params["val"], "name": str(_id.params["val"])} + for _id in valid_ids.params["allowed_types"] + ] + ) + elif not isinstance(classes_obj, Classes): + raise TypeError("valid_ids must be None, or instance of Classes") + else: + valid_ids = _dtypes.UnionType( + [ + _dtypes.ConstType(class_obj["id"]) + for class_obj in classes_obj._class_set + ] + ) + + self.wb_classes_obj_ref = classes_obj + self.params.update({"valid_ids": valid_ids}) + + def assign(self, py_obj: Optional[Any] = None) -> "_dtypes.Type": + return self.assign_type(_dtypes.ConstType(py_obj)) + + def assign_type(self, wb_type: "_dtypes.Type") -> "_dtypes.Type": + valid_ids = self.params["valid_ids"].assign_type(wb_type) + if not isinstance(valid_ids, _dtypes.InvalidType): + return self + + return _dtypes.InvalidType() + + @classmethod + def from_obj(cls, py_obj: Optional[Any] = None) -> "_dtypes.Type": + return 
cls(py_obj) + + def to_json(self, artifact: Optional["Artifact"] = None) -> Dict[str, Any]: + cl_dict = super().to_json(artifact) + # TODO (tss): Refactor this block with the similar one in wandb.Image. + # This is a bit of a smell that the classes object does not follow + # the same file-pattern as other media types. + if artifact is not None: + class_name = os.path.join("media", "cls") + classes_entry = artifact.add(self.wb_classes_obj_ref, class_name) + cl_dict["params"]["classes_obj"] = { + "type": "classes-file", + "path": classes_entry.path, + "digest": classes_entry.digest, # is this needed really? + } + else: + cl_dict["params"]["classes_obj"] = self.wb_classes_obj_ref.to_json(artifact) + return cl_dict + + @classmethod + def from_json( + cls, + json_dict: Dict[str, Any], + artifact: Optional["Artifact"] = None, + ) -> "_dtypes.Type": + classes_obj = None + if ( + json_dict.get("params", {}).get("classes_obj", {}).get("type") + == "classes-file" + ): + if artifact is not None: + classes_obj = artifact.get( + json_dict.get("params", {}).get("classes_obj", {}).get("path") + ) + assert classes_obj is None or isinstance(classes_obj, Classes) + else: + raise RuntimeError("Expected artifact to be non-null.") + else: + classes_obj = Classes.from_json( + json_dict["params"]["classes_obj"], artifact + ) + + return cls(classes_obj) + + +_dtypes.TypeRegistry.add(_ClassesIdType) diff --git a/parrot/lib/python3.10/site-packages/wandb/sdk/launch/__init__.py b/parrot/lib/python3.10/site-packages/wandb/sdk/launch/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..0f23b0223651a68c546497365bcfca08da922374 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/wandb/sdk/launch/__init__.py @@ -0,0 +1,14 @@ +from ._launch import launch +from ._launch_add import launch_add +from .agent.agent import LaunchAgent +from .inputs.manage import manage_config_file, manage_wandb_config +from .utils import load_wandb_config + +__all__ = [ + "LaunchAgent", + 
"launch", + "launch_add", + "load_wandb_config", + "manage_config_file", + "manage_wandb_config", +] diff --git a/parrot/lib/python3.10/site-packages/wandb/sdk/launch/_launch.py b/parrot/lib/python3.10/site-packages/wandb/sdk/launch/_launch.py new file mode 100644 index 0000000000000000000000000000000000000000..ed184af624f53191c5191768eeaeb16a8364d69d --- /dev/null +++ b/parrot/lib/python3.10/site-packages/wandb/sdk/launch/_launch.py @@ -0,0 +1,330 @@ +import asyncio +import logging +import os +import sys +from typing import Any, Dict, List, Optional, Tuple + +import yaml + +import wandb +from wandb.apis.internal import Api + +from . import loader +from ._project_spec import LaunchProject +from .agent import LaunchAgent +from .agent.agent import construct_agent_configs +from .environment.local_environment import LocalEnvironment +from .errors import ExecutionError, LaunchError +from .runner.abstract import AbstractRun +from .utils import ( + LAUNCH_CONFIG_FILE, + PROJECT_SYNCHRONOUS, + construct_launch_spec, + validate_launch_spec_source, +) + +_logger = logging.getLogger(__name__) + + +def set_launch_logfile(logfile: str) -> None: + """Set the logfile for the launch agent.""" + # Get logger of parent module + _launch_logger = logging.getLogger("wandb.sdk.launch") + if logfile == "-": + logfile_stream = sys.stdout + else: + try: + logfile_stream = open(logfile, "w") + # check if file is writable + except Exception as e: + wandb.termerror( + f"Could not open {logfile} for writing logs. Please check " + f"the path and permissions.\nError: {e}" + ) + return + + wandb.termlog( + f"Internal agent logs printing to {'stdout' if logfile == '-' else logfile}. 
" + ) + handler = logging.StreamHandler(logfile_stream) + handler.formatter = logging.Formatter( + "%(asctime)s %(levelname)-7s %(threadName)-10s:%(process)d " + "[%(filename)s:%(funcName)s():%(lineno)s] %(message)s" + ) + _launch_logger.addHandler(handler) + _launch_logger.log(logging.INFO, "Internal agent logs printing to %s", logfile) + + +def resolve_agent_config( # noqa: C901 + entity: Optional[str], + max_jobs: Optional[int], + queues: Optional[Tuple[str]], + config: Optional[str], + verbosity: Optional[int], +) -> Tuple[Dict[str, Any], Api]: + """Resolve the agent config. + + Arguments: + api (Api): The api. + entity (str): The entity. + max_jobs (int): The max number of jobs. + queues (Tuple[str]): The queues. + config (str): The config. + verbosity (int): How verbose to print, 0 or None = default, 1 = print status every 20 seconds, 2 = also print debugging information + + Returns: + Tuple[Dict[str, Any], Api]: The resolved config and api. + """ + defaults = { + "max_jobs": 1, + "max_schedulers": 1, + "queues": [], + "registry": {}, + "builder": {}, + "verbosity": 0, + } + resolved_config: Dict[str, Any] = defaults + config_path = config or os.path.expanduser(LAUNCH_CONFIG_FILE) + if os.path.isfile(config_path): + launch_config = {} + with open(config_path) as f: + try: + launch_config = yaml.safe_load(f) + # This is considered unreachable by mypy, but it's not. 
+ if launch_config is None: + launch_config = {} # type: ignore + except yaml.YAMLError as e: + raise LaunchError(f"Invalid launch agent config: {e}") + resolved_config.update(launch_config.items()) + elif config is not None: + raise LaunchError( + f"Could not find use specified launch config file: {config_path}" + ) + if os.environ.get("WANDB_ENTITY") is not None: + resolved_config.update({"entity": os.environ.get("WANDB_ENTITY")}) + if os.environ.get("WANDB_LAUNCH_MAX_JOBS") is not None: + resolved_config.update( + {"max_jobs": int(os.environ.get("WANDB_LAUNCH_MAX_JOBS", 1))} + ) + + if entity is not None: + resolved_config.update({"entity": entity}) + if max_jobs is not None: + resolved_config.update({"max_jobs": int(max_jobs)}) + if queues: + resolved_config.update({"queues": list(queues)}) + if verbosity: + resolved_config.update({"verbosity": int(verbosity)}) + # queue -> queues + if resolved_config.get("queue"): + if isinstance(resolved_config.get("queue"), str): + resolved_config["queues"].append(resolved_config["queue"]) + else: + raise LaunchError( + f"Invalid launch agent config for key 'queue' with type: {type(resolved_config.get('queue'))}" + + " (expected str). Specify multiple queues with the 'queues' key" + ) + + keys = ["entity"] + settings = { + k: resolved_config.get(k) for k in keys if resolved_config.get(k) is not None + } + + api = Api(default_settings=settings) + + if resolved_config.get("entity") is None: + resolved_config.update({"entity": api.default_entity}) + + return resolved_config, api + + +def create_and_run_agent( + api: Api, + config: Dict[str, Any], +) -> None: + try: + from wandb.sdk.launch.agent import config as agent_config + except ModuleNotFoundError: + raise LaunchError( + "wandb launch-agent requires pydantic to be installed. 
" + "Please install with `pip install wandb[launch]`" + ) + try: + agent_config.AgentConfig(**config) + except agent_config.ValidationError as e: + errors = e.errors() + for error in errors: + loc = ".".join([str(x) for x in error.get("loc", [])]) + msg = f"Agent config error in field {loc}" + value = error.get("input") + if not isinstance(value, dict): + msg += f" (value: {value})" + msg += f": {error['msg']}" + wandb.termerror(msg) + raise LaunchError("Invalid launch agent config") + agent = LaunchAgent(api, config) + try: + asyncio.run(agent.loop()) + except asyncio.CancelledError: + pass + + +async def _launch( + api: Api, + job: Optional[str] = None, + name: Optional[str] = None, + project: Optional[str] = None, + entity: Optional[str] = None, + docker_image: Optional[str] = None, + entry_point: Optional[List[str]] = None, + version: Optional[str] = None, + resource: Optional[str] = None, + resource_args: Optional[Dict[str, Any]] = None, + launch_config: Optional[Dict[str, Any]] = None, + synchronous: Optional[bool] = None, + run_id: Optional[str] = None, + repository: Optional[str] = None, +) -> AbstractRun: + """Helper that delegates to the project-running method corresponding to the passed-in backend.""" + if launch_config is None: + launch_config = {} + if resource is None: + resource = "local-container" + launch_spec = construct_launch_spec( + None, + job, + api, + name, + project, + entity, + docker_image, + resource, + entry_point, + version, + resource_args, + launch_config, + run_id, + repository, + author=None, + ) + validate_launch_spec_source(launch_spec) + launch_project = LaunchProject.from_spec(launch_spec, api) + launch_project.fetch_and_validate_project() + entrypoint = launch_project.get_job_entry_point() + image_uri = ( + launch_project.docker_image or launch_project.job_base_image + ) # Either set by user or None. + + # construct runner config. 
+ runner_config: Dict[str, Any] = {} + runner_config[PROJECT_SYNCHRONOUS] = synchronous + + config = launch_config or {} + environment_config, build_config, registry_config = construct_agent_configs(config) + environment = loader.environment_from_config(environment_config) + if environment is not None and not isinstance(environment, LocalEnvironment): + await environment.verify() + registry = loader.registry_from_config(registry_config, environment) + builder = loader.builder_from_config(build_config, environment, registry) + if not (launch_project.docker_image or launch_project.job_base_image): + assert entrypoint + image_uri = await builder.build_image(launch_project, entrypoint, None) + backend = loader.runner_from_config( + resource, api, runner_config, environment, registry + ) + if backend: + assert image_uri + submitted_run = await backend.run(launch_project, image_uri) + # this check will always pass, run is only optional in the agent case where + # a run queue id is present on the backend config + assert submitted_run + return submitted_run + else: + raise ExecutionError( + f"Unavailable backend {resource}, available backends: {', '.join(loader.WANDB_RUNNERS)}" + ) + + +def launch( + api: Api, + job: Optional[str] = None, + entry_point: Optional[List[str]] = None, + version: Optional[str] = None, + name: Optional[str] = None, + resource: Optional[str] = None, + resource_args: Optional[Dict[str, Any]] = None, + project: Optional[str] = None, + entity: Optional[str] = None, + docker_image: Optional[str] = None, + config: Optional[Dict[str, Any]] = None, + synchronous: Optional[bool] = True, + run_id: Optional[str] = None, + repository: Optional[str] = None, +) -> AbstractRun: + """Launch a W&B launch experiment. + + Arguments: + job: string reference to a wandb.Job eg: wandb/test/my-job:latest + api: An instance of a wandb Api from wandb.apis.internal. + entry_point: Entry point to run within the project. 
Defaults to using the entry point used + in the original run for wandb URIs, or main.py for git repository URIs. + version: For Git-based projects, either a commit hash or a branch name. + name: Name run under which to launch the run. + resource: Execution backend for the run. + resource_args: Resource related arguments for launching runs onto a remote backend. + Will be stored on the constructed launch config under ``resource_args``. + project: Target project to send launched run to + entity: Target entity to send launched run to + config: A dictionary containing the configuration for the run. May also contain + resource specific arguments under the key "resource_args". + synchronous: Whether to block while waiting for a run to complete. Defaults to True. + Note that if ``synchronous`` is False and ``backend`` is "local-container", this + method will return, but the current process will block when exiting until + the local run completes. If the current process is interrupted, any + asynchronous runs launched via this method will be terminated. If + ``synchronous`` is True and the run fails, the current process will + error out as well. + run_id: ID for the run (To ultimately replace the :name: field) + repository: string name of repository path for remote registry + + Example: + ```python + from wandb.sdk.launch import launch + + job = "wandb/jobs/Hello World:latest" + params = {"epochs": 5} + # Run W&B project and create a reproducible docker environment + # on a local host + api = wandb.apis.internal.Api() + launch(api, job, parameters=params) + ``` + + + Returns: + an instance of`wandb.launch.SubmittedRun` exposing information (e.g. run ID) + about the launched run. + + Raises: + `wandb.exceptions.ExecutionError` If a run launched in blocking mode + is unsuccessful. 
+ """ + submitted_run_obj = asyncio.run( + _launch( + job=job, + name=name, + project=project, + entity=entity, + docker_image=docker_image, + entry_point=entry_point, + version=version, + resource=resource, + resource_args=resource_args, + launch_config=config, + synchronous=synchronous, + api=api, + run_id=run_id, + repository=repository, + ) + ) + + return submitted_run_obj diff --git a/parrot/lib/python3.10/site-packages/wandb/sdk/launch/_launch_add.py b/parrot/lib/python3.10/site-packages/wandb/sdk/launch/_launch_add.py new file mode 100644 index 0000000000000000000000000000000000000000..3af61a670efce6af6033d72c2bc66a52853dfb20 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/wandb/sdk/launch/_launch_add.py @@ -0,0 +1,255 @@ +import asyncio +import pprint +from typing import Any, Dict, List, Optional, Union + +import wandb +import wandb.apis.public as public +from wandb.apis.internal import Api +from wandb.errors import CommError +from wandb.sdk.launch.builder.build import build_image_from_project +from wandb.sdk.launch.errors import LaunchError +from wandb.sdk.launch.utils import ( + LAUNCH_DEFAULT_PROJECT, + LOG_PREFIX, + construct_launch_spec, + validate_launch_spec_source, +) + +from ._project_spec import LaunchProject + + +def push_to_queue( + api: Api, + queue_name: str, + launch_spec: Dict[str, Any], + template_variables: Optional[dict], + project_queue: str, + priority: Optional[int] = None, +) -> Any: + return api.push_to_run_queue( + queue_name, launch_spec, template_variables, project_queue, priority + ) + + +def launch_add( + uri: Optional[str] = None, + job: Optional[str] = None, + config: Optional[Dict[str, Any]] = None, + template_variables: Optional[Dict[str, Union[float, int, str]]] = None, + project: Optional[str] = None, + entity: Optional[str] = None, + queue_name: Optional[str] = None, + resource: Optional[str] = None, + entry_point: Optional[List[str]] = None, + name: Optional[str] = None, + version: Optional[str] = None, + 
docker_image: Optional[str] = None, + project_queue: Optional[str] = None, + resource_args: Optional[Dict[str, Any]] = None, + run_id: Optional[str] = None, + build: Optional[bool] = False, + repository: Optional[str] = None, + sweep_id: Optional[str] = None, + author: Optional[str] = None, + priority: Optional[int] = None, +) -> "public.QueuedRun": + """Enqueue a W&B launch experiment. With either a source uri, job or docker_image. + + Arguments: + uri: URI of experiment to run. A wandb run uri or a Git repository URI. + job: string reference to a wandb.Job eg: wandb/test/my-job:latest + config: A dictionary containing the configuration for the run. May also contain + resource specific arguments under the key "resource_args" + template_variables: A dictionary containing values of template variables for a run queue. + Expected format of {"VAR_NAME": VAR_VALUE} + project: Target project to send launched run to + entity: Target entity to send launched run to + queue: the name of the queue to enqueue the run to + priority: the priority level of the job, where 1 is the highest priority + resource: Execution backend for the run: W&B provides built-in support for "local-container" backend + entry_point: Entry point to run within the project. Defaults to using the entry point used + in the original run for wandb URIs, or main.py for git repository URIs. + name: Name run under which to launch the run. + version: For Git-based projects, either a commit hash or a branch name. + docker_image: The name of the docker image to use for the run. + resource_args: Resource related arguments for launching runs onto a remote backend. + Will be stored on the constructed launch config under ``resource_args``. 
+ run_id: optional string indicating the id of the launched run + build: optional flag defaulting to false, requires queue to be set + if build, an image is created, creates a job artifact, pushes a reference + to that job artifact to queue + repository: optional string to control the name of the remote repository, used when + pushing images to a registry + project_queue: optional string to control the name of the project for the queue. Primarily used + for back compatibility with project scoped queues + + + Example: + ```python + from wandb.sdk.launch import launch_add + + project_uri = "https://github.com/wandb/examples" + params = {"alpha": 0.5, "l1_ratio": 0.01} + # Run W&B project and create a reproducible docker environment + # on a local host + api = wandb.apis.internal.Api() + launch_add(uri=project_uri, parameters=params) + ``` + + + Returns: + an instance of`wandb.api.public.QueuedRun` which gives information about the + queued run, or if `wait_until_started` or `wait_until_finished` are called, gives access + to the underlying Run information. 
+ + Raises: + `wandb.exceptions.LaunchError` if unsuccessful + """ + api = Api() + + return _launch_add( + api, + job, + config, + template_variables, + project, + entity, + queue_name, + resource, + entry_point, + name, + version, + docker_image, + project_queue, + resource_args, + run_id=run_id, + build=build, + repository=repository, + sweep_id=sweep_id, + author=author, + priority=priority, + ) + + +def _launch_add( + api: Api, + job: Optional[str], + config: Optional[Dict[str, Any]], + template_variables: Optional[dict], + project: Optional[str], + entity: Optional[str], + queue_name: Optional[str], + resource: Optional[str], + entry_point: Optional[List[str]], + name: Optional[str], + version: Optional[str], + docker_image: Optional[str], + project_queue: Optional[str], + resource_args: Optional[Dict[str, Any]] = None, + run_id: Optional[str] = None, + build: Optional[bool] = False, + repository: Optional[str] = None, + sweep_id: Optional[str] = None, + author: Optional[str] = None, + priority: Optional[int] = None, +) -> "public.QueuedRun": + launch_spec = construct_launch_spec( + None, + job, + api, + name, + project, + entity, + docker_image, + resource, + entry_point, + version, + resource_args, + config, + run_id, + repository, + author, + sweep_id, + ) + + if build: + if resource == "local-process": + raise LaunchError( + "Cannot build a docker image for the resource: local-process" + ) + + if launch_spec.get("job") is not None: + wandb.termwarn("Build doesn't support setting a job. 
Overwriting job.") + launch_spec["job"] = None + + launch_project = LaunchProject.from_spec(launch_spec, api) + docker_image_uri = asyncio.run( + build_image_from_project(launch_project, api, config or {}) + ) + run = wandb.run or wandb.init( + project=launch_spec["project"], + entity=launch_spec["entity"], + job_type="launch_job", + ) + + job_artifact = run._log_job_artifact_with_image( # type: ignore + docker_image_uri, launch_project.override_args + ) + job_name = job_artifact.wait().name + + job = f"{launch_spec['entity']}/{launch_spec['project']}/{job_name}" + launch_spec["job"] = job + launch_spec["uri"] = None # Remove given URI --> now in job + + if queue_name is None: + queue_name = "default" + if project_queue is None: + project_queue = LAUNCH_DEFAULT_PROJECT + spec_template_vars = launch_spec.get("template_variables") + if isinstance(spec_template_vars, dict): + launch_spec.pop("template_variables") + if template_variables is None: + template_variables = spec_template_vars + else: + template_variables = { + **spec_template_vars, + **template_variables, + } + + validate_launch_spec_source(launch_spec) + res = push_to_queue( + api, queue_name, launch_spec, template_variables, project_queue, priority + ) + + if res is None or "runQueueItemId" not in res: + raise LaunchError("Error adding run to queue") + + updated_spec = res.get("runSpec") + if updated_spec: + if updated_spec.get("resource_args"): + launch_spec["resource_args"] = updated_spec.get("resource_args") + if updated_spec.get("resource"): + launch_spec["resource"] = updated_spec.get("resource") + + if project_queue == LAUNCH_DEFAULT_PROJECT: + wandb.termlog(f"{LOG_PREFIX}Added run to queue {queue_name}.") + else: + wandb.termlog(f"{LOG_PREFIX}Added run to queue {project_queue}/{queue_name}.") + wandb.termlog(f"{LOG_PREFIX}Launch spec:\n{pprint.pformat(launch_spec)}\n") + + public_api = public.Api() + if job is not None: + try: + public_api.artifact(job, type="job") + except (ValueError, CommError) 
as e: + raise LaunchError(f"Unable to fetch job with name {job}: {e}") + + queued_run = public_api.queued_run( + launch_spec["entity"], + launch_spec["project"], + queue_name, + res["runQueueItemId"], + project_queue, + priority, + ) + return queued_run # type: ignore diff --git a/parrot/lib/python3.10/site-packages/wandb/sdk/launch/_project_spec.py b/parrot/lib/python3.10/site-packages/wandb/sdk/launch/_project_spec.py new file mode 100644 index 0000000000000000000000000000000000000000..399e33e7a543ceed1edcf7c00d0853b959d3d064 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/wandb/sdk/launch/_project_spec.py @@ -0,0 +1,566 @@ +"""Convert launch arguments into a runnable wandb launch script. + +Arguments can come from a launch spec or call to wandb launch. +""" + +import enum +import json +import logging +import os +import shutil +import tempfile +from copy import deepcopy +from typing import TYPE_CHECKING, Any, Dict, List, Optional, cast + +from six.moves import shlex_quote + +import wandb +from wandb.apis.internal import Api +from wandb.errors import CommError +from wandb.sdk.launch.utils import get_entrypoint_file +from wandb.sdk.lib.runid import generate_id + +from .errors import LaunchError +from .utils import LOG_PREFIX, recursive_macro_sub + +if TYPE_CHECKING: + from wandb.sdk.artifacts.artifact import Artifact + +_logger = logging.getLogger(__name__) + + +# need to make user root for sagemaker, so users have access to /opt/ml directories +# that let users create artifacts and access input data +RESOURCE_UID_MAP = {"local": 1000, "sagemaker": 0} +IMAGE_TAG_MAX_LENGTH = 32 + + +class LaunchSource(enum.IntEnum): + """Enumeration of possible sources for a launch project. + + Attributes: + DOCKER: Source is a Docker image. This can happen if a user runs + `wandb launch -d `. + JOB: Source is a job. This is standard case. + SCHEDULER: Source is a wandb sweep scheduler command. 
+ """ + + DOCKER: int = 1 + JOB: int = 2 + SCHEDULER: int = 3 + + +class LaunchProject: + """A launch project specification. + + The LaunchProject is initialized from a raw launch spec an internal API + object. The project encapsulates logic for taking a launch spec and converting + it into the executable code. + + The LaunchProject needs to ultimately produce a full container spec for + execution in docker, k8s, sagemaker, or vertex. This container spec includes: + - container image uri + - environment variables for configuring wandb etc. + - entrypoint command and arguments + - additional arguments specific to the target resource (e.g. instance type, node selector) + + This class is stateful and certain methods can only be called after + `LaunchProject.fetch_and_validate_project()` has been called. + + Notes on the entrypoint: + - The entrypoint is the command that will be run inside the container. + - The LaunchProject stores two entrypoints + - The job entrypoint is the entrypoint specified in the job's config. + - The override entrypoint is the entrypoint specified in the launch spec. + - The override entrypoint takes precedence over the job entrypoint. + """ + + # This init is way to long, and there are too many attributes on this sucker. 
+ def __init__( + self, + uri: Optional[str], + job: Optional[str], + api: Api, + launch_spec: Dict[str, Any], + target_entity: str, + target_project: str, + name: Optional[str], + docker_config: Dict[str, Any], + git_info: Dict[str, str], + overrides: Dict[str, Any], + resource: str, + resource_args: Dict[str, Any], + run_id: Optional[str], + sweep_id: Optional[str] = None, + ): + self.uri = uri + self.job = job + if job is not None: + wandb.termlog(f"{LOG_PREFIX}Launching job: {job}") + self._job_artifact: Optional[Artifact] = None + self.api = api + self.launch_spec = launch_spec + self.target_entity = target_entity + self.target_project = target_project.lower() + self.name = name # TODO: replace with run_id + # the builder key can be passed in through the resource args + # but these resource_args are then passed to the appropriate + # runner, so we need to pop the builder key out + resource_args_copy = deepcopy(resource_args) + resource_args_build = resource_args_copy.get(resource, {}).pop("builder", {}) + self.resource = resource + self.resource_args = resource_args_copy + self.sweep_id = sweep_id + self.author = launch_spec.get("author") + self.python_version: Optional[str] = launch_spec.get("python_version") + self._job_dockerfile: Optional[str] = None + self._job_build_context: Optional[str] = None + self._job_base_image: Optional[str] = None + self.accelerator_base_image: Optional[str] = resource_args_build.get( + "accelerator", {} + ).get("base_image") or resource_args_build.get("cuda", {}).get("base_image") + self.docker_image: Optional[str] = docker_config.get( + "docker_image" + ) or launch_spec.get("image_uri") + self.docker_user_id = docker_config.get("user_id", 1000) + self._entry_point: Optional[EntryPoint] = ( + None # todo: keep multiple entrypoint support? 
+ ) + self.init_overrides(overrides) + self.init_source() + self.init_git(git_info) + self.deps_type: Optional[str] = None + self._runtime: Optional[str] = None + self.run_id = run_id or generate_id() + self._queue_name: Optional[str] = None + self._queue_entity: Optional[str] = None + self._run_queue_item_id: Optional[str] = None + + def init_source(self) -> None: + if self.docker_image is not None: + self.source = LaunchSource.DOCKER + self.project_dir = None + elif self.job is not None: + self.source = LaunchSource.JOB + self.project_dir = tempfile.mkdtemp() + elif self.uri and self.uri.startswith("placeholder"): + self.source = LaunchSource.SCHEDULER + self.project_dir = os.getcwd() + self._entry_point = self.override_entrypoint + + def change_project_dir(self, new_dir: str) -> None: + """Change the project directory to a new directory.""" + # Copy the contents of the old project dir to the new project dir. + old_dir = self.project_dir + if old_dir is not None: + shutil.copytree( + old_dir, + new_dir, + symlinks=True, + dirs_exist_ok=True, + ignore=shutil.ignore_patterns("fsmonitor--daemon.ipc", ".git"), + ) + shutil.rmtree(old_dir) + self.project_dir = new_dir + + def init_git(self, git_info: Dict[str, str]) -> None: + self.git_version = git_info.get("version") + self.git_repo = git_info.get("repo") + + def init_overrides(self, overrides: Dict[str, Any]) -> None: + """Initialize override attributes for a launch project.""" + self.overrides = overrides + self.override_args: List[str] = overrides.get("args", []) + self.override_config: Dict[str, Any] = overrides.get("run_config", {}) + self.override_artifacts: Dict[str, Any] = overrides.get("artifacts", {}) + self.override_files: Dict[str, Any] = overrides.get("files", {}) + self.override_entrypoint: Optional[EntryPoint] = None + self.override_dockerfile: Optional[str] = overrides.get("dockerfile") + override_entrypoint = overrides.get("entry_point") + if override_entrypoint: + _logger.info("Adding override 
entry point") + self.override_entrypoint = EntryPoint( + name=get_entrypoint_file(override_entrypoint), + command=override_entrypoint, + ) + + def __repr__(self) -> str: + """String representation of LaunchProject.""" + if self.source == LaunchSource.JOB: + return f"{self.job}" + return f"{self.uri}" + + @classmethod + def from_spec(cls, launch_spec: Dict[str, Any], api: Api) -> "LaunchProject": + """Constructs a LaunchProject instance using a launch spec. + + Arguments: + launch_spec: Dictionary representation of launch spec + api: Instance of wandb.apis.internal Api + + Returns: + An initialized `LaunchProject` object + """ + name: Optional[str] = None + if launch_spec.get("name"): + name = launch_spec["name"] + return LaunchProject( + launch_spec.get("uri"), + launch_spec.get("job"), + api, + launch_spec, + launch_spec["entity"], + launch_spec["project"], + name, + launch_spec.get("docker", {}), + launch_spec.get("git", {}), + launch_spec.get("overrides", {}), + launch_spec.get("resource", None), + launch_spec.get("resource_args", {}), + launch_spec.get("run_id", None), + launch_spec.get("sweep_id", {}), + ) + + @property + def job_dockerfile(self) -> Optional[str]: + return self._job_dockerfile + + @property + def job_build_context(self) -> Optional[str]: + return self._job_build_context + + @property + def job_base_image(self) -> Optional[str]: + return self._job_base_image + + def set_job_dockerfile(self, dockerfile: str) -> None: + self._job_dockerfile = dockerfile + + def set_job_build_context(self, build_context: str) -> None: + self._job_build_context = build_context + + def set_job_base_image(self, base_image: str) -> None: + self._job_base_image = base_image + + @property + def image_name(self) -> str: + if self.job_base_image is not None: + return self.job_base_image + if self.docker_image is not None: + return self.docker_image + elif self.uri is not None: + cleaned_uri = self.uri.replace("https://", "/") + first_sep = cleaned_uri.find("/") + 
shortened_uri = cleaned_uri[first_sep:] + return wandb.util.make_docker_image_name_safe(shortened_uri) + else: + # this will always pass since one of these 3 is required + assert self.job is not None + return wandb.util.make_docker_image_name_safe(self.job.split(":")[0]) + + @property + def queue_name(self) -> Optional[str]: + return self._queue_name + + @queue_name.setter + def queue_name(self, value: str) -> None: + self._queue_name = value + + @property + def queue_entity(self) -> Optional[str]: + return self._queue_entity + + @queue_entity.setter + def queue_entity(self, value: str) -> None: + self._queue_entity = value + + @property + def run_queue_item_id(self) -> Optional[str]: + return self._run_queue_item_id + + @run_queue_item_id.setter + def run_queue_item_id(self, value: str) -> None: + self._run_queue_item_id = value + + def fill_macros(self, image: str) -> Dict[str, Any]: + """Substitute values for macros in resource arguments. + + Certain macros can be used in resource args. These macros allow the + user to set resource args dynamically in the context of the + run being launched. The macros are given in the ${macro} format. The + following macros are currently supported: + + ${project_name} - the name of the project the run is being launched to. + ${entity_name} - the owner of the project the run being launched to. + ${run_id} - the id of the run being launched. + ${run_name} - the name of the run that is launching. + ${image_uri} - the URI of the container image for this run. + + Additionally, you may use ${} to refer to the value of any + environment variables that you plan to set in the environment of any + agents that will receive these resource args. + + Calling this method will overwrite the contents of self.resource_args + with the substituted values. + + Args: + image (str): The image name to fill in for ${wandb-image}. + + Returns: + Dict[str, Any]: The resource args with all macros filled in. 
+ """ + update_dict = { + "project_name": self.target_project, + "entity_name": self.target_entity, + "run_id": self.run_id, + "run_name": self.name, + "image_uri": image, + "author": self.author, + } + update_dict.update(os.environ) + result = recursive_macro_sub(self.resource_args, update_dict) + # recursive_macro_sub given a dict returns a dict with the same keys + # but with other input types behaves differently. The cast is for mypy. + return cast(Dict[str, Any], result) + + def build_required(self) -> bool: + """Checks the source to see if a build is required.""" + if self.job_base_image is not None: + return False + if self.source != LaunchSource.JOB: + return True + return False + + @property + def docker_image(self) -> Optional[str]: + """Returns the Docker image associated with this LaunchProject. + + This will only be set if an image_uri is being run outside a job. + + Returns: + Optional[str]: The Docker image or None if not specified. + """ + if self._docker_image: + return self._docker_image + return None + + @docker_image.setter + def docker_image(self, value: str) -> None: + """Sets the Docker image for the project. + + Args: + value (str): The Docker image to set. + + Returns: + None + """ + self._docker_image = value + self._ensure_not_docker_image_and_local_process() + + def get_job_entry_point(self) -> Optional["EntryPoint"]: + """Returns the job entrypoint for the project.""" + # assuming project only has 1 entry point, pull that out + # tmp fn until we figure out if we want to support multiple entry points or not + if not self._entry_point: + if not self.docker_image and not self.job_base_image: + raise LaunchError( + "Project must have at least one entry point unless docker image is specified." + ) + return None + return self._entry_point + + def set_job_entry_point(self, command: List[str]) -> "EntryPoint": + """Set job entrypoint for the project.""" + assert ( + self._entry_point is None + ), "Cannot set entry point twice. 
Use LaunchProject.override_entrypoint" + new_entrypoint = EntryPoint(name=command[-1], command=command) + self._entry_point = new_entrypoint + return new_entrypoint + + def fetch_and_validate_project(self) -> None: + """Fetches a project into a local directory, adds the config values to the directory, and validates the first entrypoint for the project. + + Arguments: + launch_project: LaunchProject to fetch and validate. + api: Instance of wandb.apis.internal Api + + Returns: + A validated `LaunchProject` object. + + """ + if self.source == LaunchSource.DOCKER: + return + elif self.source == LaunchSource.JOB: + self._fetch_job() + assert self.project_dir is not None + + # Let's make sure we document this very clearly. + def get_image_source_string(self) -> str: + """Returns a unique string identifying the source of an image.""" + if self.source == LaunchSource.JOB: + assert self._job_artifact is not None + return f"{self._job_artifact.name}:v{self._job_artifact.version}" + elif self.source == LaunchSource.DOCKER: + assert isinstance(self.docker_image, str) + return self.docker_image + else: + raise LaunchError( + "Unknown source type when determining image source string" + ) + + def _ensure_not_docker_image_and_local_process(self) -> None: + """Ensure that docker image is not specified with local-process resource runner. + + Raises: + LaunchError: If docker image is specified with local-process resource runner. + """ + if self.docker_image is not None and self.resource == "local-process": + raise LaunchError( + "Cannot specify docker image with local-process resource runner" + ) + + def _fetch_job(self) -> None: + """Fetches the job details from the public API and configures the launch project. + + Raises: + LaunchError: If there is an error accessing the job. 
+ """ + public_api = wandb.apis.public.Api() + job_dir = tempfile.mkdtemp() + try: + job = public_api.job(self.job, path=job_dir) + except CommError as e: + msg = e.message + raise LaunchError( + f"Error accessing job {self.job}: {msg} on {public_api.settings.get('base_url')}" + ) + job.configure_launch_project(self) # Why is this a method of the job? + self._job_artifact = job._job_artifact + + def get_env_vars_dict(self, api: Api, max_env_length: int) -> Dict[str, str]: + """Generate environment variables for the project. + + Arguments: + launch_project: LaunchProject to generate environment variables for. + + Returns: + Dictionary of environment variables. + """ + env_vars = {} + env_vars["WANDB_BASE_URL"] = api.settings("base_url") + override_api_key = self.launch_spec.get("_wandb_api_key") + env_vars["WANDB_API_KEY"] = override_api_key or api.api_key + if self.target_project: + env_vars["WANDB_PROJECT"] = self.target_project + env_vars["WANDB_ENTITY"] = self.target_entity + env_vars["WANDB_LAUNCH"] = "True" + env_vars["WANDB_RUN_ID"] = self.run_id + if self.docker_image: + env_vars["WANDB_DOCKER"] = self.docker_image + if self.name is not None: + env_vars["WANDB_NAME"] = self.name + if "author" in self.launch_spec and not override_api_key: + env_vars["WANDB_USERNAME"] = self.launch_spec["author"] + if self.sweep_id: + env_vars["WANDB_SWEEP_ID"] = self.sweep_id + if self.launch_spec.get("_resume_count", 0) > 0: + env_vars["WANDB_RESUME"] = "allow" + if self.queue_name: + env_vars[wandb.env.LAUNCH_QUEUE_NAME] = self.queue_name + if self.queue_entity: + env_vars[wandb.env.LAUNCH_QUEUE_ENTITY] = self.queue_entity + if self.run_queue_item_id: + env_vars[wandb.env.LAUNCH_TRACE_ID] = self.run_queue_item_id + + _inject_wandb_config_env_vars(self.override_config, env_vars, max_env_length) + _inject_file_overrides_env_vars(self.override_files, env_vars, max_env_length) + + artifacts = {} + # if we're spinning up a launch process from a job + # we should tell the run to 
use that artifact + if self.job: + artifacts = {wandb.util.LAUNCH_JOB_ARTIFACT_SLOT_NAME: self.job} + env_vars["WANDB_ARTIFACTS"] = json.dumps( + {**artifacts, **self.override_artifacts} + ) + return env_vars + + def parse_existing_requirements(self) -> str: + import pkg_resources + + requirements_line = "" + assert self.project_dir is not None + base_requirements = os.path.join(self.project_dir, "requirements.txt") + if os.path.exists(base_requirements): + include_only = set() + with open(base_requirements) as f: + iter = pkg_resources.parse_requirements(f) + while True: + try: + pkg = next(iter) + if hasattr(pkg, "name"): + name = pkg.name.lower() + else: + name = str(pkg) + include_only.add(shlex_quote(name)) + except StopIteration: + break + # Different versions of pkg_resources throw different errors + # just catch them all and ignore packages we can't parse + except Exception as e: + _logger.warn(f"Unable to parse requirements.txt: {e}") + continue + requirements_line += "WANDB_ONLY_INCLUDE={} ".format(",".join(include_only)) + if "wandb" not in requirements_line: + wandb.termwarn(f"{LOG_PREFIX}wandb is not present in requirements.txt.") + return requirements_line + + +class EntryPoint: + """An entry point into a wandb launch specification.""" + + def __init__(self, name: Optional[str], command: List[str]): + self.name = name + self.command = command + + def update_entrypoint_path(self, new_path: str) -> None: + """Updates the entrypoint path to a new path.""" + if len(self.command) == 2 and ( + self.command[0].startswith("python") or self.command[0] == "bash" + ): + self.command[1] = new_path + + +def _inject_wandb_config_env_vars( + config: Dict[str, Any], env_dict: Dict[str, Any], maximum_env_length: int +) -> None: + str_config = json.dumps(config) + if len(str_config) <= maximum_env_length: + env_dict["WANDB_CONFIG"] = str_config + return + + chunks = [ + str_config[i : i + maximum_env_length] + for i in range(0, len(str_config), maximum_env_length) + ] 
+ config_chunks_dict = {f"WANDB_CONFIG_{i}": chunk for i, chunk in enumerate(chunks)} + env_dict.update(config_chunks_dict) + + +def _inject_file_overrides_env_vars( + overrides: Dict[str, Any], env_dict: Dict[str, Any], maximum_env_length: int +) -> None: + str_overrides = json.dumps(overrides) + if len(str_overrides) <= maximum_env_length: + env_dict["WANDB_LAUNCH_FILE_OVERRIDES"] = str_overrides + return + + chunks = [ + str_overrides[i : i + maximum_env_length] + for i in range(0, len(str_overrides), maximum_env_length) + ] + overrides_chunks_dict = { + f"WANDB_LAUNCH_FILE_OVERRIDES_{i}": chunk for i, chunk in enumerate(chunks) + } + env_dict.update(overrides_chunks_dict) diff --git a/parrot/lib/python3.10/site-packages/wandb/sdk/launch/agent/__init__.py b/parrot/lib/python3.10/site-packages/wandb/sdk/launch/agent/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..838a7886c290fced892b06b8fee5137cbcb278af --- /dev/null +++ b/parrot/lib/python3.10/site-packages/wandb/sdk/launch/agent/__init__.py @@ -0,0 +1,5 @@ +from .agent import LaunchAgent + +LaunchAgent = LaunchAgent + +__all__ = ["LaunchAgent"] diff --git a/parrot/lib/python3.10/site-packages/wandb/sdk/launch/agent/__pycache__/__init__.cpython-310.pyc b/parrot/lib/python3.10/site-packages/wandb/sdk/launch/agent/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0696979c98f8a06c8930f2c989404530d19c09bb Binary files /dev/null and b/parrot/lib/python3.10/site-packages/wandb/sdk/launch/agent/__pycache__/__init__.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/wandb/sdk/launch/agent/__pycache__/agent.cpython-310.pyc b/parrot/lib/python3.10/site-packages/wandb/sdk/launch/agent/__pycache__/agent.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4a2f90d3db57d3c040690f3c59072e10da19b6d6 Binary files /dev/null and 
b/parrot/lib/python3.10/site-packages/wandb/sdk/launch/agent/__pycache__/agent.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/wandb/sdk/launch/agent/__pycache__/config.cpython-310.pyc b/parrot/lib/python3.10/site-packages/wandb/sdk/launch/agent/__pycache__/config.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..05ef49e2819f53c7342a423cbdaa2aa2bfcb5e77 Binary files /dev/null and b/parrot/lib/python3.10/site-packages/wandb/sdk/launch/agent/__pycache__/config.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/wandb/sdk/launch/agent/__pycache__/job_status_tracker.cpython-310.pyc b/parrot/lib/python3.10/site-packages/wandb/sdk/launch/agent/__pycache__/job_status_tracker.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6f5f967f75b9f7240f4966530a5deff523ce9039 Binary files /dev/null and b/parrot/lib/python3.10/site-packages/wandb/sdk/launch/agent/__pycache__/job_status_tracker.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/wandb/sdk/launch/agent/__pycache__/run_queue_item_file_saver.cpython-310.pyc b/parrot/lib/python3.10/site-packages/wandb/sdk/launch/agent/__pycache__/run_queue_item_file_saver.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7eed038c9030fec154abc492705641e0bef574e6 Binary files /dev/null and b/parrot/lib/python3.10/site-packages/wandb/sdk/launch/agent/__pycache__/run_queue_item_file_saver.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/wandb/sdk/launch/agent/agent.py b/parrot/lib/python3.10/site-packages/wandb/sdk/launch/agent/agent.py new file mode 100644 index 0000000000000000000000000000000000000000..e4b036cbdf7c2b33c376c10ae4b8e538722de6c9 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/wandb/sdk/launch/agent/agent.py @@ -0,0 +1,924 @@ +"""Implementation of launch agent.""" + +import asyncio +import logging +import os +import pprint +import 
threading +import time +import traceback +from dataclasses import dataclass +from multiprocessing import Event +from typing import Any, Dict, List, Optional, Tuple, Union + +import yaml + +import wandb +from wandb.apis.internal import Api +from wandb.errors import CommError +from wandb.sdk.launch._launch_add import launch_add +from wandb.sdk.launch.runner.local_container import LocalSubmittedRun +from wandb.sdk.launch.runner.local_process import LocalProcessRunner +from wandb.sdk.launch.sweeps.scheduler import Scheduler +from wandb.sdk.launch.utils import LAUNCH_CONFIG_FILE, resolve_build_and_registry_config +from wandb.sdk.lib import runid + +from .. import loader +from .._project_spec import LaunchProject +from ..errors import LaunchDockerError, LaunchError +from ..utils import ( + LAUNCH_DEFAULT_PROJECT, + LOG_PREFIX, + PROJECT_SYNCHRONOUS, + event_loop_thread_exec, +) +from .job_status_tracker import JobAndRunStatusTracker +from .run_queue_item_file_saver import RunQueueItemFileSaver + +AGENT_POLLING_INTERVAL = 10 +RECEIVED_JOB_POLLING_INTERVAL = 0.0 # more frequent when we know we have jobs + +AGENT_POLLING = "POLLING" +AGENT_RUNNING = "RUNNING" +AGENT_KILLED = "KILLED" + +HIDDEN_AGENT_RUN_TYPE = "sweep-controller" + +MAX_RESUME_COUNT = 5 + +RUN_INFO_GRACE_PERIOD = 60 + +DEFAULT_STOPPED_RUN_TIMEOUT = 60 + +DEFAULT_PRINT_INTERVAL = 5 * 60 +VERBOSE_PRINT_INTERVAL = 20 + +_env_timeout = os.environ.get("WANDB_LAUNCH_START_TIMEOUT") +if _env_timeout: + try: + RUN_START_TIMEOUT = float(_env_timeout) + except ValueError: + raise LaunchError( + f"Invalid value for WANDB_LAUNCH_START_TIMEOUT: {_env_timeout}" + ) +else: + RUN_START_TIMEOUT = 60 * 30 # default 30 minutes + +_logger = logging.getLogger(__name__) + + +@dataclass +class JobSpecAndQueue: + job: Dict[str, Any] + queue: str + + +def _convert_access(access: str) -> str: + """Convert access string to a value accepted by wandb.""" + access = access.upper() + assert ( + access == "PROJECT" or access == "USER" + ), 
"Queue access must be either project or user" + return access + + +def _max_from_config( + config: Dict[str, Any], key: str, default: int = 1 +) -> Union[int, float]: + """Get an integer from the config, or float.inf if -1. + + Utility for parsing integers from the agent config with a default, infinity + handling, and integer parsing. Raises more informative error if parse error. + """ + try: + val = config.get(key) + if val is None: + val = default + max_from_config = int(val) + except ValueError as e: + raise LaunchError( + f"Error when parsing LaunchAgent config key: ['{key}': " + f"{config.get(key)}]. Error: {str(e)}" + ) + if max_from_config == -1: + return float("inf") + + if max_from_config < 0: + raise LaunchError( + f"Error when parsing LaunchAgent config key: ['{key}': " + f"{config.get(key)}]. Error: negative value." + ) + return max_from_config + + +class InternalAgentLogger: + def __init__(self, verbosity=0): + self._print_to_terminal = verbosity >= 2 + + def error(self, message: str): + if self._print_to_terminal: + wandb.termerror(f"{LOG_PREFIX}{message}") + _logger.error(f"{LOG_PREFIX}{message}") + + def warn(self, message: str): + if self._print_to_terminal: + wandb.termwarn(f"{LOG_PREFIX}{message}") + _logger.warn(f"{LOG_PREFIX}{message}") + + def info(self, message: str): + if self._print_to_terminal: + wandb.termlog(f"{LOG_PREFIX}{message}") + _logger.info(f"{LOG_PREFIX}{message}") + + def debug(self, message: str): + if self._print_to_terminal: + wandb.termlog(f"{LOG_PREFIX}{message}") + _logger.debug(f"{LOG_PREFIX}{message}") + + +def construct_agent_configs( + launch_config: Optional[Dict] = None, + build_config: Optional[Dict] = None, +) -> Tuple[Optional[Dict[str, Any]], Dict[str, Any], Dict[str, Any]]: + registry_config = None + environment_config = None + if launch_config is not None: + build_config = launch_config.get("builder") + registry_config = launch_config.get("registry") + + default_launch_config = None + if 
os.path.exists(os.path.expanduser(LAUNCH_CONFIG_FILE)): + with open(os.path.expanduser(LAUNCH_CONFIG_FILE)) as f: + default_launch_config = ( + yaml.safe_load(f) or {} + ) # In case the config is empty, we want it to be {} instead of None. + environment_config = default_launch_config.get("environment") + + build_config, registry_config = resolve_build_and_registry_config( + default_launch_config, build_config, registry_config + ) + + return environment_config, build_config, registry_config + + +class LaunchAgent: + """Launch agent class which polls run given run queues and launches runs for wandb launch.""" + + _instance = None + + def __new__(cls, *args: Any, **kwargs: Any) -> "LaunchAgent": + """Create a new instance of the LaunchAgent. + + This method ensures that only one instance of the LaunchAgent is created. + This is done so that information about the agent can be accessed from + elsewhere in the library. + """ + if cls._instance is None: + cls._instance = super().__new__(cls) + return cls._instance + + @classmethod + def name(cls) -> str: + """Return the name of the agent.""" + if cls._instance is None: + raise LaunchError("LaunchAgent has not been initialized") + name = cls._instance._name + if isinstance(name, str): + return name + raise LaunchError(f"Found invalid name for agent {name}") + + @classmethod + def initialized(cls) -> bool: + """Return whether the agent is initialized.""" + return cls._instance is not None + + def __init__(self, api: Api, config: Dict[str, Any]): + """Initialize a launch agent. + + Arguments: + api: Api object to use for making requests to the backend. + config: Config dictionary for the agent. 
+ """ + self._entity = config["entity"] + self._project = LAUNCH_DEFAULT_PROJECT + self._api = api + self._base_url = self._api.settings().get("base_url") + self._ticks = 0 + self._jobs: Dict[int, JobAndRunStatusTracker] = {} + self._jobs_lock = threading.Lock() + self._jobs_event = Event() + self._jobs_event.set() + self._cwd = os.getcwd() + self._namespace = runid.generate_id() + self._access = _convert_access("project") + self._max_jobs = _max_from_config(config, "max_jobs") + self._max_schedulers = _max_from_config(config, "max_schedulers") + self._secure_mode = config.get("secure_mode", False) + self._verbosity = config.get("verbosity", 0) + self._internal_logger = InternalAgentLogger(verbosity=self._verbosity) + self._last_status_print_time = 0.0 + self.default_config: Dict[str, Any] = config + self._stopped_run_timeout = config.get( + "stopped_run_timeout", DEFAULT_STOPPED_RUN_TIMEOUT + ) + self._known_warnings: List[str] = [] + + # Get agent version from env var if present, otherwise wandb version + self.version: str = "wandb@" + wandb.__version__ + env_agent_version = os.environ.get("WANDB_AGENT_VERSION") + if env_agent_version and env_agent_version != "wandb-launch-agent": + self.version = env_agent_version + + # serverside creation + self.gorilla_supports_agents = ( + self._api.launch_agent_introspection() is not None + ) + self._gorilla_supports_fail_run_queue_items = ( + self._api.fail_run_queue_item_introspection() + ) + + self._queues: List[str] = config.get("queues", ["default"]) + + # remove project field from agent config before sending to back end + # because otherwise it shows up in the config in the UI and confuses users + sent_config = config.copy() + if "project" in sent_config: + del sent_config["project"] + + create_response = self._api.create_launch_agent( + self._entity, + self._project, + self._queues, + sent_config, + self.version, + self.gorilla_supports_agents, + ) + self._id = create_response["launchAgentId"] + if 
self._api.entity_is_team(self._entity): + wandb.termwarn( + f"{LOG_PREFIX}Agent is running on team entity ({self._entity}). Members of this team will be able to run code on this device." + ) + + agent_response = self._api.get_launch_agent( + self._id, self.gorilla_supports_agents + ) + self._name = agent_response["name"] + self._init_agent_run() + + def _is_scheduler_job(self, run_spec: Dict[str, Any]) -> bool: + """Determine whether a job/runSpec is a sweep scheduler.""" + if not run_spec: + self._internal_logger.debug( + "Received runSpec in _is_scheduler_job that was empty" + ) + + if run_spec.get("uri") != Scheduler.PLACEHOLDER_URI: + return False + + if run_spec.get("resource") == "local-process": + # Any job pushed to a run queue that has a scheduler uri is + # allowed to use local-process + if run_spec.get("job"): + return True + + # If a scheduler is local-process and run through CLI, also + # confirm command is in format: [wandb scheduler ] + cmd = run_spec.get("overrides", {}).get("entry_point", []) + if len(cmd) < 3: + return False + + if cmd[:2] != ["wandb", "scheduler"]: + return False + + return True + + async def fail_run_queue_item( + self, + run_queue_item_id: str, + message: str, + phase: str, + files: Optional[List[str]] = None, + ) -> None: + if self._gorilla_supports_fail_run_queue_items: + fail_rqi = event_loop_thread_exec(self._api.fail_run_queue_item) + await fail_rqi(run_queue_item_id, message, phase, files) + + def _init_agent_run(self) -> None: + # TODO: has it been long enough that all backends support agents? 
+ self._wandb_run = None + + if self.gorilla_supports_agents: + settings = wandb.Settings( + silent=True, disable_git=True, disable_job_creation=True + ) + self._wandb_run = wandb.init( + project=self._project, + entity=self._entity, + settings=settings, + id=self._name, + job_type=HIDDEN_AGENT_RUN_TYPE, + ) + + @property + def thread_ids(self) -> List[int]: + """Returns a list of keys running thread ids for the agent.""" + with self._jobs_lock: + return list(self._jobs.keys()) + + @property + def num_running_schedulers(self) -> int: + """Return just the number of schedulers.""" + with self._jobs_lock: + return len([x for x in self._jobs if self._jobs[x].is_scheduler]) + + @property + def num_running_jobs(self) -> int: + """Return the number of jobs not including schedulers.""" + with self._jobs_lock: + return len([x for x in self._jobs if not self._jobs[x].is_scheduler]) + + async def pop_from_queue(self, queue: str) -> Any: + """Pops an item off the runqueue to run as a job. + + Arguments: + queue: Queue to pop from. + + Returns: + Item popped off the queue. + + Raises: + Exception: if there is an error popping from the queue. 
+ """ + try: + pop = event_loop_thread_exec(self._api.pop_from_run_queue) + ups = await pop( + queue, + entity=self._entity, + project=self._project, + agent_id=self._id, + ) + return ups + except Exception as e: + print("Exception:", e) + return None + + def print_status(self) -> None: + """Prints the current status of the agent.""" + self._last_status_print_time = time.time() + output_str = "agent " + if self._name: + output_str += f"{self._name} " + if self.num_running_jobs < self._max_jobs: + output_str += f"polling on queues {','.join(self._queues)}, " + output_str += ( + f"running {self.num_running_jobs} out of a maximum of {self._max_jobs} jobs" + ) + + wandb.termlog(f"{LOG_PREFIX}{output_str}") + if self.num_running_jobs > 0: + output_str += f": {','.join(str(job_id) for job_id in self.thread_ids)}" + + _logger.info(output_str) + + async def update_status(self, status: str) -> None: + """Update the status of the agent. + + Arguments: + status: Status to update the agent to. + """ + _update_status = event_loop_thread_exec(self._api.update_launch_agent_status) + update_ret = await _update_status( + self._id, status, self.gorilla_supports_agents + ) + if not update_ret["success"]: + wandb.termerror(f"{LOG_PREFIX}Failed to update agent status to {status}") + + def _check_run_exists_and_inited( + self, entity: str, project: str, run_id: str, rqi_id: str + ) -> bool: + """Checks the stateof the run to ensure it has been inited. Note this will not behave well with resuming.""" + # Checks the _wandb key in the run config for the run queue item id. If it exists, the + # submitted run definitely called init. Falls back to checking state of run. + # TODO: handle resuming runs + + # Sweep runs exist but are in pending state, normal launch runs won't exist + # so will raise a CommError. 
+ try: + run_state = self._api.get_run_state(entity, project, run_id) + if run_state.lower() != "pending": + return True + except CommError: + self._internal_logger.info( + f"Run {entity}/{project}/{run_id} with rqi id: {rqi_id} did not have associated run", + ) + return False + + async def finish_thread_id( + self, + thread_id: int, + exception: Optional[Union[Exception, LaunchDockerError]] = None, + ) -> None: + """Removes the job from our list for now.""" + with self._jobs_lock: + job_and_run_status = self._jobs[thread_id] + if ( + job_and_run_status.entity is not None + and job_and_run_status.entity != self._entity + ): + self._internal_logger.info( + "Skipping check for completed run status because run is on a different entity than agent", + ) + elif exception is not None: + tb_str = traceback.format_exception( + type(exception), value=exception, tb=exception.__traceback__ + ) + fnames = job_and_run_status.saver.save_contents( + "".join(tb_str), "error.log", "error" + ) + await self.fail_run_queue_item( + job_and_run_status.run_queue_item_id, + str(exception), + job_and_run_status.err_stage, + fnames, + ) + elif job_and_run_status.project is None or job_and_run_status.run_id is None: + self._internal_logger.info( + f"called finish_thread_id on thread whose tracker has no project or run id. RunQueueItemID: {job_and_run_status.run_queue_item_id}", + ) + wandb.termerror( + "Missing project or run id on thread called finish thread id" + ) + await self.fail_run_queue_item( + job_and_run_status.run_queue_item_id, + "submitted job was finished without assigned project or run id", + "agent", + ) + elif job_and_run_status.run is not None: + called_init = False + # We do some weird stuff here getting run info to check for a + # created in run in W&B. + # + # We retry for 60 seconds with an exponential backoff in case + # upsert run is taking a while. 
+ logs = None + interval = 1 + while True: + called_init = self._check_run_exists_and_inited( + self._entity, + job_and_run_status.project, + job_and_run_status.run_id, + job_and_run_status.run_queue_item_id, + ) + if called_init or interval > RUN_INFO_GRACE_PERIOD: + break + if not called_init: + # Fetch the logs now if we don't get run info on the + # first try, in case the logs are cleaned from the runner + # environment (e.g. k8s) during the run info grace period. + if interval == 1: + logs = await job_and_run_status.run.get_logs() + await asyncio.sleep(interval) + interval *= 2 + if not called_init: + fnames = None + if job_and_run_status.completed_status == "finished": + _msg = "The submitted job exited successfully but failed to call wandb.init" + else: + _msg = "The submitted run was not successfully started" + if logs: + fnames = job_and_run_status.saver.save_contents( + logs, "error.log", "error" + ) + await self.fail_run_queue_item( + job_and_run_status.run_queue_item_id, _msg, "run", fnames + ) + else: + self._internal_logger.info( + f"Finish thread id {thread_id} had no exception and no run" + ) + wandb._sentry.exception( + "launch agent called finish thread id on thread without run or exception" + ) + + # TODO: keep logs or something for the finished jobs + with self._jobs_lock: + del self._jobs[thread_id] + + # update status back to polling if no jobs are running + if len(self.thread_ids) == 0: + await self.update_status(AGENT_POLLING) + + async def run_job( + self, job: Dict[str, Any], queue: str, file_saver: RunQueueItemFileSaver + ) -> None: + """Set up project and run the job. + + Arguments: + job: Job to run. 
+ """ + _msg = f"{LOG_PREFIX}Launch agent received job:\n{pprint.pformat(job)}\n" + wandb.termlog(_msg) + _logger.info(_msg) + # update agent status + await self.update_status(AGENT_RUNNING) + + # parse job + self._internal_logger.info("Parsing launch spec") + launch_spec = job["runSpec"] + + # Abort if this job attempts to override secure mode + self._assert_secure(launch_spec) + job_tracker = JobAndRunStatusTracker(job["runQueueItemId"], queue, file_saver) + + asyncio.create_task( + self.task_run_job( + launch_spec, + job, + self.default_config, + self._api, + job_tracker, + ) + ) + + def _assert_secure(self, launch_spec: Dict[str, Any]) -> None: + """If secure mode is set, make sure no vulnerable keys are overridden.""" + if not self._secure_mode: + return + k8s_config = launch_spec.get("resource_args", {}).get("kubernetes", {}) + + pod_secure_keys = ["hostPID", "hostIPC", "hostNetwork", "initContainers"] + pod_spec = k8s_config.get("spec", {}).get("template", {}).get("spec", {}) + for key in pod_secure_keys: + if key in pod_spec: + raise ValueError( + f'This agent is configured to lock "{key}" in pod spec ' + "but the job specification attempts to override it." + ) + + container_specs = pod_spec.get("containers", []) + for container_spec in container_specs: + if "command" in container_spec: + raise ValueError( + 'This agent is configured to lock "command" in container spec ' + "but the job specification attempts to override it." + ) + + if launch_spec.get("overrides", {}).get("entry_point"): + raise ValueError( + 'This agent is configured to lock the "entrypoint" override ' + "but the job specification attempts to override it." + ) + + async def loop(self) -> None: + """Loop infinitely to poll for jobs and run them. + + Raises: + KeyboardInterrupt: if the agent is requested to stop. 
+ """ + self.print_status() + if self._verbosity == 0: + print_interval = DEFAULT_PRINT_INTERVAL + else: + print_interval = VERBOSE_PRINT_INTERVAL + try: + while True: + job = None + self._ticks += 1 + agent_response = self._api.get_launch_agent( + self._id, self.gorilla_supports_agents + ) + if agent_response["stopPolling"]: + # shutdown process and all jobs if requested from ui + raise KeyboardInterrupt + if self.num_running_jobs < self._max_jobs: + # only check for new jobs if we're not at max + job_and_queue = await self.get_job_and_queue() + # these will either both be None, or neither will be None + if job_and_queue is not None: + job = job_and_queue.job + queue = job_and_queue.queue + try: + file_saver = RunQueueItemFileSaver( + self._wandb_run, job["runQueueItemId"] + ) + if self._is_scheduler_job(job.get("runSpec", {})): + # If job is a scheduler, and we are already at the cap, ignore, + # don't ack, and it will be pushed back onto the queue in 1 min + if self.num_running_schedulers >= self._max_schedulers: + wandb.termwarn( + f"{LOG_PREFIX}Agent already running the maximum number " + f"of sweep schedulers: {self._max_schedulers}. 
To set " + "this value use `max_schedulers` key in the agent config" + ) + continue + await self.run_job(job, queue, file_saver) + except Exception as e: + wandb.termerror( + f"{LOG_PREFIX}Error running job: {traceback.format_exc()}" + ) + wandb._sentry.exception(e) + + # always the first phase, because we only enter phase 2 within the thread + files = file_saver.save_contents( + contents=traceback.format_exc(), + fname="error.log", + file_sub_type="error", + ) + await self.fail_run_queue_item( + run_queue_item_id=job["runQueueItemId"], + message=str(e), + phase="agent", + files=files, + ) + + if self._ticks % 2 == 0: + if len(self.thread_ids) == 0: + await self.update_status(AGENT_POLLING) + else: + await self.update_status(AGENT_RUNNING) + if time.time() - self._last_status_print_time > print_interval: + self.print_status() + + if self.num_running_jobs == self._max_jobs or job is None: + # all threads busy or did not receive job + await asyncio.sleep(AGENT_POLLING_INTERVAL) + else: + await asyncio.sleep(RECEIVED_JOB_POLLING_INTERVAL) + + except KeyboardInterrupt: + await self.update_status(AGENT_KILLED) + wandb.termlog(f"{LOG_PREFIX}Shutting down, active jobs:") + self.print_status() + finally: + self._jobs_event.clear() + + # Threaded functions + async def task_run_job( + self, + launch_spec: Dict[str, Any], + job: Dict[str, Any], + default_config: Dict[str, Any], + api: Api, + job_tracker: JobAndRunStatusTracker, + ) -> None: + rqi_id = job["runQueueItemId"] + assert rqi_id + exception: Optional[Union[LaunchDockerError, Exception]] = None + try: + with self._jobs_lock: + self._jobs[rqi_id] = job_tracker + await self._task_run_job( + launch_spec, job, default_config, api, rqi_id, job_tracker + ) + except LaunchDockerError as e: + wandb.termerror( + f"{LOG_PREFIX}agent {self._name} encountered an issue while starting Docker, see above output for details." 
+ ) + exception = e + wandb._sentry.exception(e) + except LaunchError as e: + wandb.termerror(f"{LOG_PREFIX}Error running job: {e}") + exception = e + wandb._sentry.exception(e) + except Exception as e: + wandb.termerror(f"{LOG_PREFIX}Error running job: {traceback.format_exc()}") + exception = e + wandb._sentry.exception(e) + finally: + await self.finish_thread_id(rqi_id, exception) + + async def _task_run_job( + self, + launch_spec: Dict[str, Any], + job: Dict[str, Any], + default_config: Dict[str, Any], + api: Api, + thread_id: int, + job_tracker: JobAndRunStatusTracker, + ) -> None: + project = LaunchProject.from_spec(launch_spec, api) + self._set_queue_and_rqi_in_project(project, job, job_tracker.queue) + ack = event_loop_thread_exec(api.ack_run_queue_item) + await ack(job["runQueueItemId"], project.run_id) + # don't launch sweep runs if the sweep isn't healthy + await self.check_sweep_state(launch_spec, api) + + job_tracker.update_run_info(project) + self._internal_logger.info("Fetching and validating project...") + project.fetch_and_validate_project() + self._internal_logger.info("Fetching resource...") + resource = launch_spec.get("resource") or "local-container" + backend_config: Dict[str, Any] = { + PROJECT_SYNCHRONOUS: False, # agent always runs async + } + self._internal_logger.info("Loading backend") + override_build_config = launch_spec.get("builder") + + _, build_config, registry_config = construct_agent_configs( + default_config, override_build_config + ) + image_uri = project.docker_image or project.job_base_image + entrypoint = project.get_job_entry_point() + environment = loader.environment_from_config( + default_config.get("environment", {}) + ) + registry = loader.registry_from_config(registry_config, environment) + builder = loader.builder_from_config(build_config, environment, registry) + backend = loader.runner_from_config( + resource, api, backend_config, environment, registry + ) + if not ( + project.docker_image + or project.job_base_image 
+ or isinstance(backend, LocalProcessRunner) + ): + assert entrypoint is not None + image_uri = await builder.build_image(project, entrypoint, job_tracker) + + self._internal_logger.info("Backend loaded...") + if isinstance(backend, LocalProcessRunner): + run = await backend.run(project, image_uri) + else: + assert image_uri + run = await backend.run(project, image_uri) + if self._is_scheduler_job(launch_spec): + with self._jobs_lock: + self._jobs[thread_id].is_scheduler = True + wandb.termlog( + f"{LOG_PREFIX}Preparing to run sweep scheduler " + f"({self.num_running_schedulers}/{self._max_schedulers})" + ) + + if not run: + with self._jobs_lock: + job_tracker.failed_to_start = True + return + with self._jobs_lock: + job_tracker.run = run + start_time = time.time() + stopped_time: Optional[float] = None + while self._jobs_event.is_set(): + # If run has failed to start before timeout, kill it + state = (await run.get_status()).state + if state == "starting" and RUN_START_TIMEOUT > 0: + if time.time() - start_time > RUN_START_TIMEOUT: + await run.cancel() + raise LaunchError( + f"Run failed to start within {RUN_START_TIMEOUT} seconds. " + "If you want to increase this timeout, set WANDB_LAUNCH_START_TIMEOUT " + "to a larger value." + ) + if await self._check_run_finished(job_tracker, launch_spec): + return + if await job_tracker.check_wandb_run_stopped(self._api): + if stopped_time is None: + stopped_time = time.time() + else: + if time.time() - stopped_time > self._stopped_run_timeout: + await run.cancel() + await asyncio.sleep(AGENT_POLLING_INTERVAL) + + # temp: for local, kill all jobs. 
we don't yet have good handling for different + # types of runners in general + if isinstance(run, LocalSubmittedRun) and run._command_proc is not None: + run._command_proc.kill() + + async def check_sweep_state(self, launch_spec: Dict[str, Any], api: Api) -> None: + """Check the state of a sweep before launching a run for the sweep.""" + if launch_spec.get("sweep_id"): + try: + get_sweep_state = event_loop_thread_exec(api.get_sweep_state) + state = await get_sweep_state( + sweep=launch_spec["sweep_id"], + entity=launch_spec["entity"], + project=launch_spec["project"], + ) + except Exception as e: + self._internal_logger.debug(f"Fetch sweep state error: {e}") + state = None + + if state != "RUNNING" and state != "PAUSED": + raise LaunchError( + f"Launch agent picked up sweep job, but sweep ({launch_spec['sweep_id']}) was in a terminal state ({state})" + ) + + async def _check_run_finished( + self, job_tracker: JobAndRunStatusTracker, launch_spec: Dict[str, Any] + ) -> bool: + if job_tracker.completed_status: + return True + + # the run can be done before the run has started + # but can also be none if the run failed to start + # so if there is no run, either the run hasn't started yet + # or it has failed + if job_tracker.run is None: + if job_tracker.failed_to_start: + return True + return False + + known_error = False + try: + run = job_tracker.run + status = await run.get_status() + state = status.state + + for warning in status.messages: + if warning not in self._known_warnings: + self._known_warnings.append(warning) + success = self._api.update_run_queue_item_warning( + job_tracker.run_queue_item_id, + warning, + "Kubernetes", + [], + ) + if not success: + _logger.warning( + f"Error adding warning {warning} to run queue item {job_tracker.run_queue_item_id}" + ) + self._known_warnings.remove(warning) + + if state == "preempted" and job_tracker.entity == self._entity: + config = launch_spec.copy() + config["run_id"] = job_tracker.run_id + config["_resume_count"] 
= config.get("_resume_count", 0) + 1 + with self._jobs_lock: + job_tracker.completed_status = state + if config["_resume_count"] > MAX_RESUME_COUNT: + wandb.termlog( + f"{LOG_PREFIX}Run {job_tracker.run_id} has already resumed {MAX_RESUME_COUNT} times." + ) + return True + wandb.termlog( + f"{LOG_PREFIX}Run {job_tracker.run_id} was preempted, requeueing..." + ) + + if "sweep_id" in config: + # allow resumed runs from sweeps that have already completed by removing + # the sweep id before pushing to queue + del config["sweep_id"] + + launch_add( + config=config, + project_queue=self._project, + queue_name=job_tracker.queue, + ) + return True + # TODO change these statuses to an enum + if state in ["stopped", "failed", "finished", "preempted"]: + if job_tracker.is_scheduler: + wandb.termlog(f"{LOG_PREFIX}Scheduler finished with ID: {run.id}") + if state == "failed": + # on fail, update sweep state. scheduler run_id should == sweep_id + try: + self._api.set_sweep_state( + sweep=job_tracker.run_id, + entity=job_tracker.entity, + project=job_tracker.project, + state="CANCELED", + ) + except Exception as e: + raise LaunchError(f"Failed to update sweep state: {e}") + else: + wandb.termlog(f"{LOG_PREFIX}Job finished with ID: {run.id}") + with self._jobs_lock: + job_tracker.completed_status = state + return True + + return False + except LaunchError as e: + wandb.termerror( + f"{LOG_PREFIX}Terminating job {run.id} because it failed to start: {str(e)}" + ) + known_error = True + with self._jobs_lock: + job_tracker.failed_to_start = True + # TODO: make get_status robust to errors for each runner, and handle them + except Exception as e: + wandb.termerror(f"{LOG_PREFIX}Error getting status for job {run.id}") + wandb.termerror(traceback.format_exc()) + _logger.info("---") + _logger.info("Caught exception while getting status.") + _logger.info(f"Job ID: {run.id}") + _logger.info(traceback.format_exc()) + _logger.info("---") + wandb._sentry.exception(e) + return known_error + + 
async def get_job_and_queue(self) -> Optional[JobSpecAndQueue]: + for queue in self._queues: + job = await self.pop_from_queue(queue) + if job is not None: + self._queues.remove(queue) + self._queues.append(queue) + return JobSpecAndQueue(job, queue) + return None + + def _set_queue_and_rqi_in_project( + self, project: LaunchProject, job: Dict[str, Any], queue: str + ) -> None: + project.queue_name = queue + + # queue entity currently always matches the agent + project.queue_entity = self._entity + project.run_queue_item_id = job["runQueueItemId"] diff --git a/parrot/lib/python3.10/site-packages/wandb/sdk/launch/agent/config.py b/parrot/lib/python3.10/site-packages/wandb/sdk/launch/agent/config.py new file mode 100644 index 0000000000000000000000000000000000000000..9c1e2b3d6d73a55f1fc8f12e70b5b33e49b0230e --- /dev/null +++ b/parrot/lib/python3.10/site-packages/wandb/sdk/launch/agent/config.py @@ -0,0 +1,296 @@ +"""Definition of the config object used by the Launch agent.""" + +from enum import Enum +from typing import List, Optional + +# ValidationError is imported for exception type checking purposes only. 
+from pydantic import ( # type: ignore + BaseModel, + Field, + ValidationError, # noqa: F401 + root_validator, + validator, +) + +import wandb +from wandb.sdk.launch.utils import ( + AZURE_BLOB_REGEX, + AZURE_CONTAINER_REGISTRY_URI_REGEX, + ELASTIC_CONTAINER_REGISTRY_URI_REGEX, + GCP_ARTIFACT_REGISTRY_URI_REGEX, + GCS_URI_RE, + S3_URI_RE, +) + +__all__ = [ + "ValidationError", + "AgentConfig", +] + + +class EnvironmentType(str, Enum): + """Enum of valid environment types.""" + + aws = "aws" + gcp = "gcp" + azure = "azure" + + +class RegistryType(str, Enum): + """Enum of valid registry types.""" + + ecr = "ecr" + acr = "acr" + gcr = "gcr" + + +class BuilderType(str, Enum): + """Enum of valid builder types.""" + + docker = "docker" + kaniko = "kaniko" + noop = "noop" + + +class TargetPlatform(str, Enum): + """Enum of valid target platforms.""" + + linux_amd64 = "linux/amd64" + linux_arm64 = "linux/arm64" + + +class RegistryConfig(BaseModel): + """Configuration for registry block. + + Note that we don't forbid extra fields here because: + - We want to allow all fields supported by each registry + - We will perform validation on the registry object itself later + - Registry block is being deprecated in favor of destination field in builder + """ + + type: Optional[RegistryType] = Field( + None, + description="The type of registry to use.", + ) + uri: Optional[str] = Field( + None, + description="The URI of the registry.", + ) + + @validator("uri") # type: ignore + @classmethod + def validate_uri(cls, uri: str) -> str: + return validate_registry_uri(uri) + + +class EnvironmentConfig(BaseModel): + """Configuration for the environment block.""" + + type: Optional[EnvironmentType] = Field( + None, + description="The type of environment to use.", + ) + region: Optional[str] = Field(..., description="The region to use.") + + class Config: + extra = "allow" + + @root_validator(pre=True) # type: ignore + @classmethod + def check_extra_fields(cls, values: dict) -> dict: + 
"""Check for extra fields and print a warning.""" + for key in values: + if key not in ["type", "region"]: + wandb.termwarn( + f"Unrecognized field {key} in environment block. Please check your config file." + ) + return values + + +class BuilderConfig(BaseModel): + type: Optional[BuilderType] = Field( + None, + description="The type of builder to use.", + ) + destination: Optional[str] = Field( + None, + description="The destination to use for the built image. If not provided, " + "the image will be pushed to the registry.", + ) + + platform: Optional[TargetPlatform] = Field( + None, + description="The platform to use for the built image. If not provided, " + "the platform will be detected automatically.", + ) + + build_context_store: Optional[str] = Field( + None, + description="The build context store to use. Required for kaniko builds.", + alias="build-context-store", + ) + build_job_name: Optional[str] = Field( + "wandb-launch-container-build", + description="Name prefix of the build job.", + alias="build-job-name", + ) + secret_name: Optional[str] = Field( + None, + description="The name of the secret to use for the build job.", + alias="secret-name", + ) + secret_key: Optional[str] = Field( + None, + description="The key of the secret to use for the build job.", + alias="secret-key", + ) + kaniko_image: Optional[str] = Field( + "gcr.io/kaniko-project/executor:latest", + description="The image to use for the kaniko executor.", + alias="kaniko-image", + ) + + @validator("build_context_store") # type: ignore + @classmethod + def validate_build_context_store( + cls, build_context_store: Optional[str] + ) -> Optional[str]: + """Validate that the build context store is a valid container registry URI.""" + if build_context_store is None: + return None + for regex in [ + S3_URI_RE, + GCS_URI_RE, + AZURE_BLOB_REGEX, + ]: + if regex.match(build_context_store): + return build_context_store + raise ValueError( + "Invalid build context store. 
Build context store must be a URI for an " + "S3 bucket, GCS bucket, or Azure blob." + ) + + @root_validator(pre=True) # type: ignore + @classmethod + def validate_docker(cls, values: dict) -> dict: + """Right now there are no required fields for docker builds.""" + return values + + @validator("destination") # type: ignore + @classmethod + def validate_destination(cls, destination: Optional[str]) -> Optional[str]: + """Validate that the destination is a valid container registry URI.""" + if destination is None: + return None + return validate_registry_uri(destination) + + +class AgentConfig(BaseModel): + """Configuration for the Launch agent.""" + + queues: List[str] = Field( + default=[], + description="The queues to use for this agent.", + ) + entity: Optional[str] = Field( + description="The W&B entity to use for this agent.", + ) + max_jobs: Optional[int] = Field( + 1, + description="The maximum number of jobs to run concurrently.", + ) + max_schedulers: Optional[int] = Field( + 1, + description="The maximum number of sweep schedulers to run concurrently.", + ) + secure_mode: Optional[bool] = Field( + False, + description="Whether to use secure mode for this agent. 
If True, the " + "agent will reject runs that attempt to override the entrypoint or image.", + ) + registry: Optional[RegistryConfig] = Field( + None, + description="The registry to use.", + ) + environment: Optional[EnvironmentConfig] = Field( + None, + description="The environment to use.", + ) + builder: Optional[BuilderConfig] = Field( + None, + description="The builder to use.", + ) + verbosity: Optional[int] = Field( + 0, + description="How verbose to print, 0 = default, 1 = verbose, 2 = very verbose", + ) + stopped_run_timeout: Optional[int] = Field( + 60, + description="How many seconds to wait after receiving the stop command before forcibly cancelling a run.", + ) + + class Config: + extra = "forbid" + + +def validate_registry_uri(uri: str) -> str: + """Validate that the registry URI is a valid container registry URI. + + The URI should resolve to an image name in a container registry. The recognized + formats are for ECR, ACR, and GCP Artifact Registry. If the URI does not match + any of these formats, a warning is printed indicating the registry type is not + recognized and the agent can't guarantee that images can be pushed. + + If the format is recognized but does not resolve to an image name, an + error is raised. For example, if the URI is an ECR URI but does not include + an image name or includes a tag as well as an image name, an error is raised. + """ + tag_msg = ( + "Destination for built images may not include a tag, but the URI provided " + "includes the suffix '{tag}'. Please remove the tag and try again. The agent " + "will automatically tag each image with a unique hash of the source code." + ) + if uri.startswith("https://"): + uri = uri[8:] + + match = GCP_ARTIFACT_REGISTRY_URI_REGEX.match(uri) + if match: + if match.group("tag"): + raise ValueError(tag_msg.format(tag=match.group("tag"))) + if not match.group("image_name"): + raise ValueError( + "An image name must be specified in the URI for a GCP Artifact Registry. 
" + "Please provide a uri with the format " + "'https://-docker.pkg.dev///'." + ) + return uri + + match = AZURE_CONTAINER_REGISTRY_URI_REGEX.match(uri) + if match: + if match.group("tag"): + raise ValueError(tag_msg.format(tag=match.group("tag"))) + if not match.group("repository"): + raise ValueError( + "A repository name must be specified in the URI for an " + "Azure Container Registry. Please provide a uri with the format " + "'https://.azurecr.io/'." + ) + return uri + + match = ELASTIC_CONTAINER_REGISTRY_URI_REGEX.match(uri) + if match: + if match.group("tag"): + raise ValueError(tag_msg.format(tag=match.group("tag"))) + if not match.group("repository"): + raise ValueError( + "A repository name must be specified in the URI for an " + "Elastic Container Registry. Please provide a uri with the format " + "'https://.dkr.ecr..amazonaws.com/'." + ) + return uri + + wandb.termwarn( + f"Unable to recognize registry type in URI {uri}. You are responsible " + "for ensuring the agent can push images to this registry." 
+ ) + return uri diff --git a/parrot/lib/python3.10/site-packages/wandb/sdk/launch/agent/job_status_tracker.py b/parrot/lib/python3.10/site-packages/wandb/sdk/launch/agent/job_status_tracker.py new file mode 100644 index 0000000000000000000000000000000000000000..2b25b5bc637b6a09fadd260d8d7eb702e9f54d32 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/wandb/sdk/launch/agent/job_status_tracker.py @@ -0,0 +1,53 @@ +import logging +from dataclasses import dataclass +from typing import Optional + +from wandb.apis.internal import Api +from wandb.errors import CommError +from wandb.sdk.launch._project_spec import LaunchProject + +from ..runner.abstract import AbstractRun +from ..utils import event_loop_thread_exec +from .run_queue_item_file_saver import RunQueueItemFileSaver + +_logger = logging.getLogger(__name__) + + +@dataclass +class JobAndRunStatusTracker: + run_queue_item_id: str + queue: str + saver: RunQueueItemFileSaver + run_id: Optional[str] = None + project: Optional[str] = None + entity: Optional[str] = None + run: Optional[AbstractRun] = None + failed_to_start: bool = False + completed_status: Optional[str] = None + is_scheduler: bool = False + err_stage: str = "agent" + + @property + def job_completed(self) -> bool: + return self.failed_to_start or self.completed_status is not None + + def update_run_info(self, launch_project: LaunchProject) -> None: + self.run_id = launch_project.run_id + self.project = launch_project.target_project + self.entity = launch_project.target_entity + + def set_err_stage(self, stage: str) -> None: + self.err_stage = stage + + async def check_wandb_run_stopped(self, api: Api) -> bool: + assert ( + self.run_id is not None + and self.project is not None + and self.entity is not None + ), "Job tracker does not contain run info. 
Update with run info before checking if run stopped" + check_stop = event_loop_thread_exec(api.api.check_stop_requested) + try: + return bool(await check_stop(self.project, self.entity, self.run_id)) + except CommError as e: + _logger.error(f"CommError when checking if wandb run stopped: {e}") + return False diff --git a/parrot/lib/python3.10/site-packages/wandb/sdk/launch/agent/run_queue_item_file_saver.py b/parrot/lib/python3.10/site-packages/wandb/sdk/launch/agent/run_queue_item_file_saver.py new file mode 100644 index 0000000000000000000000000000000000000000..810ea0ba50e890876a6d565a737c9b250be17867 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/wandb/sdk/launch/agent/run_queue_item_file_saver.py @@ -0,0 +1,45 @@ +"""Implementation of the run queue item file saver class.""" + +import os +import sys +from typing import List, Optional + +import wandb + +if sys.version_info >= (3, 8): + from typing import Literal +else: + from typing_extensions import Literal + +FileSubtypes = Literal["warning", "error"] + + +class RunQueueItemFileSaver: + def __init__( + self, + agent_run: Optional["wandb.sdk.wandb_run.Run"], + run_queue_item_id: str, + ): + self.run_queue_item_id = run_queue_item_id + self.run = agent_run + + def save_contents( + self, contents: str, fname: str, file_sub_type: FileSubtypes + ) -> Optional[List[str]]: + if not isinstance(self.run, wandb.sdk.wandb_run.Run): + wandb.termwarn("Not saving file contents because agent has no run") + return None + root_dir = self.run._settings.files_dir + saved_run_path = os.path.join(self.run_queue_item_id, file_sub_type, fname) + local_path = os.path.join(root_dir, saved_run_path) + os.makedirs(os.path.dirname(local_path), exist_ok=True) + with open(local_path, "w") as f: + f.write(contents) + res = self.run.save(local_path, base_path=root_dir, policy="now") + if isinstance(res, list): + return [saved_run_path] + else: + wandb.termwarn( + f"Failed to save files for run queue item: {self.run_queue_item_id}" + 
) + return None diff --git a/parrot/lib/python3.10/site-packages/wandb/sdk/launch/create_job.py b/parrot/lib/python3.10/site-packages/wandb/sdk/launch/create_job.py new file mode 100644 index 0000000000000000000000000000000000000000..0e8e71f51f55b9039aed5d34cd6cacf3c4e818c5 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/wandb/sdk/launch/create_job.py @@ -0,0 +1,528 @@ +import json +import logging +import os +import re +import sys +import tempfile +from typing import Any, Dict, List, Optional, Tuple + +import wandb +from wandb.apis.internal import Api +from wandb.sdk.artifacts.artifact import Artifact +from wandb.sdk.internal.job_builder import JobBuilder +from wandb.sdk.launch.git_reference import GitReference +from wandb.sdk.launch.utils import ( + _is_git_uri, + get_current_python_version, + get_entrypoint_file, +) +from wandb.sdk.lib import filesystem +from wandb.util import make_artifact_name_safe + +logging.basicConfig(stream=sys.stdout, level=logging.INFO) +_logger = logging.getLogger("wandb") + + +CODE_ARTIFACT_EXCLUDE_PATHS = ["wandb", ".git"] + + +def create_job( + path: str, + job_type: str, + entity: Optional[str] = None, + project: Optional[str] = None, + name: Optional[str] = None, + description: Optional[str] = None, + aliases: Optional[List[str]] = None, + runtime: Optional[str] = None, + entrypoint: Optional[str] = None, + git_hash: Optional[str] = None, + build_context: Optional[str] = None, + dockerfile: Optional[str] = None, +) -> Optional[Artifact]: + """Create a job from a path, not as the output of a run. + + Arguments: + path (str): Path to the job directory. + job_type (str): Type of the job. One of "git", "code", or "image". + entity (Optional[str]): Entity to create the job under. + project (Optional[str]): Project to create the job under. + name (Optional[str]): Name of the job. + description (Optional[str]): Description of the job. + aliases (Optional[List[str]]): Aliases for the job. 
+ runtime (Optional[str]): Python runtime of the job, like 3.9. + entrypoint (Optional[str]): Entrypoint of the job. If build_context is + provided, path is relative to build_context. + git_hash (Optional[str]): Git hash of a specific commit, when using git type jobs. + build_context (Optional[str]): Path to the build context, when using image type jobs. + dockerfile (Optional[str]): Path to the Dockerfile, when using image type jobs. + If build_context is provided, path is relative to build_context. + + Returns: + Optional[Artifact]: The artifact created by the job, the action (for printing), and job aliases. + None if job creation failed. + + Example: + ```python + artifact_job = wandb.create_job( + job_type="code", + path=".", + entity="wandb", + project="jobs", + name="my-train-job", + description="My training job", + aliases=["train"], + runtime="3.9", + entrypoint="train.py", + ) + # then run the newly created job + artifact_job.call() + ``` + """ + api = Api() + + artifact_job, _action, _aliases = _create_job( + api, + job_type, + path, + entity, + project, + name, + description, + aliases, + runtime, + entrypoint, + git_hash, + build_context, + dockerfile, + ) + + return artifact_job + + +def _create_job( + api: Api, + job_type: str, + path: str, + entity: Optional[str] = None, + project: Optional[str] = None, + name: Optional[str] = None, + description: Optional[str] = None, + aliases: Optional[List[str]] = None, + runtime: Optional[str] = None, + entrypoint: Optional[str] = None, + git_hash: Optional[str] = None, + build_context: Optional[str] = None, + dockerfile: Optional[str] = None, + base_image: Optional[str] = None, +) -> Tuple[Optional[Artifact], str, List[str]]: + wandb.termlog(f"Creating launch job of type: {job_type}...") + + if name and name != make_artifact_name_safe(name): + wandb.termerror( + f"Artifact names may only contain alphanumeric characters, dashes, underscores, and dots. 
Did you mean: {make_artifact_name_safe(name)}" + ) + return None, "", [] + + if runtime is not None: + if not re.match(r"^3\.\d+$", runtime): + wandb.termerror( + f"Runtime (-r, --runtime) must be a minor version of Python 3, " + f"e.g. 3.9 or 3.10, received {runtime}" + ) + return None, "", [] + aliases = aliases or [] + tempdir = tempfile.TemporaryDirectory() + try: + metadata, requirements = _make_metadata_for_partial_job( + job_type=job_type, + tempdir=tempdir, + git_hash=git_hash, + runtime=runtime, + path=path, + entrypoint=entrypoint, + ) + if not metadata: + return None, "", [] + except Exception as e: + wandb.termerror(f"Error creating job: {e}") + return None, "", [] + + _dump_metadata_and_requirements( + metadata=metadata, + tmp_path=tempdir.name, + requirements=requirements, + ) + + try: + # init hidden wandb run with job building disabled (handled manually) + run = wandb.init( + dir=tempdir.name, + settings={"silent": True, "disable_job_creation": True}, + entity=entity, + project=project, + job_type="cli_create_job", + ) + except Exception: + # Error printed by wandb.init + return None, "", [] + + job_builder = _configure_job_builder_for_partial(tempdir.name, job_source=job_type) + if job_type == "code": + assert entrypoint is not None + job_name = _make_code_artifact( + api=api, + job_builder=job_builder, + path=path, + entrypoint=entrypoint, + run=run, # type: ignore + entity=entity, + project=project, + name=name, + ) + if not job_name: + return None, "", [] + name = job_name + + # build job artifact, loads wandb-metadata and creates wandb-job.json here + artifact = job_builder.build( + api.api, + dockerfile=dockerfile, + build_context=build_context, + base_image=base_image, + ) + if not artifact: + wandb.termerror("JobBuilder failed to build a job") + _logger.debug("Failed to build job, check job source and metadata") + return None, "", [] + + if not name: + name = artifact.name + + aliases += job_builder._aliases + if "latest" not in aliases: + 
aliases += ["latest"] + + res, _ = api.create_artifact( + artifact_type_name="job", + artifact_collection_name=name, + digest=artifact.digest, + client_id=artifact._client_id, + sequence_client_id=artifact._sequence_client_id, + entity_name=entity, + project_name=project, + run_name=run.id, # type: ignore # run will be deleted after creation + description=description, + metadata={"_partial": True}, + is_user_created=True, + aliases=[{"artifactCollectionName": name, "alias": a} for a in aliases], + ) + action = "No changes detected for" + if not res.get("artifactSequence", {}).get("latestArtifact"): + # When there is no latestArtifact, we are creating new + action = "Created" + elif res.get("state") == "PENDING": + # updating an existing artifafct, state is pending awaiting call to + # log_artifact to upload and finalize artifact. If not pending, digest + # is the same as latestArtifact, so no changes detected + action = "Updated" + + run.log_artifact(artifact, aliases=aliases) # type: ignore + artifact.wait() + run.finish() # type: ignore + + # fetch, then delete hidden run + _run = wandb.Api().run(f"{entity}/{project}/{run.id}") # type: ignore + _run.delete() + + return artifact, action, aliases + + +def _make_metadata_for_partial_job( + job_type: str, + tempdir: tempfile.TemporaryDirectory, + git_hash: Optional[str], + runtime: Optional[str], + path: str, + entrypoint: Optional[str], +) -> Tuple[Optional[Dict[str, Any]], Optional[List[str]]]: + """Create metadata for partial jobs, return metadata and requirements.""" + metadata = {} + if job_type == "git": + assert entrypoint is not None + repo_metadata = _create_repo_metadata( + path=path, + tempdir=tempdir.name, + entrypoint=entrypoint, + git_hash=git_hash, + runtime=runtime, + ) + if not repo_metadata: + tempdir.cleanup() # otherwise git can pollute + return None, None + metadata.update(repo_metadata) + return metadata, None + + if job_type == "code": + assert entrypoint is not None + artifact_metadata, 
requirements = _create_artifact_metadata( + path=path, entrypoint=entrypoint, runtime=runtime + ) + if not artifact_metadata: + return None, None + metadata.update(artifact_metadata) + return metadata, requirements + + if job_type == "image": + if runtime: + wandb.termwarn( + "Setting runtime is not supported for image jobs, ignoring runtime" + ) + # TODO(gst): support entrypoint for image based jobs + if entrypoint: + wandb.termwarn( + "Setting an entrypoint is not currently supported for image jobs, ignoring entrypoint argument" + ) + metadata.update({"python": runtime or "", "docker": path}) + return metadata, None + + wandb.termerror(f"Invalid job type: {job_type}") + return None, None + + +def _maybe_warn_python_no_executable(entrypoint: str): + entrypoint_list = entrypoint.split(" ") + if len(entrypoint_list) == 1 and entrypoint_list[0].endswith(".py"): + wandb.termwarn( + f"Entrypoint {entrypoint} is a python file without an executable, you may want to use `python {entrypoint}` as the entrypoint instead." + ) + + +def _create_repo_metadata( + path: str, + tempdir: str, + entrypoint: str, + git_hash: Optional[str] = None, + runtime: Optional[str] = None, +) -> Optional[Dict[str, Any]]: + # Make sure the entrypoint doesn't contain any backward path traversal + if entrypoint and ".." 
in entrypoint: + wandb.termerror("Entrypoint cannot contain backward path traversal") + return None + + _maybe_warn_python_no_executable(entrypoint) + + if not _is_git_uri(path): + wandb.termerror("Path must be a git URI") + return None + + ref = GitReference(path, git_hash) + if not ref: + wandb.termerror("Could not parse git URI") + return None + + ref.fetch(tempdir) + + commit = ref.commit_hash + if not commit: + if not ref.commit_hash: + wandb.termerror("Could not find git commit hash") + return None + commit = ref.commit_hash + + local_dir = os.path.join(tempdir, ref.path or "") + python_version = runtime + if not python_version: + if os.path.exists(os.path.join(local_dir, "runtime.txt")): + with open(os.path.join(local_dir, "runtime.txt")) as f: + python_version = f.read().strip() + elif os.path.exists(os.path.join(local_dir, ".python-version")): + with open(os.path.join(local_dir, ".python-version")) as f: + python_version = f.read().strip().splitlines()[0] + else: + python_version, _ = get_current_python_version() + + python_version = _clean_python_version(python_version) + + metadata = { + "git": { + "commit": commit, + "remote": ref.url, + }, + "entrypoint": entrypoint.split(" "), + "python": python_version, # used to build container + "notebook": False, # partial jobs from notebooks not supported + } + + return metadata + + +def _create_artifact_metadata( + path: str, entrypoint: str, runtime: Optional[str] = None +) -> Tuple[Optional[Dict[str, Any]], Optional[List[str]]]: + if not os.path.isdir(path): + wandb.termerror("Path must be a valid file or directory") + return {}, [] + + _maybe_warn_python_no_executable(entrypoint) + + entrypoint_list = entrypoint.split(" ") + entrypoint_file = get_entrypoint_file(entrypoint_list) + + # read local requirements.txt and dump to temp dir for builder + requirements = [] + depspath = os.path.join(path, "requirements.txt") + if os.path.exists(depspath): + with open(depspath) as f: + requirements = 
f.read().splitlines() + + if not any(["wandb" in r for r in requirements]): + wandb.termwarn("wandb is not present in requirements.txt.") + + if runtime: + python_version = _clean_python_version(runtime) + else: + python_version, _ = get_current_python_version() + python_version = _clean_python_version(python_version) + + metadata = { + "python": python_version, + "codePath": entrypoint_file, + "entrypoint": entrypoint_list, + } + return metadata, requirements + + +def _configure_job_builder_for_partial(tmpdir: str, job_source: str) -> JobBuilder: + """Configure job builder with temp dir and job source.""" + # adjust git source to repo + if job_source == "git": + job_source = "repo" + + # adjust code source to artifact + if job_source == "code": + job_source = "artifact" + + settings = wandb.Settings() + settings.update({"files_dir": tmpdir, "job_source": job_source}) + job_builder = JobBuilder( + settings=settings, # type: ignore + verbose=True, + ) + job_builder._partial = True + # never allow notebook runs + job_builder._is_notebook_run = False + # set run inputs and outputs to empty dicts + job_builder.set_config({}) + job_builder.set_summary({}) + return job_builder + + +def _make_code_artifact( + api: Api, + job_builder: JobBuilder, + run: "wandb.sdk.wandb_run.Run", + path: str, + entrypoint: str, + entity: Optional[str], + project: Optional[str], + name: Optional[str], +) -> Optional[str]: + """Helper for creating and logging code artifacts. + + Returns the name of the eventual job. + """ + entrypoint_list = entrypoint.split(" ") + # We no longer require the entrypoint to end in an existing file. But we + # need something to use as the default job artifact name. In the future we + # may require the user to provide a job name explicitly when calling + # wandb job create. 
+ entrypoint_file = entrypoint_list[-1] + artifact_name = _make_code_artifact_name(os.path.join(path, entrypoint_file), name) + code_artifact = wandb.Artifact( + name=artifact_name, + type="code", + description="Code artifact for job", + ) + + try: + code_artifact.add_dir(path) + except Exception as e: + if os.path.islink(path): + wandb.termerror( + "Symlinks are not supported for code artifact jobs, please copy the code into a directory and try again" + ) + wandb.termerror(f"Error adding to code artifact: {e}") + return None + + # Remove paths we don't want to include, if present + for item in CODE_ARTIFACT_EXCLUDE_PATHS: + try: + code_artifact.remove(item) + except FileNotFoundError: + pass + + res, _ = api.create_artifact( + artifact_type_name="code", + artifact_collection_name=artifact_name, + digest=code_artifact.digest, + client_id=code_artifact._client_id, + sequence_client_id=code_artifact._sequence_client_id, + entity_name=entity, + project_name=project, + run_name=run.id, # run will be deleted after creation + description="Code artifact for job", + metadata={"codePath": path, "entrypoint": entrypoint_file}, + is_user_created=True, + aliases=[ + {"artifactCollectionName": artifact_name, "alias": a} for a in ["latest"] + ], + ) + run.log_artifact(code_artifact) + code_artifact.wait() + job_builder._handle_server_artifact(res, code_artifact) # type: ignore + + # code artifacts have "code" prefix, remove it and alias + if not name: + name = code_artifact.name.replace("code", "job").split(":")[0] + + return name + + +def _make_code_artifact_name(path: str, name: Optional[str]) -> str: + """Make a code artifact name from a path and user provided name.""" + if name: + return f"code-{name}" + + clean_path = path.replace("./", "") + if clean_path[0] == "/": + clean_path = clean_path[1:] + if clean_path[-1] == "/": + clean_path = clean_path[:-1] + + path_name = f"code-{make_artifact_name_safe(clean_path)}" + return path_name + + +def 
_dump_metadata_and_requirements( + tmp_path: str, metadata: Dict[str, Any], requirements: Optional[List[str]] +) -> None: + """Dump manufactured metadata and requirements.txt. + + File used by the job_builder to create a job from provided metadata. + """ + filesystem.mkdir_exists_ok(tmp_path) + with open(os.path.join(tmp_path, "wandb-metadata.json"), "w") as f: + json.dump(metadata, f) + + requirements = requirements or [] + with open(os.path.join(tmp_path, "requirements.txt"), "w") as f: + f.write("\n".join(requirements)) + + +def _clean_python_version(python_version: str) -> str: + # remove micro if present + if python_version.count(".") > 1: + python_version = ".".join(python_version.split(".")[:2]) + _logger.debug(f"micro python version stripped. Now: {python_version}") + return python_version diff --git a/parrot/lib/python3.10/site-packages/wandb/sdk/launch/environment/__pycache__/abstract.cpython-310.pyc b/parrot/lib/python3.10/site-packages/wandb/sdk/launch/environment/__pycache__/abstract.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8f2deda251d0b037e0710305393ae316b068b24e Binary files /dev/null and b/parrot/lib/python3.10/site-packages/wandb/sdk/launch/environment/__pycache__/abstract.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/wandb/sdk/launch/environment/__pycache__/aws_environment.cpython-310.pyc b/parrot/lib/python3.10/site-packages/wandb/sdk/launch/environment/__pycache__/aws_environment.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ee032aae0910341f91de5af56b00b84f20fd066f Binary files /dev/null and b/parrot/lib/python3.10/site-packages/wandb/sdk/launch/environment/__pycache__/aws_environment.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/wandb/sdk/launch/environment/__pycache__/azure_environment.cpython-310.pyc 
b/parrot/lib/python3.10/site-packages/wandb/sdk/launch/environment/__pycache__/azure_environment.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c59fa93c785abae0a3503ec78f0d44e2d6ac2b0e Binary files /dev/null and b/parrot/lib/python3.10/site-packages/wandb/sdk/launch/environment/__pycache__/azure_environment.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/wandb/sdk/launch/environment/__pycache__/gcp_environment.cpython-310.pyc b/parrot/lib/python3.10/site-packages/wandb/sdk/launch/environment/__pycache__/gcp_environment.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..48eabedbedbeafd1b50c8f93401d6e1a5d3ceee2 Binary files /dev/null and b/parrot/lib/python3.10/site-packages/wandb/sdk/launch/environment/__pycache__/gcp_environment.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/wandb/sdk/launch/environment/__pycache__/local_environment.cpython-310.pyc b/parrot/lib/python3.10/site-packages/wandb/sdk/launch/environment/__pycache__/local_environment.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d26a009bef92954123a24452f2f458b86e62204b Binary files /dev/null and b/parrot/lib/python3.10/site-packages/wandb/sdk/launch/environment/__pycache__/local_environment.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/wandb/sdk/launch/environment/abstract.py b/parrot/lib/python3.10/site-packages/wandb/sdk/launch/environment/abstract.py new file mode 100644 index 0000000000000000000000000000000000000000..a736ef0404018f0e640a6dad397de281a77895b2 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/wandb/sdk/launch/environment/abstract.py @@ -0,0 +1,29 @@ +"""Abstract base class for environments.""" + +from abc import ABC, abstractmethod + + +class AbstractEnvironment(ABC): + """Abstract base class for environments.""" + + region: str + + @abstractmethod + async def verify(self) -> None: + """Verify that 
the environment is configured correctly.""" + raise NotImplementedError + + @abstractmethod + async def upload_file(self, source: str, destination: str) -> None: + """Upload a file from the local filesystem to storage in the environment.""" + raise NotImplementedError + + @abstractmethod + async def upload_dir(self, source: str, destination: str) -> None: + """Upload the contents of a directory from the local filesystem to the environment.""" + raise NotImplementedError + + @abstractmethod + async def verify_storage_uri(self, uri: str) -> None: + """Verify that the storage URI is configured correctly.""" + raise NotImplementedError diff --git a/parrot/lib/python3.10/site-packages/wandb/sdk/launch/environment/aws_environment.py b/parrot/lib/python3.10/site-packages/wandb/sdk/launch/environment/aws_environment.py new file mode 100644 index 0000000000000000000000000000000000000000..4ca54b6d1ac269916fcd98a7c0a4b6fa25465b45 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/wandb/sdk/launch/environment/aws_environment.py @@ -0,0 +1,322 @@ +"""Implements the AWS environment.""" + +import logging +import os +from typing import Dict, Optional + +from wandb.sdk.launch.errors import LaunchError +from wandb.util import get_module + +from ..utils import ARN_PARTITION_RE, S3_URI_RE, event_loop_thread_exec +from .abstract import AbstractEnvironment + +boto3 = get_module( + "boto3", + required="AWS environment requires boto3 to be installed. Please install " + "it with `pip install wandb[launch]`.", +) +botocore = get_module( + "botocore", + required="AWS environment requires botocore to be installed. Please install " + "it with `pip install wandb[launch]`.", +) + +_logger = logging.getLogger(__name__) + + +class AwsEnvironment(AbstractEnvironment): + """AWS environment.""" + + def __init__( + self, + region: str, + access_key: str, + secret_key: str, + session_token: str, + ) -> None: + """Initialize the AWS environment. + + Arguments: + region (str): The AWS region. 
+ + Raises: + LaunchError: If the AWS environment is not configured correctly. + """ + super().__init__() + _logger.info(f"Initializing AWS environment in region {region}.") + self._region = region + self._access_key = access_key + self._secret_key = secret_key + self._session_token = session_token + self._account = None + self._partition = None + + @classmethod + def from_default(cls, region: Optional[str] = None) -> "AwsEnvironment": + """Create an AWS environment from the default AWS environment. + + Arguments: + region (str, optional): The AWS region. + verify (bool, optional): Whether to verify the AWS environment. Defaults to True. + + Returns: + AwsEnvironment: The AWS environment. + """ + _logger.info("Creating AWS environment from default credentials.") + try: + session = boto3.Session() + if hasattr(session, "region"): + region = region or session.region + region = region or os.environ.get("AWS_REGION") + credentials = session.get_credentials() + if not credentials: + raise LaunchError( + "Could not create AWS environment from default environment. Please verify that your AWS credentials are configured correctly." + ) + access_key = credentials.access_key + secret_key = credentials.secret_key + session_token = credentials.token + except botocore.client.ClientError as e: + raise LaunchError( + f"Could not create AWS environment from default environment. Please verify that your AWS credentials are configured correctly. {e}" + ) + if not region: + raise LaunchError( + "Could not create AWS environment from default environment. Region not specified." + ) + return cls( + region=region, + access_key=access_key, + secret_key=secret_key, + session_token=session_token, + ) + + @classmethod + def from_config( + cls, + config: Dict[str, str], + ) -> "AwsEnvironment": + """Create an AWS environment from the default AWS environment. + + Arguments: + config (dict): Configuration dictionary. + verify (bool, optional): Whether to verify the AWS environment. 
Defaults to True. + + Returns: + AwsEnvironment: The AWS environment. + """ + region = str(config.get("region", "")) + if not region: + raise LaunchError( + "Could not create AWS environment from config. Region not specified." + ) + return cls.from_default( + region=region, + ) + + @property + def region(self) -> str: + """The AWS region.""" + return self._region + + @region.setter + def region(self, region: str) -> None: + self._region = region + + async def get_partition(self) -> str: + """Set the partition for the AWS environment.""" + try: + session = await self.get_session() + client = await event_loop_thread_exec(session.client)("sts") + get_caller_identity = event_loop_thread_exec(client.get_caller_identity) + identity = await get_caller_identity() + arn = identity.get("Arn") + if not arn: + raise LaunchError( + "Could not set partition for AWS environment. ARN not found." + ) + matched_partition = ARN_PARTITION_RE.match(arn) + if not matched_partition: + raise LaunchError( + f"Could not set partition for AWS environment. ARN {arn} is not valid." + ) + partition = matched_partition.group(1) + return partition + except botocore.exceptions.ClientError as e: + raise LaunchError( + f"Could not set partition for AWS environment. {e}" + ) from e + + async def verify(self) -> None: + """Verify that the AWS environment is configured correctly. + + Raises: + LaunchError: If the AWS environment is not configured correctly. + """ + _logger.debug("Verifying AWS environment.") + try: + session = await self.get_session() + client = await event_loop_thread_exec(session.client)("sts") + get_caller_identity = event_loop_thread_exec(client.get_caller_identity) + self._account = (await get_caller_identity()).get("Account") + # TODO: log identity details from the response + except botocore.exceptions.ClientError as e: + raise LaunchError( + f"Could not verify AWS environment. Please verify that your AWS credentials are configured correctly. 
{e}" + ) from e + + async def get_session(self) -> "boto3.Session": # type: ignore + """Get an AWS session. + + Returns: + boto3.Session: The AWS session. + + Raises: + LaunchError: If the AWS session could not be created. + """ + _logger.debug(f"Creating AWS session in region {self._region}") + try: + session = event_loop_thread_exec(boto3.Session) + return await session( + region_name=self._region, + aws_access_key_id=self._access_key, + aws_secret_access_key=self._secret_key, + aws_session_token=self._session_token, + ) + except botocore.exceptions.ClientError as e: + raise LaunchError(f"Could not create AWS session. {e}") + + async def upload_file(self, source: str, destination: str) -> None: + """Upload a file to s3 from local storage. + + The destination is a valid s3 URI, e.g. s3://bucket/key and will + be used as a prefix for the uploaded file. Only the filename of the source + is kept in the upload key. So if the source is "foo/bar" and the + destination is "s3://bucket/key", the file "foo/bar" will be uploaded + to "s3://bucket/key/bar". + + Arguments: + source (str): The path to the file or directory. + destination (str): The uri of the storage destination. This should + be a valid s3 URI, e.g. s3://bucket/key. + + Raises: + LaunchError: If the copy fails, the source path does not exist, or the + destination is not a valid s3 URI, or the upload fails. + """ + _logger.debug(f"Uploading {source} to {destination}") + _err_prefix = f"Error attempting to copy {source} to {destination}." + if not os.path.isfile(source): + raise LaunchError(f"{_err_prefix}: Source {source} does not exist.") + match = S3_URI_RE.match(destination) + if not match: + raise LaunchError( + f"{_err_prefix}: Destination {destination} is not a valid s3 URI." 
+ ) + bucket = match.group(1) + key = match.group(2).lstrip("/") + if not key: + key = "" + session = await self.get_session() + try: + client = await event_loop_thread_exec(session.client)("s3") + client.upload_file(source, bucket, key) + except botocore.exceptions.ClientError as e: + raise LaunchError( + f"{_err_prefix}: botocore error attempting to copy {source} to {destination}. {e}" + ) + + async def upload_dir(self, source: str, destination: str) -> None: + """Upload a directory to s3 from local storage. + + The upload will place the contents of the source directory in the destination + with the same directory structure. So if the source is "foo/bar" and the + destination is "s3://bucket/key", the contents of "foo/bar" will be uploaded + to "s3://bucket/key/bar". + + Arguments: + source (str): The path to the file or directory. + destination (str): The URI of the storage. + recursive (bool, optional): If True, copy the directory recursively. Defaults to False. + + Raises: + LaunchError: If the copy fails, the source path does not exist, or the + destination is not a valid s3 URI. + """ + _logger.debug(f"Uploading {source} to {destination}") + _err_prefix = f"Error attempting to copy {source} to {destination}." + if not os.path.isdir(source): + raise LaunchError(f"{_err_prefix}: Source {source} does not exist.") + match = S3_URI_RE.match(destination) + if not match: + raise LaunchError( + f"{_err_prefix}: Destination {destination} is not a valid s3 URI." 
+ ) + bucket = match.group(1) + key = match.group(2).lstrip("/") + if not key: + key = "" + session = await self.get_session() + try: + client = await event_loop_thread_exec(session.client)("s3") + for path, _, files in os.walk(source): + for file in files: + abs_path = os.path.join(path, file) + key_path = ( + abs_path.replace(source, "").replace("\\", "/").lstrip("/") + ) + client.upload_file( + abs_path, + bucket, + key_path, + ) + except botocore.exceptions.ClientError as e: + raise LaunchError( + f"{_err_prefix}: botocore error attempting to copy {source} to {destination}. {e}" + ) from e + except Exception as e: + raise LaunchError( + f"{_err_prefix}: Unexpected error attempting to copy {source} to {destination}. {e}" + ) from e + + async def verify_storage_uri(self, uri: str) -> None: + """Verify that s3 storage is configured correctly. + + This will check that the bucket exists and that the credentials are + configured correctly. + + Arguments: + uri (str): The URI of the storage. + + Raises: + LaunchError: If the storage is not configured correctly or the URI is + not a valid s3 URI. + + Returns: + None + """ + _logger.debug(f"Verifying storage {uri}") + match = S3_URI_RE.match(uri) + if not match: + raise LaunchError( + f"Failed to validate storage uri: {uri} is not a valid s3 URI." + ) + bucket = match.group(1) + try: + session = await self.get_session() + client = await event_loop_thread_exec(session.client)("s3") + client.head_bucket(Bucket=bucket) + except botocore.exceptions.ClientError as e: + if e.response["Error"]["Code"] == "404": + raise LaunchError( + f"Could not verify AWS storage uri {uri}. Bucket {bucket} does not exist." + ) + if e.response["Error"]["Code"] == "403": + raise LaunchError( + f"Could not verify AWS storage uri {uri}. " + "Bucket {bucket} is not accessible. Please check that this " + "client is authenticated with permission to access the bucket." + ) + raise LaunchError( + f"Failed to verify AWS storage uri {uri}. 
Response: {e.response} Please verify that your AWS credentials are configured correctly." + ) diff --git a/parrot/lib/python3.10/site-packages/wandb/sdk/launch/environment/azure_environment.py b/parrot/lib/python3.10/site-packages/wandb/sdk/launch/environment/azure_environment.py new file mode 100644 index 0000000000000000000000000000000000000000..2dbfebbe14d499302b4c22dd2321cdfb4ff8df63 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/wandb/sdk/launch/environment/azure_environment.py @@ -0,0 +1,105 @@ +"""Implementation of AzureEnvironment class.""" + +from typing import Tuple + +from azure.core.exceptions import HttpResponseError # type: ignore +from azure.identity import DefaultAzureCredential # type: ignore +from azure.storage.blob import BlobClient, BlobServiceClient # type: ignore + +from ..errors import LaunchError +from ..utils import AZURE_BLOB_REGEX +from .abstract import AbstractEnvironment + + +class AzureEnvironment(AbstractEnvironment): + """AzureEnvironment is a helper for accessing Azure resources.""" + + def __init__( + self, + ) -> None: + """Initialize an AzureEnvironment.""" + + @classmethod + def from_config(cls, config: dict, verify: bool = True) -> "AzureEnvironment": + """Create an AzureEnvironment from a config dict.""" + return cls() + + @classmethod + def get_credentials(cls) -> DefaultAzureCredential: + """Get Azure credentials.""" + try: + return DefaultAzureCredential() + except Exception as e: + raise LaunchError( + f"Could not get Azure credentials. Please make sure you have " + f"configured your Azure CLI correctly.\n{e}" + ) from e + + async def upload_file(self, source: str, destination: str) -> None: + """Upload a file to Azure blob storage. + + Arguments: + source (str): The path to the file to upload. + destination (str): The destination path in Azure blob storage. Ex: + https://.blob.core.windows.net// + Raise: + LaunchError: If the file could not be uploaded. 
+ """ + storage_account, storage_container, path = self.parse_uri(destination) + _err_prefix = f"Could not upload file {source} to Azure blob {destination}" + creds = self.get_credentials() + try: + client = BlobClient( + f"https://{storage_account}.blob.core.windows.net", + storage_container, + path, + credential=creds, + ) + with open(source, "rb") as f: + client.upload_blob(f, overwrite=True) + except HttpResponseError as e: + raise LaunchError(f"{_err_prefix}: {e.message}") from e + except Exception as e: + raise LaunchError(f"{_err_prefix}: {e.__class__.__name__}: {e}") from e + + async def upload_dir(self, source: str, destination: str) -> None: + """Upload a directory to Azure blob storage.""" + raise NotImplementedError() + + async def verify_storage_uri(self, uri: str) -> None: + """Verify that the given blob storage prefix exists. + + Args: + uri (str): The URI to verify. + """ + creds = self.get_credentials() + storage_account, storage_container, _ = self.parse_uri(uri) + try: + client = BlobServiceClient( + f"https://{storage_account}.blob.core.windows.net", + credential=creds, + ) + client.get_container_client(storage_container) + except Exception as e: + raise LaunchError( + f"Could not verify storage URI {uri} in container {storage_container}." + ) from e + + async def verify(self) -> None: + """Verify that the AzureEnvironment is valid.""" + self.get_credentials() + + @staticmethod + def parse_uri(uri: str) -> Tuple[str, str, str]: + """Parse an Azure blob storage URI into a storage account and container. + + Args: + uri (str): The URI to parse. + + Returns: + Tuple[str, str, prefix]: The storage account, container, and path. 
+ """ + match = AZURE_BLOB_REGEX.match(uri) + if match is None: + raise LaunchError(f"Could not parse Azure blob URI {uri}.") + return match.group(1), match.group(2), match.group(3) diff --git a/parrot/lib/python3.10/site-packages/wandb/sdk/launch/environment/gcp_environment.py b/parrot/lib/python3.10/site-packages/wandb/sdk/launch/environment/gcp_environment.py new file mode 100644 index 0000000000000000000000000000000000000000..fa15211160dd8716722f99c45549302fe2807feb --- /dev/null +++ b/parrot/lib/python3.10/site-packages/wandb/sdk/launch/environment/gcp_environment.py @@ -0,0 +1,335 @@ +"""Implementation of the GCP environment for wandb launch.""" + +import logging +import os +import subprocess +from typing import Optional + +from wandb.sdk.launch.errors import LaunchError +from wandb.util import get_module + +from ..utils import GCS_URI_RE, event_loop_thread_exec +from .abstract import AbstractEnvironment + +google = get_module( + "google", + required="Google Cloud Platform support requires the google package. Please" + " install it with `pip install wandb[launch]`.", +) +google.cloud.compute_v1 = get_module( + "google.cloud.compute_v1", + required="Google Cloud Platform support requires the google-cloud-compute package. " + "Please install it with `pip install wandb[launch]`.", +) +google.auth.credentials = get_module( + "google.auth.credentials", + required="Google Cloud Platform support requires google-auth. " + "Please install it with `pip install wandb[launch]`.", +) +google.auth.transport.requests = get_module( + "google.auth.transport.requests", + required="Google Cloud Platform support requires google-auth. " + "Please install it with `pip install wandb[launch]`.", +) +google.api_core.exceptions = get_module( + "google.api_core.exceptions", + required="Google Cloud Platform support requires google-api-core. 
" + "Please install it with `pip install wandb[launch]`.", +) +google.cloud.storage = get_module( + "google.cloud.storage", + required="Google Cloud Platform support requires google-cloud-storage. " + "Please install it with `pip install wandb[launch].", +) + + +_logger = logging.getLogger(__name__) + +GCP_REGION_ENV_VAR = "GOOGLE_CLOUD_REGION" + + +class GcpEnvironment(AbstractEnvironment): + """GCP Environment. + + Attributes: + region: The GCP region. + """ + + region: str + + def __init__( + self, + region: str, + ) -> None: + """Initialize the GCP environment. + + Arguments: + region: The GCP region. + verify: Whether to verify the credentials, region, and project. + + Raises: + LaunchError: If verify is True and the environment is not properly + configured. + """ + super().__init__() + _logger.info(f"Initializing GcpEnvironment in region {region}") + self.region: str = region + self._project = "" + + @classmethod + def from_config(cls, config: dict) -> "GcpEnvironment": + """Create a GcpEnvironment from a config dictionary. + + Arguments: + config: The config dictionary. + + Returns: + GcpEnvironment: The GcpEnvironment. + """ + if config.get("type") != "gcp": + raise LaunchError( + f"Could not create GcpEnvironment from config. Expected type 'gcp' " + f"but got '{config.get('type')}'." + ) + region = config.get("region", None) + if not region: + raise LaunchError( + "Could not create GcpEnvironment from config. Missing 'region' " + "field." + ) + return cls(region=region) + + @classmethod + def from_default( + cls, + ) -> "GcpEnvironment": + """Create a GcpEnvironment from the default configuration. + + Returns: + GcpEnvironment: The GcpEnvironment. + """ + region = get_default_region() + if region is None: + raise LaunchError( + "Could not create GcpEnvironment from user's gcloud configuration. " + "Please set the default region with `gcloud config set compute/region` " + "or set the environment variable {GCP_REGION_ENV_VAR}. 
" + "Alternatively, you may specify the region explicitly in your " + "wandb launch configuration at `$HOME/.config/wandb/launch-config.yaml`. " + "See https://docs.wandb.ai/guides/launch/run-agent#environments for more information." + ) + return cls(region=region) + + @property + def project(self) -> str: + """Get the name of the gcp project associated with the credentials. + + Returns: + str: The name of the gcp project. + + Raises: + LaunchError: If the launch environment cannot be verified. + """ + return self._project + + async def get_credentials(self) -> google.auth.credentials.Credentials: # type: ignore + """Get the GCP credentials. + + Uses google.auth.default() to get the credentials. If the credentials + are invalid, this method will refresh them. If the credentials are + still invalid after refreshing, this method will raise an error. + + Returns: + google.auth.credentials.Credentials: The GCP credentials. + + Raises: + LaunchError: If the GCP credentials are invalid. + """ + _logger.debug("Getting GCP credentials") + # TODO: Figure out a minimal set of scopes. + try: + google_auth_default = event_loop_thread_exec(google.auth.default) + creds, project = await google_auth_default() + if not self._project: + self._project = project + _logger.debug("Refreshing GCP credentials") + await event_loop_thread_exec(creds.refresh)( + google.auth.transport.requests.Request() + ) + except google.auth.exceptions.DefaultCredentialsError as e: + raise LaunchError( + "No Google Cloud Platform credentials found. Please run " + "`gcloud auth application-default login` or set the environment " + "variable GOOGLE_APPLICATION_CREDENTIALS to the path of a valid " + "service account key file." + ) from e + except google.auth.exceptions.RefreshError as e: + raise LaunchError( + "Could not refresh Google Cloud Platform credentials. 
Please run " + "`gcloud auth application-default login` or set the environment " + "variable GOOGLE_APPLICATION_CREDENTIALS to the path of a valid " + "service account key file." + ) from e + if not creds.valid: + raise LaunchError( + "Invalid Google Cloud Platform credentials. Please run " + "`gcloud auth application-default login` or set the environment " + "variable GOOGLE_APPLICATION_CREDENTIALS to the path of a valid " + "service account key file." + ) + return creds + + async def verify(self) -> None: + """Verify the credentials, region, and project. + + Credentials and region are verified by calling get_credentials(). The + region and is verified by calling the compute API. + + Raises: + LaunchError: If the credentials, region, or project are invalid. + + Returns: + None + """ + _logger.debug("Verifying GCP environment") + await self.get_credentials() + + async def verify_storage_uri(self, uri: str) -> None: + """Verify that a storage URI is valid. + + Arguments: + uri: The storage URI. + + Raises: + LaunchError: If the storage URI is invalid. + """ + match = GCS_URI_RE.match(uri) + if not match: + raise LaunchError(f"Invalid GCS URI: {uri}") + bucket = match.group(1) + cloud_storage_client = event_loop_thread_exec(google.cloud.storage.Client) + try: + credentials = await self.get_credentials() + storage_client = await cloud_storage_client(credentials=credentials) + bucket = await event_loop_thread_exec(storage_client.get_bucket)(bucket) + except google.api_core.exceptions.GoogleAPICallError as e: + raise LaunchError( + f"Failed verifying storage uri {uri}: bucket {bucket} does not exist." + ) from e + except google.api_core.exceptions.Forbidden as e: + raise LaunchError( + f"Failed verifying storage uri {uri}: bucket {bucket} is not accessible. Please check your permissions and try again." + ) from e + + async def upload_file(self, source: str, destination: str) -> None: + """Upload a file to GCS. + + Arguments: + source: The path to the local file. 
+ destination: The path to the GCS file. + + Raises: + LaunchError: If the file cannot be uploaded. + """ + _logger.debug(f"Uploading file {source} to {destination}") + _err_prefix = f"Could not upload file {source} to GCS destination {destination}" + if not os.path.isfile(source): + raise LaunchError(f"{_err_prefix}: File {source} does not exist.") + match = GCS_URI_RE.match(destination) + if not match: + raise LaunchError(f"{_err_prefix}: Invalid GCS URI: {destination}") + bucket = match.group(1) + key = match.group(2).lstrip("/") + google_storage_client = event_loop_thread_exec(google.cloud.storage.Client) + credentials = await self.get_credentials() + try: + storage_client = await google_storage_client(credentials=credentials) + bucket = await event_loop_thread_exec(storage_client.bucket)(bucket) + blob = await event_loop_thread_exec(bucket.blob)(key) + await event_loop_thread_exec(blob.upload_from_filename)(source) + except google.api_core.exceptions.GoogleAPICallError as e: + resp = e.response + assert resp is not None + try: + message = resp.json()["error"]["message"] + except Exception: + message = str(resp) + raise LaunchError(f"{_err_prefix}: {message}") from e + + async def upload_dir(self, source: str, destination: str) -> None: + """Upload a directory to GCS. + + Arguments: + source: The path to the local directory. + destination: The path to the GCS directory. + + Raises: + LaunchError: If the directory cannot be uploaded. 
+ """ + _logger.debug(f"Uploading directory {source} to {destination}") + _err_prefix = ( + f"Could not upload directory {source} to GCS destination {destination}" + ) + if not os.path.isdir(source): + raise LaunchError(f"{_err_prefix}: Directory {source} does not exist.") + match = GCS_URI_RE.match(destination) + if not match: + raise LaunchError(f"{_err_prefix}: Invalid GCS URI: {destination}") + bucket = match.group(1) + key = match.group(2).lstrip("/") + google_storage_client = event_loop_thread_exec(google.cloud.storage.Client) + credentials = await self.get_credentials() + try: + storage_client = await google_storage_client(credentials=credentials) + bucket = await event_loop_thread_exec(storage_client.bucket)(bucket) + for root, _, files in os.walk(source): + for file in files: + local_path = os.path.join(root, file) + gcs_path = os.path.join( + key, os.path.relpath(local_path, source) + ).replace("\\", "/") + blob = await event_loop_thread_exec(bucket.blob)(gcs_path) + await event_loop_thread_exec(blob.upload_from_filename)(local_path) + except google.api_core.exceptions.GoogleAPICallError as e: + resp = e.response + assert resp is not None + try: + message = resp.json()["error"]["message"] + except Exception: + message = str(resp) + raise LaunchError(f"{_err_prefix}: {message}") from e + except Exception as e: + raise LaunchError(f"{_err_prefix}: GCS upload failed: {e}") from e + + +def get_gcloud_config_value(config_name: str) -> Optional[str]: + """Get a value from gcloud config. + + Arguments: + config_name: The name of the config value. + + Returns: + str: The config value, or None if the value is not set. 
+ """ + try: + output = subprocess.check_output( + ["gcloud", "config", "get-value", config_name], stderr=subprocess.STDOUT + ) + value = str(output.decode("utf-8").strip()) + if value and "unset" not in value: + return value + return None + except subprocess.CalledProcessError: + return None + + +def get_default_region() -> Optional[str]: + """Get the default region from gcloud config or environment variables. + + Returns: + str: The default region, or None if it cannot be determined. + """ + region = get_gcloud_config_value("compute/region") + if not region: + region = os.environ.get(GCP_REGION_ENV_VAR) + return region diff --git a/parrot/lib/python3.10/site-packages/wandb/sdk/launch/environment/local_environment.py b/parrot/lib/python3.10/site-packages/wandb/sdk/launch/environment/local_environment.py new file mode 100644 index 0000000000000000000000000000000000000000..6022338631781a6c74af73a537689b226b5ba0e7 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/wandb/sdk/launch/environment/local_environment.py @@ -0,0 +1,66 @@ +"""Dummy local environment implementation. This is the default environment.""" + +from typing import Any, Dict, Union + +from wandb.sdk.launch.errors import LaunchError + +from .abstract import AbstractEnvironment + + +class LocalEnvironment(AbstractEnvironment): + """Local environment class.""" + + def __init__(self) -> None: + """Initialize a local environment by doing nothing.""" + pass + + @classmethod + def from_config( + cls, config: Dict[str, Union[Dict[str, Any], str]] + ) -> "LocalEnvironment": + """Create a local environment from a config. + + Arguments: + config (dict): The config. This is ignored. + + Returns: + LocalEnvironment: The local environment. 
+ """ + return cls() + + async def verify(self) -> None: + """Verify that the local environment is configured correctly.""" + raise LaunchError("Attempted to verify LocalEnvironment.") + + async def verify_storage_uri(self, uri: str) -> None: + """Verify that the storage URI is configured correctly. + + Arguments: + uri (str): The storage URI. This is ignored. + """ + raise LaunchError("Attempted to verify storage uri for LocalEnvironment.") + + async def upload_file(self, source: str, destination: str) -> None: + """Upload a file from the local filesystem to storage in the environment. + + Arguments: + source (str): The source file. This is ignored. + destination (str): The destination file. This is ignored. + """ + raise LaunchError("Attempted to upload file for LocalEnvironment.") + + async def upload_dir(self, source: str, destination: str) -> None: + """Upload the contents of a directory from the local filesystem to the environment. + + Arguments: + source (str): The source directory. This is ignored. + destination (str): The destination directory. This is ignored. + """ + raise LaunchError("Attempted to upload directory for LocalEnvironment.") + + async def get_project(self) -> str: + """Get the project of the local environment. + + Returns: An empty string. 
+ """ + raise LaunchError("Attempted to get project for LocalEnvironment.") diff --git a/parrot/lib/python3.10/site-packages/wandb/sdk/launch/errors.py b/parrot/lib/python3.10/site-packages/wandb/sdk/launch/errors.py new file mode 100644 index 0000000000000000000000000000000000000000..36e98600a102472f4cd998d8fd8170182279ee1f --- /dev/null +++ b/parrot/lib/python3.10/site-packages/wandb/sdk/launch/errors.py @@ -0,0 +1,19 @@ +from wandb.errors import Error + + +class LaunchError(Error): + """Raised when a known error occurs in wandb launch.""" + + pass + + +class LaunchDockerError(Error): + """Raised when Docker daemon is not running.""" + + pass + + +class ExecutionError(Error): + """Generic execution exception.""" + + pass diff --git a/parrot/lib/python3.10/site-packages/wandb/sdk/launch/git_reference.py b/parrot/lib/python3.10/site-packages/wandb/sdk/launch/git_reference.py new file mode 100644 index 0000000000000000000000000000000000000000..4eeac74e09f00fe32eb65f41baab0f69519c4842 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/wandb/sdk/launch/git_reference.py @@ -0,0 +1,109 @@ +"""Support for parsing GitHub URLs (which might be user provided) into constituent parts.""" + +import re +from dataclasses import dataclass +from enum import IntEnum +from typing import Optional, Tuple, Union + +from wandb.sdk.launch.errors import LaunchError + +PREFIX_HTTPS = "https://" +PREFIX_SSH = "git@" +SUFFIX_GIT = ".git" + + +GIT_COMMIT_REGEX = re.compile(r"[0-9a-f]{40}") + + +class ReferenceType(IntEnum): + BRANCH = 1 + COMMIT = 2 + + +def _parse_netloc(netloc: str) -> Tuple[Optional[str], Optional[str], str]: + """Parse netloc into username, password, and host. 
+ + github.com => None, None, "@github.com" + username@github.com => "username", None, "github.com" + username:password@github.com => "username", "password", "github.com" + """ + parts = netloc.split("@", 1) + if len(parts) == 1: + return None, None, parts[0] + auth, host = parts + parts = auth.split(":", 1) + if len(parts) == 1: + return parts[0], None, host + return parts[0], parts[1], host + + +@dataclass +class GitReference: + def __init__(self, remote: str, ref: Optional[str] = None) -> None: + """Initialize a reference from a remote and ref. + + Arguments: + remote: A remote URL or URI. + ref: A branch, tag, or commit hash. + """ + self.uri = remote + self.ref = ref + + @property + def url(self) -> Optional[str]: + return self.uri + + def fetch(self, dst_dir: str) -> None: + """Fetch the repo into dst_dir and refine githubref based on what we learn.""" + # We defer importing git until the last moment, because the import requires that the git + # executable is available on the PATH, so we only want to fail if we actually need it. + import git # type: ignore + + repo = git.Repo.init(dst_dir) + self.path = repo.working_dir + origin = repo.create_remote("origin", self.uri or "") + + try: + # We fetch the origin so that we have branch and tag references + origin.fetch() + except git.exc.GitCommandError as e: + raise LaunchError( + f"Unable to fetch from git remote repository {self.url}:\n{e}" + ) + + ref: Union[git.RemoteReference, str] + if self.ref: + if self.ref in origin.refs: + ref = origin.refs[self.ref] + else: + ref = self.ref + head = repo.create_head(self.ref, ref) + head.checkout() + self.commit_hash = head.commit.hexsha + + else: + # TODO: Is there a better way to do this? 
+ default_branch = None + for ref in repo.references: + if hasattr(ref, "tag"): # Skip tag references + continue + refname = ref.name + if refname.startswith("origin/"): # Trim off "origin/" + refname = refname[7:] + if refname == "main": + default_branch = "main" + break + if refname == "master": + default_branch = "master" + # Keep looking in case we also have a main, which we let take precedence + # (While the references appear to be sorted, not clear if that's guaranteed.) + if not default_branch: + raise LaunchError( + f"Unable to determine branch or commit to checkout from {self.url}" + ) + self.default_branch = default_branch + self.ref = default_branch + head = repo.create_head(default_branch, origin.refs[default_branch]) + head.checkout() + self.commit_hash = head.commit.hexsha + repo.submodule_update(init=True, recursive=True) diff --git a/parrot/lib/python3.10/site-packages/wandb/sdk/launch/loader.py b/parrot/lib/python3.10/site-packages/wandb/sdk/launch/loader.py new file mode 100644 index 0000000000000000000000000000000000000000..d8015a25d233688ab2e2e1634d0bed621121235d --- /dev/null +++ b/parrot/lib/python3.10/site-packages/wandb/sdk/launch/loader.py @@ -0,0 +1,249 @@ +"""Utilities for the agent.""" + +from typing import Any, Dict, Optional + +import wandb +from wandb.apis.internal import Api +from wandb.docker import is_docker_installed +from wandb.sdk.launch.errors import LaunchError + +from .builder.abstract import AbstractBuilder +from .environment.abstract import AbstractEnvironment +from .registry.abstract import AbstractRegistry +from .runner.abstract import AbstractRunner + +WANDB_RUNNERS = { + "local-container", + "local-process", + "kubernetes", + "vertex", + "sagemaker", +} + + +def environment_from_config(config: Optional[Dict[str, Any]]) -> AbstractEnvironment: + """Create an environment from a config. + + This helper function is used to create an environment from a config. 
The + config should have a "type" key that specifies the type of environment to + create. The remaining keys are passed to the environment's from_config + method. If the config is None or empty, a LocalEnvironment is returned. + + Arguments: + config (Dict[str, Any]): The config. + + Returns: + Environment: The environment constructed. + """ + if not config: + from .environment.local_environment import LocalEnvironment + + return LocalEnvironment() # This is the default, dummy environment. + env_type = config.get("type") + if not env_type: + raise LaunchError( + "Could not create environment from config. Environment type not specified!" + ) + if env_type == "local": + from .environment.local_environment import LocalEnvironment + + return LocalEnvironment.from_config(config) + if env_type == "aws": + from .environment.aws_environment import AwsEnvironment + + return AwsEnvironment.from_config(config) + if env_type == "gcp": + from .environment.gcp_environment import GcpEnvironment + + return GcpEnvironment.from_config(config) + if env_type == "azure": + from .environment.azure_environment import AzureEnvironment + + return AzureEnvironment.from_config(config) + raise LaunchError( + f"Could not create environment from config. Invalid type: {env_type}" + ) + + +def registry_from_config( + config: Optional[Dict[str, Any]], environment: AbstractEnvironment +) -> AbstractRegistry: + """Create a registry from a config. + + This helper function is used to create a registry from a config. The + config should have a "type" key that specifies the type of registry to + create. The remaining keys are passed to the registry's from_config + method. If the config is None or empty, a LocalRegistry is returned. + + Arguments: + config (Dict[str, Any]): The registry config. + environment (Environment): The environment of the registry. + + Returns: + The registry if config is not None, otherwise None. + + Raises: + LaunchError: If the registry is not configured correctly. 
+ """ + if not config: + from .registry.local_registry import LocalRegistry + + return LocalRegistry() # This is the default, dummy registry. + + wandb.termwarn( + "The `registry` block of the launch agent config is being deprecated. " + "Please specify an image repository URI under the `builder.destination` " + "key of your launch agent config. See " + "https://docs.wandb.ai/guides/launch/setup-agent-advanced#agent-configuration " + "for more information." + ) + + registry_type = config.get("type") + if registry_type is None or registry_type == "local": + from .registry.local_registry import LocalRegistry + + return LocalRegistry() # This is the default, dummy registry. + if registry_type == "ecr": + from .registry.elastic_container_registry import ElasticContainerRegistry + + return ElasticContainerRegistry.from_config(config) + if registry_type == "gcr": + from .registry.google_artifact_registry import GoogleArtifactRegistry + + return GoogleArtifactRegistry.from_config(config) + if registry_type == "acr": + from .registry.azure_container_registry import AzureContainerRegistry + + return AzureContainerRegistry.from_config(config) + raise LaunchError( + f"Could not create registry from config. Invalid registry type: {registry_type}" + ) + + +def builder_from_config( + config: Optional[Dict[str, Any]], + environment: AbstractEnvironment, + registry: AbstractRegistry, +) -> AbstractBuilder: + """Create a builder from a config. + + This helper function is used to create a builder from a config. The + config should have a "type" key that specifies the type of builder to import + and create. The remaining keys are passed to the builder's from_config + method. If the config is None or empty, a default builder is returned. + + The default builder will be a DockerBuilder if we find a working docker cli + on the system, otherwise it will be a NoOpBuilder. + + Arguments: + config (Dict[str, Any]): The builder config. + registry (Registry): The registry of the builder. 
+ + Returns: + The builder. + + Raises: + LaunchError: If the builder is not configured correctly. + """ + if not config: + if is_docker_installed(): + from .builder.docker_builder import DockerBuilder + + return DockerBuilder.from_config( + {}, environment, registry + ) # This is the default builder. + + from .builder.noop import NoOpBuilder + + return NoOpBuilder.from_config({}, environment, registry) + + builder_type = config.get("type") + if builder_type is None: + raise LaunchError( + "Could not create builder from config. Builder type not specified" + ) + if builder_type == "docker": + from .builder.docker_builder import DockerBuilder + + return DockerBuilder.from_config(config, environment, registry) + if builder_type == "kaniko": + from .builder.kaniko_builder import KanikoBuilder + + return KanikoBuilder.from_config(config, environment, registry) + if builder_type == "noop": + from .builder.noop import NoOpBuilder + + return NoOpBuilder.from_config(config, environment, registry) + raise LaunchError( + f"Could not create builder from config. Invalid builder type: {builder_type}" + ) + + +def runner_from_config( + runner_name: str, + api: Api, + runner_config: Dict[str, Any], + environment: AbstractEnvironment, + registry: AbstractRegistry, +) -> AbstractRunner: + """Create a runner from a config. + + This helper function is used to create a runner from a config. The + config should have a "type" key that specifies the type of runner to import + and create. The remaining keys are passed to the runner's from_config + method. If the config is None or empty, a LocalContainerRunner is returned. + + Arguments: + runner_name (str): The name of the backend. + api (Api): The API. + runner_config (Dict[str, Any]): The backend config. + + Returns: + The runner. + + Raises: + LaunchError: If the runner is not configured correctly. 
+ """ + if not runner_name or runner_name in ["local-container", "local"]: + from .runner.local_container import LocalContainerRunner + + return LocalContainerRunner(api, runner_config, environment, registry) + if runner_name == "local-process": + from .runner.local_process import LocalProcessRunner + + return LocalProcessRunner(api, runner_config) + if runner_name == "sagemaker": + from .environment.aws_environment import AwsEnvironment + + if not isinstance(environment, AwsEnvironment): + try: + environment = AwsEnvironment.from_default() + except LaunchError as e: + raise LaunchError( + "Could not create Sagemaker runner. " + "Environment must be an instance of AwsEnvironment." + ) from e + from .runner.sagemaker_runner import SageMakerRunner + + return SageMakerRunner(api, runner_config, environment, registry) + if runner_name in ["vertex", "gcp-vertex"]: + from .environment.gcp_environment import GcpEnvironment + + if not isinstance(environment, GcpEnvironment): + try: + environment = GcpEnvironment.from_default() + except LaunchError as e: + raise LaunchError( + "Could not create Vertex runner. " + "Environment must be an instance of GcpEnvironment." + ) from e + from .runner.vertex_runner import VertexRunner + + return VertexRunner(api, runner_config, environment, registry) + if runner_name == "kubernetes": + from .runner.kubernetes_runner import KubernetesRunner + + return KubernetesRunner(api, runner_config, environment, registry) + raise LaunchError( + f"Could not create runner from config. 
Invalid runner name: {runner_name}" + ) diff --git a/parrot/lib/python3.10/site-packages/wandb/sdk/launch/registry/__pycache__/abstract.cpython-310.pyc b/parrot/lib/python3.10/site-packages/wandb/sdk/launch/registry/__pycache__/abstract.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..78eca3b211b5d28c4db94e2859181730ccce8679 Binary files /dev/null and b/parrot/lib/python3.10/site-packages/wandb/sdk/launch/registry/__pycache__/abstract.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/wandb/sdk/launch/registry/__pycache__/anon.cpython-310.pyc b/parrot/lib/python3.10/site-packages/wandb/sdk/launch/registry/__pycache__/anon.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..100ec1cc690a0edd80b6b8d006a694edf041c91c Binary files /dev/null and b/parrot/lib/python3.10/site-packages/wandb/sdk/launch/registry/__pycache__/anon.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/wandb/sdk/launch/registry/__pycache__/azure_container_registry.cpython-310.pyc b/parrot/lib/python3.10/site-packages/wandb/sdk/launch/registry/__pycache__/azure_container_registry.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7a33a453f4fc85cfa27df5560549f2a519408918 Binary files /dev/null and b/parrot/lib/python3.10/site-packages/wandb/sdk/launch/registry/__pycache__/azure_container_registry.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/wandb/sdk/launch/registry/__pycache__/elastic_container_registry.cpython-310.pyc b/parrot/lib/python3.10/site-packages/wandb/sdk/launch/registry/__pycache__/elastic_container_registry.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..755a2f74bc0b13fa0c2065ba5f61e4b597137e08 Binary files /dev/null and b/parrot/lib/python3.10/site-packages/wandb/sdk/launch/registry/__pycache__/elastic_container_registry.cpython-310.pyc differ diff --git 
a/parrot/lib/python3.10/site-packages/wandb/sdk/launch/registry/__pycache__/google_artifact_registry.cpython-310.pyc b/parrot/lib/python3.10/site-packages/wandb/sdk/launch/registry/__pycache__/google_artifact_registry.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5800101483351c437b87ca47b9e4738f3036f31d Binary files /dev/null and b/parrot/lib/python3.10/site-packages/wandb/sdk/launch/registry/__pycache__/google_artifact_registry.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/wandb/sdk/launch/registry/__pycache__/local_registry.cpython-310.pyc b/parrot/lib/python3.10/site-packages/wandb/sdk/launch/registry/__pycache__/local_registry.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7d522e1a41dfe7f5117981dc1a89bb65ec140b8c Binary files /dev/null and b/parrot/lib/python3.10/site-packages/wandb/sdk/launch/registry/__pycache__/local_registry.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/wandb/sdk/launch/registry/abstract.py b/parrot/lib/python3.10/site-packages/wandb/sdk/launch/registry/abstract.py new file mode 100644 index 0000000000000000000000000000000000000000..9be4ba0787ff10297173a9b152833f21efa6928a --- /dev/null +++ b/parrot/lib/python3.10/site-packages/wandb/sdk/launch/registry/abstract.py @@ -0,0 +1,48 @@ +"""Abstract base class for registries.""" + +from abc import ABC, abstractmethod +from typing import Tuple + + +class AbstractRegistry(ABC): + """Abstract base class for registries.""" + + uri: str + + async def get_username_password(self) -> Tuple[str, str]: + """Get the username and password for the registry. + + Returns: + (str, str): The username and password. + """ + raise NotImplementedError + + @abstractmethod + async def get_repo_uri(self) -> str: + """Get the URI for a repository. + + Returns: + str: The URI. 
+ """ + raise NotImplementedError + + @abstractmethod + async def check_image_exists(self, image_uri: str) -> bool: + """Check if an image exists in the registry. + + Arguments: + image_uri (str): The URI of the image. + + Returns: + bool: True if the image exists. + """ + raise NotImplementedError + + @classmethod + @abstractmethod + def from_config( + cls, + config: dict, + ) -> "AbstractRegistry": + """Create a registry from a config.""" + raise NotImplementedError diff --git a/parrot/lib/python3.10/site-packages/wandb/sdk/launch/registry/anon.py b/parrot/lib/python3.10/site-packages/wandb/sdk/launch/registry/anon.py new file mode 100644 index 0000000000000000000000000000000000000000..7408606415fac01e234423614906e364a9561ea4 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/wandb/sdk/launch/registry/anon.py @@ -0,0 +1,29 @@ +from typing import Tuple + +from wandb.docker import is_docker_installed +from wandb.sdk.launch.utils import docker_image_exists + +from .abstract import AbstractRegistry + + +class AnonynmousRegistry(AbstractRegistry): + def __init__(self, uri: str) -> None: + """Initialize the registry.""" + self.uri = uri + + async def get_username_password(self) -> Tuple[str, str]: + """Get the username and password for the registry.""" + raise NotImplementedError("Anonymous registry does not require authentication") + + async def get_repo_uri(self) -> str: + return self.uri + + async def check_image_exists(self, image_uri: str) -> bool: + """Check if an image exists in the registry.""" + if not is_docker_installed(): + return False + return docker_image_exists(image_uri) + + @classmethod + def from_config(cls, config: dict) -> "AbstractRegistry": + return cls(uri=config["uri"]) diff --git a/parrot/lib/python3.10/site-packages/wandb/sdk/launch/registry/azure_container_registry.py b/parrot/lib/python3.10/site-packages/wandb/sdk/launch/registry/azure_container_registry.py new file mode 100644 index 
0000000000000000000000000000000000000000..36302a02af81922028c218d7672de5a2adc48750 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/wandb/sdk/launch/registry/azure_container_registry.py @@ -0,0 +1,124 @@ +"""Implementation of AzureContainerRegistry class.""" + +import re +from typing import TYPE_CHECKING, Optional, Tuple + +from wandb.sdk.launch.environment.azure_environment import AzureEnvironment +from wandb.sdk.launch.errors import LaunchError +from wandb.sdk.launch.utils import AZURE_CONTAINER_REGISTRY_URI_REGEX +from wandb.util import get_module + +from .abstract import AbstractRegistry + +if TYPE_CHECKING: + from azure.containerregistry import ContainerRegistryClient # type: ignore + from azure.core.exceptions import ResourceNotFoundError # type: ignore + + +ContainerRegistryClient = get_module( # noqa: F811 + "azure.containerregistry", + required="The azure-containerregistry package is required to use launch with Azure. Please install it with `pip install azure-containerregistry`.", +).ContainerRegistryClient + +ResourceNotFoundError = get_module( # noqa: F811 + "azure.core.exceptions", + required="The azure-core package is required to use launch with Azure. Please install it with `pip install azure-core`.", +).ResourceNotFoundError + + +class AzureContainerRegistry(AbstractRegistry): + """Helper for accessing Azure Container Registry resources.""" + + def __init__( + self, + uri: Optional[str] = None, + registry_name: Optional[str] = None, + repo_name: Optional[str] = None, + ): + """Initialize an AzureContainerRegistry.""" + if uri is not None: + self.uri = uri + if any(x is not None for x in (registry_name, repo_name)): + raise LaunchError( + "Please specify either a registry name and repo name or a registry URI." 
+ ) + if self.uri.startswith("https://"): + self.uri = self.uri[len("https://") :] + match = AZURE_CONTAINER_REGISTRY_URI_REGEX.match(self.uri) + if match is None: + raise LaunchError( + f"Unable to parse Azure Container Registry URI: {self.uri}" + ) + self.registry_name = match.group(1) + self.repo_name = match.group(2) + else: + if any(x is None for x in (registry_name, repo_name)): + raise LaunchError( + "Please specify both a registry name and repo name or a registry URI." + ) + self.registry_name = registry_name + self.repo_name = repo_name + self.uri = f"{self.registry_name}.azurecr.io/{self.repo_name}" + + @classmethod + def from_config( + cls, + config: dict, + ) -> "AzureContainerRegistry": + """Create an AzureContainerRegistry from a config dict. + + Args: + config (dict): The config dict. + environment (AbstractEnvironment): The environment to use. + verify (bool, optional): Whether to verify the registry. Defaults to True. + + Returns: + AzureContainerRegistry: The registry. + + Raises: + LaunchError: If the config is invalid. + """ + uri = config.get("uri") + if uri is None: + raise LaunchError( + "Please specify a registry name to use under the registry.uri." + ) + return cls( + uri=uri, + ) + + async def get_username_password(self) -> Tuple[str, str]: + """Get username and password for container registry.""" + raise NotImplementedError + + async def check_image_exists(self, image_uri: str) -> bool: + """Check if image exists in container registry. + + Args: + image_uri (str): Image URI to check. + + Returns: + bool: True if image exists, False otherwise. 
+ """ + match = re.match(AZURE_CONTAINER_REGISTRY_URI_REGEX, image_uri) + if match is None: + raise LaunchError( + f"Unable to parse Azure Container Registry URI: {image_uri}" + ) + registry = match.group(1) + repository = match.group(2) + tag = match.group(3) + credential = AzureEnvironment.get_credentials() + client = ContainerRegistryClient(f"https://{registry}.azurecr.io", credential) + try: + client.get_manifest_properties(repository, tag) + return True + except ResourceNotFoundError: + return False + except Exception as e: + raise LaunchError( + f"Unable to check if image exists in Azure Container Registry: {e}" + ) from e + + async def get_repo_uri(self) -> str: + return f"{self.registry_name}.azurecr.io/{self.repo_name}" diff --git a/parrot/lib/python3.10/site-packages/wandb/sdk/launch/registry/elastic_container_registry.py b/parrot/lib/python3.10/site-packages/wandb/sdk/launch/registry/elastic_container_registry.py new file mode 100644 index 0000000000000000000000000000000000000000..0dc9e02c0366ebc45167365417e06735ca12e459 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/wandb/sdk/launch/registry/elastic_container_registry.py @@ -0,0 +1,192 @@ +"""Implementation of Elastic Container Registry class for wandb launch.""" + +import base64 +import logging +from typing import Dict, Optional, Tuple + +from wandb.sdk.launch.errors import LaunchError +from wandb.sdk.launch.registry.abstract import AbstractRegistry +from wandb.sdk.launch.utils import ( + ELASTIC_CONTAINER_REGISTRY_URI_REGEX, + event_loop_thread_exec, +) +from wandb.util import get_module + +_logger = logging.getLogger(__name__) + +botocore = get_module( # noqa: F811 + "botocore", + required="The boto3 package is required to use launch with AWS. Please install it with `pip install wandb[launch]`.", +) +boto3 = get_module( # noqa: F811 + "boto3", + required="The boto3 package is required to use launch with AWS. 
Please install it with `pip install wandb[launch]`.", +) + + +class ElasticContainerRegistry(AbstractRegistry): + """Elastic Container Registry class.""" + + def __init__( + self, + uri: Optional[str] = None, + account_id: Optional[str] = None, + region: Optional[str] = None, + repo_name: Optional[str] = None, + ) -> None: + """Initialize the Elastic Container Registry. + + Arguments: + uri: The uri of the repository. + account_id: The AWS account id. + region: The AWS region of the container registry. + repository: The name of the repository. + + Raises: + LaunchError: If there is an error initializing the Elastic Container Registry helper. + """ + if uri: + self.uri = uri + if any([account_id, region, repo_name]): + raise LaunchError( + "Could not create ElasticContainerRegistry from config. Either 'uri' or " + "'account_id', 'region', and 'repo_name' are required." + ) + match = ELASTIC_CONTAINER_REGISTRY_URI_REGEX.match( + self.uri, + ) + if not match: + raise LaunchError( + f"Could not create ElasticContainerRegistry from config. The uri " + f"{self.uri} is invalid." + ) + self.account_id = match.group("account") + self.region = match.group("region") + self.repo_name = match.group("repository") + else: + if not all([account_id, region, repo_name]): + raise LaunchError( + "Could not create ElasticContainerRegistry from config. Either 'uri' or " + "'account_id', 'region', and 'repo_name' are required." + ) + self.account_id = account_id + self.region = region + self.repo_name = repo_name + self.uri = f"{self.account_id}.dkr.ecr.{self.region}.amazonaws.com/{self.repo_name}" + if self.account_id is None: + raise LaunchError( + "Could not create ElasticContainerRegistry from config. Either 'uri' or " + "'account_id' is required." + ) + if self.region is None: + raise LaunchError( + "Could not create ElasticContainerRegistry from config. Either 'uri' or " + "'region' is required." 
+ ) + if self.repo_name is None: + raise LaunchError( + "Could not create ElasticContainerRegistry from config. Either 'uri' or " + "'repository' is required." + ) + + @classmethod + def from_config( + cls, + config: Dict[str, str], + ) -> "ElasticContainerRegistry": + """Create an Elastic Container Registry from a config. + + Arguments: + config (dict): The config. + + Returns: + ElasticContainerRegistry: The Elastic Container Registry. + """ + # TODO: Replace this with pydantic. + acceptable_keys = { + "uri", + "type", + "account_id", + "region", + "repo_name", + } + unsupported_keys = set(config.keys()) - acceptable_keys + if unsupported_keys: + raise LaunchError( + f"The Elastic Container Registry config contains unsupported keys: " + f"{unsupported_keys}. Please remove these keys. The acceptable " + f"keys are: {acceptable_keys}." + ) + return cls( + uri=config.get("uri"), + account_id=config.get("account_id"), + region=config.get("region"), + repo_name=config.get("repository"), + ) + + async def get_username_password(self) -> Tuple[str, str]: + """Get the username and password for the registry. + + Returns: + (str, str): The username and password. + + Raises: + RegistryError: If there is an error getting the username and password. + """ + _logger.debug("Getting username and password for Elastic Container Registry.") + try: + session = boto3.Session(region_name=self.region) + client = await event_loop_thread_exec(session.client)("ecr") + response = await event_loop_thread_exec(client.get_authorization_token)() + username, password = base64.standard_b64decode( + response["authorizationData"][0]["authorizationToken"] + ).split(b":") + return username.decode("utf-8"), password.decode("utf-8") + + except botocore.exceptions.ClientError as e: + code = e.response["Error"]["Code"] + msg = e.response["Error"]["Message"] + # TODO: Log the code and the message here? 
+ raise LaunchError(f"Error getting username and password: {code} {msg}") + + async def get_repo_uri(self) -> str: + """Get the uri of the repository. + + Returns: + str: The uri of the repository. + """ + return f"{self.account_id}.dkr.ecr.{self.region}.amazonaws.com/{self.repo_name}" + + async def check_image_exists(self, image_uri: str) -> bool: + """Check if the image tag exists. + + Arguments: + image_uri (str): The full image_uri. + + Returns: + bool: True if the image tag exists. + """ + if ":" not in image_uri: + tag = image_uri + else: + uri, tag = image_uri.split(":") + repo_uri = await self.get_repo_uri() + if uri != repo_uri: + raise LaunchError( + f"Image uri {image_uri} does not match Elastic Container Registry uri {repo_uri}." + ) + _logger.debug(f"Checking if image tag {tag} exists in repository {self.uri}") + try: + session = boto3.Session(region_name=self.region) + client = await event_loop_thread_exec(session.client)("ecr") + response = await event_loop_thread_exec(client.describe_images)( + repositoryName=self.repo_name, imageIds=[{"imageTag": tag}] + ) + return len(response["imageDetails"]) > 0 + + except botocore.exceptions.ClientError as e: + code = e.response["Error"]["Code"] + if code == "ImageNotFoundException": + return False + msg = e.response["Error"]["Message"] + raise LaunchError(f"Error checking if image tag exists: {code} {msg}") diff --git a/parrot/lib/python3.10/site-packages/wandb/sdk/launch/registry/google_artifact_registry.py b/parrot/lib/python3.10/site-packages/wandb/sdk/launch/registry/google_artifact_registry.py new file mode 100644 index 0000000000000000000000000000000000000000..4592bce0aba0da5ccfd09324521d3f616876a890 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/wandb/sdk/launch/registry/google_artifact_registry.py @@ -0,0 +1,219 @@ +"""Implementation of Google Artifact Registry for wandb launch.""" + +import logging +from typing import Optional, Tuple + +import google.auth # type: ignore +import 
google.cloud.artifactregistry # type: ignore + +from wandb.sdk.launch.errors import LaunchError +from wandb.sdk.launch.utils import ( + GCP_ARTIFACT_REGISTRY_URI_REGEX, + event_loop_thread_exec, +) +from wandb.util import get_module + +from .abstract import AbstractRegistry + +_logger = logging.getLogger(__name__) + +google = get_module( # noqa: F811 + "google", + required="The google package is required to use launch with Google. Please install it with `pip install wandb[launch]`.", +) +google.auth = get_module( # noqa: F811 + "google.auth", + required="The google-auth package is required to use launch with Google. Please install it with `pip install wandb[launch]`.", +) + +google.cloud.artifactregistry = get_module( # noqa: F811 + "google.cloud.artifactregistry", + required="The google-cloud-artifactregistry package is required to use launch with Google. Please install it with `pip install wandb[launch]`.", +) + + +class GoogleArtifactRegistry(AbstractRegistry): + """Google Artifact Registry helper for interacting with the registry. + + This helper should be constructed from either a uri or a repository, + project, and optional image-name. If constructed from a uri, the uri + must be of the form REGION-docker.pkg.dev/PROJECT/REPOSITORY/[IMAGE_NAME], + with an optional https:// preceding. + """ + + def __init__( + self, + uri: Optional[str] = None, + repository: Optional[str] = None, + image_name: Optional[str] = None, + project: Optional[str] = None, + region: Optional[str] = None, + ) -> None: + """Initialize the Google Artifact Registry. + + Either uri or repository and image_name must be provided. Project and + region are optional, and will be inferred from the uri if provided, or + from the default credentials if not. + + Arguments: + uri (optional): The uri of the repository. + repository (optional): The repository name. + image_name (optional): The image name. + project (optional): The GCP project name. + region (optional): The GCP region name. 
+ + Raises: + LaunchError: If verify is True and the container registry or its + environment have not been properly configured. Or if the environment + is not an instance of GcpEnvironment. + """ + _logger.info( + f"Initializing Google Artifact Registry with repository {repository} " + f"and image name {image_name}" + ) + + if uri is not None: + self.uri = uri + # Raise an error if any other kwargs were provided in addition to uri. + if any([repository, image_name, project, region]): + raise LaunchError( + "The Google Artifact Registry must be specified with either " + "the uri key or the repository, image-name, project and region " + "keys, but not both." + ) + match = GCP_ARTIFACT_REGISTRY_URI_REGEX.match(self.uri) + if not match: + raise LaunchError( + f"The Google Artifact Registry uri {self.uri} is invalid. " + "Please provide a uri of the form " + "REGION-docker.pkg.dev/PROJECT/REPOSITORY/IMAGE_NAME." + ) + self.project = match.group("project") + self.region = match.group("region") + self.repository = match.group("repository") + self.image_name = match.group("image_name") + else: + if any(x is None for x in (repository, region, image_name)): + raise LaunchError( + "The Google Artifact Registry must be specified with either " + "the uri key or the repository, image-name, project and region " + "keys." + ) + self.project = project + self.region = region + self.repository = repository + self.image_name = image_name + self.uri = f"{self.region}-docker.pkg.dev/{self.project}/{self.repository}/{self.image_name}" + + _missing_kwarg_msg = ( + "The Google Artifact Registry is missing the {} kwarg. " + "Please specify it by name or as part of the uri argument." 
+ ) + if not self.region: + raise LaunchError(_missing_kwarg_msg.format("region")) + if not self.repository: + raise LaunchError(_missing_kwarg_msg.format("repository")) + if not self.image_name: + raise LaunchError(_missing_kwarg_msg.format("image-name")) + # Try to load default project from the default credentials. + self.credentials, project = google.auth.default() + self.project = self.project or project + self.credentials.refresh(google.auth.transport.requests.Request()) + + @classmethod + def from_config( + cls, + config: dict, + ) -> "GoogleArtifactRegistry": + """Create a Google Artifact Registry from a config. + + Arguments: + config: A dictionary containing the following keys: + repository: The repository name. + image-name: The image name. + environment: A GcpEnvironment configured for access to this registry. + + Returns: + A GoogleArtifactRegistry. + """ + # TODO: Replace this with pydantic. + acceptable_keys = { + "uri", + "type", + "repository", + "image-name", + "region", + "project", + } + unacceptable_keys = set(config.keys()) - acceptable_keys + if unacceptable_keys: + raise LaunchError( + f"The Google Artifact Registry config contains unacceptable keys: " + f"{unacceptable_keys}. Please remove these keys. The acceptable " + f"keys are: {acceptable_keys}." + ) + return cls( + uri=config.get("uri"), + repository=config.get("repository"), + image_name=config.get("image-name"), + project=config.get("project"), + region=config.get("region"), + ) + + async def get_username_password(self) -> Tuple[str, str]: + """Get the username and password for the registry. + + Returns: + A tuple of the username and password. + """ + if not self.credentials.token: + self.credentials.refresh(google.auth.transport.requests.Request()) + return "oauth2accesstoken", self.credentials.token + + async def get_repo_uri(self) -> str: + """Get the URI for the given repository. + + Arguments: + repo_name: The repository name. + + Returns: + The repository URI. 
+ """ + return ( + f"{self.region}-docker.pkg.dev/" + f"{self.project}/{self.repository}/{self.image_name}" + ) + + async def check_image_exists(self, image_uri: str) -> bool: + """Check if the image exists. + + Arguments: + image_uri: The image URI. + + Returns: + True if the image exists, False otherwise. + """ + _logger.info(f"Checking if image {image_uri} exists") + repo_uri, tag = image_uri.split(":") + self_repo_uri = await self.get_repo_uri() + if repo_uri != self_repo_uri: + raise LaunchError( + f"The image {image_uri} does not match to the image uri " + f"repository {self.uri}." + ) + parent = f"projects/{self.project}/locations/{self.region}/repositories/{self.repository}" + artifact_registry_client = event_loop_thread_exec( + google.cloud.artifactregistry.ArtifactRegistryClient + ) + client = await artifact_registry_client(credentials=self.credentials) + list_images = event_loop_thread_exec(client.list_docker_images) + try: + for image in await list_images(request={"parent": parent}): + if tag in image.tags: + return True + except google.api_core.exceptions.NotFound as e: # type: ignore[attr-defined] + raise LaunchError( + f"The Google Artifact Registry repository {self.repository} " + f"does not exist. Please create it or modify your registry configuration." 
+ ) from e + return False diff --git a/parrot/lib/python3.10/site-packages/wandb/sdk/launch/registry/local_registry.py b/parrot/lib/python3.10/site-packages/wandb/sdk/launch/registry/local_registry.py new file mode 100644 index 0000000000000000000000000000000000000000..b73711ab82ea85588880a22a6d0791ab868f106a --- /dev/null +++ b/parrot/lib/python3.10/site-packages/wandb/sdk/launch/registry/local_registry.py @@ -0,0 +1,67 @@ +"""Local registry implementation.""" + +import logging +from typing import Tuple + +from wandb.docker import is_docker_installed +from wandb.sdk.launch.errors import LaunchError +from wandb.sdk.launch.utils import docker_image_exists + +from .abstract import AbstractRegistry + +_logger = logging.getLogger(__name__) + + +class LocalRegistry(AbstractRegistry): + """A local registry. + + This is a dummy registry that is used when no registry is configured. + """ + + def __init__(self) -> None: + """Initialize a local registry.""" + pass + + @classmethod + def from_config( + cls, + config: dict, + ) -> "LocalRegistry": + """Create a local registry from a config. + + Arguments: + config (dict): The config. This is ignored. + environment (AbstractEnvironment): The environment. This is ignored. + + Returns: + LocalRegistry: The local registry. + """ + return cls() + + async def verify(self) -> None: + """Verify the local registry by doing nothing.""" + pass + + async def get_username_password(self) -> Tuple[str, str]: + """Get the username and password of the local registry.""" + raise LaunchError("Attempted to get username and password for LocalRegistry.") + + async def get_repo_uri(self) -> str: + """Get the uri of the local registry. + + Returns: An empty string. + """ + return "" + + async def check_image_exists(self, image_uri: str) -> bool: + """Check if an image exists in the local registry. + + Arguments: + image_uri (str): The uri of the image. + + Returns: + bool: True. 
+ """ + if is_docker_installed(): + return docker_image_exists(image_uri) + return False diff --git a/parrot/lib/python3.10/site-packages/wandb/sdk/launch/sweeps/__init__.py b/parrot/lib/python3.10/site-packages/wandb/sdk/launch/sweeps/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..3cdc29ddb4ffc4358ee1eb464564a81bd4e3519c --- /dev/null +++ b/parrot/lib/python3.10/site-packages/wandb/sdk/launch/sweeps/__init__.py @@ -0,0 +1,39 @@ +import logging +from typing import Any, Callable, Dict + +log = logging.getLogger(__name__) + + +class SchedulerError(Exception): + """Raised when a known error occurs with wandb sweep scheduler.""" + + pass + + +def _import_sweep_scheduler() -> Any: + from .scheduler_sweep import SweepScheduler + + return SweepScheduler + + +_WANDB_SCHEDULERS: Dict[str, Callable] = { + "wandb": _import_sweep_scheduler, +} + + +def load_scheduler(scheduler_type: str) -> Any: + scheduler_type = scheduler_type.lower() + if scheduler_type not in _WANDB_SCHEDULERS: + raise SchedulerError( + f"The `scheduler_name` argument must be one of " + f"{list(_WANDB_SCHEDULERS.keys())}, got: {scheduler_type}" + ) + + log.warn(f"Loading dependencies for Scheduler of type: {scheduler_type}") + import_func = _WANDB_SCHEDULERS[scheduler_type] + return import_func() + + +__all__ = [ + "load_scheduler", +] diff --git a/parrot/lib/python3.10/site-packages/wandb/sdk/launch/sweeps/scheduler.py b/parrot/lib/python3.10/site-packages/wandb/sdk/launch/sweeps/scheduler.py new file mode 100644 index 0000000000000000000000000000000000000000..58fe4c8a099fefbc813549bf1a2a1d45190118da --- /dev/null +++ b/parrot/lib/python3.10/site-packages/wandb/sdk/launch/sweeps/scheduler.py @@ -0,0 +1,742 @@ +"""Abstract Scheduler class.""" + +import asyncio +import base64 +import copy +import logging +import os +import socket +import threading +import time +import traceback +from abc import ABC, abstractmethod +from dataclasses import dataclass +from enum import Enum 
+from typing import TYPE_CHECKING, Any, Dict, Iterator, List, Optional, Tuple, Union + +import click +import yaml + +import wandb +from wandb.errors import CommError +from wandb.sdk.launch._launch_add import launch_add +from wandb.sdk.launch.errors import LaunchError +from wandb.sdk.launch.sweeps import SchedulerError +from wandb.sdk.launch.sweeps.utils import ( + create_sweep_command_args, + make_launch_sweep_entrypoint, +) +from wandb.sdk.launch.utils import ( + event_loop_thread_exec, + strip_resource_args_and_template_vars, +) +from wandb.sdk.lib.runid import generate_id + +if TYPE_CHECKING: + import wandb.apis.public as public + from wandb.apis.internal import Api + from wandb.apis.public import QueuedRun, Run + from wandb.sdk.wandb_run import Run as SdkRun + + +_logger = logging.getLogger(__name__) +LOG_PREFIX = f"{click.style('sched:', fg='cyan')} " + +DEFAULT_POLLING_SLEEP = 5.0 + + +class SchedulerState(Enum): + PENDING = 0 + STARTING = 1 + RUNNING = 2 + FLUSH_RUNS = 3 + COMPLETED = 4 + FAILED = 5 + STOPPED = 6 + CANCELLED = 7 + + +class RunState(Enum): + RUNNING = "running", "alive" + PENDING = "pending", "alive" + PREEMPTING = "preempting", "alive" + CRASHED = "crashed", "dead" + FAILED = "failed", "dead" + KILLED = "killed", "dead" + FINISHED = "finished", "dead" + PREEMPTED = "preempted", "dead" + # unknown when api.get_run_state fails or returns unexpected state + # assumed alive, unless we get unknown 2x then move to failed (dead) + UNKNOWN = "unknown", "alive" + + def __new__(cls: Any, *args: List, **kwds: Any) -> "RunState": + obj: RunState = object.__new__(cls) + obj._value_ = args[0] + return obj + + def __init__(self, _: str, life: str = "unknown") -> None: + self._life = life + + @property + def is_alive(self) -> bool: + return self._life == "alive" + + +@dataclass +class _Worker: + agent_config: Dict[str, Any] + agent_id: str + + +@dataclass +class SweepRun: + id: str + worker_id: int + state: RunState = RunState.RUNNING + queued_run: 
Optional["public.QueuedRun"] = None + args: Optional[Dict[str, Any]] = None + logs: Optional[List[str]] = None + + +class Scheduler(ABC): + """A controller/agent that populates a Launch RunQueue from a hyperparameter sweep.""" + + PLACEHOLDER_URI = "placeholder-uri-scheduler" + SWEEP_JOB_TYPE = "sweep-controller" + ENTRYPOINT = ["wandb", "scheduler", "WANDB_SWEEP_ID"] + + def __init__( + self, + api: "Api", + *args: Optional[Any], + polling_sleep: Optional[float] = None, + sweep_id: Optional[str] = None, + entity: Optional[str] = None, + project: Optional[str] = None, + project_queue: Optional[str] = None, + num_workers: Optional[Union[int, str]] = None, + **kwargs: Optional[Any], + ): + from wandb.apis.public import Api as PublicApi + + self._api = api + self._public_api = PublicApi() + self._entity = ( + entity + or os.environ.get("WANDB_ENTITY") + or api.settings("entity") + or api.default_entity + ) + self._project = ( + project or os.environ.get("WANDB_PROJECT") or api.settings("project") + ) + self._sweep_id: str = sweep_id or "empty-sweep-id" + self._state: SchedulerState = SchedulerState.PENDING + + # Make sure the provided sweep_id corresponds to a valid sweep + try: + resp = self._api.sweep( + sweep_id, "{}", entity=self._entity, project=self._project + ) + if resp.get("state") == SchedulerState.CANCELLED.name: + self._state = SchedulerState.CANCELLED + self._sweep_config = yaml.safe_load(resp["config"]) + self._num_runs_launched: int = self._get_num_runs_launched(resp["runs"]) + if self._num_runs_launched > 0: + wandb.termlog( + f"{LOG_PREFIX}Found {self._num_runs_launched} previous valid runs for sweep {self._sweep_id}" + ) + except Exception as e: + raise SchedulerError( + f"{LOG_PREFIX}Exception when finding sweep ({sweep_id}) {e}" + ) + + # Scheduler may receive additional kwargs which will be piped into the launch command + self._kwargs: Dict[str, Any] = kwargs + + # Dictionary of the runs being managed by the scheduler + self._runs: Dict[str, 
SweepRun] = {} + # Threading lock to ensure thread-safe access to the runs dictionary + self._threading_lock: threading.Lock = threading.Lock() + self._polling_sleep = ( + polling_sleep if polling_sleep is not None else DEFAULT_POLLING_SLEEP + ) + self._project_queue = project_queue + # Optionally run multiple workers in (pseudo-)parallel. Workers do not + # actually run training workloads, they simply send heartbeat messages + # (emulating a real agent) and add new runs to the launch queue. The + # launch agent is the one that actually runs the training workloads. + self._workers: Dict[int, _Worker] = {} + + # Init wandb scheduler run + self._wandb_run = self._init_wandb_run() + + # Grab params from scheduler wandb run config + num_workers = num_workers or self._wandb_run.config.get("scheduler", {}).get( + "num_workers" + ) + self._num_workers = int(num_workers) if str(num_workers).isdigit() else 8 + self._settings_config: Dict[str, Any] = self._wandb_run.config.get( + "settings", {} + ) + + @abstractmethod + def _get_next_sweep_run(self, worker_id: int) -> Optional[SweepRun]: + """Called when worker available.""" + pass + + @abstractmethod + def _poll(self) -> None: + """Called every polling loop.""" + pass + + @abstractmethod + def _exit(self) -> None: + pass + + @abstractmethod + def _load_state(self) -> None: + pass + + @abstractmethod + def _save_state(self) -> None: + pass + + @property + def state(self) -> SchedulerState: + _logger.debug(f"{LOG_PREFIX}Scheduler state is {self._state.name}") + return self._state + + @state.setter + def state(self, value: SchedulerState) -> None: + _logger.debug(f"{LOG_PREFIX}Scheduler was {self.state.name} is {value.name}") + self._state = value + + @property + def is_alive(self) -> bool: + if self.state in [ + SchedulerState.COMPLETED, + SchedulerState.FAILED, + SchedulerState.STOPPED, + SchedulerState.CANCELLED, + ]: + return False + return True + + @property + def at_runcap(self) -> bool: + """False if under 
user-specified cap on # of runs.""" + run_cap = self._sweep_config.get("run_cap") + if not run_cap: + return False + at_runcap: bool = self._num_runs_launched >= run_cap + return at_runcap + + @property + def num_active_runs(self) -> int: + return len(self._runs) + + @property + def busy_workers(self) -> Dict[int, _Worker]: + """Returns dict of id:worker already assigned to a launch run. + + runs should always have a worker_id, but are created before + workers are assigned to the run + """ + busy_workers = {} + for _, r in self._yield_runs(): + busy_workers[r.worker_id] = self._workers[r.worker_id] + return busy_workers + + @property + def available_workers(self) -> Dict[int, _Worker]: + """Returns dict of id:worker ready to launch another run.""" + if len(self._workers) == 0: + return {} + return { + _id: w for _id, w in self._workers.items() if _id not in self.busy_workers + } + + def _init_wandb_run(self) -> "SdkRun": + """Controls resume or init logic for a scheduler wandb run.""" + settings = wandb.Settings(disable_job_creation=True) + run: SdkRun = wandb.init( # type: ignore + name=f"Scheduler.{self._sweep_id}", + resume="allow", + config=self._kwargs, # when run as a job, this sets config + settings=settings, + ) + return run + + def stop_sweep(self) -> None: + """Stop the sweep.""" + self._state = SchedulerState.STOPPED + + def fail_sweep(self, err: Optional[str]) -> None: + """Fail the sweep w/ optional exception.""" + self._state = SchedulerState.FAILED + if err: + raise SchedulerError(err) + + def start(self) -> None: + """Start a scheduler, confirms prerequisites, begins execution loop.""" + wandb.termlog(f"{LOG_PREFIX}Scheduler starting.") + if not self.is_alive: + wandb.termerror( + f"{LOG_PREFIX}Sweep already in end state ({self.state.name.lower()}). Exiting..." 
+ ) + self.exit() + return + + self._state = SchedulerState.STARTING + if not self._try_load_executable(): + wandb.termerror( + f"{LOG_PREFIX}No 'job' or 'image_uri' loaded from sweep config." + ) + self.exit() + return + + # For resuming sweeps + self._load_state() + asyncio.run(self._register_agents()) + self.run() + + def run(self) -> None: + """Main run function.""" + wandb.termlog(f"{LOG_PREFIX}Scheduler running") + self.state = SchedulerState.RUNNING + try: + while True: + self._update_scheduler_run_state() + if not self.is_alive: + break + + wandb.termlog(f"{LOG_PREFIX}Polling for new runs to launch") + + self._update_run_states() + self._poll() + if self.state == SchedulerState.FLUSH_RUNS: + if self.num_active_runs == 0: + wandb.termlog(f"{LOG_PREFIX}Done polling on runs, exiting") + break + time.sleep(self._polling_sleep) + continue + + for worker_id in self.available_workers: + if self.at_runcap: + wandb.termlog( + f"{LOG_PREFIX}Sweep at run_cap ({self._num_runs_launched})" + ) + self.state = SchedulerState.FLUSH_RUNS + break + + try: + run: Optional[SweepRun] = self._get_next_sweep_run(worker_id) + if not run: + break + except SchedulerError as e: + raise SchedulerError(e) + except Exception as e: + wandb.termerror( + f"{LOG_PREFIX}Failed to get next sweep run: {e}" + ) + self.state = SchedulerState.FAILED + break + + if self._add_to_launch_queue(run): + self._num_runs_launched += 1 + + time.sleep(self._polling_sleep) + except KeyboardInterrupt: + wandb.termwarn(f"{LOG_PREFIX}Scheduler received KeyboardInterrupt. 
Exiting") + self.state = SchedulerState.STOPPED + self.exit() + return + except Exception as e: + wandb.termlog(f"{LOG_PREFIX}Scheduler failed with exception {e}") + self.state = SchedulerState.FAILED + self.exit() + raise e + else: + # scheduler succeeds if at runcap + if self.state == SchedulerState.FLUSH_RUNS and self.at_runcap: + self.state = SchedulerState.COMPLETED + self.exit() + + def exit(self) -> None: + self._exit() + # _save_state isn't controlled, possibly fails + try: + self._save_state() + except Exception: + wandb.termerror( + f"{LOG_PREFIX}Failed to save state: {traceback.format_exc()}" + ) + + status = "" + if self.state == SchedulerState.FLUSH_RUNS: + self._set_sweep_state("PAUSED") + status = "paused" + elif self.state == SchedulerState.COMPLETED: + self._set_sweep_state("FINISHED") + status = "completed" + elif self.state in [SchedulerState.CANCELLED, SchedulerState.STOPPED]: + self._set_sweep_state("CANCELED") # one L + status = "cancelled" + self._stop_runs() + else: + self.state = SchedulerState.FAILED + self._set_sweep_state("CRASHED") + status = "crashed" + self._stop_runs() + + wandb.termlog(f"{LOG_PREFIX}Scheduler {status}") + self._wandb_run.finish() + + def _get_num_runs_launched(self, runs: List[Dict[str, Any]]) -> int: + """Returns the number of valid runs in the sweep.""" + count = 0 + for run in runs: + # if bad run, shouldn't be counted against run cap + if run.get("state", "") in ["killed", "crashed"] and not run.get( + "summaryMetrics" + ): + _logger.debug( + f"excluding run: {run['name']} with state: {run['state']} from run cap \n{run}" + ) + continue + count += 1 + + return count + + def _try_load_executable(self) -> bool: + """Check existence of valid executable for a run. 
+ + logs and returns False when job is unreachable + """ + if self._kwargs.get("job"): + try: + _job_artifact = self._public_api.job(self._kwargs["job"]) + wandb.termlog( + f"{LOG_PREFIX}Successfully loaded job ({_job_artifact.name}) in scheduler" + ) + except Exception: + wandb.termerror(f"{LOG_PREFIX}{traceback.format_exc()}") + return False + return True + elif self._kwargs.get("image_uri"): + # TODO(gst): check docker existence? Use registry in launch config? + return True + else: + return False + + async def _register_agents(self) -> None: + tasks = [] + register_agent = event_loop_thread_exec(self._api.register_agent) + for worker_id in range(self._num_workers): + _logger.debug(f"{LOG_PREFIX}Starting AgentHeartbeat worker ({worker_id})") + try: + worker = register_agent( + f"{socket.gethostname()}-{worker_id}", # host + sweep_id=self._sweep_id, + project_name=self._project, + entity=self._entity, + ) + tasks.append(worker) + except Exception as e: + _logger.debug(f"failed to register agent: {e}") + self.fail_sweep(f"failed to register agent: {e}") + + finished_tasks = await asyncio.gather(*tasks) + for idx, agent_config in enumerate(finished_tasks): + self._workers[idx] = _Worker( + agent_config=agent_config, + agent_id=agent_config["id"], + ) + + def _yield_runs(self) -> Iterator[Tuple[str, SweepRun]]: + """Thread-safe way to iterate over the runs.""" + with self._threading_lock: + yield from self._runs.items() + + def _cleanup_runs(self, runs_to_remove: List[str]) -> None: + """Helper for removing runs from memory. + + Can be overloaded to prevent deletion of runs, which is useful + for debugging or when polling on completed runs. 
+ """ + with self._threading_lock: + for run_id in runs_to_remove: + wandb.termlog(f"{LOG_PREFIX}Cleaning up finished run ({run_id})") + del self._runs[run_id] + + def _stop_runs(self) -> None: + to_delete = [] + for run_id, _ in self._yield_runs(): + to_delete += [run_id] + + for run_id in to_delete: + wandb.termlog(f"{LOG_PREFIX}Stopping run ({run_id})") + if not self._stop_run(run_id): + wandb.termwarn(f"{LOG_PREFIX}Failed to stop run ({run_id})") + + def _stop_run(self, run_id: str) -> bool: + """Stops a run and removes it from the scheduler.""" + if run_id not in self._runs: + _logger.debug(f"run: {run_id} not in _runs: {self._runs}") + return False + + run = self._runs[run_id] + del self._runs[run_id] + + if not run.queued_run: + _logger.debug( + f"tried to _stop_run but run not queued yet (run_id:{run.id})" + ) + return False + + if not run.state.is_alive: + # run already dead, just delete reference + return True + + # run still alive, send stop signal + encoded_run_id = base64.standard_b64encode( + f"Run:v1:{run_id}:{self._project}:{self._entity}".encode() + ).decode("utf-8") + + try: + success: bool = self._api.stop_run(run_id=encoded_run_id) + if success: + wandb.termlog(f"{LOG_PREFIX}Stopped run {run_id}.") + return True + except Exception as e: + _logger.debug(f"error stopping run ({run_id}): {e}") + + return False + + def _update_scheduler_run_state(self) -> None: + """Update the scheduler state from state of scheduler run and sweep state.""" + state: RunState = self._get_run_state(self._wandb_run.id) + + # map scheduler run-state to scheduler-state + if state == RunState.KILLED: + self.state = SchedulerState.STOPPED + elif state in [RunState.FAILED, RunState.CRASHED]: + self.state = SchedulerState.FAILED + elif state == RunState.FINISHED: + self.state = SchedulerState.COMPLETED + + # check sweep state for completed states, overwrite scheduler state + try: + sweep_state = self._api.get_sweep_state( + self._sweep_id, self._entity, self._project + ) + 
except Exception as e: + _logger.debug(f"sweep state error: {e}") + return + + if sweep_state == "FINISHED": + self.state = SchedulerState.COMPLETED + elif sweep_state in ["CANCELLED", "STOPPED"]: + self.state = SchedulerState.CANCELLED + elif sweep_state == "PAUSED": + self.state = SchedulerState.FLUSH_RUNS + + def _update_run_states(self) -> None: + """Iterate through runs. + + Get state from backend and deletes runs if not in running state. Threadsafe. + """ + runs_to_remove: List[str] = [] + for run_id, run in self._yield_runs(): + run.state = self._get_run_state(run_id, run.state) + + try: + rqi_state = run.queued_run.state if run.queued_run else None + except (CommError, LaunchError) as e: + _logger.debug(f"Failed to get queued_run.state: {e}") + rqi_state = None + + if not run.state.is_alive or rqi_state == "failed": + _logger.debug(f"({run_id}) states: ({run.state}, {rqi_state})") + runs_to_remove.append(run_id) + self._cleanup_runs(runs_to_remove) + + def _get_metrics_from_run(self, run_id: str) -> List[Any]: + """Use the public api to get metrics from a run. + + Uses the metric name found in the sweep config, any + misspellings will result in an empty list. 
+ """ + try: + queued_run: Optional[QueuedRun] = self._runs[run_id].queued_run + if not queued_run: + return [] + + api_run: Run = self._public_api.run( + f"{queued_run.entity}/{queued_run.project}/{run_id}" + ) + metric_name = self._sweep_config["metric"]["name"] + history = api_run.scan_history(keys=["_step", metric_name]) + metrics = [x[metric_name] for x in history] + + return metrics + except Exception as e: + _logger.debug(f"[_get_metrics_from_run] {e}") + return [] + + def _get_run_info(self, run_id: str) -> Dict[str, Any]: + """Use the public api to get info about a run.""" + try: + info: Dict[str, Any] = self._api.get_run_info( + self._entity, self._project, run_id + ) + if info: + return info + except Exception as e: + _logger.debug(f"[_get_run_info] {e}") + return {} + + def _get_run_state( + self, run_id: str, prev_run_state: RunState = RunState.UNKNOWN + ) -> RunState: + """Use the public api to get state of a run.""" + run_state = None + try: + state = self._api.get_run_state(self._entity, self._project, run_id) + run_state = RunState(state) + except CommError as e: + _logger.debug(f"error getting state for run ({run_id}): {e}") + if prev_run_state == RunState.UNKNOWN: + # triggers when we get an unknown state for the second time + wandb.termwarn( + f"Failed to get runstate for run ({run_id}). Error: {traceback.format_exc()}" + ) + run_state = RunState.FAILED + else: # first time we get unknown state + run_state = RunState.UNKNOWN + except (AttributeError, ValueError): + wandb.termwarn( + f"Bad state ({run_state}) for run ({run_id}). 
Error: {traceback.format_exc()}" + ) + run_state = RunState.UNKNOWN + return run_state + + def _create_run(self) -> Dict[str, Any]: + """Use the public api to create a blank run.""" + try: + run: List[Dict[str, Any]] = self._api.upsert_run( + project=self._project, + entity=self._entity, + sweep_name=self._sweep_id, + ) + if run: + return run[0] + except Exception as e: + _logger.debug(f"[_create_run] {e}") + raise SchedulerError( + "Error creating run from scheduler, check API connection and CLI version." + ) + return {} + + def _set_sweep_state(self, state: str) -> None: + wandb.termlog(f"{LOG_PREFIX}Updating sweep state to: {state.lower()}") + try: + self._api.set_sweep_state(sweep=self._sweep_id, state=state) + except Exception as e: + _logger.debug(f"[set_sweep_state] {e}") + + def _encode(self, _id: str) -> str: + return ( + base64.b64decode(bytes(_id.encode("utf-8"))).decode("utf-8").split(":")[2] + ) + + def _make_entry_and_launch_config( + self, run: SweepRun + ) -> Tuple[Optional[List[str]], Dict[str, Dict[str, Any]]]: + args = create_sweep_command_args({"args": run.args}) + entry_point, macro_args = make_launch_sweep_entrypoint( + args, self._sweep_config.get("command") + ) + # handle program macro + if entry_point and "${program}" in entry_point: + if not self._sweep_config.get("program"): + raise SchedulerError( + f"{LOG_PREFIX}Program macro in command has no corresponding 'program' in sweep config." 
+ ) + pidx = entry_point.index("${program}") + entry_point[pidx] = self._sweep_config["program"] + + launch_config = copy.deepcopy(self._wandb_run.config.get("launch", {})) + if "overrides" not in launch_config: + launch_config["overrides"] = {"run_config": {}} + if "run_config" not in launch_config["overrides"]: + launch_config["overrides"]["run_config"] = {} + launch_config["overrides"]["run_config"].update(args["args_dict"]) + + if macro_args: # pipe in hyperparam args as params to launch + launch_config["overrides"]["args"] = macro_args + + if entry_point: + unresolved = [x for x in entry_point if str(x).startswith("${")] + if unresolved: + wandb.termwarn( + f"{LOG_PREFIX}Sweep command contains unresolved macros: " + f"{unresolved}, see launch docs for supported macros." + ) + return entry_point, launch_config + + def _add_to_launch_queue(self, run: SweepRun) -> bool: + """Convert a sweeprun into a launch job then push to runqueue.""" + # job and image first from CLI args, then from sweep config + _job = self._kwargs.get("job") or self._sweep_config.get("job") + _sweep_config_uri = self._sweep_config.get("image_uri") + _image_uri = self._kwargs.get("image_uri") or _sweep_config_uri + if _job is None and _image_uri is None: + raise SchedulerError(f"{LOG_PREFIX}No 'job' nor 'image_uri' ({run.id})") + elif _job is not None and _image_uri is not None: + raise SchedulerError(f"{LOG_PREFIX}Sweep has both 'job' and 'image_uri'") + + entry_point, launch_config = self._make_entry_and_launch_config(run) + if entry_point: + wandb.termwarn( + f"{LOG_PREFIX}Sweep command {entry_point} will override" + f' {"job" if _job else "image_uri"} entrypoint' + ) + + # override resource and args of job + _job_launch_config = copy.deepcopy(self._wandb_run.config.get("launch")) or {} + + # default priority is "medium" + _priority = int(launch_config.get("priority", 2)) # type: ignore + + # strip resource_args and template_variables from launch_config + 
strip_resource_args_and_template_vars(_job_launch_config) + + run_id = run.id or generate_id() + queued_run = launch_add( + run_id=run_id, + entry_point=entry_point, + config=launch_config, + docker_image=_image_uri, # TODO(gst): make agnostic (github? run uri?) + job=_job, + project=self._project, + entity=self._entity, + queue_name=self._kwargs.get("queue"), + project_queue=self._project_queue, + resource=_job_launch_config.get("resource"), + resource_args=_job_launch_config.get("resource_args"), + template_variables=_job_launch_config.get("template_variables"), + author=self._kwargs.get("author"), + sweep_id=self._sweep_id, + priority=_priority, + ) + run.queued_run = queued_run + # TODO(gst): unify run and queued_run state + run.state = RunState.RUNNING # assume it will get picked up + self._runs[run_id] = run + + wandb.termlog( + f"{LOG_PREFIX}Added run ({run_id}) to queue ({self._kwargs.get('queue')})" + ) + return True diff --git a/parrot/lib/python3.10/site-packages/wandb/sdk/launch/sweeps/scheduler_sweep.py b/parrot/lib/python3.10/site-packages/wandb/sdk/launch/sweeps/scheduler_sweep.py new file mode 100644 index 0000000000000000000000000000000000000000..faf8d8c897df24ab2d7c0d421b29a9adbe7245f1 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/wandb/sdk/launch/sweeps/scheduler_sweep.py @@ -0,0 +1,91 @@ +"""Scheduler for classic wandb Sweeps.""" + +import logging +from pprint import pformat as pf +from typing import Any, Dict, List, Optional + +import wandb +from wandb.sdk.launch.sweeps.scheduler import LOG_PREFIX, RunState, Scheduler, SweepRun + +_logger = logging.getLogger(__name__) + + +class SweepScheduler(Scheduler): + """A controller/agent that populates a Launch RunQueue from a sweeps RunQueue.""" + + def __init__( + self, + *args: Any, + **kwargs: Any, + ): + super().__init__(*args, **kwargs) + + def _get_next_sweep_run(self, worker_id: int) -> Optional[SweepRun]: + """Called by the main scheduler execution loop. 
+ + Expected to return a properly formatted SweepRun if the scheduler + is alive, or None and set the appropriate scheduler state: + + FAILED: self.fail_sweep() + STOPPED: self.stop_sweep() + """ + commands: List[Dict[str, Any]] = self._get_sweep_commands(worker_id) + for command in commands: + # The command "type" can be one of "run", "resume", "stop", "exit" + _type = command.get("type") + if _type in ["exit", "stop"]: + self.stop_sweep() + return None + + if _type not in ["run", "resume"]: + self.fail_sweep(f"AgentHeartbeat unknown command: {_type}") + + _run_id: Optional[str] = command.get("run_id") + if not _run_id: + self.fail_sweep(f"No run id in agent heartbeat: {command}") + return None + + if _run_id in self._runs: + wandb.termlog(f"{LOG_PREFIX}Skipping duplicate run: {_run_id}") + continue + + return SweepRun( + id=_run_id, + state=RunState.PENDING, + args=command.get("args", {}), + logs=command.get("logs", []), + worker_id=worker_id, + ) + return None + + def _get_sweep_commands(self, worker_id: int) -> List[Dict[str, Any]]: + """Helper to receive sweep command from backend.""" + # AgentHeartbeat wants a Dict of runs which are running or queued + _run_states: Dict[str, bool] = {} + for run_id, run in self._yield_runs(): + # Filter out runs that are from a different worker thread + if run.worker_id == worker_id and run.state.is_alive: + _run_states[run_id] = True + + _logger.debug(f"Sending states: \n{pf(_run_states)}\n") + commands: List[Dict[str, Any]] = self._api.agent_heartbeat( + agent_id=self._workers[worker_id].agent_id, + metrics={}, + run_states=_run_states, + ) + _logger.debug(f"AgentHeartbeat commands: \n{pf(commands)}\n") + + return commands + + def _exit(self) -> None: + pass + + def _poll(self) -> None: + _logger.debug(f"_poll. 
_runs: {self._runs}") + pass + + def _load_state(self) -> None: + pass + + def _save_state(self) -> None: + pass diff --git a/parrot/lib/python3.10/site-packages/wandb/sdk/launch/sweeps/utils.py b/parrot/lib/python3.10/site-packages/wandb/sdk/launch/sweeps/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..0b7efba8d9eb772d71b17bffee391493fc3be221 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/wandb/sdk/launch/sweeps/utils.py @@ -0,0 +1,316 @@ +import json +import os +import re +from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union + +import yaml + +import wandb +from wandb import util +from wandb.sdk.launch.errors import LaunchError + +if TYPE_CHECKING: + from wandb.apis.public import Api as PublicApi + +DEFAULT_SWEEP_COMMAND: List[str] = [ + "${env}", + "${interpreter}", + "${program}", + "${args}", +] +SWEEP_COMMAND_ENV_VAR_REGEX = re.compile(r"\$\{envvar\:([A-Z0-9_]*)\}") + + +def parse_sweep_id(parts_dict: dict) -> Optional[str]: + """In place parse sweep path from parts dict. + + Arguments: + parts_dict (dict): dict(entity=,project=,name=). Modifies dict inplace. 
+ + Returns: + None or str if there is an error + """ + entity = None + project = None + sweep_id = parts_dict.get("name") + if not isinstance(sweep_id, str): + return "Expected string sweep_id" + + sweep_split = sweep_id.split("/") + if len(sweep_split) == 1: + pass + elif len(sweep_split) == 2: + split_project, sweep_id = sweep_split + project = split_project or project + elif len(sweep_split) == 3: + split_entity, split_project, sweep_id = sweep_split + project = split_project or project + entity = split_entity or entity + else: + return ( + "Expected sweep_id in form of sweep, project/sweep, or entity/project/sweep" + ) + parts_dict.update(dict(name=sweep_id, project=project, entity=entity)) + return None + + +def sweep_config_err_text_from_jsonschema_violations(violations: List[str]) -> str: + """Consolidate schema violation strings from wandb/sweeps into a single string. + + Parameters + ---------- + violations: list of str + The warnings to render. + + Returns: + ------- + violation: str + The consolidated violation text. + + """ + violation_base = ( + "Malformed sweep config detected! This may cause your sweep to behave in unexpected ways.\n" + "To avoid this, please fix the sweep config schema violations below:" + ) + + for i, warning in enumerate(violations): + violations[i] = f" Violation {i + 1}. {warning}" + violation = "\n".join([violation_base] + violations) + + return violation + + +def handle_sweep_config_violations(warnings: List[str]) -> None: + """Echo sweep config schema violation warnings from Gorilla to the terminal. + + Parameters + ---------- + warnings: list of str + The warnings to render. 
+ """ + warning = sweep_config_err_text_from_jsonschema_violations(warnings) + if len(warnings) > 0: + wandb.termwarn(warning) + + +def load_sweep_config(sweep_config_path: str) -> Optional[Dict[str, Any]]: + """Load a sweep yaml from path.""" + try: + yaml_file = open(sweep_config_path) + except OSError: + wandb.termerror(f"Couldn't open sweep file: {sweep_config_path}") + return None + try: + config: Optional[Dict[str, Any]] = yaml.safe_load(yaml_file) + except yaml.YAMLError as err: + wandb.termerror(f"Error in configuration file: {err}") + return None + if not config: + wandb.termerror("Configuration file is empty") + return None + return config + + +def load_launch_sweep_config(config: Optional[str]) -> Any: + if not config: + return {} + + parsed_config = util.load_json_yaml_dict(config) + if parsed_config is None: + raise LaunchError(f"Could not load config from {config}. Check formatting") + return parsed_config + + +def construct_scheduler_args( + sweep_config: Dict[str, Any], + queue: str, + project: str, + author: Optional[str] = None, + return_job: bool = False, +) -> Union[List[str], Dict[str, str], None]: + """Construct sweep scheduler args. + + logs error and returns None if misconfigured, + otherwise returns args as a dict if is_job else a list of strings. 
+ """ + job = sweep_config.get("job") + image_uri = sweep_config.get("image_uri") + if not job and not image_uri: # don't allow empty string + wandb.termerror( + "No 'job' nor 'image_uri' top-level key found in sweep config, exactly one is required for a launch-sweep" + ) + return None + elif job and image_uri: + wandb.termerror( + "Sweep config has both 'job' and 'image_uri' but a launch-sweep can use only one" + ) + return None + + # if scheduler is a job, return args as dict + if return_job: + args_dict: Dict[str, str] = { + "sweep_id": "WANDB_SWEEP_ID", + "queue": queue, + "project": project, + } + if job: + args_dict["job"] = job + elif image_uri: + args_dict["image_uri"] = image_uri + + if author: + args_dict["author"] = author + + return args_dict + + # scheduler uses cli commands, pass args as param list + args = [ + "--queue", + f"{queue!r}", + "--project", + f"{project!r}", + ] + if author: + args += [ + "--author", + f"{author!r}", + ] + if job: + args += [ + "--job", + f"{job!r}", + ] + elif image_uri: + args += ["--image_uri", image_uri] + + return args + + +def create_sweep_command(command: Optional[List] = None) -> List: + """Return sweep command, filling in environment variable macros.""" + # Start from default sweep command + command = command or DEFAULT_SWEEP_COMMAND + for i, chunk in enumerate(command): + # Replace environment variable macros + # Search a str(chunk), but allow matches to be of any (ex: int) type + if SWEEP_COMMAND_ENV_VAR_REGEX.search(str(chunk)): + # Replace from backwards forwards + matches = list(SWEEP_COMMAND_ENV_VAR_REGEX.finditer(chunk)) + for m in matches[::-1]: + # Default to just leaving as is if environment variable does not exist + _var: str = os.environ.get(m.group(1), m.group(1)) + command[i] = f"{command[i][:m.start()]}{_var}{command[i][m.end():]}" + return command + + +def create_sweep_command_args(command: Dict) -> Dict[str, Any]: + """Create various formats of command arguments for the agent. 
+ + Raises: + ValueError: improperly formatted command dict + + """ + if "args" not in command: + raise ValueError('No "args" found in command: {}'.format(command)) + # four different formats of command args + # (1) standard command line flags (e.g. --foo=bar) + flags: List[str] = [] + # (2) flags without hyphens (e.g. foo=bar) + flags_no_hyphens: List[str] = [] + # (3) flags with false booleans omitted (e.g. --foo) + flags_no_booleans: List[str] = [] + # (4) flags as a dictionary (used for constructing a json) + flags_dict: Dict[str, Any] = {} + # (5) flags without equals (e.g. --foo bar) + args_no_equals: List[str] = [] + for param, config in command["args"].items(): + # allow 'None' as a valid value, but error if no value is found + try: + _value: Any = config["value"] + except KeyError: + raise ValueError('No "value" found for command["args"]["{}"]'.format(param)) + + _flag: str = f"{param}={_value}" + flags.append("--" + _flag) + flags_no_hyphens.append(_flag) + args_no_equals += [f"--{param}", str(_value)] + if isinstance(_value, bool): + # omit flags if they are boolean and false + if _value: + flags_no_booleans.append("--" + param) + else: + flags_no_booleans.append("--" + _flag) + flags_dict[param] = _value + return { + "args": flags, + "args_no_equals": args_no_equals, + "args_no_hyphens": flags_no_hyphens, + "args_no_boolean_flags": flags_no_booleans, + "args_json": [json.dumps(flags_dict)], + "args_dict": flags_dict, + } + + +def make_launch_sweep_entrypoint( + args: Dict[str, Any], command: Optional[List[str]] +) -> Tuple[Optional[List[str]], Any]: + """Use args dict from create_sweep_command_args to construct entrypoint. + + If replace is True, remove macros from entrypoint, fill them in with args + and then return the args in separate return value. 
+ """ + if not command: + return None, None + + entry_point = create_sweep_command(command) + macro_args = {} + for macro in args: + mstr = "${" + macro + "}" + if mstr in entry_point: + idx = entry_point.index(mstr) + # only supports 1 macro per entrypoint + macro_args = args[macro] + entry_point = entry_point[:idx] + entry_point[idx + 1 :] + + if len(entry_point) == 0: + return None, macro_args + + return entry_point, macro_args + + +def check_job_exists(public_api: "PublicApi", job: Optional[str]) -> bool: + """Check if the job exists using the public api. + + Returns: True if no job is passed, or if the job exists. + Returns: False if the job is misformatted or doesn't exist. + """ + if not job: + return True + + try: + public_api.job(job) + except Exception as e: + wandb.termerror(f"Failed to load job. {e}") + return False + return True + + +def get_previous_args( + run_spec: Dict[str, Any], +) -> Tuple[Dict[str, Any], Dict[str, Any]]: + """Parse through previous scheduler run_spec. + + returns scheduler_args and settings. 
+ """ + scheduler_args = ( + run_spec.get("overrides", {}).get("run_config", {}).get("scheduler", {}) + ) + # also pipe through top level resource setup + if run_spec.get("resource"): + scheduler_args["resource"] = run_spec["resource"] + if run_spec.get("resource_args"): + scheduler_args["resource_args"] = run_spec["resource_args"] + + settings = run_spec.get("overrides", {}).get("run_config", {}).get("settings", {}) + + return scheduler_args, settings diff --git a/parrot/lib/python3.10/site-packages/wandb/sdk/launch/utils.py b/parrot/lib/python3.10/site-packages/wandb/sdk/launch/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..163d2fb8cc6edd30d7c27ec910e7f1850c0b0fa4 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/wandb/sdk/launch/utils.py @@ -0,0 +1,746 @@ +import asyncio +import json +import logging +import os +import platform +import re +import subprocess +import sys +from collections import defaultdict +from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, cast + +import click + +import wandb +import wandb.docker as docker +from wandb import util +from wandb.apis.internal import Api +from wandb.sdk.launch.errors import LaunchError +from wandb.sdk.launch.git_reference import GitReference +from wandb.sdk.launch.wandb_reference import WandbReference +from wandb.sdk.wandb_config import Config + +from .builder.templates._wandb_bootstrap import ( + FAILED_PACKAGES_POSTFIX, + FAILED_PACKAGES_PREFIX, +) + +FAILED_PACKAGES_REGEX = re.compile( + f"{re.escape(FAILED_PACKAGES_PREFIX)}(.*){re.escape(FAILED_PACKAGES_POSTFIX)}" +) + +if TYPE_CHECKING: # pragma: no cover + from wandb.sdk.launch.agent.job_status_tracker import JobAndRunStatusTracker + + +# TODO: this should be restricted to just Git repos and not S3 and stuff like that +_GIT_URI_REGEX = re.compile( + r"^[^/|^~|^\.].*(git|bitbucket|dev\.azure\.com|\.visualstudio\.com)" +) +_VALID_IP_REGEX = r"^https?://[0-9]+(?:\.[0-9]+){3}(:[0-9]+)?" 
+_VALID_PIP_PACKAGE_REGEX = r"^[a-zA-Z0-9_.-]+$" +_VALID_WANDB_REGEX = r"^https?://(api.)?wandb" +_WANDB_URI_REGEX = re.compile(r"|".join([_VALID_WANDB_REGEX, _VALID_IP_REGEX])) +_WANDB_QA_URI_REGEX = re.compile( + r"^https?://ap\w.qa.wandb" +) # for testing, not sure if we wanna keep this +_WANDB_DEV_URI_REGEX = re.compile( + r"^https?://ap\w.wandb.test" +) # for testing, not sure if we wanna keep this +_WANDB_LOCAL_DEV_URI_REGEX = re.compile( + r"^https?://localhost" +) # for testing, not sure if we wanna keep this + +API_KEY_REGEX = r"WANDB_API_KEY=\w+(-\w+)?" + +MACRO_REGEX = re.compile(r"\$\{(\w+)\}") + +AZURE_CONTAINER_REGISTRY_URI_REGEX = re.compile( + r"^(?:https://)?([\w]+)\.azurecr\.io/(?P[\w\-]+):?(?P.*)" +) + +ELASTIC_CONTAINER_REGISTRY_URI_REGEX = re.compile( + r"^(?:https://)?(?P[\w-]+)\.dkr\.ecr\.(?P[\w-]+)\.amazonaws\.com/(?P[\.\/\w-]+):?(?P.*)$" +) + +GCP_ARTIFACT_REGISTRY_URI_REGEX = re.compile( + r"^(?:https://)?(?P[\w-]+)-docker\.pkg\.dev/(?P[\w-]+)/(?P[\w-]+)/?(?P[\w-]+)?(?P:.*)?$", + re.IGNORECASE, +) + +S3_URI_RE = re.compile(r"s3://([^/]+)(/(.*))?") +GCS_URI_RE = re.compile(r"gs://([^/]+)(?:/(.*))?") +AZURE_BLOB_REGEX = re.compile( + r"^https://([^\.]+)\.blob\.core\.windows\.net/([^/]+)/?(.*)$" +) + +ARN_PARTITION_RE = re.compile(r"^arn:([^:]+):[^:]*:[^:]*:[^:]*:[^:]*$") + +PROJECT_SYNCHRONOUS = "SYNCHRONOUS" + +LAUNCH_CONFIG_FILE = "~/.config/wandb/launch-config.yaml" +LAUNCH_DEFAULT_PROJECT = "model-registry" + +_logger = logging.getLogger(__name__) +LOG_PREFIX = f"{click.style('launch:', fg='magenta')} " + +MAX_ENV_LENGTHS: Dict[str, int] = defaultdict(lambda: 32670) +MAX_ENV_LENGTHS["SageMakerRunner"] = 512 + +CODE_MOUNT_DIR = "/mnt/wandb" + + +def load_wandb_config() -> Config: + """Load wandb config from WANDB_CONFIG environment variable(s). + + The WANDB_CONFIG environment variable is a json string that can contain + multiple config keys. 
The WANDB_CONFIG_[0-9]+ environment variables are + used for environments where there is a limit on the length of environment + variables. In that case, we shard the contents of WANDB_CONFIG into + multiple environment variables numbered from 0. + + Returns: + A dictionary of wandb config values. + """ + config_str = os.environ.get("WANDB_CONFIG") + if config_str is None: + config_str = "" + idx = 0 + while True: + chunk = os.environ.get(f"WANDB_CONFIG_{idx}") + if chunk is None: + break + config_str += chunk + idx += 1 + if idx < 1: + raise LaunchError( + "No WANDB_CONFIG or WANDB_CONFIG_[0-9]+ environment variables found" + ) + wandb_config = Config() + try: + env_config = json.loads(config_str) + except json.JSONDecodeError as e: + raise LaunchError(f"Failed to parse WANDB_CONFIG: {e}") from e + + wandb_config.update(env_config) + return wandb_config + + +def event_loop_thread_exec(func: Any) -> Any: + """Wrapper for running any function in an awaitable thread on an event loop. + + Example usage: + ``` + def my_func(arg1, arg2): + return arg1 + arg2 + + future = event_loop_thread_exec(my_func)(2, 2) + assert await future == 4 + ``` + + The returned function must be called within an active event loop. 
+ """ + + async def wrapper(*args: Any, **kwargs: Any) -> Any: + loop = asyncio.get_event_loop() + result = cast( + Any, await loop.run_in_executor(None, lambda: func(*args, **kwargs)) + ) + return result + + return wrapper + + +def _is_wandb_uri(uri: str) -> bool: + return ( + _WANDB_URI_REGEX.match(uri) + or _WANDB_DEV_URI_REGEX.match(uri) + or _WANDB_LOCAL_DEV_URI_REGEX.match(uri) + or _WANDB_QA_URI_REGEX.match(uri) + ) is not None + + +def _is_wandb_dev_uri(uri: str) -> bool: + return bool(_WANDB_DEV_URI_REGEX.match(uri)) + + +def _is_wandb_local_uri(uri: str) -> bool: + return bool(_WANDB_LOCAL_DEV_URI_REGEX.match(uri)) + + +def _is_git_uri(uri: str) -> bool: + return bool(_GIT_URI_REGEX.match(uri)) + + +def sanitize_wandb_api_key(s: str) -> str: + return str(re.sub(API_KEY_REGEX, "WANDB_API_KEY", s)) + + +def get_project_from_job(job: str) -> Optional[str]: + job_parts = job.split("/") + if len(job_parts) == 3: + return job_parts[1] + return None + + +def set_project_entity_defaults( + uri: Optional[str], + job: Optional[str], + api: Api, + project: Optional[str], + entity: Optional[str], + launch_config: Optional[Dict[str, Any]], +) -> Tuple[Optional[str], str]: + # set the target project and entity if not provided + source_uri = None + if uri is not None: + if _is_wandb_uri(uri): + _, source_uri, _ = parse_wandb_uri(uri) + elif _is_git_uri(uri): + source_uri = os.path.splitext(os.path.basename(uri))[0] + elif job is not None: + source_uri = get_project_from_job(job) + if project is None: + config_project = None + if launch_config: + config_project = launch_config.get("project") + project = config_project or source_uri or "" + if entity is None: + entity = get_default_entity(api, launch_config) + prefix = "" + if platform.system() != "Windows" and sys.stdout.encoding == "UTF-8": + prefix = "🚀 " + wandb.termlog( + f"{LOG_PREFIX}{prefix}Launching run into {entity}{'/' + project if project else ''}" + ) + return project, entity + + +def get_default_entity(api: 
Api, launch_config: Optional[Dict[str, Any]]): + config_entity = None + if launch_config: + config_entity = launch_config.get("entity") + return config_entity or api.default_entity + + +def strip_resource_args_and_template_vars(launch_spec: Dict[str, Any]) -> None: + if launch_spec.get("resource_args", None) and launch_spec.get( + "template_variables", None + ): + wandb.termwarn( + "Launch spec contains both resource_args and template_variables, " + "only one can be set. Using template_variables." + ) + launch_spec.pop("resource_args") + + +def construct_launch_spec( + uri: Optional[str], + job: Optional[str], + api: Api, + name: Optional[str], + project: Optional[str], + entity: Optional[str], + docker_image: Optional[str], + resource: Optional[str], + entry_point: Optional[List[str]], + version: Optional[str], + resource_args: Optional[Dict[str, Any]], + launch_config: Optional[Dict[str, Any]], + run_id: Optional[str], + repository: Optional[str], + author: Optional[str], + sweep_id: Optional[str] = None, +) -> Dict[str, Any]: + """Construct the launch specification from CLI arguments.""" + # override base config (if supplied) with supplied args + launch_spec = launch_config if launch_config is not None else {} + if uri is not None: + launch_spec["uri"] = uri + if job is not None: + launch_spec["job"] = job + project, entity = set_project_entity_defaults( + uri, + job, + api, + project, + entity, + launch_config, + ) + launch_spec["entity"] = entity + if author: + launch_spec["author"] = author + + launch_spec["project"] = project + if name: + launch_spec["name"] = name + if "docker" not in launch_spec: + launch_spec["docker"] = {} + if docker_image: + launch_spec["docker"]["docker_image"] = docker_image + if sweep_id: # all runs in a sweep have this set + launch_spec["sweep_id"] = sweep_id + + if "resource" not in launch_spec: + launch_spec["resource"] = resource if resource else None + + if "git" not in launch_spec: + launch_spec["git"] = {} + if version: + 
launch_spec["git"]["version"] = version + + if "overrides" not in launch_spec: + launch_spec["overrides"] = {} + + if not isinstance(launch_spec["overrides"].get("args", []), list): + raise LaunchError("override args must be a list of strings") + + if resource_args: + launch_spec["resource_args"] = resource_args + + if entry_point: + launch_spec["overrides"]["entry_point"] = entry_point + + if run_id is not None: + launch_spec["run_id"] = run_id + + if repository: + launch_config = launch_config or {} + if launch_config.get("registry"): + launch_config["registry"]["url"] = repository + else: + launch_config["registry"] = {"url": repository} + + # dont send both resource args and template variables + strip_resource_args_and_template_vars(launch_spec) + + return launch_spec + + +def validate_launch_spec_source(launch_spec: Dict[str, Any]) -> None: + job = launch_spec.get("job") + docker_image = launch_spec.get("docker", {}).get("docker_image") + if bool(job) == bool(docker_image): + raise LaunchError( + "Exactly one of job or docker_image must be specified in the launch " + "spec." 
+ ) + + +def parse_wandb_uri(uri: str) -> Tuple[str, str, str]: + """Parse wandb uri to retrieve entity, project and run name.""" + ref = WandbReference.parse(uri) + if not ref or not ref.entity or not ref.project or not ref.run_id: + raise LaunchError(f"Trouble parsing wandb uri {uri}") + return (ref.entity, ref.project, ref.run_id) + + +def get_local_python_deps( + dir: str, filename: str = "requirements.local.txt" +) -> Optional[str]: + try: + env = os.environ + with open(os.path.join(dir, filename), "w") as f: + subprocess.call(["pip", "freeze"], env=env, stdout=f) + return filename + except subprocess.CalledProcessError as e: + wandb.termerror(f"Command failed: {e}") + return None + + +def diff_pip_requirements(req_1: List[str], req_2: List[str]) -> Dict[str, str]: + """Return a list of pip requirements that are not in req_1 but are in req_2.""" + + def _parse_req(req: List[str]) -> Dict[str, str]: + # TODO: This can be made more exhaustive, but for 99% of cases this is fine + # see https://pip.pypa.io/en/stable/reference/requirements-file-format/#example + d: Dict[str, str] = dict() + for line in req: + _name: str = None # type: ignore + _version: str = None # type: ignore + if line.startswith("#"): # Ignore comments + continue + elif "git+" in line or "hg+" in line: + _name = line.split("#egg=")[1] + _version = line.split("@")[-1].split("#")[0] + elif "==" in line: + _s = line.split("==") + _name = _s[0].lower() + _version = _s[1].split("#")[0].strip() + elif ">=" in line: + _s = line.split(">=") + _name = _s[0].lower() + _version = _s[1].split("#")[0].strip() + elif ">" in line: + _s = line.split(">") + _name = _s[0].lower() + _version = _s[1].split("#")[0].strip() + elif re.match(_VALID_PIP_PACKAGE_REGEX, line) is not None: + _name = line + else: + raise ValueError(f"Unable to parse pip requirements file line: {line}") + if _name is not None: + assert re.match( + _VALID_PIP_PACKAGE_REGEX, _name + ), f"Invalid pip package name {_name}" + d[_name] = _version 
+ return d + + # Use symmetric difference between dict representation to print errors + try: + req_1_dict: Dict[str, str] = _parse_req(req_1) + req_2_dict: Dict[str, str] = _parse_req(req_2) + except (AssertionError, ValueError, IndexError, KeyError) as e: + raise LaunchError(f"Failed to parse pip requirements: {e}") + diff: List[Tuple[str, str]] = [] + for item in set(req_1_dict.items()) ^ set(req_2_dict.items()): + diff.append(item) + # Parse through the diff to make it pretty + pretty_diff: Dict[str, str] = {} + for name, version in diff: + if pretty_diff.get(name) is None: + pretty_diff[name] = version + else: + pretty_diff[name] = f"v{version} and v{pretty_diff[name]}" + return pretty_diff + + +def validate_wandb_python_deps( + requirements_file: Optional[str], + dir: str, +) -> None: + """Warn if local python dependencies differ from wandb requirements.txt.""" + if requirements_file is not None: + requirements_path = os.path.join(dir, requirements_file) + with open(requirements_path) as f: + wandb_python_deps: List[str] = f.read().splitlines() + + local_python_file = get_local_python_deps(dir) + if local_python_file is not None: + local_python_deps_path = os.path.join(dir, local_python_file) + with open(local_python_deps_path) as f: + local_python_deps: List[str] = f.read().splitlines() + + diff_pip_requirements(wandb_python_deps, local_python_deps) + return + _logger.warning("Unable to validate local python dependencies") + + +def apply_patch(patch_string: str, dst_dir: str) -> None: + """Applies a patch file to a directory.""" + _logger.info("Applying diff.patch") + with open(os.path.join(dst_dir, "diff.patch"), "w") as fp: + fp.write(patch_string) + try: + subprocess.check_call( + [ + "patch", + "-s", + f"--directory={dst_dir}", + "-p1", + "-i", + "diff.patch", + ] + ) + except subprocess.CalledProcessError: + raise wandb.Error("Failed to apply diff.patch associated with run.") + + +def _fetch_git_repo(dst_dir: str, uri: str, version: Optional[str]) -> 
Optional[str]: + """Clones the git repo at ``uri`` into ``dst_dir``. + + checks out commit ``version``. Assumes authentication parameters are + specified by the environment, e.g. by a Git credential helper. + """ + # We defer importing git until the last moment, because the import requires that the git + # executable is available on the PATH, so we only want to fail if we actually need it. + + _logger.info("Fetching git repo") + ref = GitReference(uri, version) + if ref is None: + raise LaunchError(f"Unable to parse git uri: {uri}") + ref.fetch(dst_dir) + if version is None: + version = ref.ref + return version + + +def convert_jupyter_notebook_to_script(fname: str, project_dir: str) -> str: + nbconvert = wandb.util.get_module( + "nbconvert", "nbformat and nbconvert are required to use launch with notebooks" + ) + nbformat = wandb.util.get_module( + "nbformat", "nbformat and nbconvert are required to use launch with notebooks" + ) + + _logger.info("Converting notebook to script") + new_name = fname.replace(".ipynb", ".py") + with open(os.path.join(project_dir, fname)) as fh: + nb = nbformat.reads(fh.read(), nbformat.NO_CONVERT) + for cell in nb.cells: + if cell.cell_type == "code": + source_lines = cell.source.split("\n") + modified_lines = [] + for line in source_lines: + if not line.startswith("!"): + modified_lines.append(line) + cell.source = "\n".join(modified_lines) + + exporter = nbconvert.PythonExporter() + source, meta = exporter.from_notebook_node(nb) + + with open(os.path.join(project_dir, new_name), "w+") as fh: + fh.writelines(source) + return new_name + + +def to_camel_case(maybe_snake_str: str) -> str: + if "_" not in maybe_snake_str: + return maybe_snake_str + components = maybe_snake_str.split("_") + return "".join(x.title() if x else "_" for x in components) + + +def validate_build_and_registry_configs( + build_config: Dict[str, Any], registry_config: Dict[str, Any] +) -> None: + build_config_credentials = build_config.get("credentials", {}) + 
registry_config_credentials = registry_config.get("credentials", {}) + if ( + build_config_credentials + and registry_config_credentials + and build_config_credentials != registry_config_credentials + ): + raise LaunchError("registry and build config credential mismatch") + + +async def get_kube_context_and_api_client( + kubernetes: Any, + resource_args: Dict[str, Any], +) -> Tuple[Any, Any]: + config_file = resource_args.get("configFile", None) + context = None + if config_file is not None or os.path.exists(os.path.expanduser("~/.kube/config")): + # context only exist in the non-incluster case + ( + all_contexts, + active_context, + ) = kubernetes.config.list_kube_config_contexts(config_file) + context = None + if resource_args.get("context"): + context_name = resource_args["context"] + for c in all_contexts: + if c["name"] == context_name: + context = c + break + raise LaunchError(f"Specified context {context_name} was not found.") + else: + context = active_context + # TODO: We should not really be performing this check if the user is not + # using EKS but I don't see an obvious way to make an eks specific code path + # right here. + util.get_module( + "awscli", + "awscli is required to load a kubernetes context " + "from eks. 
Please run `pip install wandb[launch]` to install it.", + ) + await kubernetes.config.load_kube_config(config_file, context["name"]) + api_client = await kubernetes.config.new_client_from_config( + config_file, context=context["name"] + ) + return context, api_client + else: + kubernetes.config.load_incluster_config() + api_client = kubernetes.client.api_client.ApiClient() + return context, api_client + + +def resolve_build_and_registry_config( + default_launch_config: Optional[Dict[str, Any]], + build_config: Optional[Dict[str, Any]], + registry_config: Optional[Dict[str, Any]], +) -> Tuple[Dict[str, Any], Dict[str, Any]]: + resolved_build_config: Dict[str, Any] = {} + if build_config is None and default_launch_config is not None: + resolved_build_config = default_launch_config.get("builder", {}) + elif build_config is not None: + resolved_build_config = build_config + resolved_registry_config: Dict[str, Any] = {} + if registry_config is None and default_launch_config is not None: + resolved_registry_config = default_launch_config.get("registry", {}) + elif registry_config is not None: + resolved_registry_config = registry_config + validate_build_and_registry_configs(resolved_build_config, resolved_registry_config) + return resolved_build_config, resolved_registry_config + + +def check_logged_in(api: Api) -> bool: + """Check if a user is logged in. + + Raises an error if the viewer doesn't load (likely a broken API key). Expected time + cost is 0.1-0.2 seconds. + """ + res = api.api.viewer() + if not res: + raise LaunchError( + "Could not connect with current API-key. 
" + "Please relogin using `wandb login --relogin`" + " and try again (see `wandb login --help` for more options)" + ) + + return True + + +def make_name_dns_safe(name: str) -> str: + resp = name.replace("_", "-").lower() + resp = re.sub(r"[^a-z\.\-]", "", resp) + # Actual length limit is 253, but we want to leave room for the generated suffix + resp = resp[:200] + return resp + + +def warn_failed_packages_from_build_logs( + log: str, image_uri: str, api: Api, job_tracker: Optional["JobAndRunStatusTracker"] +) -> None: + match = FAILED_PACKAGES_REGEX.search(log) + if match: + _msg = f"Failed to install the following packages: {match.group(1)} for image: {image_uri}. Will attempt to launch image without them." + wandb.termwarn(_msg) + if job_tracker is not None: + res = job_tracker.saver.save_contents( + _msg, "failed-packages.log", "warning" + ) + api.update_run_queue_item_warning( + job_tracker.run_queue_item_id, + "Some packages were not successfully installed during the build", + "build", + res, + ) + + +def docker_image_exists(docker_image: str, should_raise: bool = False) -> bool: + """Check if a specific image is already available. + + Optionally raises an exception if the image is not found. + """ + _logger.info("Checking if base image exists...") + try: + docker.run(["docker", "image", "inspect", docker_image]) + return True + except (docker.DockerError, ValueError) as e: + if should_raise: + raise e + _logger.info("Base image not found. Generating new base image") + return False + + +def pull_docker_image(docker_image: str) -> None: + """Pull the requested docker image.""" + try: + docker.run(["docker", "pull", docker_image]) + except docker.DockerError as e: + raise LaunchError(f"Docker server returned error: {e}") + + +def macro_sub(original: str, sub_dict: Dict[str, Optional[str]]) -> str: + """Substitute macros in a string. + + Macros occur in the string in the ${macro} format. 
The macro names are + substituted with their values from the given dictionary. If a macro + is not found in the dictionary, it is left unchanged. + + Args: + original: The string to substitute macros in. + sub_dict: A dictionary mapping macro names to their values. + + Returns: + The string with the macros substituted. + """ + return MACRO_REGEX.sub( + lambda match: str(sub_dict.get(match.group(1), match.group(0))), original + ) + + +def recursive_macro_sub(source: Any, sub_dict: Dict[str, Optional[str]]) -> Any: + """Recursively substitute macros in a parsed JSON or YAML blob. + + Macros occur in strings at leaves of the blob in the ${macro} format. + The macro names are substituted with their values from the given dictionary. + If a macro is not found in the dictionary, it is left unchanged. + + Arguments: + source: The JSON or YAML blob to substitute macros in. + sub_dict: A dictionary mapping macro names to their values. + + Returns: + The blob with the macros substituted. + """ + if isinstance(source, str): + return macro_sub(source, sub_dict) + elif isinstance(source, list): + return [recursive_macro_sub(item, sub_dict) for item in source] + elif isinstance(source, dict): + return { + key: recursive_macro_sub(value, sub_dict) for key, value in source.items() + } + else: + return source + + +def fetch_and_validate_template_variables( + runqueue: Any, fields: dict +) -> Dict[str, Any]: + template_variables = {} + + variable_schemas = {} + for tv in runqueue.template_variables: + variable_schemas[tv["name"]] = json.loads(tv["schema"]) + + for field in fields: + field_parts = field.split("=") + if len(field_parts) != 2: + raise LaunchError( + f'--set-var value must be in the format "--set-var key1=value1", instead got: {field}' + ) + key, val = field_parts + if key not in variable_schemas: + raise LaunchError( + f"Queue {runqueue.name} does not support overriding {key}." 
+ ) + schema = variable_schemas.get(key, {}) + field_type = schema.get("type") + try: + if field_type == "integer": + val = int(val) + elif field_type == "number": + val = float(val) + + except ValueError: + raise LaunchError(f"Value for {key} must be of type {field_type}.") + template_variables[key] = val + return template_variables + + +def get_entrypoint_file(entrypoint: List[str]) -> Optional[str]: + """Get the entrypoint file from the given command. + + Args: + entrypoint (List[str]): List of command and arguments. + + Returns: + Optional[str]: The entrypoint file if found, otherwise None. + """ + if not entrypoint: + return None + if entrypoint[0].endswith(".py") or entrypoint[0].endswith(".sh"): + return entrypoint[0] + if len(entrypoint) < 2: + return None + return entrypoint[1] + + +def get_current_python_version() -> Tuple[str, str]: + full_version = sys.version.split()[0].split(".") + major = full_version[0] + version = ".".join(full_version[:2]) if len(full_version) >= 2 else major + ".0" + return version, major diff --git a/parrot/lib/python3.10/site-packages/wandb/sdk/launch/wandb_reference.py b/parrot/lib/python3.10/site-packages/wandb/sdk/launch/wandb_reference.py new file mode 100644 index 0000000000000000000000000000000000000000..5de34c04bf3aa77068da19cb6b4af8f1e163709d --- /dev/null +++ b/parrot/lib/python3.10/site-packages/wandb/sdk/launch/wandb_reference.py @@ -0,0 +1,138 @@ +"""Support for parsing W&B URLs (which might be user provided) into constituent parts.""" + +from dataclasses import dataclass +from enum import IntEnum +from typing import Optional +from urllib.parse import urlparse + +PREFIX_HTTP = "http://" +PREFIX_HTTPS = "https://" + + +class ReferenceType(IntEnum): + RUN = 1 + JOB = 2 + + +# Ideally we would not overload the URL paths as we do. +# TODO: Not sure these are exhaustive, and even if so more special paths might get added. +# Would be good to have restrictions that we could check. 
# First URL path segments that can never be an entity name.
RESERVED_NON_ENTITIES = (
    "create-team",
    "fully-connected",
    "registry",
    "settings",
    "subscriptions",
)
# Second path segments that can never be a project name.
RESERVED_NON_PROJECTS = (
    "likes",
    "projects",
)
# Segments after a job name that are view state rather than an alias/version.
RESERVED_JOB_PATHS = ("_view",)


@dataclass
class WandbReference:
    """Parsed representation of a W&B URL or bare path.

    Depending on how much could be recognized, a reference may carry just a
    host, an entity/project scope, or a fully resolved run or job pointer
    (discriminated by ``ref_type``).
    """

    # TODO: This will include port, should we separate that out?
    host: Optional[str] = None

    entity: Optional[str] = None
    project: Optional[str] = None

    # Holds the raw path when we could not interpret it further.
    path: Optional[str] = None

    # Determines which of the fields below are meaningful.
    ref_type: Optional[ReferenceType] = None

    run_id: Optional[str] = None

    job_name: Optional[str] = None
    # May also be a version specifier rather than an alias.
    job_alias: str = "latest"

    def is_bare(self) -> bool:
        """Whether this reference has no host component."""
        return self.host is None

    def is_job(self) -> bool:
        """Whether this reference resolved to a job artifact."""
        return self.ref_type == ReferenceType.JOB

    def is_run(self) -> bool:
        """Whether this reference resolved to a run."""
        return self.ref_type == ReferenceType.RUN

    def is_job_or_run(self) -> bool:
        """Whether this reference resolved to either a run or a job."""
        return self.is_run() or self.is_job()

    def job_reference(self) -> str:
        """Return ``name:alias`` for a job reference."""
        assert self.is_job()
        return f"{self.job_name}:{self.job_alias}"

    def job_reference_scoped(self) -> str:
        """Return ``entity/project/name:alias`` for a job reference."""
        assert self.entity
        assert self.project
        return f"{self.entity}/{self.project}/{self.job_reference()}"

    def url_host(self) -> str:
        """Return ``https://host``, or the empty string for bare references."""
        if not self.host:
            return ""
        return f"{PREFIX_HTTPS}{self.host}"

    def url_entity(self) -> str:
        """Return the URL of the entity page."""
        assert self.entity
        return f"{self.url_host()}/{self.entity}"

    def url_project(self) -> str:
        """Return the URL of the project page."""
        assert self.project
        return f"{self.url_entity()}/{self.project}"

    @staticmethod
    def parse(uri: str) -> Optional["WandbReference"]:
        """Attempt to parse a string as a W&B URL.

        Returns None for strings that cannot be a W&B reference at all;
        otherwise returns a WandbReference populated with as much structure
        as could be recognized.
        """
        # TODO: Error if HTTP and host is not localhost?
        recognized_prefix = (
            uri.startswith("/")
            or uri.startswith(PREFIX_HTTP)
            or uri.startswith(PREFIX_HTTPS)
        )
        if not recognized_prefix:
            return None

        ref = WandbReference()

        # urlparse strips query string and fragment for us.
        parsed = urlparse(uri)
        if parsed.netloc:
            ref.host = parsed.netloc

        if not parsed.path.startswith("/"):
            return ref

        ref.path = parsed.path[1:]
        segments = ref.path.split("/")

        # A reserved first segment means the path is not entity-scoped;
        # leave the raw path stored as-is.
        if segments[0] in RESERVED_NON_ENTITIES:
            return ref
        ref.path = None
        ref.entity = segments[0]

        if len(segments) < 2 or segments[1] in RESERVED_NON_PROJECTS:
            return ref
        ref.project = segments[1]

        if len(segments) > 3 and segments[2] == "runs":
            ref.ref_type = ReferenceType.RUN
            ref.run_id = segments[3]
        elif len(segments) > 4 and segments[2] == "artifacts" and segments[3] == "job":
            ref.ref_type = ReferenceType.JOB
            ref.job_name = segments[4]
            if len(segments) > 5 and segments[5] not in RESERVED_JOB_PATHS:
                ref.job_alias = segments[5]
        # TODO: Right now we are not tracking selection as part of URL state in the Jobs tab.
        # If that changes we'll want to update this.

        return ref

    @staticmethod
    def is_uri_job_or_run(uri: str) -> bool:
        """Whether the given string parses to a run or job reference."""
        parsed = WandbReference.parse(uri)
        return bool(parsed and parsed.is_job_or_run())