diff --git a/.gitattributes b/.gitattributes
index 9c2716bf9011563311d23d1b459c9a58c2f53cba..0dc8816dcffb5ff9e5fbfad4fd4af0cc80ab2de8 100644
--- a/.gitattributes
+++ b/.gitattributes
@@ -142,3 +142,6 @@ parrot/lib/python3.10/site-packages/wandb/vendor/pynvml/__pycache__/pynvml.cpyth
 parrot/lib/python3.10/site-packages/cv2/qt/fonts/DejaVuSans-BoldOblique.ttf filter=lfs diff=lfs merge=lfs -text
 parrot/lib/python3.10/site-packages/pyarrow/_fs.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
 parrot/lib/python3.10/site-packages/cv2/qt/fonts/DejaVuSans-Bold.ttf filter=lfs diff=lfs merge=lfs -text
+parrot/lib/libreadline.a filter=lfs diff=lfs merge=lfs -text
+parrot/lib/python3.10/site-packages/wandb/vendor/pygments/lexers/__pycache__/_php_builtins.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
+parrot/lib/python3.10/site-packages/aiohttp/_http_writer.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
diff --git a/parrot/lib/libreadline.a b/parrot/lib/libreadline.a
new file mode 100644
index 0000000000000000000000000000000000000000..ddfa236a3a6b4ac6383a3befb5ea24fbd7edaa47
--- /dev/null
+++ b/parrot/lib/libreadline.a
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:34edb0aaf24f86fa37e869bb46389534179d560e141a744b15d854497148663a
+size 749782
diff --git a/parrot/lib/python3.10/site-packages/aiohttp/_http_writer.cpython-310-x86_64-linux-gnu.so b/parrot/lib/python3.10/site-packages/aiohttp/_http_writer.cpython-310-x86_64-linux-gnu.so
new file mode 100644
index 0000000000000000000000000000000000000000..778975be26ceb9475f8aead9c723fcc470e6337c
--- /dev/null
+++ b/parrot/lib/python3.10/site-packages/aiohttp/_http_writer.cpython-310-x86_64-linux-gnu.so
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:5436569093dd69cf0f00b018a9189d08fb2ddd45b65049de429719f1540fa777
+size 459376
diff --git a/parrot/lib/python3.10/site-packages/wandb/sdk/interface/__init__.py b/parrot/lib/python3.10/site-packages/wandb/sdk/interface/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/parrot/lib/python3.10/site-packages/wandb/sdk/interface/__pycache__/__init__.cpython-310.pyc b/parrot/lib/python3.10/site-packages/wandb/sdk/interface/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..8dcb96461cf63b7032f5e23874560a16461bb6d6
Binary files /dev/null and b/parrot/lib/python3.10/site-packages/wandb/sdk/interface/__pycache__/__init__.cpython-310.pyc differ
diff --git a/parrot/lib/python3.10/site-packages/wandb/sdk/interface/__pycache__/constants.cpython-310.pyc b/parrot/lib/python3.10/site-packages/wandb/sdk/interface/__pycache__/constants.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..f34ba9b49709c5efb5ed730fc39ce7c26ed94b28
Binary files /dev/null and b/parrot/lib/python3.10/site-packages/wandb/sdk/interface/__pycache__/constants.cpython-310.pyc differ
diff --git a/parrot/lib/python3.10/site-packages/wandb/sdk/interface/__pycache__/interface.cpython-310.pyc b/parrot/lib/python3.10/site-packages/wandb/sdk/interface/__pycache__/interface.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..5c2cb0b5097509a4c3da8055c7fcf278fa174b4d
Binary files /dev/null and b/parrot/lib/python3.10/site-packages/wandb/sdk/interface/__pycache__/interface.cpython-310.pyc differ
diff --git a/parrot/lib/python3.10/site-packages/wandb/sdk/interface/__pycache__/interface_queue.cpython-310.pyc b/parrot/lib/python3.10/site-packages/wandb/sdk/interface/__pycache__/interface_queue.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..bd0ec42a0f3a53b5dc8da7d7f1d2b5c89bcb2963
Binary files /dev/null and b/parrot/lib/python3.10/site-packages/wandb/sdk/interface/__pycache__/interface_queue.cpython-310.pyc differ
diff --git a/parrot/lib/python3.10/site-packages/wandb/sdk/interface/__pycache__/interface_relay.cpython-310.pyc b/parrot/lib/python3.10/site-packages/wandb/sdk/interface/__pycache__/interface_relay.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..e73dfe3b94cb5a97924f7efa26d6741dc43d06d0
Binary files /dev/null and b/parrot/lib/python3.10/site-packages/wandb/sdk/interface/__pycache__/interface_relay.cpython-310.pyc differ
diff --git a/parrot/lib/python3.10/site-packages/wandb/sdk/interface/__pycache__/interface_shared.cpython-310.pyc b/parrot/lib/python3.10/site-packages/wandb/sdk/interface/__pycache__/interface_shared.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..6aa5a05bfbfaf26a3eb50d98453d490d3014237d
Binary files /dev/null and b/parrot/lib/python3.10/site-packages/wandb/sdk/interface/__pycache__/interface_shared.cpython-310.pyc differ
diff --git a/parrot/lib/python3.10/site-packages/wandb/sdk/interface/__pycache__/interface_sock.cpython-310.pyc b/parrot/lib/python3.10/site-packages/wandb/sdk/interface/__pycache__/interface_sock.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..e64b8cc81c8ee5750bab1855236cc9b0717c9c0d
Binary files /dev/null and b/parrot/lib/python3.10/site-packages/wandb/sdk/interface/__pycache__/interface_sock.cpython-310.pyc differ
diff --git a/parrot/lib/python3.10/site-packages/wandb/sdk/interface/__pycache__/message_future.cpython-310.pyc b/parrot/lib/python3.10/site-packages/wandb/sdk/interface/__pycache__/message_future.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..f1095180f3c36efded55f1e893054564c0398c78
Binary files /dev/null and b/parrot/lib/python3.10/site-packages/wandb/sdk/interface/__pycache__/message_future.cpython-310.pyc differ
diff --git a/parrot/lib/python3.10/site-packages/wandb/sdk/interface/__pycache__/message_future_poll.cpython-310.pyc b/parrot/lib/python3.10/site-packages/wandb/sdk/interface/__pycache__/message_future_poll.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..b3b3c396f01d9510966fc73cf7354c18f7d9252c
Binary files /dev/null and b/parrot/lib/python3.10/site-packages/wandb/sdk/interface/__pycache__/message_future_poll.cpython-310.pyc differ
diff --git a/parrot/lib/python3.10/site-packages/wandb/sdk/interface/__pycache__/router.cpython-310.pyc b/parrot/lib/python3.10/site-packages/wandb/sdk/interface/__pycache__/router.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..4e296547f7db76914598771438964daef07dca11
Binary files /dev/null and b/parrot/lib/python3.10/site-packages/wandb/sdk/interface/__pycache__/router.cpython-310.pyc differ
diff --git a/parrot/lib/python3.10/site-packages/wandb/sdk/interface/__pycache__/router_queue.cpython-310.pyc b/parrot/lib/python3.10/site-packages/wandb/sdk/interface/__pycache__/router_queue.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..b5fb55ec0ee8a46789504e042002d0f2f8d4d4b7
Binary files /dev/null and b/parrot/lib/python3.10/site-packages/wandb/sdk/interface/__pycache__/router_queue.cpython-310.pyc differ
diff --git a/parrot/lib/python3.10/site-packages/wandb/sdk/interface/__pycache__/router_relay.cpython-310.pyc b/parrot/lib/python3.10/site-packages/wandb/sdk/interface/__pycache__/router_relay.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..85273018f60be54e4c20bc49b004dd0297c4fcc0
Binary files /dev/null and b/parrot/lib/python3.10/site-packages/wandb/sdk/interface/__pycache__/router_relay.cpython-310.pyc differ
diff --git a/parrot/lib/python3.10/site-packages/wandb/sdk/interface/__pycache__/router_sock.cpython-310.pyc b/parrot/lib/python3.10/site-packages/wandb/sdk/interface/__pycache__/router_sock.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..232299f2b5ff3cdc4f5d7524ae6e7650c92ab7ca
Binary files /dev/null and b/parrot/lib/python3.10/site-packages/wandb/sdk/interface/__pycache__/router_sock.cpython-310.pyc differ
diff --git a/parrot/lib/python3.10/site-packages/wandb/sdk/interface/__pycache__/summary_record.cpython-310.pyc b/parrot/lib/python3.10/site-packages/wandb/sdk/interface/__pycache__/summary_record.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..f2a5bbc0ae6e19870c60785ad8debbf3282adb42
Binary files /dev/null and b/parrot/lib/python3.10/site-packages/wandb/sdk/interface/__pycache__/summary_record.cpython-310.pyc differ
diff --git a/parrot/lib/python3.10/site-packages/wandb/sdk/interface/constants.py b/parrot/lib/python3.10/site-packages/wandb/sdk/interface/constants.py
new file mode 100644
index 0000000000000000000000000000000000000000..09fe1e26f0ce72e23df8b627ff15e332f6ebc4e6
--- /dev/null
+++ b/parrot/lib/python3.10/site-packages/wandb/sdk/interface/constants.py
@@ -0,0 +1,4 @@
+#
+NOTIFY_PROCESS = 1
+NOTIFY_SHUTDOWN = 2
+NOTIFY_REQUEST = 3
diff --git a/parrot/lib/python3.10/site-packages/wandb/sdk/interface/interface.py b/parrot/lib/python3.10/site-packages/wandb/sdk/interface/interface.py
new file mode 100644
index 0000000000000000000000000000000000000000..aa93cfe0c1724f0b4e03b344c58153db2bd9b177
--- /dev/null
+++ b/parrot/lib/python3.10/site-packages/wandb/sdk/interface/interface.py
@@ -0,0 +1,996 @@
+"""Interface base class - Used to send messages to the internal process.
+ +InterfaceBase: The abstract class +InterfaceShared: Common routines for socket and queue based implementations +InterfaceQueue: Use multiprocessing queues to send and receive messages +InterfaceSock: Use socket to send and receive messages +InterfaceRelay: Responses are routed to a relay queue (not matching uuids) + +""" + +import gzip +import logging +import os +import sys +import time +from abc import abstractmethod +from pathlib import Path +from secrets import token_hex +from typing import ( + TYPE_CHECKING, + Any, + Dict, + Iterable, + List, + NewType, + Optional, + Tuple, + Union, +) + +from wandb import termwarn +from wandb.proto import wandb_internal_pb2 as pb +from wandb.proto import wandb_telemetry_pb2 as tpb +from wandb.sdk.artifacts.artifact import Artifact +from wandb.sdk.artifacts.artifact_manifest import ArtifactManifest +from wandb.sdk.artifacts.staging import get_staging_dir +from wandb.sdk.lib import json_util as json +from wandb.util import ( + WandBJSONEncoderOld, + get_h5_typename, + json_dumps_safer, + json_dumps_safer_history, + json_friendly, + json_friendly_val, + maybe_compress_summary, +) + +from ..data_types.utils import history_dict_to_json, val_to_json +from ..lib.mailbox import MailboxHandle +from . import summary_record as sr +from .message_future import MessageFuture + +MANIFEST_FILE_SIZE_THRESHOLD = 100_000 + +GlobStr = NewType("GlobStr", str) + +if sys.version_info >= (3, 8): + from typing import Literal, TypedDict +else: + from typing_extensions import Literal, TypedDict + +PolicyName = Literal["now", "live", "end"] + + +class FilesDict(TypedDict): + files: Iterable[Tuple[GlobStr, PolicyName]] + + +if TYPE_CHECKING: + from ..wandb_run import Run + + +logger = logging.getLogger("wandb") + + +def file_policy_to_enum(policy: "PolicyName") -> "pb.FilesItem.PolicyType.V": + if policy == "now": + enum = pb.FilesItem.PolicyType.NOW + elif policy == "end": + enum = pb.FilesItem.PolicyType.END + elif policy == "live": + enum = pb.FilesItem.PolicyType.LIVE + return enum + + +def file_enum_to_policy(enum: "pb.FilesItem.PolicyType.V") -> "PolicyName": + if enum == pb.FilesItem.PolicyType.NOW: + policy: PolicyName = "now" + elif enum == pb.FilesItem.PolicyType.END: + policy = "end" + elif enum == pb.FilesItem.PolicyType.LIVE: + policy = "live" + return policy + + +class InterfaceBase: + _run: Optional["Run"] + _drop: bool + + def __init__(self) -> None: + self._run = None + self._drop = False + + def _hack_set_run(self, run: "Run") -> None: + self._run = run + current_pid = os.getpid() + self._run._set_iface_pid(current_pid) + + def publish_header(self) -> None: + header = pb.HeaderRecord() + self._publish_header(header) + + @abstractmethod + def _publish_header(self, header: pb.HeaderRecord) -> None: + raise NotImplementedError + + def deliver_status(self) -> MailboxHandle: + return self._deliver_status(pb.StatusRequest()) + + @abstractmethod + def _deliver_status( + self, + status: pb.StatusRequest, + ) -> MailboxHandle: + raise NotImplementedError + + def _make_config( + self, + data: Optional[dict] = None, + key: Optional[Union[Tuple[str, ...], str]] = None, + val: Optional[Any] = None, + obj: Optional[pb.ConfigRecord] = None, + ) -> pb.ConfigRecord: + config = obj or pb.ConfigRecord() + if data: + for k, v in data.items(): + update = config.update.add() + update.key = k + update.value_json = json_dumps_safer(json_friendly(v)[0]) + if key: + update = config.update.add() + if isinstance(key, tuple): + for k in key: + update.nested_key.append(k) + else: + 
update.key = key + update.value_json = json_dumps_safer(json_friendly(val)[0]) + return config + + def _make_run(self, run: "Run") -> pb.RunRecord: + proto_run = pb.RunRecord() + run._make_proto_run(proto_run) + if run._settings.host: + proto_run.host = run._settings.host + if run._config is not None: + config_dict = run._config._as_dict() # type: ignore + self._make_config(data=config_dict, obj=proto_run.config) + if run._telemetry_obj: + proto_run.telemetry.MergeFrom(run._telemetry_obj) + return proto_run + + def publish_run(self, run: "Run") -> None: + run_record = self._make_run(run) + self._publish_run(run_record) + + @abstractmethod + def _publish_run(self, run: pb.RunRecord) -> None: + raise NotImplementedError + + def publish_cancel(self, cancel_slot: str) -> None: + cancel = pb.CancelRequest(cancel_slot=cancel_slot) + self._publish_cancel(cancel) + + @abstractmethod + def _publish_cancel(self, cancel: pb.CancelRequest) -> None: + raise NotImplementedError + + def publish_config( + self, + data: Optional[dict] = None, + key: Optional[Union[Tuple[str, ...], str]] = None, + val: Optional[Any] = None, + ) -> None: + cfg = self._make_config(data=data, key=key, val=val) + + self._publish_config(cfg) + + @abstractmethod + def _publish_config(self, cfg: pb.ConfigRecord) -> None: + raise NotImplementedError + + @abstractmethod + def _publish_metric(self, metric: pb.MetricRecord) -> None: + raise NotImplementedError + + def _make_summary_from_dict(self, summary_dict: dict) -> pb.SummaryRecord: + summary = pb.SummaryRecord() + for k, v in summary_dict.items(): + update = summary.update.add() + update.key = k + update.value_json = json.dumps(v) + return summary + + def _summary_encode(self, value: Any, path_from_root: str) -> dict: + """Normalize, compress, and encode sub-objects for backend storage. + + value: Object to encode. + path_from_root: `str` dot separated string from the top-level summary to the + current `value`. + + Returns: + A new tree of dict's with large objects replaced with dictionaries + with "_type" entries that say which type the original data was. + """ + # Constructs a new `dict` tree in `json_value` that discards and/or + # encodes objects that aren't JSON serializable. + + if isinstance(value, dict): + json_value = {} + for key, value in value.items(): # noqa: B020 + json_value[key] = self._summary_encode( + value, path_from_root + "." 
+ key + ) + return json_value + else: + friendly_value, converted = json_friendly( + val_to_json(self._run, path_from_root, value, namespace="summary") + ) + json_value, compressed = maybe_compress_summary( + friendly_value, get_h5_typename(value) + ) + if compressed: + # TODO(jhr): impleement me + pass + # self.write_h5(path_from_root, friendly_value) + + return json_value + + def _make_summary(self, summary_record: sr.SummaryRecord) -> pb.SummaryRecord: + pb_summary_record = pb.SummaryRecord() + + for item in summary_record.update: + pb_summary_item = pb_summary_record.update.add() + key_length = len(item.key) + + assert key_length > 0 + + if key_length > 1: + pb_summary_item.nested_key.extend(item.key) + else: + pb_summary_item.key = item.key[0] + + path_from_root = ".".join(item.key) + json_value = self._summary_encode(item.value, path_from_root) + json_value, _ = json_friendly(json_value) # type: ignore + + pb_summary_item.value_json = json.dumps( + json_value, + cls=WandBJSONEncoderOld, + ) + + for item in summary_record.remove: + pb_summary_item = pb_summary_record.remove.add() + key_length = len(item.key) + + assert key_length > 0 + + if key_length > 1: + pb_summary_item.nested_key.extend(item.key) + else: + pb_summary_item.key = item.key[0] + + return pb_summary_record + + def publish_summary(self, summary_record: sr.SummaryRecord) -> None: + pb_summary_record = self._make_summary(summary_record) + self._publish_summary(pb_summary_record) + + @abstractmethod + def _publish_summary(self, summary: pb.SummaryRecord) -> None: + raise NotImplementedError + + def _make_files(self, files_dict: "FilesDict") -> pb.FilesRecord: + files = pb.FilesRecord() + for path, policy in files_dict["files"]: + f = files.files.add() + f.path = path + f.policy = file_policy_to_enum(policy) + return files + + def publish_files(self, files_dict: "FilesDict") -> None: + files = self._make_files(files_dict) + self._publish_files(files) + + @abstractmethod + def _publish_files(self, files: pb.FilesRecord) -> None: + raise NotImplementedError + + def publish_python_packages(self, working_set) -> None: + python_packages = pb.PythonPackagesRequest() + for pkg in working_set: + python_packages.package.add(name=pkg.key, version=pkg.version) + self._publish_python_packages(python_packages) + + @abstractmethod + def _publish_python_packages( + self, python_packages: pb.PythonPackagesRequest + ) -> None: + raise NotImplementedError + + def _make_artifact(self, artifact: "Artifact") -> pb.ArtifactRecord: + proto_artifact = pb.ArtifactRecord() + proto_artifact.type = artifact.type + proto_artifact.name = artifact.name + proto_artifact.client_id = artifact._client_id + proto_artifact.sequence_client_id = artifact._sequence_client_id + proto_artifact.digest = artifact.digest + if artifact.distributed_id: + proto_artifact.distributed_id = artifact.distributed_id + if artifact.description: + proto_artifact.description = artifact.description + if artifact.metadata: + proto_artifact.metadata = json.dumps(json_friendly_val(artifact.metadata)) + if artifact._base_id: + proto_artifact.base_id = artifact._base_id + + ttl_duration_input = artifact._ttl_duration_seconds_to_gql() + if ttl_duration_input: + proto_artifact.ttl_duration_seconds = ttl_duration_input + proto_artifact.incremental_beta1 = artifact.incremental + self._make_artifact_manifest(artifact.manifest, obj=proto_artifact.manifest) + return proto_artifact + + def _make_artifact_manifest( + self, + artifact_manifest: ArtifactManifest, + obj: 
Optional[pb.ArtifactManifest] = None, + ) -> pb.ArtifactManifest: + proto_manifest = obj or pb.ArtifactManifest() + proto_manifest.version = artifact_manifest.version() + proto_manifest.storage_policy = artifact_manifest.storage_policy.name() + + # Very large manifests need to be written to file to avoid protobuf size limits. + if len(artifact_manifest) > MANIFEST_FILE_SIZE_THRESHOLD: + path = self._write_artifact_manifest_file(artifact_manifest) + proto_manifest.manifest_file_path = path + return proto_manifest + + for k, v in artifact_manifest.storage_policy.config().items() or {}.items(): + cfg = proto_manifest.storage_policy_config.add() + cfg.key = k + cfg.value_json = json.dumps(v) + + for entry in sorted(artifact_manifest.entries.values(), key=lambda k: k.path): + proto_entry = proto_manifest.contents.add() + proto_entry.path = entry.path + proto_entry.digest = entry.digest + if entry.size: + proto_entry.size = entry.size + if entry.birth_artifact_id: + proto_entry.birth_artifact_id = entry.birth_artifact_id + if entry.ref: + proto_entry.ref = entry.ref + if entry.local_path: + proto_entry.local_path = entry.local_path + proto_entry.skip_cache = entry.skip_cache + for k, v in entry.extra.items(): + proto_extra = proto_entry.extra.add() + proto_extra.key = k + proto_extra.value_json = json.dumps(v) + return proto_manifest + + def _write_artifact_manifest_file(self, manifest: ArtifactManifest) -> str: + manifest_dir = Path(get_staging_dir()) / "artifact_manifests" + manifest_dir.mkdir(parents=True, exist_ok=True) + # It would be simpler to use `manifest.to_json()`, but that gets very slow for + # large manifests since it encodes the whole thing as a single JSON object. + filename = f"{time.time()}_{token_hex(8)}.manifest_contents.jl.gz" + manifest_file_path = manifest_dir / filename + with gzip.open(manifest_file_path, mode="wt", compresslevel=1) as f: + for entry in manifest.entries.values(): + f.write(f"{json.dumps(entry.to_json())}\n") + return str(manifest_file_path) + + def deliver_link_artifact( + self, + run: "Run", + artifact: "Artifact", + portfolio_name: str, + aliases: Iterable[str], + entity: Optional[str] = None, + project: Optional[str] = None, + ) -> MailboxHandle: + link_artifact = pb.LinkArtifactRequest() + if artifact.is_draft(): + link_artifact.client_id = artifact._client_id + else: + link_artifact.server_id = artifact.id if artifact.id else "" + link_artifact.portfolio_name = portfolio_name + link_artifact.portfolio_entity = entity or run.entity + link_artifact.portfolio_project = project or run.project + link_artifact.portfolio_aliases.extend(aliases) + + return self._deliver_link_artifact(link_artifact) + + @abstractmethod + def _deliver_link_artifact( + self, link_artifact: pb.LinkArtifactRequest + ) -> MailboxHandle: + raise NotImplementedError + + @staticmethod + def _make_partial_source_str( + source: Any, job_info: Dict[str, Any], metadata: Dict[str, Any] + ) -> str: + """Construct use_artifact.partial.source_info.source as str.""" + source_type = job_info.get("source_type", "").strip() + if source_type == "artifact": + info_source = job_info.get("source", {}) + source.artifact.artifact = info_source.get("artifact", "") + source.artifact.entrypoint.extend(info_source.get("entrypoint", [])) + source.artifact.notebook = info_source.get("notebook", False) + build_context = info_source.get("build_context") + if build_context: + source.artifact.build_context = build_context + dockerfile = info_source.get("dockerfile") + if dockerfile: + 
source.artifact.dockerfile = dockerfile + elif source_type == "repo": + source.git.git_info.remote = metadata.get("git", {}).get("remote", "") + source.git.git_info.commit = metadata.get("git", {}).get("commit", "") + source.git.entrypoint.extend(metadata.get("entrypoint", [])) + source.git.notebook = metadata.get("notebook", False) + build_context = metadata.get("build_context") + if build_context: + source.git.build_context = build_context + dockerfile = metadata.get("dockerfile") + if dockerfile: + source.git.dockerfile = dockerfile + elif source_type == "image": + source.image.image = metadata.get("docker", "") + else: + raise ValueError("Invalid source type") + + source_str: str = source.SerializeToString() + return source_str + + def _make_proto_use_artifact( + self, + use_artifact: pb.UseArtifactRecord, + job_name: str, + job_info: Dict[str, Any], + metadata: Dict[str, Any], + ) -> pb.UseArtifactRecord: + use_artifact.partial.job_name = job_name + use_artifact.partial.source_info._version = job_info.get("_version", "") + use_artifact.partial.source_info.source_type = job_info.get("source_type", "") + use_artifact.partial.source_info.runtime = job_info.get("runtime", "") + + src_str = self._make_partial_source_str( + source=use_artifact.partial.source_info.source, + job_info=job_info, + metadata=metadata, + ) + use_artifact.partial.source_info.source.ParseFromString(src_str) # type: ignore[arg-type] + + return use_artifact + + def publish_use_artifact( + self, + artifact: "Artifact", + ) -> None: + assert artifact.id is not None, "Artifact must have an id" + + use_artifact = pb.UseArtifactRecord( + id=artifact.id, + type=artifact.type, + name=artifact.name, + ) + + # TODO(gst): move to internal process + if "_partial" in artifact.metadata: + # Download source info from logged partial job artifact + job_info = {} + try: + path = artifact.get_entry("wandb-job.json").download() + with open(path) as f: + job_info = json.load(f) + + except Exception as e: + logger.warning( + f"Failed to download partial job info from artifact {artifact}, : {e}" + ) + termwarn( + f"Failed to download partial job info from artifact {artifact}, : {e}" + ) + return + + try: + use_artifact = self._make_proto_use_artifact( + use_artifact=use_artifact, + job_name=artifact.name, + job_info=job_info, + metadata=artifact.metadata, + ) + except Exception as e: + logger.warning(f"Failed to construct use artifact proto: {e}") + termwarn(f"Failed to construct use artifact proto: {e}") + return + + self._publish_use_artifact(use_artifact) + + @abstractmethod + def _publish_use_artifact(self, proto_artifact: pb.UseArtifactRecord) -> None: + raise NotImplementedError + + def communicate_artifact( + self, + run: "Run", + artifact: "Artifact", + aliases: Iterable[str], + tags: Optional[Iterable[str]] = None, + history_step: Optional[int] = None, + is_user_created: bool = False, + use_after_commit: bool = False, + finalize: bool = True, + ) -> MessageFuture: + proto_run = self._make_run(run) + proto_artifact = self._make_artifact(artifact) + proto_artifact.run_id = proto_run.run_id + proto_artifact.project = proto_run.project + proto_artifact.entity = proto_run.entity + proto_artifact.user_created = is_user_created + proto_artifact.use_after_commit = use_after_commit + proto_artifact.finalize = finalize + + proto_artifact.aliases.extend(aliases or []) + proto_artifact.tags.extend(tags or []) + + log_artifact = pb.LogArtifactRequest() + log_artifact.artifact.CopyFrom(proto_artifact) + if history_step is not None: + 
log_artifact.history_step = history_step + log_artifact.staging_dir = get_staging_dir() + resp = self._communicate_artifact(log_artifact) + return resp + + @abstractmethod + def _communicate_artifact( + self, log_artifact: pb.LogArtifactRequest + ) -> MessageFuture: + raise NotImplementedError + + def deliver_download_artifact( + self, + artifact_id: str, + download_root: str, + allow_missing_references: bool, + skip_cache: bool, + path_prefix: Optional[str], + ) -> MailboxHandle: + download_artifact = pb.DownloadArtifactRequest() + download_artifact.artifact_id = artifact_id + download_artifact.download_root = download_root + download_artifact.allow_missing_references = allow_missing_references + download_artifact.skip_cache = skip_cache + download_artifact.path_prefix = path_prefix or "" + resp = self._deliver_download_artifact(download_artifact) + return resp + + @abstractmethod + def _deliver_download_artifact( + self, download_artifact: pb.DownloadArtifactRequest + ) -> MailboxHandle: + raise NotImplementedError + + def publish_artifact( + self, + run: "Run", + artifact: "Artifact", + aliases: Iterable[str], + tags: Optional[Iterable[str]] = None, + is_user_created: bool = False, + use_after_commit: bool = False, + finalize: bool = True, + ) -> None: + proto_run = self._make_run(run) + proto_artifact = self._make_artifact(artifact) + proto_artifact.run_id = proto_run.run_id + proto_artifact.project = proto_run.project + proto_artifact.entity = proto_run.entity + proto_artifact.user_created = is_user_created + proto_artifact.use_after_commit = use_after_commit + proto_artifact.finalize = finalize + proto_artifact.aliases.extend(aliases or []) + proto_artifact.tags.extend(tags or []) + self._publish_artifact(proto_artifact) + + @abstractmethod + def _publish_artifact(self, proto_artifact: pb.ArtifactRecord) -> None: + raise NotImplementedError + + def publish_tbdata(self, log_dir: str, save: bool, root_logdir: str = "") -> None: + tbrecord = pb.TBRecord() + tbrecord.log_dir = log_dir + tbrecord.save = save + tbrecord.root_dir = root_logdir + self._publish_tbdata(tbrecord) + + @abstractmethod + def _publish_tbdata(self, tbrecord: pb.TBRecord) -> None: + raise NotImplementedError + + @abstractmethod + def _publish_telemetry(self, telem: tpb.TelemetryRecord) -> None: + raise NotImplementedError + + def publish_partial_history( + self, + data: dict, + user_step: int, + step: Optional[int] = None, + flush: Optional[bool] = None, + publish_step: bool = True, + run: Optional["Run"] = None, + ) -> None: + run = run or self._run + + data = history_dict_to_json(run, data, step=user_step, ignore_copy_err=True) + data.pop("_step", None) + + # add timestamp to the history request, if not already present + # the timestamp might come from the tensorboard log logic + if "_timestamp" not in data: + data["_timestamp"] = time.time() + + partial_history = pb.PartialHistoryRequest() + for k, v in data.items(): + item = partial_history.item.add() + item.key = k + item.value_json = json_dumps_safer_history(v) + + if publish_step and step is not None: + partial_history.step.num = step + if flush is not None: + partial_history.action.flush = flush + self._publish_partial_history(partial_history) + + @abstractmethod + def _publish_partial_history(self, history: pb.PartialHistoryRequest) -> None: + raise NotImplementedError + + def publish_history( + self, + data: dict, + step: Optional[int] = None, + run: Optional["Run"] = None, + publish_step: bool = True, + ) -> None: + run = run or self._run + data = 
history_dict_to_json(run, data, step=step) + history = pb.HistoryRecord() + if publish_step: + assert step is not None + history.step.num = step + data.pop("_step", None) + for k, v in data.items(): + item = history.item.add() + item.key = k + item.value_json = json_dumps_safer_history(v) + self._publish_history(history) + + @abstractmethod + def _publish_history(self, history: pb.HistoryRecord) -> None: + raise NotImplementedError + + def publish_preempting(self) -> None: + preempt_rec = pb.RunPreemptingRecord() + self._publish_preempting(preempt_rec) + + @abstractmethod + def _publish_preempting(self, preempt_rec: pb.RunPreemptingRecord) -> None: + raise NotImplementedError + + def publish_output(self, name: str, data: str) -> None: + # from vendor.protobuf import google3.protobuf.timestamp + # ts = timestamp.Timestamp() + # ts.GetCurrentTime() + # now = datetime.now() + if name == "stdout": + otype = pb.OutputRecord.OutputType.STDOUT + elif name == "stderr": + otype = pb.OutputRecord.OutputType.STDERR + else: + # TODO(jhr): throw error? + print("unknown type") + o = pb.OutputRecord(output_type=otype, line=data) + o.timestamp.GetCurrentTime() + self._publish_output(o) + + @abstractmethod + def _publish_output(self, outdata: pb.OutputRecord) -> None: + raise NotImplementedError + + def publish_output_raw(self, name: str, data: str) -> None: + # from vendor.protobuf import google3.protobuf.timestamp + # ts = timestamp.Timestamp() + # ts.GetCurrentTime() + # now = datetime.now() + if name == "stdout": + otype = pb.OutputRawRecord.OutputType.STDOUT + elif name == "stderr": + otype = pb.OutputRawRecord.OutputType.STDERR + else: + # TODO(jhr): throw error? + print("unknown type") + o = pb.OutputRawRecord(output_type=otype, line=data) + o.timestamp.GetCurrentTime() + self._publish_output_raw(o) + + @abstractmethod + def _publish_output_raw(self, outdata: pb.OutputRawRecord) -> None: + raise NotImplementedError + + def publish_pause(self) -> None: + pause = pb.PauseRequest() + self._publish_pause(pause) + + @abstractmethod + def _publish_pause(self, pause: pb.PauseRequest) -> None: + raise NotImplementedError + + def publish_resume(self) -> None: + resume = pb.ResumeRequest() + self._publish_resume(resume) + + @abstractmethod + def _publish_resume(self, resume: pb.ResumeRequest) -> None: + raise NotImplementedError + + def publish_alert( + self, title: str, text: str, level: str, wait_duration: int + ) -> None: + proto_alert = pb.AlertRecord() + proto_alert.title = title + proto_alert.text = text + proto_alert.level = level + proto_alert.wait_duration = wait_duration + self._publish_alert(proto_alert) + + @abstractmethod + def _publish_alert(self, alert: pb.AlertRecord) -> None: + raise NotImplementedError + + def _make_exit(self, exit_code: Optional[int]) -> pb.RunExitRecord: + exit = pb.RunExitRecord() + if exit_code is not None: + exit.exit_code = exit_code + return exit + + def publish_exit(self, exit_code: Optional[int]) -> None: + exit_data = self._make_exit(exit_code) + self._publish_exit(exit_data) + + @abstractmethod + def _publish_exit(self, exit_data: pb.RunExitRecord) -> None: + raise NotImplementedError + + def publish_keepalive(self) -> None: + keepalive = pb.KeepaliveRequest() + self._publish_keepalive(keepalive) + + @abstractmethod + def _publish_keepalive(self, keepalive: pb.KeepaliveRequest) -> None: + raise NotImplementedError + + def publish_job_input( + self, + include_paths: List[List[str]], + exclude_paths: List[List[str]], + input_schema: Optional[dict], + run_config: 
bool = False, + file_path: str = "", + ): + """Publishes a request to add inputs to the job. + + If run_config is True, the wandb.config will be added as a job input. + If file_path is provided, the file at file_path will be added as a job + input. + + The paths provided as arguments are sequences of dictionary keys that + specify a path within the wandb.config. If a path is included, the + corresponding field will be treated as a job input. If a path is + excluded, the corresponding field will not be treated as a job input. + + Args: + include_paths: paths within config to include as job inputs. + exclude_paths: paths within config to exclude as job inputs. + input_schema: A JSON Schema describing which attributes will be + editable from the Launch drawer. + run_config: bool indicating whether wandb.config is the input source. + file_path: path to file to include as a job input. + """ + if run_config and file_path: + raise ValueError( + "run_config and file_path are mutually exclusive arguments." + ) + request = pb.JobInputRequest() + include_records = [pb.JobInputPath(path=path) for path in include_paths] + exclude_records = [pb.JobInputPath(path=path) for path in exclude_paths] + request.include_paths.extend(include_records) + request.exclude_paths.extend(exclude_records) + source = pb.JobInputSource( + run_config=pb.JobInputSource.RunConfigSource(), + ) + if run_config: + source.run_config.CopyFrom(pb.JobInputSource.RunConfigSource()) + else: + source.file.CopyFrom( + pb.JobInputSource.ConfigFileSource(path=file_path), + ) + request.input_source.CopyFrom(source) + if input_schema: + request.input_schema = json_dumps_safer(input_schema) + + return self._publish_job_input(request) + + @abstractmethod + def _publish_job_input(self, request: pb.JobInputRequest) -> MailboxHandle: + raise NotImplementedError + + def join(self) -> None: + # Drop indicates that the internal process has already been shutdown + if self._drop: + return + _ = self._communicate_shutdown() + + @abstractmethod + def _communicate_shutdown(self) -> None: + raise NotImplementedError + + def deliver_run(self, run: "Run") -> MailboxHandle: + run_record = self._make_run(run) + return self._deliver_run(run_record) + + def deliver_sync( + self, + start_offset: int, + final_offset: int, + entity: Optional[str] = None, + project: Optional[str] = None, + run_id: Optional[str] = None, + skip_output_raw: Optional[bool] = None, + ) -> MailboxHandle: + sync = pb.SyncRequest( + start_offset=start_offset, + final_offset=final_offset, + ) + if entity: + sync.overwrite.entity = entity + if project: + sync.overwrite.project = project + if run_id: + sync.overwrite.run_id = run_id + if skip_output_raw: + sync.skip.output_raw = skip_output_raw + return self._deliver_sync(sync) + + @abstractmethod + def _deliver_sync(self, sync: pb.SyncRequest) -> MailboxHandle: + raise NotImplementedError + + @abstractmethod + def _deliver_run(self, run: pb.RunRecord) -> MailboxHandle: + raise NotImplementedError + + def deliver_run_start(self, run_pb: pb.RunRecord) -> MailboxHandle: + run_start = pb.RunStartRequest() + run_start.run.CopyFrom(run_pb) + return self._deliver_run_start(run_start) + + @abstractmethod + def _deliver_run_start(self, run_start: pb.RunStartRequest) -> MailboxHandle: + raise NotImplementedError + + def deliver_attach(self, attach_id: str) -> MailboxHandle: + attach = pb.AttachRequest(attach_id=attach_id) + return self._deliver_attach(attach) + + @abstractmethod + def _deliver_attach(self, status: pb.AttachRequest) -> MailboxHandle: 
+ raise NotImplementedError + + def deliver_check_version( + self, current_version: Optional[str] = None + ) -> MailboxHandle: + check_version = pb.CheckVersionRequest() + if current_version: + check_version.current_version = current_version + return self._deliver_check_version(check_version) + + @abstractmethod + def _deliver_check_version( + self, check_version: pb.CheckVersionRequest + ) -> MailboxHandle: + raise NotImplementedError + + def deliver_stop_status(self) -> MailboxHandle: + status = pb.StopStatusRequest() + return self._deliver_stop_status(status) + + @abstractmethod + def _deliver_stop_status(self, status: pb.StopStatusRequest) -> MailboxHandle: + raise NotImplementedError + + def deliver_network_status(self) -> MailboxHandle: + status = pb.NetworkStatusRequest() + return self._deliver_network_status(status) + + @abstractmethod + def _deliver_network_status(self, status: pb.NetworkStatusRequest) -> MailboxHandle: + raise NotImplementedError + + def deliver_internal_messages(self) -> MailboxHandle: + internal_message = pb.InternalMessagesRequest() + return self._deliver_internal_messages(internal_message) + + @abstractmethod + def _deliver_internal_messages( + self, internal_message: pb.InternalMessagesRequest + ) -> MailboxHandle: + raise NotImplementedError + + def deliver_get_summary(self) -> MailboxHandle: + get_summary = pb.GetSummaryRequest() + return self._deliver_get_summary(get_summary) + + @abstractmethod + def _deliver_get_summary(self, get_summary: pb.GetSummaryRequest) -> MailboxHandle: + raise NotImplementedError + + def deliver_get_system_metrics(self) -> MailboxHandle: + get_summary = pb.GetSystemMetricsRequest() + return self._deliver_get_system_metrics(get_summary) + + @abstractmethod + def _deliver_get_system_metrics( + self, get_summary: pb.GetSystemMetricsRequest + ) -> MailboxHandle: + raise NotImplementedError + + def deliver_exit(self, exit_code: Optional[int]) -> MailboxHandle: + exit_data = self._make_exit(exit_code) + return self._deliver_exit(exit_data) + + @abstractmethod + def _deliver_exit(self, exit_data: pb.RunExitRecord) -> MailboxHandle: + raise NotImplementedError + + def deliver_poll_exit(self) -> MailboxHandle: + poll_exit = pb.PollExitRequest() + return self._deliver_poll_exit(poll_exit) + + @abstractmethod + def _deliver_poll_exit(self, poll_exit: pb.PollExitRequest) -> MailboxHandle: + raise NotImplementedError + + def deliver_request_server_info(self) -> MailboxHandle: + server_info = pb.ServerInfoRequest() + return self._deliver_request_server_info(server_info) + + @abstractmethod + def _deliver_request_server_info( + self, server_info: pb.ServerInfoRequest + ) -> MailboxHandle: + raise NotImplementedError + + def deliver_request_sampled_history(self) -> MailboxHandle: + sampled_history = pb.SampledHistoryRequest() + return self._deliver_request_sampled_history(sampled_history) + + @abstractmethod + def _deliver_request_sampled_history( + self, sampled_history: pb.SampledHistoryRequest + ) -> MailboxHandle: + raise NotImplementedError + + def deliver_request_run_status(self) -> MailboxHandle: + run_status = pb.RunStatusRequest() + return self._deliver_request_run_status(run_status) + + @abstractmethod + def _deliver_request_run_status( + self, run_status: pb.RunStatusRequest + ) -> MailboxHandle: + raise NotImplementedError diff --git a/parrot/lib/python3.10/site-packages/wandb/sdk/interface/interface_queue.py b/parrot/lib/python3.10/site-packages/wandb/sdk/interface/interface_queue.py new file mode 100644 index 
0000000000000000000000000000000000000000..d1392801ebe98ed45aca843cef0be77bcc898fbb --- /dev/null +++ b/parrot/lib/python3.10/site-packages/wandb/sdk/interface/interface_queue.py @@ -0,0 +1,59 @@ +"""InterfaceQueue - Derived from InterfaceShared using queues to send to internal thread. + +See interface.py for how interface classes relate to each other. + +""" + +import logging +from multiprocessing.process import BaseProcess +from typing import TYPE_CHECKING, Optional + +from ..lib import tracelog +from ..lib.mailbox import Mailbox +from .interface_shared import InterfaceShared +from .router_queue import MessageQueueRouter + +if TYPE_CHECKING: + from queue import Queue + + from wandb.proto import wandb_internal_pb2 as pb + + +logger = logging.getLogger("wandb") + + +class InterfaceQueue(InterfaceShared): + record_q: Optional["Queue[pb.Record]"] + result_q: Optional["Queue[pb.Result]"] + _mailbox: Optional[Mailbox] + + def __init__( + self, + record_q: Optional["Queue[pb.Record]"] = None, + result_q: Optional["Queue[pb.Result]"] = None, + process: Optional[BaseProcess] = None, + process_check: bool = True, + mailbox: Optional[Mailbox] = None, + ) -> None: + self.record_q = record_q + self.result_q = result_q + if self.record_q: + tracelog.annotate_queue(self.record_q, "record_q") + if self.result_q: + tracelog.annotate_queue(self.result_q, "result_q") + super().__init__(process=process, process_check=process_check, mailbox=mailbox) + + def _init_router(self) -> None: + if self.record_q and self.result_q: + self._router = MessageQueueRouter( + self.record_q, self.result_q, mailbox=self._mailbox + ) + + def _publish(self, record: "pb.Record", local: Optional[bool] = None) -> None: + if self._process_check and self._process and not self._process.is_alive(): + raise Exception("The wandb backend process has shutdown") + if local: + record.control.local = local + if self.record_q: + tracelog.log_message_queue(record, self.record_q) + self.record_q.put(record) diff --git a/parrot/lib/python3.10/site-packages/wandb/sdk/interface/interface_relay.py b/parrot/lib/python3.10/site-packages/wandb/sdk/interface/interface_relay.py new file mode 100644 index 0000000000000000000000000000000000000000..1e0f6d6bb3bc554bb1b513c043db252301d7e701 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/wandb/sdk/interface/interface_relay.py @@ -0,0 +1,53 @@ +"""InterfaceRelay - Derived from InterfaceQueue using RelayRouter to preserve uuid req/resp. + +See interface.py for how interface classes relate to each other. 
+ +""" + +import logging +from multiprocessing.process import BaseProcess +from typing import TYPE_CHECKING, Optional + +from wandb.proto import wandb_internal_pb2 as pb + +from ..lib.mailbox import Mailbox +from .interface_queue import InterfaceQueue +from .router_relay import MessageRelayRouter + +if TYPE_CHECKING: + from queue import Queue + + +logger = logging.getLogger("wandb") + + +class InterfaceRelay(InterfaceQueue): + _mailbox: Mailbox + relay_q: Optional["Queue[pb.Result]"] + + def __init__( + self, + mailbox: Mailbox, + record_q: Optional["Queue[pb.Record]"] = None, + result_q: Optional["Queue[pb.Result]"] = None, + relay_q: Optional["Queue[pb.Result]"] = None, + process: Optional[BaseProcess] = None, + process_check: bool = True, + ) -> None: + self.relay_q = relay_q + super().__init__( + record_q=record_q, + result_q=result_q, + process=process, + process_check=process_check, + mailbox=mailbox, + ) + + def _init_router(self) -> None: + if self.record_q and self.result_q and self.relay_q: + self._router = MessageRelayRouter( + request_queue=self.record_q, + response_queue=self.result_q, + relay_queue=self.relay_q, + mailbox=self._mailbox, + ) diff --git a/parrot/lib/python3.10/site-packages/wandb/sdk/interface/interface_shared.py b/parrot/lib/python3.10/site-packages/wandb/sdk/interface/interface_shared.py new file mode 100644 index 0000000000000000000000000000000000000000..d7c281e077e641327b16e6a883b50a7a4e06e46f --- /dev/null +++ b/parrot/lib/python3.10/site-packages/wandb/sdk/interface/interface_shared.py @@ -0,0 +1,549 @@ +"""InterfaceShared - Derived from InterfaceBase - shared with InterfaceQueue and InterfaceSock. + +See interface.py for how interface classes relate to each other. + +""" + +import logging +import time +from abc import abstractmethod +from multiprocessing.process import BaseProcess +from typing import Any, Optional, cast + +import wandb +from wandb.proto import wandb_internal_pb2 as pb +from wandb.proto import wandb_telemetry_pb2 as tpb +from wandb.util import json_dumps_safer, json_friendly + +from ..lib.mailbox import Mailbox, MailboxHandle +from .interface import InterfaceBase +from .message_future import MessageFuture +from .router import MessageRouter + +logger = logging.getLogger("wandb") + + +class InterfaceShared(InterfaceBase): + process: Optional[BaseProcess] + _process_check: bool + _router: Optional[MessageRouter] + _mailbox: Optional[Mailbox] + _transport_success_timestamp: float + _transport_failed: bool + + def __init__( + self, + process: Optional[BaseProcess] = None, + process_check: bool = True, + mailbox: Optional[Any] = None, + ) -> None: + super().__init__() + self._transport_success_timestamp = time.monotonic() + self._transport_failed = False + self._process = process + self._router = None + self._process_check = process_check + self._mailbox = mailbox + self._init_router() + + @abstractmethod + def _init_router(self) -> None: + raise NotImplementedError + + @property + def transport_failed(self) -> bool: + return self._transport_failed + + @property + def transport_success_timestamp(self) -> float: + return self._transport_success_timestamp + + def _transport_mark_failed(self) -> None: + self._transport_failed = True + + def _transport_mark_success(self) -> None: + self._transport_success_timestamp = time.monotonic() + + def _publish_output(self, outdata: pb.OutputRecord) -> None: + rec = pb.Record() + rec.output.CopyFrom(outdata) + self._publish(rec) + + def _publish_cancel(self, cancel: pb.CancelRequest) -> None: + rec = 
self._make_request(cancel=cancel) + self._publish(rec) + + def _publish_output_raw(self, outdata: pb.OutputRawRecord) -> None: + rec = pb.Record() + rec.output_raw.CopyFrom(outdata) + self._publish(rec) + + def _publish_tbdata(self, tbrecord: pb.TBRecord) -> None: + rec = self._make_record(tbrecord=tbrecord) + self._publish(rec) + + def _publish_partial_history( + self, partial_history: pb.PartialHistoryRequest + ) -> None: + rec = self._make_request(partial_history=partial_history) + self._publish(rec) + + def _publish_history(self, history: pb.HistoryRecord) -> None: + rec = self._make_record(history=history) + self._publish(rec) + + def _publish_preempting(self, preempt_rec: pb.RunPreemptingRecord) -> None: + rec = self._make_record(preempting=preempt_rec) + self._publish(rec) + + def _publish_telemetry(self, telem: tpb.TelemetryRecord) -> None: + rec = self._make_record(telemetry=telem) + self._publish(rec) + + def _publish_job_input(self, job_input: pb.JobInputRequest) -> MailboxHandle: + record = self._make_request(job_input=job_input) + return self._deliver_record(record) + + def _make_stats(self, stats_dict: dict) -> pb.StatsRecord: + stats = pb.StatsRecord() + stats.stats_type = pb.StatsRecord.StatsType.SYSTEM + stats.timestamp.GetCurrentTime() # todo: fix this, this is wrong :) + for k, v in stats_dict.items(): + item = stats.item.add() + item.key = k + item.value_json = json_dumps_safer(json_friendly(v)[0]) + return stats + + def _make_login(self, api_key: Optional[str] = None) -> pb.LoginRequest: + login = pb.LoginRequest() + if api_key: + login.api_key = api_key + return login + + def _make_request( # noqa: C901 + self, + login: Optional[pb.LoginRequest] = None, + get_summary: Optional[pb.GetSummaryRequest] = None, + pause: Optional[pb.PauseRequest] = None, + resume: Optional[pb.ResumeRequest] = None, + status: Optional[pb.StatusRequest] = None, + stop_status: Optional[pb.StopStatusRequest] = None, + internal_messages: Optional[pb.InternalMessagesRequest] = None, + network_status: Optional[pb.NetworkStatusRequest] = None, + poll_exit: Optional[pb.PollExitRequest] = None, + partial_history: Optional[pb.PartialHistoryRequest] = None, + sampled_history: Optional[pb.SampledHistoryRequest] = None, + run_start: Optional[pb.RunStartRequest] = None, + check_version: Optional[pb.CheckVersionRequest] = None, + log_artifact: Optional[pb.LogArtifactRequest] = None, + download_artifact: Optional[pb.DownloadArtifactRequest] = None, + link_artifact: Optional[pb.LinkArtifactRequest] = None, + defer: Optional[pb.DeferRequest] = None, + attach: Optional[pb.AttachRequest] = None, + server_info: Optional[pb.ServerInfoRequest] = None, + keepalive: Optional[pb.KeepaliveRequest] = None, + run_status: Optional[pb.RunStatusRequest] = None, + sender_mark: Optional[pb.SenderMarkRequest] = None, + sender_read: Optional[pb.SenderReadRequest] = None, + sync: Optional[pb.SyncRequest] = None, + status_report: Optional[pb.StatusReportRequest] = None, + cancel: Optional[pb.CancelRequest] = None, + summary_record: Optional[pb.SummaryRecordRequest] = None, + telemetry_record: Optional[pb.TelemetryRecordRequest] = None, + get_system_metrics: Optional[pb.GetSystemMetricsRequest] = None, + python_packages: Optional[pb.PythonPackagesRequest] = None, + job_input: Optional[pb.JobInputRequest] = None, + ) -> pb.Record: + request = pb.Request() + if login: + request.login.CopyFrom(login) + elif get_summary: + request.get_summary.CopyFrom(get_summary) + elif pause: + request.pause.CopyFrom(pause) + elif resume: + 
request.resume.CopyFrom(resume) + elif status: + request.status.CopyFrom(status) + elif stop_status: + request.stop_status.CopyFrom(stop_status) + elif internal_messages: + request.internal_messages.CopyFrom(internal_messages) + elif network_status: + request.network_status.CopyFrom(network_status) + elif poll_exit: + request.poll_exit.CopyFrom(poll_exit) + elif partial_history: + request.partial_history.CopyFrom(partial_history) + elif sampled_history: + request.sampled_history.CopyFrom(sampled_history) + elif run_start: + request.run_start.CopyFrom(run_start) + elif check_version: + request.check_version.CopyFrom(check_version) + elif log_artifact: + request.log_artifact.CopyFrom(log_artifact) + elif download_artifact: + request.download_artifact.CopyFrom(download_artifact) + elif link_artifact: + request.link_artifact.CopyFrom(link_artifact) + elif defer: + request.defer.CopyFrom(defer) + elif attach: + request.attach.CopyFrom(attach) + elif server_info: + request.server_info.CopyFrom(server_info) + elif keepalive: + request.keepalive.CopyFrom(keepalive) + elif run_status: + request.run_status.CopyFrom(run_status) + elif sender_mark: + request.sender_mark.CopyFrom(sender_mark) + elif sender_read: + request.sender_read.CopyFrom(sender_read) + elif cancel: + request.cancel.CopyFrom(cancel) + elif status_report: + request.status_report.CopyFrom(status_report) + elif summary_record: + request.summary_record.CopyFrom(summary_record) + elif telemetry_record: + request.telemetry_record.CopyFrom(telemetry_record) + elif get_system_metrics: + request.get_system_metrics.CopyFrom(get_system_metrics) + elif sync: + request.sync.CopyFrom(sync) + elif python_packages: + request.python_packages.CopyFrom(python_packages) + elif job_input: + request.job_input.CopyFrom(job_input) + else: + raise Exception("Invalid request") + record = self._make_record(request=request) + # All requests do not get persisted + record.control.local = True + if status_report: + record.control.flow_control = True + return record + + def _make_record( # noqa: C901 + self, + run: Optional[pb.RunRecord] = None, + config: Optional[pb.ConfigRecord] = None, + files: Optional[pb.FilesRecord] = None, + summary: Optional[pb.SummaryRecord] = None, + history: Optional[pb.HistoryRecord] = None, + stats: Optional[pb.StatsRecord] = None, + exit: Optional[pb.RunExitRecord] = None, + artifact: Optional[pb.ArtifactRecord] = None, + tbrecord: Optional[pb.TBRecord] = None, + alert: Optional[pb.AlertRecord] = None, + final: Optional[pb.FinalRecord] = None, + metric: Optional[pb.MetricRecord] = None, + header: Optional[pb.HeaderRecord] = None, + footer: Optional[pb.FooterRecord] = None, + request: Optional[pb.Request] = None, + telemetry: Optional[tpb.TelemetryRecord] = None, + preempting: Optional[pb.RunPreemptingRecord] = None, + use_artifact: Optional[pb.UseArtifactRecord] = None, + output: Optional[pb.OutputRecord] = None, + output_raw: Optional[pb.OutputRawRecord] = None, + ) -> pb.Record: + record = pb.Record() + if run: + record.run.CopyFrom(run) + elif config: + record.config.CopyFrom(config) + elif summary: + record.summary.CopyFrom(summary) + elif history: + record.history.CopyFrom(history) + elif files: + record.files.CopyFrom(files) + elif stats: + record.stats.CopyFrom(stats) + elif exit: + record.exit.CopyFrom(exit) + elif artifact: + record.artifact.CopyFrom(artifact) + elif tbrecord: + record.tbrecord.CopyFrom(tbrecord) + elif alert: + record.alert.CopyFrom(alert) + elif final: + record.final.CopyFrom(final) + elif header: + 
record.header.CopyFrom(header) + elif footer: + record.footer.CopyFrom(footer) + elif request: + record.request.CopyFrom(request) + elif telemetry: + record.telemetry.CopyFrom(telemetry) + elif metric: + record.metric.CopyFrom(metric) + elif preempting: + record.preempting.CopyFrom(preempting) + elif use_artifact: + record.use_artifact.CopyFrom(use_artifact) + elif output: + record.output.CopyFrom(output) + elif output_raw: + record.output_raw.CopyFrom(output_raw) + else: + raise Exception("Invalid record") + return record + + @abstractmethod + def _publish(self, record: pb.Record, local: Optional[bool] = None) -> None: + raise NotImplementedError + + def _communicate( + self, rec: pb.Record, timeout: Optional[int] = 30, local: Optional[bool] = None + ) -> Optional[pb.Result]: + return self._communicate_async(rec, local=local).get(timeout=timeout) + + def _communicate_async( + self, rec: pb.Record, local: Optional[bool] = None + ) -> MessageFuture: + assert self._router + if self._process_check and self._process and not self._process.is_alive(): + raise Exception("The wandb backend process has shutdown") + future = self._router.send_and_receive(rec, local=local) + return future + + def communicate_login( + self, api_key: Optional[str] = None, timeout: Optional[int] = 15 + ) -> pb.LoginResponse: + login = self._make_login(api_key) + rec = self._make_request(login=login) + result = self._communicate(rec, timeout=timeout) + if result is None: + # TODO: friendlier error message here + raise wandb.Error( + "Couldn't communicate with backend after {} seconds".format(timeout) + ) + login_response = result.response.login_response + assert login_response + return login_response + + def _publish_defer(self, state: "pb.DeferRequest.DeferState.V") -> None: + defer = pb.DeferRequest(state=state) + rec = self._make_request(defer=defer) + self._publish(rec, local=True) + + def publish_defer(self, state: int = 0) -> None: + self._publish_defer(cast("pb.DeferRequest.DeferState.V", state)) + + def _publish_header(self, header: pb.HeaderRecord) -> None: + rec = self._make_record(header=header) + self._publish(rec) + + def publish_footer(self) -> None: + footer = pb.FooterRecord() + rec = self._make_record(footer=footer) + self._publish(rec) + + def publish_final(self) -> None: + final = pb.FinalRecord() + rec = self._make_record(final=final) + self._publish(rec) + + def publish_login(self, api_key: Optional[str] = None) -> None: + login = self._make_login(api_key) + rec = self._make_request(login=login) + self._publish(rec) + + def _publish_pause(self, pause: pb.PauseRequest) -> None: + rec = self._make_request(pause=pause) + self._publish(rec) + + def _publish_resume(self, resume: pb.ResumeRequest) -> None: + rec = self._make_request(resume=resume) + self._publish(rec) + + def _publish_run(self, run: pb.RunRecord) -> None: + rec = self._make_record(run=run) + self._publish(rec) + + def _publish_config(self, cfg: pb.ConfigRecord) -> None: + rec = self._make_record(config=cfg) + self._publish(rec) + + def _publish_summary(self, summary: pb.SummaryRecord) -> None: + rec = self._make_record(summary=summary) + self._publish(rec) + + def _publish_metric(self, metric: pb.MetricRecord) -> None: + rec = self._make_record(metric=metric) + self._publish(rec) + + def publish_stats(self, stats_dict: dict) -> None: + stats = self._make_stats(stats_dict) + rec = self._make_record(stats=stats) + self._publish(rec) + + def _publish_python_packages( + self, python_packages: pb.PythonPackagesRequest + ) -> None: + rec = 
self._make_request(python_packages=python_packages) + self._publish(rec) + + def _publish_files(self, files: pb.FilesRecord) -> None: + rec = self._make_record(files=files) + self._publish(rec) + + def _publish_use_artifact(self, use_artifact: pb.UseArtifactRecord) -> Any: + rec = self._make_record(use_artifact=use_artifact) + self._publish(rec) + + def _communicate_artifact(self, log_artifact: pb.LogArtifactRequest) -> Any: + rec = self._make_request(log_artifact=log_artifact) + return self._communicate_async(rec) + + def _deliver_download_artifact( + self, download_artifact: pb.DownloadArtifactRequest + ) -> MailboxHandle: + rec = self._make_request(download_artifact=download_artifact) + return self._deliver_record(rec) + + def _deliver_link_artifact( + self, link_artifact: pb.LinkArtifactRequest + ) -> MailboxHandle: + rec = self._make_request(link_artifact=link_artifact) + return self._deliver_record(rec) + + def _publish_artifact(self, proto_artifact: pb.ArtifactRecord) -> None: + rec = self._make_record(artifact=proto_artifact) + self._publish(rec) + + def _publish_alert(self, proto_alert: pb.AlertRecord) -> None: + rec = self._make_record(alert=proto_alert) + self._publish(rec) + + def _deliver_status( + self, + status: pb.StatusRequest, + ) -> MailboxHandle: + req = self._make_request(status=status) + return self._deliver_record(req) + + def _publish_exit(self, exit_data: pb.RunExitRecord) -> None: + rec = self._make_record(exit=exit_data) + self._publish(rec) + + def _publish_keepalive(self, keepalive: pb.KeepaliveRequest) -> None: + record = self._make_request(keepalive=keepalive) + self._publish(record) + + def _communicate_shutdown(self) -> None: + # shutdown + request = pb.Request(shutdown=pb.ShutdownRequest()) + record = self._make_record(request=request) + _ = self._communicate(record) + + def _get_mailbox(self) -> Mailbox: + mailbox = self._mailbox + assert mailbox + return mailbox + + def _deliver_record(self, record: pb.Record) -> MailboxHandle: + mailbox = self._get_mailbox() + handle = mailbox._deliver_record(record, interface=self) + return handle + + def _deliver_run(self, run: pb.RunRecord) -> MailboxHandle: + record = self._make_record(run=run) + return self._deliver_record(record) + + def _deliver_sync(self, sync: pb.SyncRequest) -> MailboxHandle: + record = self._make_request(sync=sync) + return self._deliver_record(record) + + def _deliver_run_start(self, run_start: pb.RunStartRequest) -> MailboxHandle: + record = self._make_request(run_start=run_start) + return self._deliver_record(record) + + def _deliver_get_summary(self, get_summary: pb.GetSummaryRequest) -> MailboxHandle: + record = self._make_request(get_summary=get_summary) + return self._deliver_record(record) + + def _deliver_get_system_metrics( + self, get_system_metrics: pb.GetSystemMetricsRequest + ) -> MailboxHandle: + record = self._make_request(get_system_metrics=get_system_metrics) + return self._deliver_record(record) + + def _deliver_exit(self, exit_data: pb.RunExitRecord) -> MailboxHandle: + record = self._make_record(exit=exit_data) + return self._deliver_record(record) + + def _deliver_poll_exit(self, poll_exit: pb.PollExitRequest) -> MailboxHandle: + record = self._make_request(poll_exit=poll_exit) + return self._deliver_record(record) + + def _deliver_stop_status(self, stop_status: pb.StopStatusRequest) -> MailboxHandle: + record = self._make_request(stop_status=stop_status) + return self._deliver_record(record) + + def _deliver_attach(self, attach: pb.AttachRequest) -> MailboxHandle: + 
record = self._make_request(attach=attach) + return self._deliver_record(record) + + def _deliver_check_version( + self, check_version: pb.CheckVersionRequest + ) -> MailboxHandle: + record = self._make_request(check_version=check_version) + return self._deliver_record(record) + + def _deliver_network_status( + self, network_status: pb.NetworkStatusRequest + ) -> MailboxHandle: + record = self._make_request(network_status=network_status) + return self._deliver_record(record) + + def _deliver_internal_messages( + self, internal_message: pb.InternalMessagesRequest + ) -> MailboxHandle: + record = self._make_request(internal_messages=internal_message) + return self._deliver_record(record) + + def _deliver_request_server_info( + self, server_info: pb.ServerInfoRequest + ) -> MailboxHandle: + record = self._make_request(server_info=server_info) + return self._deliver_record(record) + + def _deliver_request_sampled_history( + self, sampled_history: pb.SampledHistoryRequest + ) -> MailboxHandle: + record = self._make_request(sampled_history=sampled_history) + return self._deliver_record(record) + + def _deliver_request_run_status( + self, run_status: pb.RunStatusRequest + ) -> MailboxHandle: + record = self._make_request(run_status=run_status) + return self._deliver_record(record) + + def _transport_keepalive_failed(self, keepalive_interval: int = 5) -> bool: + if self._transport_failed: + return True + + now = time.monotonic() + if now < self._transport_success_timestamp + keepalive_interval: + return False + + try: + self.publish_keepalive() + except Exception: + self._transport_mark_failed() + else: + self._transport_mark_success() + return self._transport_failed + + def join(self) -> None: + super().join() + + if self._router: + self._router.join() diff --git a/parrot/lib/python3.10/site-packages/wandb/sdk/interface/interface_sock.py b/parrot/lib/python3.10/site-packages/wandb/sdk/interface/interface_sock.py new file mode 100644 index 0000000000000000000000000000000000000000..3ba47c4bdece1ae341dcd685ceba7de164036435 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/wandb/sdk/interface/interface_sock.py @@ -0,0 +1,61 @@ +"""InterfaceSock - Derived from InterfaceShared using a socket to send to internal thread. + +See interface.py for how interface classes relate to each other. 
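+
+Editor's note (addition, not upstream text): a rough usage sketch. Here
+``sock_client``, ``mailbox``, ``run`` and ``run_record`` (a ``pb.RunRecord``)
+are assumed to be created by the surrounding service code::
+
+    interface = InterfaceSock(sock_client, mailbox)
+    interface._hack_set_run(run)                 # records get stamped with run._run_id
+    handle = interface._deliver_run(run_record)  # -> MailboxHandle
+
+Publishes go straight over the socket via ``send_record_publish``; responses
+are matched back up by ``MessageSockRouter`` together with the shared ``Mailbox``.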
+ +""" + +import logging +from typing import TYPE_CHECKING, Any, Optional + +from ..lib.mailbox import Mailbox +from ..lib.sock_client import SockClient +from .interface_shared import InterfaceShared +from .message_future import MessageFuture +from .router_sock import MessageSockRouter + +if TYPE_CHECKING: + from wandb.proto import wandb_internal_pb2 as pb + + from ..wandb_run import Run + + +logger = logging.getLogger("wandb") + + +class InterfaceSock(InterfaceShared): + _stream_id: Optional[str] + _sock_client: SockClient + _mailbox: Mailbox + + def __init__(self, sock_client: SockClient, mailbox: Mailbox) -> None: + # _sock_client is used when abstract method _init_router() is called by constructor + self._sock_client = sock_client + super().__init__(mailbox=mailbox) + self._process_check = False + self._stream_id = None + + def _init_router(self) -> None: + self._router = MessageSockRouter(self._sock_client, mailbox=self._mailbox) + + def _hack_set_run(self, run: "Run") -> None: + super()._hack_set_run(run) + assert run._run_id + self._stream_id = run._run_id + + def _assign(self, record: Any) -> None: + assert self._stream_id + record._info.stream_id = self._stream_id + + def _publish(self, record: "pb.Record", local: Optional[bool] = None) -> None: + self._assign(record) + self._sock_client.send_record_publish(record) + + def _communicate_async( + self, rec: "pb.Record", local: Optional[bool] = None + ) -> MessageFuture: + self._assign(rec) + assert self._router + if self._process_check and self._process and not self._process.is_alive(): + raise Exception("The wandb backend process has shutdown") + future = self._router.send_and_receive(rec, local=local) + return future diff --git a/parrot/lib/python3.10/site-packages/wandb/sdk/interface/message_future.py b/parrot/lib/python3.10/site-packages/wandb/sdk/interface/message_future.py new file mode 100644 index 0000000000000000000000000000000000000000..3cc3860e40db9180e1f1101de4e100b14f3f1298 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/wandb/sdk/interface/message_future.py @@ -0,0 +1,27 @@ +"""MessageFuture - represents a message result of an asynchronous operation. + +Base class MessageFuture for MessageFutureObject and MessageFuturePoll + +""" + +import threading +from abc import abstractmethod +from typing import Optional + +from wandb.proto import wandb_internal_pb2 as pb + + +class MessageFuture: + _object: Optional[pb.Result] + + def __init__(self) -> None: + self._object = None + self._object_ready = threading.Event() + + def _set_object(self, obj: pb.Result) -> None: + self._object = obj + self._object_ready.set() + + @abstractmethod + def get(self, timeout: Optional[int] = None) -> Optional[pb.Result]: + raise NotImplementedError diff --git a/parrot/lib/python3.10/site-packages/wandb/sdk/interface/message_future_poll.py b/parrot/lib/python3.10/site-packages/wandb/sdk/interface/message_future_poll.py new file mode 100644 index 0000000000000000000000000000000000000000..2c41b381b31d6b6c13791f268f2c37b68b8fe2c3 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/wandb/sdk/interface/message_future_poll.py @@ -0,0 +1,50 @@ +"""MessageFuturePoll - Derived from MessageFuture but implementing polling loop. + +MessageFuture represents a message result of an asynchronous operation. + +MessageFuturePoll implements a polling loop to periodically query for a +completed async operation. 
+ +""" + +import time +from typing import Any, Optional + +from wandb.proto import wandb_internal_pb2 as pb + +from .message_future import MessageFuture + + +class MessageFuturePoll(MessageFuture): + _fn: Any + _xid: str + + def __init__(self, fn: Any, xid: str) -> None: + super().__init__() + self._fn = fn + self._xid = xid + + def get(self, timeout: Optional[int] = None) -> Optional[pb.Result]: + self._poll(timeout=timeout) + if self._object_ready.is_set(): + return self._object + return None + + def _poll(self, timeout: Optional[int] = None) -> None: + if self._object_ready.is_set(): + return + done = False + start_time = time.time() + sleep_time = 0.5 + while not done: + result = self._fn(xid=self._xid) + if result: + self._set_object(result) + done = True + continue + now_time = time.time() + if timeout and start_time - now_time > timeout: + done = True + continue + time.sleep(sleep_time) + sleep_time = min(sleep_time * 2, 5) diff --git a/parrot/lib/python3.10/site-packages/wandb/sdk/interface/router.py b/parrot/lib/python3.10/site-packages/wandb/sdk/interface/router.py new file mode 100644 index 0000000000000000000000000000000000000000..d73b5049b4d30ebca1ce9ea4fb0dded26c41fa49 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/wandb/sdk/interface/router.py @@ -0,0 +1,118 @@ +"""Router - handle message router (base class). + +Router to manage responses. + +""" + +import logging +import threading +import uuid +from abc import abstractmethod +from typing import TYPE_CHECKING, Dict, Optional + +from ..lib import mailbox, tracelog +from .message_future import MessageFuture + +if TYPE_CHECKING: + from queue import Queue + + from wandb.proto import wandb_internal_pb2 as pb + + +logger = logging.getLogger("wandb") + + +class MessageRouterClosedError(Exception): + """Router has been closed.""" + + pass + + +class MessageFutureObject(MessageFuture): + def __init__(self) -> None: + super().__init__() + + def get(self, timeout: Optional[int] = None) -> Optional["pb.Result"]: + is_set = self._object_ready.wait(timeout) + if is_set and self._object: + return self._object + return None + + +class MessageRouter: + _pending_reqs: Dict[str, MessageFutureObject] + _request_queue: "Queue[pb.Record]" + _response_queue: "Queue[pb.Result]" + _mailbox: Optional[mailbox.Mailbox] + + def __init__(self, mailbox: Optional[mailbox.Mailbox] = None) -> None: + self._mailbox = mailbox + self._pending_reqs = {} + self._lock = threading.Lock() + + self._join_event = threading.Event() + self._thread = threading.Thread(target=self.message_loop) + self._thread.name = "MsgRouterThr" + self._thread.daemon = True + self._thread.start() + + @abstractmethod + def _read_message(self) -> Optional["pb.Result"]: + raise NotImplementedError + + @abstractmethod + def _send_message(self, record: "pb.Record") -> None: + raise NotImplementedError + + def message_loop(self) -> None: + while not self._join_event.is_set(): + try: + msg = self._read_message() + except EOFError: + # On abnormal shutdown the queue will be destroyed underneath + # resulting in EOFError. message_loop needs to exit.. 
+ logger.warning("EOFError seen in message_loop") + break + except MessageRouterClosedError: + logger.warning("message_loop has been closed") + break + if not msg: + continue + self._handle_msg_rcv(msg) + + def send_and_receive( + self, rec: "pb.Record", local: Optional[bool] = None + ) -> MessageFuture: + rec.control.req_resp = True + if local: + rec.control.local = local + rec.uuid = uuid.uuid4().hex + future = MessageFutureObject() + with self._lock: + self._pending_reqs[rec.uuid] = future + + self._send_message(rec) + + return future + + def join(self) -> None: + self._join_event.set() + self._thread.join() + + def _handle_msg_rcv(self, msg: "pb.Result") -> None: + # deliver mailbox addressed messages to mailbox + if self._mailbox and msg.control.mailbox_slot: + self._mailbox.deliver(msg) + return + with self._lock: + future = self._pending_reqs.pop(msg.uuid, None) + if future is None: + # TODO (cvp): saw this in tests, seemed benign enough to ignore, but + # could point to other issues. + if msg.uuid != "": + tracelog.log_message_assert(msg) + logger.warning( + "No listener found for msg with uuid %s (%s)", msg.uuid, msg + ) + return + future._set_object(msg) diff --git a/parrot/lib/python3.10/site-packages/wandb/sdk/interface/router_queue.py b/parrot/lib/python3.10/site-packages/wandb/sdk/interface/router_queue.py new file mode 100644 index 0000000000000000000000000000000000000000..509d46afa081aa8631d9f67701e76ce479cb966d --- /dev/null +++ b/parrot/lib/python3.10/site-packages/wandb/sdk/interface/router_queue.py @@ -0,0 +1,44 @@ +"""Router - handle message router (queue). + +Router to manage responses from a queue. + +""" + +import queue +from typing import TYPE_CHECKING, Optional + +from ..lib import tracelog +from ..lib.mailbox import Mailbox +from .router import MessageRouter + +if TYPE_CHECKING: + from queue import Queue + + from wandb.proto import wandb_internal_pb2 as pb + + +class MessageQueueRouter(MessageRouter): + _request_queue: "Queue[pb.Record]" + _response_queue: "Queue[pb.Result]" + + def __init__( + self, + request_queue: "Queue[pb.Record]", + response_queue: "Queue[pb.Result]", + mailbox: Optional[Mailbox] = None, + ) -> None: + self._request_queue = request_queue + self._response_queue = response_queue + super().__init__(mailbox=mailbox) + + def _read_message(self) -> Optional["pb.Result"]: + try: + msg = self._response_queue.get(timeout=1) + except queue.Empty: + return None + tracelog.log_message_dequeue(msg, self._response_queue) + return msg + + def _send_message(self, record: "pb.Record") -> None: + tracelog.log_message_queue(record, self._request_queue) + self._request_queue.put(record) diff --git a/parrot/lib/python3.10/site-packages/wandb/sdk/interface/router_relay.py b/parrot/lib/python3.10/site-packages/wandb/sdk/interface/router_relay.py new file mode 100644 index 0000000000000000000000000000000000000000..e26022f4cea79555cbf406b8e5c0a9a8fdbe9bec --- /dev/null +++ b/parrot/lib/python3.10/site-packages/wandb/sdk/interface/router_relay.py @@ -0,0 +1,39 @@ +"""Router - handle message router (relay). + +Router to manage responses from a queue with relay. 
+ +""" + +from typing import TYPE_CHECKING + +from ..lib import tracelog +from ..lib.mailbox import Mailbox +from .router_queue import MessageQueueRouter + +if TYPE_CHECKING: + from queue import Queue + + from wandb.proto import wandb_internal_pb2 as pb + + +class MessageRelayRouter(MessageQueueRouter): + _relay_queue: "Queue[pb.Result]" + + def __init__( + self, + request_queue: "Queue[pb.Record]", + response_queue: "Queue[pb.Result]", + relay_queue: "Queue[pb.Result]", + mailbox: Mailbox, + ) -> None: + self._relay_queue = relay_queue + super().__init__( + request_queue=request_queue, response_queue=response_queue, mailbox=mailbox + ) + + def _handle_msg_rcv(self, msg: "pb.Result") -> None: + if msg.control.relay_id: + tracelog.log_message_queue(msg, self._relay_queue) + self._relay_queue.put(msg) + return + super()._handle_msg_rcv(msg) diff --git a/parrot/lib/python3.10/site-packages/wandb/sdk/interface/router_sock.py b/parrot/lib/python3.10/site-packages/wandb/sdk/interface/router_sock.py new file mode 100644 index 0000000000000000000000000000000000000000..aabbaf31d3b8f00e9ff8937fe55c5af5bfdb21fb --- /dev/null +++ b/parrot/lib/python3.10/site-packages/wandb/sdk/interface/router_sock.py @@ -0,0 +1,36 @@ +"""Router - handle message router (sock). + +Router to manage responses from a socket client. + +""" + +from typing import TYPE_CHECKING, Optional + +from ..lib.mailbox import Mailbox +from ..lib.sock_client import SockClient, SockClientClosedError +from .router import MessageRouter, MessageRouterClosedError + +if TYPE_CHECKING: + from wandb.proto import wandb_internal_pb2 as pb + + +class MessageSockRouter(MessageRouter): + _sock_client: SockClient + _mailbox: Mailbox + + def __init__(self, sock_client: SockClient, mailbox: Mailbox) -> None: + self._sock_client = sock_client + super().__init__(mailbox=mailbox) + + def _read_message(self) -> Optional["pb.Result"]: + try: + resp = self._sock_client.read_server_response(timeout=1) + except SockClientClosedError: + raise MessageRouterClosedError + if not resp: + return None + msg = resp.result_communicate + return msg + + def _send_message(self, record: "pb.Record") -> None: + self._sock_client.send_record_communicate(record) diff --git a/parrot/lib/python3.10/site-packages/wandb/sdk/interface/summary_record.py b/parrot/lib/python3.10/site-packages/wandb/sdk/interface/summary_record.py new file mode 100644 index 0000000000000000000000000000000000000000..2050a39080759ef12c54364aaf76b1dee6ab7e1d --- /dev/null +++ b/parrot/lib/python3.10/site-packages/wandb/sdk/interface/summary_record.py @@ -0,0 +1,67 @@ +"""Summary Record. + +This module implements a summary record as an intermediate format before being converted +to a protocol buffer. 
+""" + +import typing as t + + +class SummaryRecord: + """Encodes a diff -- analogous to the SummaryRecord protobuf message.""" + + update: t.List["SummaryItem"] + remove: t.List["SummaryItem"] + + def __init__(self): + self.update = [] + self.remove = [] + + def __str__(self): + s = "SummaryRecord:\n Update:\n " + s += "\n ".join([str(item) for item in self.update]) + s += "\n Remove:\n " + s += "\n ".join([str(item) for item in self.remove]) + s += "\n" + return s + + __repr__ = __str__ + + def _add_next_parent(self, parent_key): + with_next_parent = SummaryRecord() + with_next_parent.update = [ + item._add_next_parent(parent_key) for item in self.update + ] + with_next_parent.remove = [ + item._add_next_parent(parent_key) for item in self.remove + ] + + return with_next_parent + + +class SummaryItem: + """Analogous to the SummaryItem protobuf message.""" + + key: t.Tuple[str] + value: t.Any + + def __init__(self): + self.key = tuple() + self.value = None + + def __str__(self): + return "SummaryItem: key: " + str(self.key) + " value: " + str(self.value) + + __repr__ = __str__ + + def _add_next_parent(self, parent_key): + with_next_parent = SummaryItem() + + key = self.key + if not isinstance(key, tuple): + key = (key,) + + with_next_parent.key = (parent_key,) + self.key + with_next_parent.value = self.value + + return with_next_parent diff --git a/parrot/lib/python3.10/site-packages/wandb/sdk/internal/__init__.py b/parrot/lib/python3.10/site-packages/wandb/sdk/internal/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/parrot/lib/python3.10/site-packages/wandb/sdk/internal/datastore.py b/parrot/lib/python3.10/site-packages/wandb/sdk/internal/datastore.py new file mode 100644 index 0000000000000000000000000000000000000000..270404c62df2638c0b21536d1f36d57b911aeb68 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/wandb/sdk/internal/datastore.py @@ -0,0 +1,297 @@ +"""leveldb log datastore. + +Format is described at: + https://github.com/google/leveldb/blob/master/doc/log_format.md + +block := record* trailer? 
+record := + checksum: uint32 // crc32c of type and data[] ; little-endian + length: uint16 // little-endian + type: uint8 // One of FULL, FIRST, MIDDLE, LAST + data: uint8[length] + +header := + ident: char[4] + magic: uint16 + version: uint8 +""" + +# TODO: possibly restructure code by porting the C++ or go implementation + +import logging +import os +import struct +import zlib +from typing import TYPE_CHECKING, Optional, Tuple + +import wandb + +if TYPE_CHECKING: + from typing import IO, Any + + from wandb.proto.wandb_internal_pb2 import Record + +logger = logging.getLogger(__name__) + +LEVELDBLOG_HEADER_LEN = 7 +LEVELDBLOG_BLOCK_LEN = 32768 +LEVELDBLOG_DATA_LEN = LEVELDBLOG_BLOCK_LEN - LEVELDBLOG_HEADER_LEN + +LEVELDBLOG_FULL = 1 +LEVELDBLOG_FIRST = 2 +LEVELDBLOG_MIDDLE = 3 +LEVELDBLOG_LAST = 4 + +LEVELDBLOG_HEADER_IDENT = ":W&B" +LEVELDBLOG_HEADER_MAGIC = ( + 0xBEE1 # zlib.crc32(bytes("Weights & Biases", 'iso8859-1')) & 0xffff +) +LEVELDBLOG_HEADER_VERSION = 0 + +try: + bytes("", "ascii") + + def strtobytes(x): + """Strtobytes.""" + return bytes(x, "iso8859-1") + + # def bytestostr(x): + # return str(x, 'iso8859-1') + +except Exception: + strtobytes = str + # bytestostr = str + + +class DataStore: + _index: int + _flush_offset: int + + def __init__(self) -> None: + self._opened_for_scan = False + self._fp: Optional[IO[Any]] = None + self._index = 0 + self._flush_offset = 0 + self._size_bytes = 0 + + self._crc = [0] * (LEVELDBLOG_LAST + 1) + for x in range(1, LEVELDBLOG_LAST + 1): + self._crc[x] = zlib.crc32(strtobytes(chr(x))) & 0xFFFFFFFF + + assert ( + wandb._assert_is_internal_process # type: ignore + ), "DataStore can only be used in the internal process" + + def open_for_write(self, fname: str) -> None: + self._fname = fname + logger.info("open: %s", fname) + open_flags = "xb" + self._fp = open(fname, open_flags) + self._write_header() + + def open_for_append(self, fname): + # TODO: implement + self._fname = fname + logger.info("open: %s", fname) + self._fp = open(fname, "wb") + # do something with _index + + def open_for_scan(self, fname): + self._fname = fname + logger.info("open for scan: %s", fname) + self._fp = open(fname, "r+b") + self._index = 0 + self._size_bytes = os.stat(fname).st_size + self._opened_for_scan = True + self._read_header() + + def seek(self, offset: int) -> None: + self._fp.seek(offset) # type: ignore + self._index = offset + + def get_offset(self) -> int: + offset = self._fp.tell() # type: ignore + return offset + + def in_last_block(self): + """Determine if we're in the last block to handle in-progress writes.""" + return self._index > self._size_bytes - LEVELDBLOG_DATA_LEN + + def scan_record(self): + assert self._opened_for_scan, "file not open for scanning" + # TODO(jhr): handle some assertions as file corruption issues + # assume we have enough room to read header, checked by caller? 
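+        # Editor's note (added comment): per the module docstring, the 7-byte
+        # record header is checksum (uint32, little-endian), length (uint16,
+        # little-endian) and type (uint8), i.e. roughly
+        # struct.unpack("<IHB", header) for the read below.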
+ header = self._fp.read(LEVELDBLOG_HEADER_LEN) + if len(header) == 0: + return None + assert ( + len(header) == LEVELDBLOG_HEADER_LEN + ), "record header is {} bytes instead of the expected {}".format( + len(header), LEVELDBLOG_HEADER_LEN + ) + fields = struct.unpack(" LEVELDBLOG_DATA_LEN: + self._write_record( + s[data_used : data_used + LEVELDBLOG_DATA_LEN], + LEVELDBLOG_MIDDLE, + ) + data_used += LEVELDBLOG_DATA_LEN + data_left -= LEVELDBLOG_DATA_LEN + + # write last and flush the entire block to disk + self._write_record(s[data_used:], LEVELDBLOG_LAST) + self._fp.flush() + os.fsync(self._fp.fileno()) + self._flush_offset = self._index + + return start_offset, self._index, self._flush_offset + + def ensure_flushed(self, off: int) -> None: + self._fp.flush() # type: ignore + + def write(self, obj: "Record") -> Tuple[int, int, int]: + """Write a protocol buffer. + + Arguments: + obj: Protocol buffer to write. + + Returns: + (start_offset, end_offset, flush_offset) if successful, + None otherwise + + """ + raw_size = obj.ByteSize() + s = obj.SerializeToString() + assert len(s) == raw_size, "invalid serialization" + ret = self._write_data(s) + return ret + + def close(self) -> None: + if self._fp is not None: + logger.info("close: %s", self._fname) + self._fp.close() diff --git a/parrot/lib/python3.10/site-packages/wandb/sdk/internal/handler.py b/parrot/lib/python3.10/site-packages/wandb/sdk/internal/handler.py new file mode 100644 index 0000000000000000000000000000000000000000..ad5569477058c84b49c3249f806ee46767973d5d --- /dev/null +++ b/parrot/lib/python3.10/site-packages/wandb/sdk/internal/handler.py @@ -0,0 +1,911 @@ +"""Handle Manager.""" + +import json +import logging +import math +import numbers +import time +from collections import defaultdict +from queue import Queue +from threading import Event +from typing import ( + TYPE_CHECKING, + Any, + Callable, + Dict, + Iterable, + List, + Optional, + Sequence, + Tuple, + cast, +) + +from wandb.proto.wandb_internal_pb2 import ( + HistoryRecord, + InternalMessages, + MetricRecord, + Record, + Result, + RunRecord, + SampledHistoryItem, + SummaryItem, + SummaryRecord, + SummaryRecordRequest, + SystemMetricSample, + SystemMetricsBuffer, +) + +from ..interface.interface_queue import InterfaceQueue +from ..lib import handler_util, proto_util, tracelog, wburls +from . import context, sample, tb_watcher +from .settings_static import SettingsStatic +from .system.system_monitor import SystemMonitor + +if TYPE_CHECKING: + from wandb.proto.wandb_internal_pb2 import MetricSummary + + +SummaryDict = Dict[str, Any] + +logger = logging.getLogger(__name__) + +# Update (March 5, 2024): Since ~2020/2021, when constructing the summary +# object, we had replaced the artifact path for media types with the latest +# artifact path. The primary purpose of this was to support live updating of +# media objects in the UI (since the default artifact path was fully qualified +# and would not update). However, in March of 2024, a bug was discovered with +# this approach which causes this path to be incorrect in cases where the media +# object is logged to another artifact before being logged to the run. Setting +# this to `False` disables this copy behavior. The impact is that users will +# need to refresh to see updates. Ironically, this updating behavior is not +# currently supported in the UI, so the impact of this change is minimal. 
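+# Editor's note (added comment): an illustrative, hypothetical media entry in a
+# run summary, showing the two paths this flag chooses between:
+#     {"_type": "image-file",
+#      "artifact_path": "wandb-artifact://<source-artifact>/img.png",
+#      "_latest_artifact_path": "wandb-artifact://<collection>:latest/img.png"}
+# When the flag is True, "artifact_path" is overwritten with
+# "_latest_artifact_path" before the summary is consolidated (see
+# _update_summary_media_objects below); when False (the default here), the
+# original fully qualified path is kept.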
+REPLACE_SUMMARY_ART_PATH_WITH_LATEST = False + + +def _dict_nested_set(target: Dict[str, Any], key_list: Sequence[str], v: Any) -> None: + # recurse down the dictionary structure: + + for k in key_list[:-1]: + target.setdefault(k, {}) + new_target = target.get(k) + if TYPE_CHECKING: + new_target = cast(Dict[str, Any], new_target) + target = new_target + # use the last element of the key to write the leaf: + target[key_list[-1]] = v + + +class HandleManager: + _consolidated_summary: SummaryDict + _sampled_history: Dict[str, sample.UniformSampleAccumulator] + _partial_history: Dict[str, Any] + _run_proto: Optional[RunRecord] + _settings: SettingsStatic + _record_q: "Queue[Record]" + _result_q: "Queue[Result]" + _stopped: Event + _writer_q: "Queue[Record]" + _interface: InterfaceQueue + _system_monitor: Optional[SystemMonitor] + _tb_watcher: Optional[tb_watcher.TBWatcher] + _metric_defines: Dict[str, MetricRecord] + _metric_globs: Dict[str, MetricRecord] + _metric_track: Dict[Tuple[str, ...], float] + _metric_copy: Dict[Tuple[str, ...], Any] + _track_time: Optional[float] + _accumulate_time: float + _run_start_time: Optional[float] + _context_keeper: context.ContextKeeper + + def __init__( + self, + settings: SettingsStatic, + record_q: "Queue[Record]", + result_q: "Queue[Result]", + stopped: Event, + writer_q: "Queue[Record]", + interface: InterfaceQueue, + context_keeper: context.ContextKeeper, + ) -> None: + self._settings = settings + self._record_q = record_q + self._result_q = result_q + self._stopped = stopped + self._writer_q = writer_q + self._interface = interface + self._context_keeper = context_keeper + + self._tb_watcher = None + self._system_monitor = None + self._step = 0 + + self._track_time = None + self._accumulate_time = 0 + self._run_start_time = None + + # keep track of summary from key/val updates + self._consolidated_summary = dict() + self._sampled_history = defaultdict(sample.UniformSampleAccumulator) + self._run_proto = None + self._partial_history = dict() + self._metric_defines = defaultdict(MetricRecord) + self._metric_globs = defaultdict(MetricRecord) + self._metric_track = dict() + self._metric_copy = dict() + self._internal_messages = InternalMessages() + + self._dropped_history = False + + def __len__(self) -> int: + return self._record_q.qsize() + + def handle(self, record: Record) -> None: + self._context_keeper.add_from_record(record) + record_type = record.WhichOneof("record_type") + assert record_type + handler_str = "handle_" + record_type + handler: Callable[[Record], None] = getattr(self, handler_str, None) # type: ignore + assert handler, f"unknown handle: {handler_str}" # type: ignore + handler(record) + + def handle_request(self, record: Record) -> None: + request_type = record.request.WhichOneof("request_type") + assert request_type + handler_str = "handle_request_" + request_type + handler: Callable[[Record], None] = getattr(self, handler_str, None) # type: ignore + if request_type != "network_status": + logger.debug(f"handle_request: {request_type}") + assert handler, f"unknown handle: {handler_str}" # type: ignore + handler(record) + + def _dispatch_record(self, record: Record, always_send: bool = False) -> None: + if always_send: + record.control.always_send = True + tracelog.log_message_queue(record, self._writer_q) + self._writer_q.put(record) + + def _respond_result(self, result: Result) -> None: + tracelog.log_message_queue(result, self._result_q) + context_id = context.context_id_from_result(result) + 
self._context_keeper.release(context_id) + self._result_q.put(result) + + def debounce(self) -> None: + pass + + def handle_request_cancel(self, record: Record) -> None: + self._dispatch_record(record) + + def handle_request_defer(self, record: Record) -> None: + defer = record.request.defer + state = defer.state + + logger.info(f"handle defer: {state}") + # only handle flush tb (sender handles the rest) + if state == defer.FLUSH_STATS: + # TODO(jhr): this could block so we dont really want to call shutdown + # from handler thread + if self._system_monitor is not None: + self._system_monitor.finish() + elif state == defer.FLUSH_TB: + if self._tb_watcher: + # shutdown tensorboard workers so we get all metrics flushed + self._tb_watcher.finish() + self._tb_watcher = None + elif state == defer.FLUSH_PARTIAL_HISTORY: + self._flush_partial_history() + elif state == defer.FLUSH_SUM: + self._save_summary(self._consolidated_summary, flush=True) + + # defer is used to drive the sender finish state machine + self._dispatch_record(record, always_send=True) + + def handle_request_login(self, record: Record) -> None: + self._dispatch_record(record) + + def handle_request_python_packages(self, record: Record) -> None: + self._dispatch_record(record) + + def handle_run(self, record: Record) -> None: + if self._settings._offline: + self._run_proto = record.run + result = proto_util._result_from_record(record) + result.run_result.run.CopyFrom(record.run) + self._respond_result(result) + self._dispatch_record(record) + + def handle_stats(self, record: Record) -> None: + self._dispatch_record(record) + + def handle_config(self, record: Record) -> None: + self._dispatch_record(record) + + def handle_output(self, record: Record) -> None: + self._dispatch_record(record) + + def handle_output_raw(self, record: Record) -> None: + self._dispatch_record(record) + + def handle_files(self, record: Record) -> None: + self._dispatch_record(record) + + def handle_request_link_artifact(self, record: Record) -> None: + self._dispatch_record(record) + + def handle_use_artifact(self, record: Record) -> None: + self._dispatch_record(record) + + def handle_artifact(self, record: Record) -> None: + self._dispatch_record(record) + + def handle_alert(self, record: Record) -> None: + self._dispatch_record(record) + + def _save_summary(self, summary_dict: SummaryDict, flush: bool = False) -> None: + summary = SummaryRecord() + for k, v in summary_dict.items(): + update = summary.update.add() + update.key = k + update.value_json = json.dumps(v) + if flush: + record = Record(summary=summary) + self._dispatch_record(record) + elif not self._settings._offline: + # Send this summary update as a request since we aren't persisting every update + summary_record = SummaryRecordRequest(summary=summary) + request_record = self._interface._make_request( + summary_record=summary_record + ) + self._dispatch_record(request_record) + + def _save_history( + self, + history: HistoryRecord, + ) -> None: + for item in history.item: + # TODO(jhr) save nested keys? 
+ k = item.key + v = json.loads(item.value_json) + if isinstance(v, numbers.Real): + self._sampled_history[k].add(v) + + def _update_summary_metrics( + self, + s: "MetricSummary", + kl: List[str], + v: "numbers.Real", + float_v: float, + goal_max: Optional[bool], + ) -> bool: + updated = False + best_key: Optional[Tuple[str, ...]] = None + if s.none: + return False + if s.copy: + # non-key list copy already done in _update_summary + if len(kl) > 1: + _dict_nested_set(self._consolidated_summary, kl, v) + return True + if s.last: + last_key = tuple(kl + ["last"]) + old_last = self._metric_track.get(last_key) + if old_last is None or float_v != old_last: + self._metric_track[last_key] = float_v + _dict_nested_set(self._consolidated_summary, last_key, v) + updated = True + if s.best: + best_key = tuple(kl + ["best"]) + if s.max or best_key and goal_max: + max_key = tuple(kl + ["max"]) + old_max = self._metric_track.get(max_key) + if old_max is None or float_v > old_max: + self._metric_track[max_key] = float_v + if s.max: + _dict_nested_set(self._consolidated_summary, max_key, v) + updated = True + if best_key: + _dict_nested_set(self._consolidated_summary, best_key, v) + updated = True + # defaulting to minimize if goal is not specified + if s.min or best_key and not goal_max: + min_key = tuple(kl + ["min"]) + old_min = self._metric_track.get(min_key) + if old_min is None or float_v < old_min: + self._metric_track[min_key] = float_v + if s.min: + _dict_nested_set(self._consolidated_summary, min_key, v) + updated = True + if best_key: + _dict_nested_set(self._consolidated_summary, best_key, v) + updated = True + if s.mean: + tot_key = tuple(kl + ["tot"]) + num_key = tuple(kl + ["num"]) + avg_key = tuple(kl + ["mean"]) + tot = self._metric_track.get(tot_key, 0.0) + num = self._metric_track.get(num_key, 0) + tot += float_v + num += 1 + self._metric_track[tot_key] = tot + self._metric_track[num_key] = num + _dict_nested_set(self._consolidated_summary, avg_key, tot / num) + updated = True + return updated + + def _update_summary_leaf( + self, + kl: List[str], + v: Any, + d: Optional[MetricRecord] = None, + ) -> bool: + has_summary = d and d.HasField("summary") + if len(kl) == 1: + copy_key = tuple(kl) + old_copy = self._metric_copy.get(copy_key) + if old_copy is None or v != old_copy: + self._metric_copy[copy_key] = v + # Store copy metric if not specified, or copy behavior + if not has_summary or (d and d.summary.copy): + self._consolidated_summary[kl[0]] = v + return True + if not d: + return False + if not has_summary: + return False + if not isinstance(v, numbers.Real): + return False + if math.isnan(v): + return False + float_v = float(v) + goal_max = None + if d.goal: + goal_max = d.goal == d.GOAL_MAXIMIZE + if self._update_summary_metrics( + d.summary, kl=kl, v=v, float_v=float_v, goal_max=goal_max + ): + return True + return False + + def _update_summary_list( + self, + kl: List[str], + v: Any, + d: Optional[MetricRecord] = None, + ) -> bool: + metric_key = ".".join([k.replace(".", "\\.") for k in kl]) + d = self._metric_defines.get(metric_key, d) + # if the dict has _type key, it's a wandb table object + if isinstance(v, dict) and not handler_util.metric_is_wandb_dict(v): + updated = False + for nk, nv in v.items(): + if self._update_summary_list(kl=kl[:] + [nk], v=nv, d=d): + updated = True + return updated + # If the dict is a media object, update the pointer to the latest alias + elif ( + REPLACE_SUMMARY_ART_PATH_WITH_LATEST + and isinstance(v, dict) + and 
handler_util.metric_is_wandb_dict(v) + ): + if "_latest_artifact_path" in v and "artifact_path" in v: + # TODO: Make non-destructive? + v["artifact_path"] = v["_latest_artifact_path"] + updated = self._update_summary_leaf(kl=kl, v=v, d=d) + return updated + + def _update_summary_media_objects(self, v: Dict[str, Any]) -> Dict[str, Any]: + # For now, non-recursive - just top level + for nk, nv in v.items(): + if REPLACE_SUMMARY_ART_PATH_WITH_LATEST and ( + isinstance(nv, dict) + and handler_util.metric_is_wandb_dict(nv) + and "_latest_artifact_path" in nv + and "artifact_path" in nv + ): + # TODO: Make non-destructive? + nv["artifact_path"] = nv["_latest_artifact_path"] + v[nk] = nv + return v + + def _update_summary(self, history_dict: Dict[str, Any]) -> List[str]: + # keep old behavior fast path if no define metrics have been used + if not self._metric_defines: + history_dict = self._update_summary_media_objects(history_dict) + self._consolidated_summary.update(history_dict) + return list(history_dict.keys()) + updated_keys = [] + for k, v in history_dict.items(): + if self._update_summary_list(kl=[k], v=v): + updated_keys.append(k) + return updated_keys + + def _history_assign_step( + self, + history: HistoryRecord, + history_dict: Dict[str, Any], + ) -> None: + has_step = history.HasField("step") + item = history.item.add() + item.key = "_step" + if has_step: + step = history.step.num + history_dict["_step"] = step + item.value_json = json.dumps(step) + self._step = step + 1 + else: + history_dict["_step"] = self._step + item.value_json = json.dumps(self._step) + self._step += 1 + + def _history_define_metric(self, hkey: str) -> Optional[MetricRecord]: + """Check for hkey match in glob metrics and return the defined metric.""" + # Dont define metric for internal metrics + if hkey.startswith("_"): + return None + for k, mglob in self._metric_globs.items(): + if k.endswith("*"): + if hkey.startswith(k[:-1]): + m = MetricRecord() + m.CopyFrom(mglob) + m.ClearField("glob_name") + m.options.defined = False + m.name = hkey + return m + return None + + def _history_update_leaf( + self, + kl: List[str], + v: Any, + history_dict: Dict[str, Any], + update_history: Dict[str, Any], + ) -> None: + hkey = ".".join([k.replace(".", "\\.") for k in kl]) + m = self._metric_defines.get(hkey) + if not m: + m = self._history_define_metric(hkey) + if not m: + return + mr = Record() + mr.metric.CopyFrom(m) + mr.control.local = True # Dont store this, just send it + self._handle_defined_metric(mr) + + if m.options.step_sync and m.step_metric: + if m.step_metric not in history_dict: + copy_key = tuple([m.step_metric]) + step = self._metric_copy.get(copy_key) + if step is not None: + update_history[m.step_metric] = step + + def _history_update_list( + self, + kl: List[str], + v: Any, + history_dict: Dict[str, Any], + update_history: Dict[str, Any], + ) -> None: + if isinstance(v, dict): + for nk, nv in v.items(): + self._history_update_list( + kl=kl[:] + [nk], + v=nv, + history_dict=history_dict, + update_history=update_history, + ) + return + self._history_update_leaf( + kl=kl, v=v, history_dict=history_dict, update_history=update_history + ) + + def _history_update( + self, + history: HistoryRecord, + history_dict: Dict[str, Any], + ) -> None: + # if syncing an old run, we can skip this logic + if history_dict.get("_step") is None: + self._history_assign_step(history, history_dict) + + update_history: Dict[str, Any] = {} + # Look for metric matches + if self._metric_defines or self._metric_globs: + for hkey, 
hval in history_dict.items(): + self._history_update_list([hkey], hval, history_dict, update_history) + + if update_history: + history_dict.update(update_history) + for k, v in update_history.items(): + item = history.item.add() + item.key = k + item.value_json = json.dumps(v) + + def handle_history(self, record: Record) -> None: + history_dict = proto_util.dict_from_proto_list(record.history.item) + + # Inject _runtime if it is not present + if history_dict is not None: + if "_runtime" not in history_dict: + self._history_assign_runtime(record.history, history_dict) + + self._history_update(record.history, history_dict) + self._dispatch_record(record) + self._save_history(record.history) + # update summary from history + updated_keys = self._update_summary(history_dict) + if updated_keys: + updated_items = {k: self._consolidated_summary[k] for k in updated_keys} + self._save_summary(updated_items) + + def _flush_partial_history( + self, + step: Optional[int] = None, + ) -> None: + if not self._partial_history: + return + + history = HistoryRecord() + for k, v in self._partial_history.items(): + item = history.item.add() + item.key = k + item.value_json = json.dumps(v) + if step is not None: + history.step.num = step + self.handle_history(Record(history=history)) + self._partial_history = {} + + def handle_request_sender_mark_report(self, record: Record) -> None: + self._dispatch_record(record, always_send=True) + + def handle_request_status_report(self, record: Record) -> None: + self._dispatch_record(record, always_send=True) + + def handle_request_partial_history(self, record: Record) -> None: + partial_history = record.request.partial_history + + flush = None + if partial_history.HasField("action"): + flush = partial_history.action.flush + + step = None + if partial_history.HasField("step"): + step = partial_history.step.num + + history_dict = proto_util.dict_from_proto_list(partial_history.item) + if step is not None: + if step < self._step: + if not self._dropped_history: + message = ( + "Step only supports monotonically increasing values, use define_metric to set a custom x " + f"axis. For details see: {wburls.wburls.get('wandb_define_metric')}" + ) + self._internal_messages.warning.append(message) + self._dropped_history = True + message = ( + f"(User provided step: {step} is less than current step: {self._step}. " + f"Dropping entry: {history_dict})." 
+ ) + self._internal_messages.warning.append(message) + return + elif step > self._step: + self._flush_partial_history() + self._step = step + elif flush is None: + flush = True + + self._partial_history.update(history_dict) + + if flush: + self._flush_partial_history(self._step) + + def handle_summary(self, record: Record) -> None: + summary = record.summary + for item in summary.update: + if len(item.nested_key) > 0: + # we use either key or nested_key -- not both + assert item.key == "" + key = tuple(item.nested_key) + else: + # no counter-assertion here, because technically + # summary[""] is valid + key = (item.key,) + + target = self._consolidated_summary + + # recurse down the dictionary structure: + for prop in key[:-1]: + target = target[prop] + + # use the last element of the key to write the leaf: + target[key[-1]] = json.loads(item.value_json) + + for item in summary.remove: + if len(item.nested_key) > 0: + # we use either key or nested_key -- not both + assert item.key == "" + key = tuple(item.nested_key) + else: + # no counter-assertion here, because technically + # summary[""] is valid + key = (item.key,) + + target = self._consolidated_summary + + # recurse down the dictionary structure: + for prop in key[:-1]: + target = target[prop] + + # use the last element of the key to erase the leaf: + del target[key[-1]] + + self._save_summary(self._consolidated_summary) + + def handle_exit(self, record: Record) -> None: + if self._track_time is not None: + self._accumulate_time += time.time() - self._track_time + record.exit.runtime = int(self._accumulate_time) + self._dispatch_record(record, always_send=True) + + def handle_final(self, record: Record) -> None: + self._dispatch_record(record, always_send=True) + + def handle_preempting(self, record: Record) -> None: + self._dispatch_record(record) + + def handle_header(self, record: Record) -> None: + self._dispatch_record(record) + + def handle_footer(self, record: Record) -> None: + self._dispatch_record(record) + + def handle_request_check_version(self, record: Record) -> None: + if self._settings._offline: + result = proto_util._result_from_record(record) + self._respond_result(result) + else: + self._dispatch_record(record) + + def handle_request_attach(self, record: Record) -> None: + result = proto_util._result_from_record(record) + attach_id = record.request.attach.attach_id + assert attach_id + assert self._run_proto + result.response.attach_response.run.CopyFrom(self._run_proto) + self._respond_result(result) + + def handle_request_log_artifact(self, record: Record) -> None: + self._dispatch_record(record) + + def handle_telemetry(self, record: Record) -> None: + self._dispatch_record(record) + + def handle_request_run_start(self, record: Record) -> None: + run_start = record.request.run_start + assert run_start + assert run_start.run + + self._run_proto = run_start.run + + self._run_start_time = run_start.run.start_time.ToMicroseconds() / 1e6 + + self._track_time = time.time() + if run_start.run.resumed and run_start.run.runtime: + self._accumulate_time = run_start.run.runtime + else: + self._accumulate_time = 0 + + # system monitor + self._system_monitor = SystemMonitor( + self._settings, + self._interface, + ) + if not self._settings._disable_stats: + self._system_monitor.start() + if not self._settings._disable_meta and not run_start.run.resumed: + self._system_monitor.probe(publish=True) + + self._tb_watcher = tb_watcher.TBWatcher( + self._settings, interface=self._interface, run_proto=run_start.run + ) + + if 
run_start.run.resumed or run_start.run.forked: + self._step = run_start.run.starting_step + result = proto_util._result_from_record(record) + self._respond_result(result) + + def handle_request_resume(self, record: Record) -> None: + if self._system_monitor is not None: + logger.info("starting system metrics thread") + self._system_monitor.start() + + if self._track_time is not None: + self._accumulate_time += time.time() - self._track_time + self._track_time = time.time() + + def handle_request_pause(self, record: Record) -> None: + if self._system_monitor is not None: + logger.info("stopping system metrics thread") + self._system_monitor.finish() + if self._track_time is not None: + self._accumulate_time += time.time() - self._track_time + self._track_time = None + + def handle_request_poll_exit(self, record: Record) -> None: + self._dispatch_record(record, always_send=True) + + def handle_request_stop_status(self, record: Record) -> None: + self._dispatch_record(record) + + def handle_request_network_status(self, record: Record) -> None: + self._dispatch_record(record) + + def handle_request_internal_messages(self, record: Record) -> None: + result = proto_util._result_from_record(record) + result.response.internal_messages_response.messages.CopyFrom( + self._internal_messages + ) + self._internal_messages.Clear() + self._respond_result(result) + + def handle_request_status(self, record: Record) -> None: + result = proto_util._result_from_record(record) + self._respond_result(result) + + def handle_request_get_summary(self, record: Record) -> None: + result = proto_util._result_from_record(record) + for key, value in self._consolidated_summary.items(): + item = SummaryItem() + item.key = key + item.value_json = json.dumps(value) + result.response.get_summary_response.item.append(item) + self._respond_result(result) + + def handle_request_get_system_metrics(self, record: Record) -> None: + result = proto_util._result_from_record(record) + if self._system_monitor is None: + return + + buffer = self._system_monitor.buffer + for key, samples in buffer.items(): + buff = [] + for s in samples: + sms = SystemMetricSample() + sms.timestamp.FromMicroseconds(int(s[0] * 1e6)) + sms.value = s[1] + buff.append(sms) + + result.response.get_system_metrics_response.system_metrics[key].CopyFrom( + SystemMetricsBuffer(record=buff) + ) + + self._respond_result(result) + + def handle_tbrecord(self, record: Record) -> None: + logger.info("handling tbrecord: %s", record) + if self._tb_watcher: + tbrecord = record.tbrecord + self._tb_watcher.add(tbrecord.log_dir, tbrecord.save, tbrecord.root_dir) + self._dispatch_record(record) + + def _handle_defined_metric(self, record: Record) -> None: + metric = record.metric + if metric._control.overwrite: + self._metric_defines[metric.name].CopyFrom(metric) + else: + self._metric_defines[metric.name].MergeFrom(metric) + + # before dispatching, make sure step_metric is defined, if not define it and + # dispatch it locally first + metric = self._metric_defines[metric.name] + if metric.step_metric and metric.step_metric not in self._metric_defines: + m = MetricRecord(name=metric.step_metric) + self._metric_defines[metric.step_metric] = m + mr = Record() + mr.metric.CopyFrom(m) + mr.control.local = True # Don't store this, just send it + self._dispatch_record(mr) + + self._dispatch_record(record) + + def _handle_glob_metric(self, record: Record) -> None: + metric = record.metric + if metric._control.overwrite: + self._metric_globs[metric.glob_name].CopyFrom(metric) + else: 
+ self._metric_globs[metric.glob_name].MergeFrom(metric) + self._dispatch_record(record) + + def handle_metric(self, record: Record) -> None: + """Handle MetricRecord. + + Walkthrough of the life of a MetricRecord: + + Metric defined: + - run.define_metric() parses arguments create wandb_metric.Metric + - build MetricRecord publish to interface + - handler (this function) keeps list of metrics published: + - self._metric_defines: Fully defined metrics + - self._metric_globs: metrics that have a wildcard + - dispatch writer and sender thread + - writer: records are saved to persistent store + - sender: fully defined metrics get mapped into metadata for UI + + History logged: + - handle_history + - check if metric matches _metric_defines + - if not, check if metric matches _metric_globs + - if _metric globs match, generate defined metric and call _handle_metric + + Args: + record (Record): Metric record to process + """ + if record.metric.name: + self._handle_defined_metric(record) + elif record.metric.glob_name: + self._handle_glob_metric(record) + + def handle_request_sampled_history(self, record: Record) -> None: + result = proto_util._result_from_record(record) + for key, sampled in self._sampled_history.items(): + item = SampledHistoryItem() + item.key = key + values: Iterable[Any] = sampled.get() + if all(isinstance(i, numbers.Integral) for i in values): + try: + item.values_int.extend(values) + except ValueError: + # it is safe to ignore these as this is for display information + pass + elif all(isinstance(i, numbers.Real) for i in values): + item.values_float.extend(values) + result.response.sampled_history_response.item.append(item) + self._respond_result(result) + + def handle_request_server_info(self, record: Record) -> None: + self._dispatch_record(record, always_send=True) + + def handle_request_keepalive(self, record: Record) -> None: + """Handle a keepalive request. + + Keepalive is a noop, we just want to verify transport is alive. + """ + + def handle_request_run_status(self, record: Record) -> None: + self._dispatch_record(record, always_send=True) + + def handle_request_shutdown(self, record: Record) -> None: + # TODO(jhr): should we drain things and stop new requests from coming in? 
+ result = proto_util._result_from_record(record) + self._respond_result(result) + self._stopped.set() + + def finish(self) -> None: + logger.info("shutting down handler") + if self._system_monitor is not None: + self._system_monitor.finish() + if self._tb_watcher: + self._tb_watcher.finish() + # self._context_keeper._debug_print_orphans() + + def __next__(self) -> Record: + return self._record_q.get(block=True) + + next = __next__ + + def _history_assign_runtime( + self, + history: HistoryRecord, + history_dict: Dict[str, Any], + ) -> None: + # _runtime calculation is meaningless if there is no _timestamp + if "_timestamp" not in history_dict: + return + # if it is offline sync, self._run_start_time is None + # in that case set it to the first tfevent timestamp + if self._run_start_time is None: + self._run_start_time = history_dict["_timestamp"] + history_dict["_runtime"] = history_dict["_timestamp"] - self._run_start_time + item = history.item.add() + item.key = "_runtime" + item.value_json = json.dumps(history_dict[item.key]) diff --git a/parrot/lib/python3.10/site-packages/wandb/sdk/internal/internal_api.py b/parrot/lib/python3.10/site-packages/wandb/sdk/internal/internal_api.py new file mode 100644 index 0000000000000000000000000000000000000000..90c3a6e15e6974f169ecead89aba59fdca7ee81b --- /dev/null +++ b/parrot/lib/python3.10/site-packages/wandb/sdk/internal/internal_api.py @@ -0,0 +1,4287 @@ +import ast +import base64 +import datetime +import functools +import http.client +import json +import logging +import os +import re +import socket +import sys +import threading +from copy import deepcopy +from pathlib import Path +from typing import ( + IO, + TYPE_CHECKING, + Any, + Callable, + Dict, + Iterable, + List, + Mapping, + MutableMapping, + Optional, + Sequence, + TextIO, + Tuple, + Union, +) + +import click +import requests +import yaml +from wandb_gql import Client, gql +from wandb_gql.client import RetryError + +import wandb +from wandb import env, util +from wandb.apis.normalize import normalize_exceptions, parse_backend_error_messages +from wandb.errors import AuthenticationError, CommError, UnsupportedError, UsageError +from wandb.integration.sagemaker import parse_sm_secrets +from wandb.old.settings import Settings +from wandb.sdk.internal.thread_local_settings import _thread_local_api_settings +from wandb.sdk.lib.gql_request import GraphQLSession +from wandb.sdk.lib.hashutil import B64MD5, md5_file_b64 + +from ..lib import credentials, retry +from ..lib.filenames import DIFF_FNAME, METADATA_FNAME +from ..lib.gitlib import GitRepo +from . 
import context +from .progress import Progress + +logger = logging.getLogger(__name__) + +if TYPE_CHECKING: + if sys.version_info >= (3, 8): + from typing import Literal, TypedDict + else: + from typing_extensions import Literal, TypedDict + + from .progress import ProgressFn + + class CreateArtifactFileSpecInput(TypedDict, total=False): + """Corresponds to `type CreateArtifactFileSpecInput` in schema.graphql.""" + + artifactID: str # noqa: N815 + name: str + md5: str + mimetype: Optional[str] + artifactManifestID: Optional[str] # noqa: N815 + uploadPartsInput: Optional[List[Dict[str, object]]] # noqa: N815 + + class CreateArtifactFilesResponseFile(TypedDict): + id: str + name: str + displayName: str # noqa: N815 + uploadUrl: Optional[str] # noqa: N815 + uploadHeaders: Sequence[str] # noqa: N815 + uploadMultipartUrls: "UploadPartsResponse" # noqa: N815 + storagePath: str # noqa: N815 + artifact: "CreateArtifactFilesResponseFileNode" + + class CreateArtifactFilesResponseFileNode(TypedDict): + id: str + + class UploadPartsResponse(TypedDict): + uploadUrlParts: List["UploadUrlParts"] # noqa: N815 + uploadID: str # noqa: N815 + + class UploadUrlParts(TypedDict): + partNumber: int # noqa: N815 + uploadUrl: str # noqa: N815 + + class CompleteMultipartUploadArtifactInput(TypedDict): + """Corresponds to `type CompleteMultipartUploadArtifactInput` in schema.graphql.""" + + completeMultipartAction: str # noqa: N815 + completedParts: Dict[int, str] # noqa: N815 + artifactID: str # noqa: N815 + storagePath: str # noqa: N815 + uploadID: str # noqa: N815 + md5: str + + class CompleteMultipartUploadArtifactResponse(TypedDict): + digest: str + + class DefaultSettings(TypedDict): + section: str + git_remote: str + ignore_globs: Optional[List[str]] + base_url: Optional[str] + root_dir: Optional[str] + api_key: Optional[str] + entity: Optional[str] + project: Optional[str] + _extra_http_headers: Optional[Mapping[str, str]] + _proxies: Optional[Mapping[str, str]] + + _Response = MutableMapping + SweepState = Literal["RUNNING", "PAUSED", "CANCELED", "FINISHED"] + Number = Union[int, float] + +# class _MappingSupportsCopy(Protocol): +# def copy(self) -> "_MappingSupportsCopy": ... +# def keys(self) -> Iterable: ... +# def __getitem__(self, name: str) -> Any: ... + +httpclient_logger = logging.getLogger("http.client") +if os.environ.get("WANDB_DEBUG"): + httpclient_logger.setLevel(logging.DEBUG) + + +def check_httpclient_logger_handler() -> None: + # Only enable http.client logging if WANDB_DEBUG is set + if not os.environ.get("WANDB_DEBUG"): + return + if httpclient_logger.handlers: + return + + # Enable HTTPConnection debug logging to the logging framework + level = logging.DEBUG + + def httpclient_log(*args: Any) -> None: + httpclient_logger.log(level, " ".join(args)) + + # mask the print() built-in in the http.client module to use logging instead + http.client.print = httpclient_log # type: ignore[attr-defined] + # enable debugging + http.client.HTTPConnection.debuglevel = 1 + + root_logger = logging.getLogger("wandb") + if root_logger.handlers: + httpclient_logger.addHandler(root_logger.handlers[0]) + + +class _ThreadLocalData(threading.local): + context: Optional[context.Context] + + def __init__(self) -> None: + self.context = None + + +class Api: + """W&B Internal Api wrapper. + + Note: + Settings are automatically overridden by looking for + a `wandb/settings` file in the current working directory or its parent + directory. If none can be found, we look in the current user's home + directory. 
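+ Editor's note (addition, not upstream text): a minimal construction sketch;
+ the override keys shown are assumptions drawn from the defaults set in
+ ``__init__`` below::
+
+ api = Api(default_settings={"base_url": "https://api.wandb.ai", "entity": "my-team"})
+ api.set_current_run_id("my-run-id")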
+ + Arguments: + default_settings(dict, optional): If you aren't using a settings + file, or you wish to override the section to use in the settings file + Override the settings here. + """ + + HTTP_TIMEOUT = env.get_http_timeout(20) + FILE_PUSHER_TIMEOUT = env.get_file_pusher_timeout() + _global_context: context.Context + _local_data: _ThreadLocalData + + def __init__( + self, + default_settings: Optional[ + Union[ + "wandb.sdk.wandb_settings.Settings", + "wandb.sdk.internal.settings_static.SettingsStatic", + Settings, + dict, + ] + ] = None, + load_settings: bool = True, + retry_timedelta: datetime.timedelta = datetime.timedelta( # noqa: B008 # okay because it's immutable + days=7 + ), + environ: MutableMapping = os.environ, + retry_callback: Optional[Callable[[int, str], Any]] = None, + ) -> None: + self._environ = environ + self._global_context = context.Context() + self._local_data = _ThreadLocalData() + self.default_settings: DefaultSettings = { + "section": "default", + "git_remote": "origin", + "ignore_globs": [], + "base_url": "https://api.wandb.ai", + "root_dir": None, + "api_key": None, + "entity": None, + "project": None, + "_extra_http_headers": None, + "_proxies": None, + } + self.retry_timedelta = retry_timedelta + # todo: Old Settings do not follow the SupportsKeysAndGetItem Protocol + default_settings = default_settings or {} + self.default_settings.update(default_settings) # type: ignore + self.retry_uploads = 10 + self._settings = Settings( + load_settings=load_settings, + root_dir=self.default_settings.get("root_dir"), + ) + self.git = GitRepo(remote=self.settings("git_remote")) + # Mutable settings set by the _file_stream_api + self.dynamic_settings = { + "system_sample_seconds": 2, + "system_samples": 15, + "heartbeat_seconds": 30, + } + + # todo: remove these hacky hacks after settings refactor is complete + # keeping this code here to limit scope and so that it is easy to remove later + self._extra_http_headers = self.settings("_extra_http_headers") or json.loads( + self._environ.get("WANDB__EXTRA_HTTP_HEADERS", "{}") + ) + self._extra_http_headers.update(_thread_local_api_settings.headers or {}) + + auth = None + if self.access_token is not None: + self._extra_http_headers["Authorization"] = f"Bearer {self.access_token}" + elif _thread_local_api_settings.cookies is None: + auth = ("api", self.api_key or "") + + proxies = self.settings("_proxies") or json.loads( + self._environ.get("WANDB__PROXIES", "{}") + ) + + self.client = Client( + transport=GraphQLSession( + headers={ + "User-Agent": self.user_agent, + "X-WANDB-USERNAME": env.get_username(env=self._environ), + "X-WANDB-USER-EMAIL": env.get_user_email(env=self._environ), + **self._extra_http_headers, + }, + use_json=True, + # this timeout won't apply when the DNS lookup fails. 
in that case, it will be 60s + # https://bugs.python.org/issue22889 + timeout=self.HTTP_TIMEOUT, + auth=auth, + url=f"{self.settings('base_url')}/graphql", + cookies=_thread_local_api_settings.cookies, + proxies=proxies, + ) + ) + + self.retry_callback = retry_callback + self._retry_gql = retry.Retry( + self.execute, + retry_timedelta=retry_timedelta, + check_retry_fn=util.no_retry_auth, + retryable_exceptions=(RetryError, requests.RequestException), + retry_callback=retry_callback, + ) + self._current_run_id: Optional[str] = None + self._file_stream_api = None + self._upload_file_session = requests.Session() + if self.FILE_PUSHER_TIMEOUT: + self._upload_file_session.put = functools.partial( # type: ignore + self._upload_file_session.put, + timeout=self.FILE_PUSHER_TIMEOUT, + ) + if proxies: + self._upload_file_session.proxies.update(proxies) + # This Retry class is initialized once for each Api instance, so this + # defaults to retrying 1 million times per process or 7 days + self.upload_file_retry = normalize_exceptions( + retry.retriable(retry_timedelta=retry_timedelta)(self.upload_file) + ) + self.upload_multipart_file_chunk_retry = normalize_exceptions( + retry.retriable(retry_timedelta=retry_timedelta)( + self.upload_multipart_file_chunk + ) + ) + self._client_id_mapping: Dict[str, str] = {} + # Large file uploads to azure can optionally use their SDK + self._azure_blob_module = util.get_module("azure.storage.blob") + + self.query_types: Optional[List[str]] = None + self.mutation_types: Optional[List[str]] = None + self.server_info_types: Optional[List[str]] = None + self.server_use_artifact_input_info: Optional[List[str]] = None + self.server_create_artifact_input_info: Optional[List[str]] = None + self.server_artifact_fields_info: Optional[List[str]] = None + self._max_cli_version: Optional[str] = None + self._server_settings_type: Optional[List[str]] = None + self.fail_run_queue_item_input_info: Optional[List[str]] = None + self.create_launch_agent_input_info: Optional[List[str]] = None + self.server_create_run_queue_supports_drc: Optional[bool] = None + self.server_create_run_queue_supports_priority: Optional[bool] = None + self.server_supports_template_variables: Optional[bool] = None + self.server_push_to_run_queue_supports_priority: Optional[bool] = None + + def gql(self, *args: Any, **kwargs: Any) -> Any: + ret = self._retry_gql( + *args, + retry_cancel_event=self.context.cancel_event, + **kwargs, + ) + return ret + + def set_local_context(self, api_context: Optional[context.Context]) -> None: + self._local_data.context = api_context + + def clear_local_context(self) -> None: + self._local_data.context = None + + @property + def context(self) -> context.Context: + return self._local_data.context or self._global_context + + def reauth(self) -> None: + """Ensure the current api key is set in the transport.""" + self.client.transport.session.auth = ("api", self.api_key or "") + + def relocate(self) -> None: + """Ensure the current api points to the right server.""" + self.client.transport.url = "{}/graphql".format(self.settings("base_url")) + + def execute(self, *args: Any, **kwargs: Any) -> "_Response": + """Wrapper around execute that logs in cases of failure.""" + try: + return self.client.execute(*args, **kwargs) # type: ignore + except requests.exceptions.HTTPError as err: + response = err.response + assert response is not None + logger.error(f"{response.status_code} response executing GraphQL.") + logger.error(response.text) + for error in 
parse_backend_error_messages(response): + wandb.termerror(f"Error while calling W&B API: {error} ({response})") + raise + + def disabled(self) -> Union[str, bool]: + return self._settings.get(Settings.DEFAULT_SECTION, "disabled", fallback=False) # type: ignore + + def set_current_run_id(self, run_id: str) -> None: + self._current_run_id = run_id + + @property + def current_run_id(self) -> Optional[str]: + return self._current_run_id + + @property + def user_agent(self) -> str: + return f"W&B Internal Client {wandb.__version__}" + + @property + def api_key(self) -> Optional[str]: + if _thread_local_api_settings.api_key: + return _thread_local_api_settings.api_key + auth = requests.utils.get_netrc_auth(self.api_url) + key = None + if auth: + key = auth[-1] + + # Environment should take precedence + env_key: Optional[str] = self._environ.get(env.API_KEY) + sagemaker_key: Optional[str] = parse_sm_secrets().get(env.API_KEY) + default_key: Optional[str] = self.default_settings.get("api_key") + return env_key or key or sagemaker_key or default_key + + @property + def access_token(self) -> Optional[str]: + """Retrieves an access token for authentication. + + This function attempts to exchange an identity token for a temporary + access token from the server, and save it to the credentials file. + It uses the path to the identity token as defined in the environment + variables. If the environment variable is not set, it returns None. + + Returns: + Optional[str]: The access token if available, otherwise None if + no identity token is supplied. + Raises: + AuthenticationError: If the path to the identity token is not found. + """ + token_file_str = self._environ.get(env.IDENTITY_TOKEN_FILE) + if not token_file_str: + return None + + token_file = Path(token_file_str) + if not token_file.exists(): + raise AuthenticationError(f"Identity token file not found: {token_file}") + + base_url = self.settings("base_url") + credentials_file = env.get_credentials_file( + str(credentials.DEFAULT_WANDB_CREDENTIALS_FILE), self._environ + ) + return credentials.access_token(base_url, token_file, credentials_file) + + @property + def api_url(self) -> str: + return self.settings("base_url") # type: ignore + + @property + def app_url(self) -> str: + return wandb.util.app_url(self.api_url) + + @property + def default_entity(self) -> str: + return self.viewer().get("entity") # type: ignore + + def settings(self, key: Optional[str] = None, section: Optional[str] = None) -> Any: + """The settings overridden from the wandb/settings file. 
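+
+ A minimal lookup sketch (illustrative; "api" is an assumed Api instance):
+
+ api.settings("project") # a single key from the merged settings
+ api.settings() # the full merged settings dict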
+ + Arguments: + key (str, optional): If provided only this setting is returned + section (str, optional): If provided this section of the setting file is + used, defaults to "default" + + Returns: + A dict with the current settings + + { + "entity": "models", + "base_url": "https://api.wandb.ai", + "project": None + } + """ + result = self.default_settings.copy() + result.update(self._settings.items(section=section)) # type: ignore + result.update( + { + "entity": env.get_entity( + self._settings.get( + Settings.DEFAULT_SECTION, + "entity", + fallback=result.get("entity"), + ), + env=self._environ, + ), + "project": env.get_project( + self._settings.get( + Settings.DEFAULT_SECTION, + "project", + fallback=result.get("project"), + ), + env=self._environ, + ), + "base_url": env.get_base_url( + self._settings.get( + Settings.DEFAULT_SECTION, + "base_url", + fallback=result.get("base_url"), + ), + env=self._environ, + ), + "ignore_globs": env.get_ignore( + self._settings.get( + Settings.DEFAULT_SECTION, + "ignore_globs", + fallback=result.get("ignore_globs"), + ), + env=self._environ, + ), + } + ) + + return result if key is None else result[key] # type: ignore + + def clear_setting( + self, key: str, globally: bool = False, persist: bool = False + ) -> None: + self._settings.clear( + Settings.DEFAULT_SECTION, key, globally=globally, persist=persist + ) + + def set_setting( + self, key: str, value: Any, globally: bool = False, persist: bool = False + ) -> None: + self._settings.set( + Settings.DEFAULT_SECTION, key, value, globally=globally, persist=persist + ) + if key == "entity": + env.set_entity(value, env=self._environ) + elif key == "project": + env.set_project(value, env=self._environ) + elif key == "base_url": + self.relocate() + + def parse_slug( + self, slug: str, project: Optional[str] = None, run: Optional[str] = None + ) -> Tuple[str, str]: + """Parse a slug into a project and run. 
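+
+ For example (illustrative values), parse_slug("my-project/abc123")
+ returns the tuple ("my-project", "abc123").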
+ + Arguments: + slug (str): The slug to parse + project (str, optional): The project to use, if not provided it will be + inferred from the slug + run (str, optional): The run to use, if not provided it will be inferred + from the slug + + Returns: + A dict with the project and run + """ + if slug and "/" in slug: + parts = slug.split("/") + project = parts[0] + run = parts[1] + else: + project = project or self.settings().get("project") + if project is None: + raise CommError("No default project configured.") + run = run or slug or self.current_run_id or env.get_run(env=self._environ) + assert run, "run must be specified" + return project, run + + @normalize_exceptions + def server_info_introspection(self) -> Tuple[List[str], List[str], List[str]]: + query_string = """ + query ProbeServerCapabilities { + QueryType: __type(name: "Query") { + ...fieldData + } + MutationType: __type(name: "Mutation") { + ...fieldData + } + ServerInfoType: __type(name: "ServerInfo") { + ...fieldData + } + } + + fragment fieldData on __Type { + fields { + name + } + } + """ + if ( + self.query_types is None + or self.mutation_types is None + or self.server_info_types is None + ): + query = gql(query_string) + res = self.gql(query) + + self.query_types = [ + field.get("name", "") + for field in res.get("QueryType", {}).get("fields", [{}]) + ] + self.mutation_types = [ + field.get("name", "") + for field in res.get("MutationType", {}).get("fields", [{}]) + ] + self.server_info_types = [ + field.get("name", "") + for field in res.get("ServerInfoType", {}).get("fields", [{}]) + ] + return self.query_types, self.server_info_types, self.mutation_types + + @normalize_exceptions + def server_settings_introspection(self) -> None: + query_string = """ + query ProbeServerSettings { + ServerSettingsType: __type(name: "ServerSettings") { + ...fieldData + } + } + + fragment fieldData on __Type { + fields { + name + } + } + """ + if self._server_settings_type is None: + query = gql(query_string) + res = self.gql(query) + self._server_settings_type = ( + [ + field.get("name", "") + for field in res.get("ServerSettingsType", {}).get("fields", [{}]) + ] + if res + else [] + ) + + def server_use_artifact_input_introspection(self) -> List: + query_string = """ + query ProbeServerUseArtifactInput { + UseArtifactInputInfoType: __type(name: "UseArtifactInput") { + name + inputFields { + name + } + } + } + """ + + if self.server_use_artifact_input_info is None: + query = gql(query_string) + res = self.gql(query) + self.server_use_artifact_input_info = [ + field.get("name", "") + for field in res.get("UseArtifactInputInfoType", {}).get( + "inputFields", [{}] + ) + ] + return self.server_use_artifact_input_info + + @normalize_exceptions + def launch_agent_introspection(self) -> Optional[str]: + query = gql( + """ + query LaunchAgentIntrospection { + LaunchAgentType: __type(name: "LaunchAgent") { + name + } + } + """ + ) + + res = self.gql(query) + return res.get("LaunchAgentType") or None + + @normalize_exceptions + def create_run_queue_introspection(self) -> Tuple[bool, bool, bool]: + _, _, mutations = self.server_info_introspection() + query_string = """ + query ProbeCreateRunQueueInput { + CreateRunQueueInputType: __type(name: "CreateRunQueueInput") { + name + inputFields { + name + } + } + } + """ + if ( + self.server_create_run_queue_supports_drc is None + or self.server_create_run_queue_supports_priority is None + ): + query = gql(query_string) + res = self.gql(query) + if res is None: + raise CommError("Could not get 
CreateRunQueue input from GQL.") + self.server_create_run_queue_supports_drc = "defaultResourceConfigID" in [ + x["name"] + for x in ( + res.get("CreateRunQueueInputType", {}).get("inputFields", [{}]) + ) + ] + self.server_create_run_queue_supports_priority = "prioritizationMode" in [ + x["name"] + for x in ( + res.get("CreateRunQueueInputType", {}).get("inputFields", [{}]) + ) + ] + return ( + "createRunQueue" in mutations, + self.server_create_run_queue_supports_drc, + self.server_create_run_queue_supports_priority, + ) + + @normalize_exceptions + def push_to_run_queue_introspection(self) -> Tuple[bool, bool]: + query_string = """ + query ProbePushToRunQueueInput { + PushToRunQueueInputType: __type(name: "PushToRunQueueInput") { + name + inputFields { + name + } + } + } + """ + + if ( + self.server_supports_template_variables is None + or self.server_push_to_run_queue_supports_priority is None + ): + query = gql(query_string) + res = self.gql(query) + self.server_supports_template_variables = "templateVariableValues" in [ + x["name"] + for x in ( + res.get("PushToRunQueueInputType", {}).get("inputFields", [{}]) + ) + ] + self.server_push_to_run_queue_supports_priority = "priority" in [ + x["name"] + for x in ( + res.get("PushToRunQueueInputType", {}).get("inputFields", [{}]) + ) + ] + + return ( + self.server_supports_template_variables, + self.server_push_to_run_queue_supports_priority, + ) + + @normalize_exceptions + def create_default_resource_config_introspection(self) -> bool: + _, _, mutations = self.server_info_introspection() + return "createDefaultResourceConfig" in mutations + + @normalize_exceptions + def fail_run_queue_item_introspection(self) -> bool: + _, _, mutations = self.server_info_introspection() + return "failRunQueueItem" in mutations + + @normalize_exceptions + def fail_run_queue_item_fields_introspection(self) -> List: + if self.fail_run_queue_item_input_info: + return self.fail_run_queue_item_input_info + query_string = """ + query ProbeServerFailRunQueueItemInput { + FailRunQueueItemInputInfoType: __type(name:"FailRunQueueItemInput") { + inputFields{ + name + } + } + } + """ + + query = gql(query_string) + res = self.gql(query) + + self.fail_run_queue_item_input_info = [ + field.get("name", "") + for field in res.get("FailRunQueueItemInputInfoType", {}).get( + "inputFields", [{}] + ) + ] + return self.fail_run_queue_item_input_info + + @normalize_exceptions + def fail_run_queue_item( + self, + run_queue_item_id: str, + message: str, + stage: str, + file_paths: Optional[List[str]] = None, + ) -> bool: + if not self.fail_run_queue_item_introspection(): + return False + variable_values: Dict[str, Union[str, Optional[List[str]]]] = { + "runQueueItemId": run_queue_item_id, + } + if "message" in self.fail_run_queue_item_fields_introspection(): + variable_values.update({"message": message, "stage": stage}) + if file_paths is not None: + variable_values["filePaths"] = file_paths + mutation_string = """ + mutation failRunQueueItem($runQueueItemId: ID!, $message: String!, $stage: String!, $filePaths: [String!]) { + failRunQueueItem( + input: { + runQueueItemId: $runQueueItemId + message: $message + stage: $stage + filePaths: $filePaths + } + ) { + success + } + } + """ + else: + mutation_string = """ + mutation failRunQueueItem($runQueueItemId: ID!) 
{ + failRunQueueItem( + input: { + runQueueItemId: $runQueueItemId + } + ) { + success + } + } + """ + + mutation = gql(mutation_string) + response = self.gql(mutation, variable_values=variable_values) + result: bool = response["failRunQueueItem"]["success"] + return result + + @normalize_exceptions + def update_run_queue_item_warning_introspection(self) -> bool: + _, _, mutations = self.server_info_introspection() + return "updateRunQueueItemWarning" in mutations + + @normalize_exceptions + def update_run_queue_item_warning( + self, + run_queue_item_id: str, + message: str, + stage: str, + file_paths: Optional[List[str]] = None, + ) -> bool: + if not self.update_run_queue_item_warning_introspection(): + return False + mutation = gql( + """ + mutation updateRunQueueItemWarning($runQueueItemId: ID!, $message: String!, $stage: String!, $filePaths: [String!]) { + updateRunQueueItemWarning( + input: { + runQueueItemId: $runQueueItemId + message: $message + stage: $stage + filePaths: $filePaths + } + ) { + success + } + } + """ + ) + response = self.gql( + mutation, + variable_values={ + "runQueueItemId": run_queue_item_id, + "message": message, + "stage": stage, + "filePaths": file_paths, + }, + ) + result: bool = response["updateRunQueueItemWarning"]["success"] + return result + + @normalize_exceptions + def viewer(self) -> Dict[str, Any]: + query = gql( + """ + query Viewer{ + viewer { + id + entity + username + flags + teams { + edges { + node { + name + } + } + } + } + } + """ + ) + res = self.gql(query) + return res.get("viewer") or {} + + @normalize_exceptions + def max_cli_version(self) -> Optional[str]: + if self._max_cli_version is not None: + return self._max_cli_version + + query_types, server_info_types, _ = self.server_info_introspection() + cli_version_exists = ( + "serverInfo" in query_types and "cliVersionInfo" in server_info_types + ) + if not cli_version_exists: + return None + + _, server_info = self.viewer_server_info() + self._max_cli_version = server_info.get("cliVersionInfo", {}).get( + "max_cli_version" + ) + return self._max_cli_version + + @normalize_exceptions + def viewer_server_info(self) -> Tuple[Dict[str, Any], Dict[str, Any]]: + local_query = """ + latestLocalVersionInfo { + outOfDate + latestVersionString + versionOnThisInstanceString + } + """ + cli_query = """ + serverInfo { + cliVersionInfo + _LOCAL_QUERY_ + } + """ + query_template = """ + query Viewer{ + viewer { + id + entity + username + email + flags + teams { + edges { + node { + name + } + } + } + } + _CLI_QUERY_ + } + """ + query_types, server_info_types, _ = self.server_info_introspection() + + cli_version_exists = ( + "serverInfo" in query_types and "cliVersionInfo" in server_info_types + ) + + local_version_exists = ( + "serverInfo" in query_types + and "latestLocalVersionInfo" in server_info_types + ) + + cli_query_string = "" if not cli_version_exists else cli_query + local_query_string = "" if not local_version_exists else local_query + + query_string = query_template.replace("_CLI_QUERY_", cli_query_string).replace( + "_LOCAL_QUERY_", local_query_string + ) + query = gql(query_string) + res = self.gql(query) + return res.get("viewer") or {}, res.get("serverInfo") or {} + + @normalize_exceptions + def list_projects(self, entity: Optional[str] = None) -> List[Dict[str, str]]: + """List projects in W&B scoped by entity. + + Arguments: + entity (str, optional): The entity to scope this project to. 
+ + Returns: + [{"id","name","description"}] + """ + query = gql( + """ + query EntityProjects($entity: String) { + models(first: 10, entityName: $entity) { + edges { + node { + id + name + description + } + } + } + } + """ + ) + project_list: List[Dict[str, str]] = self._flatten_edges( + self.gql( + query, variable_values={"entity": entity or self.settings("entity")} + )["models"] + ) + return project_list + + @normalize_exceptions + def project(self, project: str, entity: Optional[str] = None) -> "_Response": + """Retrieve project. + + Arguments: + project (str): The project to get details for + entity (str, optional): The entity to scope this project to. + + Returns: + [{"id","name","repo","dockerImage","description"}] + """ + query = gql( + """ + query ProjectDetails($entity: String, $project: String) { + model(name: $project, entityName: $entity) { + id + name + repo + dockerImage + description + } + } + """ + ) + response: _Response = self.gql( + query, variable_values={"entity": entity, "project": project} + )["model"] + return response + + @normalize_exceptions + def sweep( + self, + sweep: str, + specs: str, + project: Optional[str] = None, + entity: Optional[str] = None, + ) -> Dict[str, Any]: + """Retrieve sweep. + + Arguments: + sweep (str): The sweep to get details for + specs (str): history specs + project (str, optional): The project to scope this sweep to. + entity (str, optional): The entity to scope this sweep to. + + Returns: + [{"id","name","repo","dockerImage","description"}] + """ + query = gql( + """ + query SweepWithRuns($entity: String, $project: String, $sweep: String!, $specs: [JSONString!]!) { + project(name: $project, entityName: $entity) { + sweep(sweepName: $sweep) { + id + name + method + state + description + config + createdAt + heartbeatAt + updatedAt + earlyStopJobRunning + bestLoss + controller + scheduler + runs { + edges { + node { + name + state + config + exitcode + heartbeatAt + shouldStop + failed + stopped + running + summaryMetrics + sampledHistory(specs: $specs) + } + } + } + } + } + } + """ + ) + entity = entity or self.settings("entity") + project = project or self.settings("project") + response = self.gql( + query, + variable_values={ + "entity": entity, + "project": project, + "sweep": sweep, + "specs": specs, + }, + ) + if response["project"] is None or response["project"]["sweep"] is None: + raise ValueError(f"Sweep {entity}/{project}/{sweep} not found") + data: Dict[str, Any] = response["project"]["sweep"] + if data: + data["runs"] = self._flatten_edges(data["runs"]) + return data + + @normalize_exceptions + def list_runs( + self, project: str, entity: Optional[str] = None + ) -> List[Dict[str, str]]: + """List runs in W&B scoped by project. + + Arguments: + project (str): The project to scope the runs to + entity (str, optional): The entity to scope this project to. 
Defaults to public models + + Returns: + [{"id","name","description"}] + """ + query = gql( + """ + query ProjectRuns($model: String!, $entity: String) { + model(name: $model, entityName: $entity) { + buckets(first: 10) { + edges { + node { + id + name + displayName + description + } + } + } + } + } + """ + ) + return self._flatten_edges( + self.gql( + query, + variable_values={ + "entity": entity or self.settings("entity"), + "model": project or self.settings("project"), + }, + )["model"]["buckets"] + ) + + @normalize_exceptions + def run_config( + self, project: str, run: Optional[str] = None, entity: Optional[str] = None + ) -> Tuple[str, Dict[str, Any], Optional[str], Dict[str, Any]]: + """Get the relevant configs for a run. + + Arguments: + project (str): The project to download, (can include bucket) + run (str, optional): The run to download + entity (str, optional): The entity to scope this project to. + """ + check_httpclient_logger_handler() + + query = gql( + """ + query RunConfigs( + $name: String!, + $entity: String, + $run: String!, + $pattern: String!, + $includeConfig: Boolean!, + ) { + model(name: $name, entityName: $entity) { + bucket(name: $run) { + config @include(if: $includeConfig) + commit @include(if: $includeConfig) + files(pattern: $pattern) { + pageInfo { + hasNextPage + endCursor + } + edges { + node { + name + directUrl + } + } + } + } + } + } + """ + ) + + variable_values = { + "name": project, + "run": run, + "entity": entity, + "includeConfig": True, + } + + commit: str = "" + config: Dict[str, Any] = {} + patch: Optional[str] = None + metadata: Dict[str, Any] = {} + + # If we use the `names` parameter on the `files` node, then the server + # will helpfully give us and 'open' file handle to the files that don't + # exist. This is so that we can upload data to it. However, in this + # case, we just want to download that file and not upload to it, so + # let's instead query for the files that do exist using `pattern` + # (with no wildcards). + # + # Unfortunately we're unable to construct a single pattern that matches + # our 2 files, we would need something like regex for that. + for filename in [DIFF_FNAME, METADATA_FNAME]: + variable_values["pattern"] = filename + response = self.gql(query, variable_values=variable_values) + if response["model"] is None: + raise CommError(f"Run {entity}/{project}/{run} not found") + run_obj: Dict = response["model"]["bucket"] + # we only need to fetch this config once + if variable_values["includeConfig"]: + commit = run_obj["commit"] + config = json.loads(run_obj["config"] or "{}") + variable_values["includeConfig"] = False + if run_obj["files"] is not None: + for file_edge in run_obj["files"]["edges"]: + name = file_edge["node"]["name"] + url = file_edge["node"]["directUrl"] + res = requests.get(url) + res.raise_for_status() + if name == METADATA_FNAME: + metadata = res.json() + elif name == DIFF_FNAME: + patch = res.text + + return commit, config, patch, metadata + + @normalize_exceptions + def run_resume_status( + self, entity: str, project_name: str, name: str + ) -> Optional[Dict[str, Any]]: + """Check if a run exists and get resume information. + + Arguments: + entity (str): The entity to scope this project to. + project_name (str): The project to download, (can include bucket) + name (str): The run to download + """ + # Pulling wandbConfig.start_time is required so that we can determine if a run has actually started + query = gql( + """ + query RunResumeStatus($project: String, $entity: String, $name: String!) 
{ + model(name: $project, entityName: $entity) { + id + name + entity { + id + name + } + + bucket(name: $name, missingOk: true) { + id + name + summaryMetrics + displayName + logLineCount + historyLineCount + eventsLineCount + historyTail + eventsTail + config + tags + wandbConfig(keys: ["t"]) + } + } + } + """ + ) + + response = self.gql( + query, + variable_values={ + "entity": entity, + "project": project_name, + "name": name, + }, + ) + + if "model" not in response or "bucket" not in (response["model"] or {}): + return None + + project = response["model"] + self.set_setting("project", project_name) + if "entity" in project: + self.set_setting("entity", project["entity"]["name"]) + + result: Dict[str, Any] = project["bucket"] + + return result + + @normalize_exceptions + def check_stop_requested( + self, project_name: str, entity_name: str, run_id: str + ) -> bool: + query = gql( + """ + query RunStoppedStatus($projectName: String, $entityName: String, $runId: String!) { + project(name:$projectName, entityName:$entityName) { + run(name:$runId) { + stopped + } + } + } + """ + ) + + response = self.gql( + query, + variable_values={ + "projectName": project_name, + "entityName": entity_name, + "runId": run_id, + }, + ) + + project = response.get("project", None) + if not project: + return False + run = project.get("run", None) + if not run: + return False + + status: bool = run["stopped"] + return status + + def format_project(self, project: str) -> str: + return re.sub(r"\W+", "-", project.lower()).strip("-_") + + @normalize_exceptions + def upsert_project( + self, + project: str, + id: Optional[str] = None, + description: Optional[str] = None, + entity: Optional[str] = None, + ) -> Dict[str, Any]: + """Create a new project. + + Arguments: + project (str): The project to create + description (str, optional): A description of this project + entity (str, optional): The entity to scope this project to. + """ + mutation = gql( + """ + mutation UpsertModel($name: String!, $id: String, $entity: String!, $description: String, $repo: String) { + upsertModel(input: { id: $id, name: $name, entityName: $entity, description: $description, repo: $repo }) { + model { + name + description + } + } + } + """ + ) + response = self.gql( + mutation, + variable_values={ + "name": self.format_project(project), + "entity": entity or self.settings("entity"), + "description": description, + "id": id, + }, + ) + # TODO(jhr): Commenting out 'repo' field for cling, add back + # 'description': description, 'repo': self.git.remote_url, 'id': id}) + result: Dict[str, Any] = response["upsertModel"]["model"] + return result + + @normalize_exceptions + def entity_is_team(self, entity: str) -> bool: + query = gql( + """ + query EntityIsTeam($entity: String!) 
{ + entity(name: $entity) { + id + isTeam + } + } + """ + ) + variable_values = { + "entity": entity, + } + + res = self.gql(query, variable_values) + if res.get("entity") is None: + raise Exception( + f"Error fetching entity {entity} " + "check that you have access to this entity" + ) + + is_team: bool = res["entity"]["isTeam"] + return is_team + + @normalize_exceptions + def get_project_run_queues(self, entity: str, project: str) -> List[Dict[str, str]]: + query = gql( + """ + query ProjectRunQueues($entity: String!, $projectName: String!){ + project(entityName: $entity, name: $projectName) { + runQueues { + id + name + createdBy + access + } + } + } + """ + ) + variable_values = { + "projectName": project, + "entity": entity, + } + + res = self.gql(query, variable_values) + if res.get("project") is None: + # circular dependency: (LAUNCH_DEFAULT_PROJECT = model-registry) + if project == "model-registry": + msg = ( + f"Error fetching run queues for {entity} " + "check that you have access to this entity and project" + ) + else: + msg = ( + f"Error fetching run queues for {entity}/{project} " + "check that you have access to this entity and project" + ) + + raise Exception(msg) + + project_run_queues: List[Dict[str, str]] = res["project"]["runQueues"] + return project_run_queues + + @normalize_exceptions + def create_default_resource_config( + self, + entity: str, + resource: str, + config: str, + template_variables: Optional[Dict[str, Union[float, int, str]]], + ) -> Optional[Dict[str, Any]]: + if not self.create_default_resource_config_introspection(): + raise Exception() + supports_template_vars, _ = self.push_to_run_queue_introspection() + + mutation_params = """ + $entityName: String!, + $resource: String!, + $config: JSONString! + """ + mutation_inputs = """ + entityName: $entityName, + resource: $resource, + config: $config + """ + + if supports_template_vars: + mutation_params += ", $templateVariables: JSONString" + mutation_inputs += ", templateVariables: $templateVariables" + else: + if template_variables is not None: + raise UnsupportedError( + "server does not support template variables, please update server instance to >=0.46" + ) + + variable_values = { + "entityName": entity, + "resource": resource, + "config": config, + } + if supports_template_vars: + if template_variables is not None: + variable_values["templateVariables"] = json.dumps(template_variables) + else: + variable_values["templateVariables"] = "{}" + + query = gql( + f""" + mutation createDefaultResourceConfig( + {mutation_params} + ) {{ + createDefaultResourceConfig( + input: {{ + {mutation_inputs} + }} + ) {{ + defaultResourceConfigID + success + }} + }} + """ + ) + + result: Optional[Dict[str, Any]] = self.gql(query, variable_values)[ + "createDefaultResourceConfig" + ] + return result + + @normalize_exceptions + def create_run_queue( + self, + entity: str, + project: str, + queue_name: str, + access: str, + prioritization_mode: Optional[str] = None, + config_id: Optional[str] = None, + ) -> Optional[Dict[str, Any]]: + ( + create_run_queue, + supports_drc, + supports_prioritization, + ) = self.create_run_queue_introspection() + if not create_run_queue: + raise UnsupportedError( + "run queue creation is not supported by this version of " + "wandb server. Consider updating to the latest version." + ) + if not supports_drc and config_id is not None: + raise UnsupportedError( + "default resource configurations are not supported by this version " + "of wandb server. Consider updating to the latest version." 
+ ) + if not supports_prioritization and prioritization_mode is not None: + raise UnsupportedError( + "launch prioritization is not supported by this version of " + "wandb server. Consider updating to the latest version." + ) + + if supports_prioritization: + query = gql( + """ + mutation createRunQueue( + $entity: String!, + $project: String!, + $queueName: String!, + $access: RunQueueAccessType!, + $prioritizationMode: RunQueuePrioritizationMode, + $defaultResourceConfigID: ID, + ) { + createRunQueue( + input: { + entityName: $entity, + projectName: $project, + queueName: $queueName, + access: $access, + prioritizationMode: $prioritizationMode + defaultResourceConfigID: $defaultResourceConfigID + } + ) { + success + queueID + } + } + """ + ) + variable_values = { + "entity": entity, + "project": project, + "queueName": queue_name, + "access": access, + "prioritizationMode": prioritization_mode, + "defaultResourceConfigID": config_id, + } + else: + query = gql( + """ + mutation createRunQueue( + $entity: String!, + $project: String!, + $queueName: String!, + $access: RunQueueAccessType!, + $defaultResourceConfigID: ID, + ) { + createRunQueue( + input: { + entityName: $entity, + projectName: $project, + queueName: $queueName, + access: $access, + defaultResourceConfigID: $defaultResourceConfigID + } + ) { + success + queueID + } + } + """ + ) + variable_values = { + "entity": entity, + "project": project, + "queueName": queue_name, + "access": access, + "defaultResourceConfigID": config_id, + } + + result: Optional[Dict[str, Any]] = self.gql(query, variable_values)[ + "createRunQueue" + ] + return result + + @normalize_exceptions + def push_to_run_queue_by_name( + self, + entity: str, + project: str, + queue_name: str, + run_spec: str, + template_variables: Optional[Dict[str, Union[int, float, str]]], + priority: Optional[int] = None, + ) -> Optional[Dict[str, Any]]: + self.push_to_run_queue_introspection() + """Queryless mutation, should be used before legacy fallback method.""" + + mutation_params = """ + $entityName: String!, + $projectName: String!, + $queueName: String!, + $runSpec: JSONString! 
+ """ + + mutation_input = """ + entityName: $entityName, + projectName: $projectName, + queueName: $queueName, + runSpec: $runSpec + """ + + variables: Dict[str, Any] = { + "entityName": entity, + "projectName": project, + "queueName": queue_name, + "runSpec": run_spec, + } + if self.server_push_to_run_queue_supports_priority: + if priority is not None: + variables["priority"] = priority + mutation_params += ", $priority: Int" + mutation_input += ", priority: $priority" + else: + if priority is not None: + raise UnsupportedError( + "server does not support priority, please update server instance to >=0.46" + ) + + if self.server_supports_template_variables: + if template_variables is not None: + variables.update( + {"templateVariableValues": json.dumps(template_variables)} + ) + mutation_params += ", $templateVariableValues: JSONString" + mutation_input += ", templateVariableValues: $templateVariableValues" + else: + if template_variables is not None: + raise UnsupportedError( + "server does not support template variables, please update server instance to >=0.46" + ) + + mutation = gql( + f""" + mutation pushToRunQueueByName( + {mutation_params} + ) {{ + pushToRunQueueByName( + input: {{ + {mutation_input} + }} + ) {{ + runQueueItemId + runSpec + }} + }} + """ + ) + + try: + result: Optional[Dict[str, Any]] = self.gql( + mutation, variables, check_retry_fn=util.no_retry_4xx + ).get("pushToRunQueueByName") + if not result: + return None + + if result.get("runSpec"): + run_spec = json.loads(str(result["runSpec"])) + result["runSpec"] = run_spec + + return result + except Exception as e: + if ( + 'Cannot query field "runSpec" on type "PushToRunQueueByNamePayload"' + not in str(e) + ): + return None + + mutation_no_runspec = gql( + """ + mutation pushToRunQueueByName( + $entityName: String!, + $projectName: String!, + $queueName: String!, + $runSpec: JSONString!, + ) { + pushToRunQueueByName( + input: { + entityName: $entityName, + projectName: $projectName, + queueName: $queueName, + runSpec: $runSpec + } + ) { + runQueueItemId + } + } + """ + ) + + try: + result = self.gql( + mutation_no_runspec, variables, check_retry_fn=util.no_retry_4xx + ).get("pushToRunQueueByName") + except Exception: + result = None + + return result + + @normalize_exceptions + def push_to_run_queue( + self, + queue_name: str, + launch_spec: Dict[str, str], + template_variables: Optional[dict], + project_queue: str, + priority: Optional[int] = None, + ) -> Optional[Dict[str, Any]]: + self.push_to_run_queue_introspection() + entity = launch_spec.get("queue_entity") or launch_spec["entity"] + run_spec = json.dumps(launch_spec) + + push_result = self.push_to_run_queue_by_name( + entity, project_queue, queue_name, run_spec, template_variables, priority + ) + + if push_result: + return push_result + + if priority is not None: + # Cannot proceed with legacy method if priority is set + return None + + """ Legacy Method """ + queues_found = self.get_project_run_queues(entity, project_queue) + matching_queues = [ + q + for q in queues_found + if q["name"] == queue_name + # ensure user has access to queue + and ( + # TODO: User created queues in the UI have USER access + q["access"] in ["PROJECT", "USER"] + or q["createdBy"] == self.default_entity + ) + ] + if not matching_queues: + # in the case of a missing default queue. create it + if queue_name == "default": + wandb.termlog( + f"No default queue existing for entity: {entity} in project: {project_queue}, creating one." 
+ ) + res = self.create_run_queue( + launch_spec["entity"], + project_queue, + queue_name, + access="PROJECT", + ) + + if res is None or res.get("queueID") is None: + wandb.termerror( + f"Unable to create default queue for entity: {entity} on project: {project_queue}. Run could not be added to a queue" + ) + return None + queue_id = res["queueID"] + + else: + if project_queue == "model-registry": + _msg = f"Unable to push to run queue {queue_name}. Queue not found." + else: + _msg = f"Unable to push to run queue {project_queue}/{queue_name}. Queue not found." + wandb.termwarn(_msg) + return None + elif len(matching_queues) > 1: + wandb.termerror( + f"Unable to push to run queue {queue_name}. More than one queue found with this name." + ) + return None + else: + queue_id = matching_queues[0]["id"] + spec_json = json.dumps(launch_spec) + variables = {"queueID": queue_id, "runSpec": spec_json} + + mutation_params = """ + $queueID: ID!, + $runSpec: JSONString! + """ + mutation_input = """ + queueID: $queueID, + runSpec: $runSpec + """ + if self.server_supports_template_variables: + if template_variables is not None: + mutation_params += ", $templateVariableValues: JSONString" + mutation_input += ", templateVariableValues: $templateVariableValues" + variables.update( + {"templateVariableValues": json.dumps(template_variables)} + ) + else: + if template_variables is not None: + raise UnsupportedError( + "server does not support template variables, please update server instance to >=0.46" + ) + + mutation = gql( + f""" + mutation pushToRunQueue( + {mutation_params} + ) {{ + pushToRunQueue( + input: {{{mutation_input}}} + ) {{ + runQueueItemId + }} + }} + """ + ) + + response = self.gql(mutation, variable_values=variables) + if not response.get("pushToRunQueue"): + raise CommError(f"Error pushing run queue item to queue {queue_name}.") + + result: Optional[Dict[str, Any]] = response["pushToRunQueue"] + return result + + @normalize_exceptions + def pop_from_run_queue( + self, + queue_name: str, + entity: Optional[str] = None, + project: Optional[str] = None, + agent_id: Optional[str] = None, + ) -> Optional[Dict[str, Any]]: + mutation = gql( + """ + mutation popFromRunQueue($entity: String!, $project: String!, $queueName: String!, $launchAgentId: ID) { + popFromRunQueue(input: { + entityName: $entity, + projectName: $project, + queueName: $queueName, + launchAgentId: $launchAgentId + }) { + runQueueItemId + runSpec + } + } + """ + ) + response = self.gql( + mutation, + variable_values={ + "entity": entity, + "project": project, + "queueName": queue_name, + "launchAgentId": agent_id, + }, + ) + result: Optional[Dict[str, Any]] = response["popFromRunQueue"] + return result + + @normalize_exceptions + def ack_run_queue_item(self, item_id: str, run_id: Optional[str] = None) -> bool: + mutation = gql( + """ + mutation ackRunQueueItem($itemId: ID!, $runId: String!) { + ackRunQueueItem(input: { runQueueItemId: $itemId, runName: $runId }) { + success + } + } + """ + ) + response = self.gql( + mutation, variable_values={"itemId": item_id, "runId": str(run_id)} + ) + if not response["ackRunQueueItem"]["success"]: + raise CommError( + "Error acking run queue item. 
Item may have already been acknowledged by another process" + ) + result: bool = response["ackRunQueueItem"]["success"] + return result + + @normalize_exceptions + def create_launch_agent_fields_introspection(self) -> List: + if self.create_launch_agent_input_info: + return self.create_launch_agent_input_info + query_string = """ + query ProbeServerCreateLaunchAgentInput { + CreateLaunchAgentInputInfoType: __type(name:"CreateLaunchAgentInput") { + inputFields{ + name + } + } + } + """ + + query = gql(query_string) + res = self.gql(query) + + self.create_launch_agent_input_info = [ + field.get("name", "") + for field in res.get("CreateLaunchAgentInputInfoType", {}).get( + "inputFields", [{}] + ) + ] + return self.create_launch_agent_input_info + + @normalize_exceptions + def create_launch_agent( + self, + entity: str, + project: str, + queues: List[str], + agent_config: Dict[str, Any], + version: str, + gorilla_agent_support: bool, + ) -> dict: + project_queues = self.get_project_run_queues(entity, project) + if not project_queues: + # create default queue if it doesn't already exist + default = self.create_run_queue( + entity, project, "default", access="PROJECT" + ) + if default is None or default.get("queueID") is None: + raise CommError( + "Unable to create default queue for {}/{}. No queues for agent to poll".format( + entity, project + ) + ) + project_queues = [{"id": default["queueID"], "name": "default"}] + polling_queue_ids = [ + q["id"] for q in project_queues if q["name"] in queues + ] # filter to poll specified queues + if len(polling_queue_ids) != len(queues): + raise CommError( + f"Could not start launch agent: Not all of requested queues ({', '.join(queues)}) found. " + f"Available queues for this project: {','.join([q['name'] for q in project_queues])}" + ) + + if not gorilla_agent_support: + # if gorilla doesn't support launch agents, return a client-generated id + return { + "success": True, + "launchAgentId": None, + } + + hostname = socket.gethostname() + + variable_values = { + "entity": entity, + "project": project, + "queues": polling_queue_ids, + "hostname": hostname, + } + + mutation_params = """ + $entity: String!, + $project: String!, + $queues: [ID!]!, + $hostname: String! 
+ """ + + mutation_input = """ + entityName: $entity, + projectName: $project, + runQueues: $queues, + hostname: $hostname + """ + + if "agentConfig" in self.create_launch_agent_fields_introspection(): + variable_values["agentConfig"] = json.dumps(agent_config) + mutation_params += ", $agentConfig: JSONString" + mutation_input += ", agentConfig: $agentConfig" + if "version" in self.create_launch_agent_fields_introspection(): + variable_values["version"] = version + mutation_params += ", $version: String" + mutation_input += ", version: $version" + + mutation = gql( + f""" + mutation createLaunchAgent( + {mutation_params} + ) {{ + createLaunchAgent( + input: {{ + {mutation_input} + }} + ) {{ + launchAgentId + }} + }} + """ + ) + result: dict = self.gql(mutation, variable_values)["createLaunchAgent"] + return result + + @normalize_exceptions + def update_launch_agent_status( + self, + agent_id: str, + status: str, + gorilla_agent_support: bool, + ) -> dict: + if not gorilla_agent_support: + # if gorilla doesn't support launch agents, this is a no-op + return { + "success": True, + } + + mutation = gql( + """ + mutation updateLaunchAgent($agentId: ID!, $agentStatus: String){ + updateLaunchAgent( + input: { + launchAgentId: $agentId + agentStatus: $agentStatus + } + ) { + success + } + } + """ + ) + variable_values = { + "agentId": agent_id, + "agentStatus": status, + } + result: dict = self.gql(mutation, variable_values)["updateLaunchAgent"] + return result + + @normalize_exceptions + def get_launch_agent(self, agent_id: str, gorilla_agent_support: bool) -> dict: + if not gorilla_agent_support: + return { + "id": None, + "name": "", + "stopPolling": False, + } + query = gql( + """ + query LaunchAgent($agentId: ID!) { + launchAgent(id: $agentId) { + id + name + runQueues + hostname + agentStatus + stopPolling + heartbeatAt + } + } + """ + ) + variable_values = { + "agentId": agent_id, + } + result: dict = self.gql(query, variable_values)["launchAgent"] + return result + + @normalize_exceptions + def upsert_run( + self, + id: Optional[str] = None, + name: Optional[str] = None, + project: Optional[str] = None, + host: Optional[str] = None, + group: Optional[str] = None, + tags: Optional[List[str]] = None, + config: Optional[dict] = None, + description: Optional[str] = None, + entity: Optional[str] = None, + state: Optional[str] = None, + display_name: Optional[str] = None, + notes: Optional[str] = None, + repo: Optional[str] = None, + job_type: Optional[str] = None, + program_path: Optional[str] = None, + commit: Optional[str] = None, + sweep_name: Optional[str] = None, + summary_metrics: Optional[str] = None, + num_retries: Optional[int] = None, + ) -> Tuple[dict, bool, Optional[List]]: + """Update a run. + + Arguments: + id (str, optional): The existing run to update + name (str, optional): The name of the run to create + group (str, optional): Name of the group this run is a part of + project (str, optional): The name of the project + host (str, optional): The name of the host + tags (list, optional): A list of tags to apply to the run + config (dict, optional): The latest config params + description (str, optional): A description of this project + entity (str, optional): The entity to scope this project to. + display_name (str, optional): The display name of this project + notes (str, optional): Notes about this run + repo (str, optional): Url of the program's repository. + state (str, optional): State of the program. + job_type (str, optional): Type of job, e.g 'train'. 
+ program_path (str, optional): Path to the program. + commit (str, optional): The Git SHA to associate the run with + sweep_name (str, optional): The name of the sweep this run is a part of + summary_metrics (str, optional): The JSON summary metrics + num_retries (int, optional): Number of retries + """ + query_string = """ + mutation UpsertBucket( + $id: String, + $name: String, + $project: String, + $entity: String, + $groupName: String, + $description: String, + $displayName: String, + $notes: String, + $commit: String, + $config: JSONString, + $host: String, + $debug: Boolean, + $program: String, + $repo: String, + $jobType: String, + $state: String, + $sweep: String, + $tags: [String!], + $summaryMetrics: JSONString, + ) { + upsertBucket(input: { + id: $id, + name: $name, + groupName: $groupName, + modelName: $project, + entityName: $entity, + description: $description, + displayName: $displayName, + notes: $notes, + config: $config, + commit: $commit, + host: $host, + debug: $debug, + jobProgram: $program, + jobRepo: $repo, + jobType: $jobType, + state: $state, + sweep: $sweep, + tags: $tags, + summaryMetrics: $summaryMetrics, + }) { + bucket { + id + name + displayName + description + config + sweepName + project { + id + name + entity { + id + name + } + } + historyLineCount + } + inserted + _Server_Settings_ + } + } + """ + self.server_settings_introspection() + + server_settings_string = ( + """ + serverSettings { + serverMessages{ + utfText + plainText + htmlText + messageType + messageLevel + } + } + """ + if self._server_settings_type + else "" + ) + + query_string = query_string.replace("_Server_Settings_", server_settings_string) + mutation = gql(query_string) + config_str = json.dumps(config) if config else None + if not description or description.isspace(): + description = None + + kwargs = {} + if num_retries is not None: + kwargs["num_retries"] = num_retries + + variable_values = { + "id": id, + "entity": entity or self.settings("entity"), + "name": name, + "project": project or util.auto_project_name(program_path), + "groupName": group, + "tags": tags, + "description": description, + "config": config_str, + "commit": commit, + "displayName": display_name, + "notes": notes, + "host": None if self.settings().get("anonymous") == "true" else host, + "debug": env.is_debug(env=self._environ), + "repo": repo, + "program": program_path, + "jobType": job_type, + "state": state, + "sweep": sweep_name, + "summaryMetrics": summary_metrics, + } + + # retry conflict errors for 2 minutes, default to no_auth_retry + check_retry_fn = util.make_check_retry_fn( + check_fn=util.check_retry_conflict_or_gone, + check_timedelta=datetime.timedelta(minutes=2), + fallback_retry_fn=util.no_retry_auth, + ) + + response = self.gql( + mutation, + variable_values=variable_values, + check_retry_fn=check_retry_fn, + **kwargs, + ) + + run_obj: Dict[str, Dict[str, Dict[str, str]]] = response["upsertBucket"][ + "bucket" + ] + project_obj: Dict[str, Dict[str, str]] = run_obj.get("project", {}) + if project_obj: + self.set_setting("project", project_obj["name"]) + entity_obj = project_obj.get("entity", {}) + if entity_obj: + self.set_setting("entity", entity_obj["name"]) + + server_messages = None + if self._server_settings_type: + server_messages = ( + response["upsertBucket"] + .get("serverSettings", {}) + .get("serverMessages", []) + ) + + return ( + response["upsertBucket"]["bucket"], + response["upsertBucket"]["inserted"], + server_messages, + ) + + @normalize_exceptions + def rewind_run( + self, + 
run_name: str, + metric_name: str, + metric_value: float, + program_path: Optional[str] = None, + entity: Optional[str] = None, + project: Optional[str] = None, + num_retries: Optional[int] = None, + ) -> dict: + """Rewinds a run to a previous state. + + Arguments: + run_name (str): The name of the run to rewind + metric_name (str): The name of the metric to rewind to + metric_value (float): The value of the metric to rewind to + program_path (str, optional): Path to the program + entity (str, optional): The entity to scope this project to + project (str, optional): The name of the project + num_retries (int, optional): Number of retries + + Returns: + A dict with the rewound run + + { + "id": "run_id", + "name": "run_name", + "displayName": "run_display_name", + "description": "run_description", + "config": "stringified_run_config_json", + "sweepName": "run_sweep_name", + "project": { + "id": "project_id", + "name": "project_name", + "entity": { + "id": "entity_id", + "name": "entity_name" + } + }, + "historyLineCount": 100, + } + """ + query_string = """ + mutation RewindRun($runName: String!, $entity: String, $project: String, $metricName: String!, $metricValue: Float!) { + rewindRun(input: {runName: $runName, entityName: $entity, projectName: $project, metricName: $metricName, metricValue: $metricValue}) { + rewoundRun { + id + name + displayName + description + config + sweepName + project { + id + name + entity { + id + name + } + } + historyLineCount + } + } + } + """ + + mutation = gql(query_string) + + kwargs = {} + if num_retries is not None: + kwargs["num_retries"] = num_retries + + variable_values = { + "runName": run_name, + "entity": entity or self.settings("entity"), + "project": project or util.auto_project_name(program_path), + "metricName": metric_name, + "metricValue": metric_value, + } + + # retry conflict errors for 2 minutes, default to no_auth_retry + check_retry_fn = util.make_check_retry_fn( + check_fn=util.check_retry_conflict_or_gone, + check_timedelta=datetime.timedelta(minutes=2), + fallback_retry_fn=util.no_retry_auth, + ) + + response = self.gql( + mutation, + variable_values=variable_values, + check_retry_fn=check_retry_fn, + **kwargs, + ) + + run_obj: Dict[str, Dict[str, Dict[str, str]]] = response.get( + "rewindRun", {} + ).get("rewoundRun", {}) + project_obj: Dict[str, Dict[str, str]] = run_obj.get("project", {}) + if project_obj: + self.set_setting("project", project_obj["name"]) + entity_obj = project_obj.get("entity", {}) + if entity_obj: + self.set_setting("entity", entity_obj["name"]) + + return run_obj + + @normalize_exceptions + def get_run_info( + self, + entity: str, + project: str, + name: str, + ) -> dict: + query = gql( + """ + query RunInfo($project: String!, $entity: String!, $name: String!) { + project(name: $project, entityName: $entity) { + run(name: $name) { + runInfo { + program + args + os + python + colab + executable + codeSaved + cpuCount + gpuCount + gpu + git { + remote + commit + } + } + } + } + } + """ + ) + variable_values = {"project": project, "entity": entity, "name": name} + res = self.gql(query, variable_values) + if res.get("project") is None: + raise CommError( + "Error fetching run info for {}/{}/{}. Check that this project exists and you have access to this entity and project".format( + entity, project, name + ) + ) + elif res["project"].get("run") is None: + raise CommError( + "Error fetching run info for {}/{}/{}. 
Check that this run id exists".format( + entity, project, name + ) + ) + run_info: dict = res["project"]["run"]["runInfo"] + return run_info + + @normalize_exceptions + def get_run_state(self, entity: str, project: str, name: str) -> str: + query = gql( + """ + query RunState( + $project: String!, + $entity: String!, + $name: String!) { + project(name: $project, entityName: $entity) { + run(name: $name) { + state + } + } + } + """ + ) + variable_values = { + "project": project, + "entity": entity, + "name": name, + } + res = self.gql(query, variable_values) + if res.get("project") is None or res["project"].get("run") is None: + raise CommError(f"Error fetching run state for {entity}/{project}/{name}.") + run_state: str = res["project"]["run"]["state"] + return run_state + + @normalize_exceptions + def create_run_files_introspection(self) -> bool: + _, _, mutations = self.server_info_introspection() + return "createRunFiles" in mutations + + @normalize_exceptions + def upload_urls( + self, + project: str, + files: Union[List[str], Dict[str, IO]], + run: Optional[str] = None, + entity: Optional[str] = None, + description: Optional[str] = None, + ) -> Tuple[str, List[str], Dict[str, Dict[str, Any]]]: + """Generate temporary resumable upload urls. + + Arguments: + project (str): The project to download + files (list or dict): The filenames to upload + run (str, optional): The run to upload to + entity (str, optional): The entity to scope this project to. + description (str, optional): description + + Returns: + (run_id, upload_headers, file_info) + run_id: id of run we uploaded files to + upload_headers: A list of headers to use when uploading files. + file_info: A dict of filenames and urls. + { + "run_id": "run_id", + "upload_headers": [""], + "file_info": [ + { "weights.h5": { "uploadUrl": "https://weights.url" } }, + { "model.json": { "uploadUrl": "https://model.json" } } + ] + } + """ + run_name = run or self.current_run_id + assert run_name, "run must be specified" + entity = entity or self.settings("entity") + assert entity, "entity must be specified" + + has_create_run_files_mutation = self.create_run_files_introspection() + if not has_create_run_files_mutation: + return self.legacy_upload_urls(project, files, run, entity, description) + + query = gql( + """ + mutation CreateRunFiles($entity: String!, $project: String!, $run: String!, $files: [String!]!) { + createRunFiles(input: {entityName: $entity, projectName: $project, runName: $run, files: $files}) { + runID + uploadHeaders + files { + name + uploadUrl + } + } + } + """ + ) + + query_result = self.gql( + query, + variable_values={ + "project": project, + "run": run_name, + "entity": entity, + "files": [file for file in files], + }, + ) + + result = query_result["createRunFiles"] + run_id = result["runID"] + if not run_id: + raise CommError( + f"Error uploading files to {entity}/{project}/{run_name}. Check that this project exists and you have access to this entity and project" + ) + file_name_urls = {file["name"]: file for file in result["files"]} + return run_id, result["uploadHeaders"], file_name_urls + + def legacy_upload_urls( + self, + project: str, + files: Union[List[str], Dict[str, IO]], + run: Optional[str] = None, + entity: Optional[str] = None, + description: Optional[str] = None, + ) -> Tuple[str, List[str], Dict[str, Dict[str, Any]]]: + """Generate temporary resumable upload urls. + + A new mutation createRunFiles was introduced after 0.15.4. + This function is used to support older versions. 
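+
+ Example (an illustrative calling sketch; the project, run id, and
+ file name are hypothetical):
+
+ run_id, headers, file_info = api.legacy_upload_urls(
+ "my-project", ["weights.h5"], run="abc123", entity="my-team"
+ )
+ upload_url = file_info["weights.h5"]["uploadUrl"]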
+ """ + query = gql( + """ + query RunUploadUrls($name: String!, $files: [String]!, $entity: String, $run: String!, $description: String) { + model(name: $name, entityName: $entity) { + bucket(name: $run, desc: $description) { + id + files(names: $files) { + uploadHeaders + edges { + node { + name + url(upload: true) + updatedAt + } + } + } + } + } + } + """ + ) + run_id = run or self.current_run_id + assert run_id, "run must be specified" + entity = entity or self.settings("entity") + query_result = self.gql( + query, + variable_values={ + "name": project, + "run": run_id, + "entity": entity, + "files": [file for file in files], + "description": description, + }, + ) + + run_obj = query_result["model"]["bucket"] + if run_obj: + for file_node in run_obj["files"]["edges"]: + file = file_node["node"] + # we previously used "url" field but now use "uploadUrl" + # replace the "url" field with "uploadUrl for downstream compatibility + if "url" in file and "uploadUrl" not in file: + file["uploadUrl"] = file.pop("url") + + result = { + file["name"]: file for file in self._flatten_edges(run_obj["files"]) + } + return run_obj["id"], run_obj["files"]["uploadHeaders"], result + else: + raise CommError(f"Run does not exist {entity}/{project}/{run_id}.") + + @normalize_exceptions + def download_urls( + self, + project: str, + run: Optional[str] = None, + entity: Optional[str] = None, + ) -> Dict[str, Dict[str, str]]: + """Generate download urls. + + Arguments: + project (str): The project to download + run (str): The run to upload to + entity (str, optional): The entity to scope this project to. Defaults to wandb models + + Returns: + A dict of extensions and urls + + { + 'weights.h5': { "url": "https://weights.url", "updatedAt": '2013-04-26T22:22:23.832Z', 'md5': 'mZFLkyvTelC5g8XnyQrpOw==' }, + 'model.json': { "url": "https://model.url", "updatedAt": '2013-04-26T22:22:23.832Z', 'md5': 'mZFLkyvTelC5g8XnyQrpOw==' } + } + """ + query = gql( + """ + query RunDownloadUrls($name: String!, $entity: String, $run: String!) { + model(name: $name, entityName: $entity) { + bucket(name: $run) { + files { + edges { + node { + name + url + md5 + updatedAt + } + } + } + } + } + } + """ + ) + run = run or self.current_run_id + assert run, "run must be specified" + entity = entity or self.settings("entity") + query_result = self.gql( + query, + variable_values={ + "name": project, + "run": run, + "entity": entity, + }, + ) + if query_result["model"] is None: + raise CommError(f"Run does not exist {entity}/{project}/{run}.") + files = self._flatten_edges(query_result["model"]["bucket"]["files"]) + return {file["name"]: file for file in files if file} + + @normalize_exceptions + def download_url( + self, + project: str, + file_name: str, + run: Optional[str] = None, + entity: Optional[str] = None, + ) -> Optional[Dict[str, str]]: + """Generate download urls. + + Arguments: + project (str): The project to download + file_name (str): The name of the file to download + run (str): The run to upload to + entity (str, optional): The entity to scope this project to. Defaults to wandb models + + Returns: + A dict of extensions and urls + + { "url": "https://weights.url", "updatedAt": '2013-04-26T22:22:23.832Z', 'md5': 'mZFLkyvTelC5g8XnyQrpOw==' } + + """ + query = gql( + """ + query RunDownloadUrl($name: String!, $fileName: String!, $entity: String, $run: String!) 
{ + model(name: $name, entityName: $entity) { + bucket(name: $run) { + files(names: [$fileName]) { + edges { + node { + name + url + md5 + updatedAt + } + } + } + } + } + } + """ + ) + run = run or self.current_run_id + assert run, "run must be specified" + query_result = self.gql( + query, + variable_values={ + "name": project, + "run": run, + "fileName": file_name, + "entity": entity or self.settings("entity"), + }, + ) + if query_result["model"]: + files = self._flatten_edges(query_result["model"]["bucket"]["files"]) + return files[0] if len(files) > 0 and files[0].get("updatedAt") else None + else: + return None + + @normalize_exceptions + def download_file(self, url: str) -> Tuple[int, requests.Response]: + """Initiate a streaming download. + + Arguments: + url (str): The url to download + + Returns: + A tuple of the content length and the streaming response + """ + check_httpclient_logger_handler() + + http_headers = _thread_local_api_settings.headers or {} + + auth = None + if self.access_token is not None: + http_headers["Authorization"] = f"Bearer {self.access_token}" + elif _thread_local_api_settings.cookies is None: + auth = ("api", self.api_key or "") + + response = requests.get( + url, + auth=auth, + cookies=_thread_local_api_settings.cookies or {}, + headers=http_headers, + stream=True, + ) + response.raise_for_status() + return int(response.headers.get("content-length", 0)), response + + @normalize_exceptions + def download_write_file( + self, + metadata: Dict[str, str], + out_dir: Optional[str] = None, + ) -> Tuple[str, Optional[requests.Response]]: + """Download a file from a run and write it to wandb/. + + Arguments: + metadata (obj): The metadata object for the file to download. Comes from Api.download_urls(). + out_dir (str, optional): The directory to write the file to. Defaults to wandb/ + + Returns: + A tuple of the file's local path and the streaming response. The streaming response is None if the file + already existed and was up-to-date. 
+ """ + filename = metadata["name"] + path = os.path.join(out_dir or self.settings("wandb_dir"), filename) + if self.file_current(filename, B64MD5(metadata["md5"])): + return path, None + + size, response = self.download_file(metadata["url"]) + + with util.fsync_open(path, "wb") as file: + for data in response.iter_content(chunk_size=1024): + file.write(data) + + return path, response + + def upload_file_azure( + self, url: str, file: Any, extra_headers: Dict[str, str] + ) -> None: + """Upload a file to azure.""" + from azure.core.exceptions import AzureError # type: ignore + + # Configure the client without retries so our existing logic can handle them + client = self._azure_blob_module.BlobClient.from_blob_url( + url, retry_policy=self._azure_blob_module.LinearRetry(retry_total=0) + ) + try: + if extra_headers.get("Content-MD5") is not None: + md5: Optional[bytes] = base64.b64decode(extra_headers["Content-MD5"]) + else: + md5 = None + content_settings = self._azure_blob_module.ContentSettings( + content_md5=md5, + content_type=extra_headers.get("Content-Type"), + ) + client.upload_blob( + file, + max_concurrency=4, + length=len(file), + overwrite=True, + content_settings=content_settings, + ) + except AzureError as e: + if hasattr(e, "response"): + response = requests.models.Response() + response.status_code = e.response.status_code + response.headers = e.response.headers + raise requests.exceptions.RequestException(e.message, response=response) + else: + raise requests.exceptions.ConnectionError(e.message) + + def upload_multipart_file_chunk( + self, + url: str, + upload_chunk: bytes, + extra_headers: Optional[Dict[str, str]] = None, + ) -> Optional[requests.Response]: + """Upload a file chunk to S3 with failure resumption. + + Arguments: + url: The url to download + upload_chunk: The path to the file you want to upload + extra_headers: A dictionary of extra headers to send with the request + + Returns: + The `requests` library response object + """ + check_httpclient_logger_handler() + try: + if env.is_debug(env=self._environ): + logger.debug("upload_file: %s", url) + response = self._upload_file_session.put( + url, data=upload_chunk, headers=extra_headers + ) + if env.is_debug(env=self._environ): + logger.debug("upload_file: %s complete", url) + response.raise_for_status() + except requests.exceptions.RequestException as e: + logger.error(f"upload_file exception {url}: {e}") + request_headers = e.request.headers if e.request is not None else "" + logger.error(f"upload_file request headers: {request_headers!r}") + response_content = e.response.content if e.response is not None else "" + logger.error(f"upload_file response body: {response_content!r}") + status_code = e.response.status_code if e.response is not None else 0 + # S3 reports retryable request timeouts out-of-band + is_aws_retryable = status_code == 400 and "RequestTimeout" in str( + response_content + ) + # Retry errors from cloud storage or local network issues + if ( + status_code in (308, 408, 409, 429, 500, 502, 503, 504) + or isinstance( + e, + (requests.exceptions.Timeout, requests.exceptions.ConnectionError), + ) + or is_aws_retryable + ): + _e = retry.TransientError(exc=e) + raise _e.with_traceback(sys.exc_info()[2]) + else: + wandb._sentry.reraise(e) + return response + + def upload_file( + self, + url: str, + file: IO[bytes], + callback: Optional["ProgressFn"] = None, + extra_headers: Optional[Dict[str, str]] = None, + ) -> Optional[requests.Response]: + """Upload a file to W&B with failure resumption. 
+ + Arguments: + url: The url to download + file: The path to the file you want to upload + callback: A callback which is passed the number of + bytes uploaded since the last time it was called, used to report progress + extra_headers: A dictionary of extra headers to send with the request + + Returns: + The `requests` library response object + """ + check_httpclient_logger_handler() + extra_headers = extra_headers.copy() if extra_headers else {} + response: Optional[requests.Response] = None + progress = Progress(file, callback=callback) + try: + if "x-ms-blob-type" in extra_headers and self._azure_blob_module: + self.upload_file_azure(url, progress, extra_headers) + else: + if "x-ms-blob-type" in extra_headers: + wandb.termwarn( + "Azure uploads over 256MB require the azure SDK, install with pip install wandb[azure]", + repeat=False, + ) + if env.is_debug(env=self._environ): + logger.debug("upload_file: %s", url) + response = self._upload_file_session.put( + url, data=progress, headers=extra_headers + ) + if env.is_debug(env=self._environ): + logger.debug("upload_file: %s complete", url) + response.raise_for_status() + except requests.exceptions.RequestException as e: + logger.error(f"upload_file exception {url}: {e}") + request_headers = e.request.headers if e.request is not None else "" + logger.error(f"upload_file request headers: {request_headers}") + response_content = e.response.content if e.response is not None else "" + logger.error(f"upload_file response body: {response_content!r}") + status_code = e.response.status_code if e.response is not None else 0 + # S3 reports retryable request timeouts out-of-band + is_aws_retryable = ( + "x-amz-meta-md5" in extra_headers + and status_code == 400 + and "RequestTimeout" in str(response_content) + ) + # We need to rewind the file for the next retry (the file passed in is `seek`'ed to 0) + progress.rewind() + # Retry errors from cloud storage or local network issues + if ( + status_code in (308, 408, 409, 429, 500, 502, 503, 504) + or isinstance( + e, + (requests.exceptions.Timeout, requests.exceptions.ConnectionError), + ) + or is_aws_retryable + ): + _e = retry.TransientError(exc=e) + raise _e.with_traceback(sys.exc_info()[2]) + else: + wandb._sentry.reraise(e) + + return response + + @normalize_exceptions + def register_agent( + self, + host: str, + sweep_id: Optional[str] = None, + project_name: Optional[str] = None, + entity: Optional[str] = None, + ) -> dict: + """Register a new agent. + + Arguments: + host (str): hostname + sweep_id (str): sweep id + project_name: (str): model that contains sweep + entity: (str): entity that contains sweep + """ + mutation = gql( + """ + mutation CreateAgent( + $host: String! + $projectName: String, + $entityName: String, + $sweep: String! + ) { + createAgent(input: { + host: $host, + projectName: $projectName, + entityName: $entityName, + sweep: $sweep, + }) { + agent { + id + } + } + } + """ + ) + if entity is None: + entity = self.settings("entity") + if project_name is None: + project_name = self.settings("project") + + response = self.gql( + mutation, + variable_values={ + "host": host, + "entityName": entity, + "projectName": project_name, + "sweep": sweep_id, + }, + check_retry_fn=util.no_retry_4xx, + ) + result: dict = response["createAgent"]["agent"] + return result + + def agent_heartbeat( + self, agent_id: str, metrics: dict, run_states: dict + ) -> List[Dict[str, Any]]: + """Notify server about agent state, receive commands. 
+ + Arguments: + agent_id (str): agent_id + metrics (dict): system metrics + run_states (dict): run_id: state mapping + Returns: + List of commands to execute. + """ + mutation = gql( + """ + mutation Heartbeat( + $id: ID!, + $metrics: JSONString, + $runState: JSONString + ) { + agentHeartbeat(input: { + id: $id, + metrics: $metrics, + runState: $runState + }) { + agent { + id + } + commands + } + } + """ + ) + + if agent_id is None: + raise ValueError("Cannot call heartbeat with an unregistered agent.") + + try: + response = self.gql( + mutation, + variable_values={ + "id": agent_id, + "metrics": json.dumps(metrics), + "runState": json.dumps(run_states), + }, + timeout=60, + ) + except Exception as e: + # GQL raises exceptions with stringified python dictionaries :/ + message = ast.literal_eval(e.args[0])["message"] + logger.error("Error communicating with W&B: %s", message) + return [] + else: + result: List[Dict[str, Any]] = json.loads( + response["agentHeartbeat"]["commands"] + ) + return result + + @staticmethod + def _validate_config_and_fill_distribution(config: dict) -> dict: + # verify that parameters are well specified. + # TODO(dag): deprecate this in favor of jsonschema validation once + # apiVersion 2 is released and local controller is integrated with + # wandb/client. + + # avoid modifying the original config dict in + # case it is reused outside the calling func + config = deepcopy(config) + + # explicitly cast to dict in case config was passed as a sweepconfig + # sweepconfig does not serialize cleanly to yaml and breaks graphql, + # but it is a subclass of dict, so this conversion is clean + config = dict(config) + + if "parameters" not in config: + # still shows an anaconda warning, but doesn't error + return config + + for parameter_name in config["parameters"]: + parameter = config["parameters"][parameter_name] + if "min" in parameter and "max" in parameter: + if "distribution" not in parameter: + if isinstance(parameter["min"], int) and isinstance( + parameter["max"], int + ): + parameter["distribution"] = "int_uniform" + elif isinstance(parameter["min"], float) and isinstance( + parameter["max"], float + ): + parameter["distribution"] = "uniform" + else: + raise ValueError( + "Parameter {} is ambiguous, please specify bounds as both floats (for a float_" + "uniform distribution) or ints (for an int_uniform distribution).".format( + parameter_name + ) + ) + return config + + @normalize_exceptions + def upsert_sweep( + self, + config: dict, + controller: Optional[str] = None, + launch_scheduler: Optional[str] = None, + scheduler: Optional[str] = None, + obj_id: Optional[str] = None, + project: Optional[str] = None, + entity: Optional[str] = None, + state: Optional[str] = None, + prior_runs: Optional[List[str]] = None, + template_variable_values: Optional[Dict[str, Any]] = None, + ) -> Tuple[str, List[str]]: + """Upsert a sweep object. 
+ + Arguments: + config (dict): sweep config (will be converted to yaml) + controller (str): controller to use + launch_scheduler (str): launch scheduler to use + scheduler (str): scheduler to use + obj_id (str): object id + project (str): project to use + entity (str): entity to use + state (str): state + prior_runs (list): IDs of existing runs to add to the sweep + template_variable_values (dict): template variable values + """ + project_query = """ + project { + id + name + entity { + id + name + } + } + """ + mutation_str = """ + mutation UpsertSweep( + $id: ID, + $config: String, + $description: String, + $entityName: String, + $projectName: String, + $controller: JSONString, + $scheduler: JSONString, + $state: String, + $priorRunsFilters: JSONString, + ) { + upsertSweep(input: { + id: $id, + config: $config, + description: $description, + entityName: $entityName, + projectName: $projectName, + controller: $controller, + scheduler: $scheduler, + state: $state, + priorRunsFilters: $priorRunsFilters, + }) { + sweep { + name + _PROJECT_QUERY_ + } + configValidationWarnings + } + } + """ + # TODO(jhr): we need protocol versioning to know schema is not supported + # for now we will just try both new and old query + mutation_5 = gql( + mutation_str.replace( + "$controller: JSONString,", + "$controller: JSONString,$launchScheduler: JSONString, $templateVariableValues: JSONString,", + ) + .replace( + "controller: $controller,", + "controller: $controller,launchScheduler: $launchScheduler,templateVariableValues: $templateVariableValues,", + ) + .replace("_PROJECT_QUERY_", project_query) + ) + # launchScheduler was introduced in core v0.14.0 + mutation_4 = gql( + mutation_str.replace( + "$controller: JSONString,", + "$controller: JSONString,$launchScheduler: JSONString,", + ) + .replace( + "controller: $controller,", + "controller: $controller,launchScheduler: $launchScheduler", + ) + .replace("_PROJECT_QUERY_", project_query) + ) + + # mutation 3 maps to backend that can support CLI version of at least 0.10.31 + mutation_3 = gql(mutation_str.replace("_PROJECT_QUERY_", project_query)) + mutation_2 = gql( + mutation_str.replace("_PROJECT_QUERY_", project_query).replace( + "configValidationWarnings", "" + ) + ) + mutation_1 = gql( + mutation_str.replace("_PROJECT_QUERY_", "").replace( + "configValidationWarnings", "" + ) + ) + + # TODO(dag): replace this with a query for protocol versioning + mutations = [mutation_5, mutation_4, mutation_3, mutation_2, mutation_1] + + config = self._validate_config_and_fill_distribution(config) + + # Silly, but attr-dicts like EasyDicts don't serialize correctly to yaml. + # This sanitizes them with a round trip pass through json to get a regular dict. 
+ config_str = yaml.dump( + json.loads(json.dumps(config)), Dumper=util.NonOctalStringDumper + ) + filters = None + if prior_runs: + filters = json.dumps({"$or": [{"name": r} for r in prior_runs]}) + + err: Optional[Exception] = None + for mutation in mutations: + try: + variables = { + "id": obj_id, + "config": config_str, + "description": config.get("description"), + "entityName": entity or self.settings("entity"), + "projectName": project or self.settings("project"), + "controller": controller, + "launchScheduler": launch_scheduler, + "templateVariableValues": json.dumps(template_variable_values), + "scheduler": scheduler, + "priorRunsFilters": filters, + } + if state: + variables["state"] = state + + response = self.gql( + mutation, + variable_values=variables, + check_retry_fn=util.no_retry_4xx, + ) + except UsageError as e: + raise e + except Exception as e: + # graphql schema exception is generic + err = e + continue + err = None + break + if err: + raise err + + sweep: Dict[str, Dict[str, Dict]] = response["upsertSweep"]["sweep"] + project_obj: Dict[str, Dict] = sweep.get("project", {}) + if project_obj: + self.set_setting("project", project_obj["name"]) + entity_obj: dict = project_obj.get("entity", {}) + if entity_obj: + self.set_setting("entity", entity_obj["name"]) + + warnings = response["upsertSweep"].get("configValidationWarnings", []) + return response["upsertSweep"]["sweep"]["name"], warnings + + @normalize_exceptions + def create_anonymous_api_key(self) -> str: + """Create a new API key belonging to a new anonymous user.""" + mutation = gql( + """ + mutation CreateAnonymousApiKey { + createAnonymousEntity(input: {}) { + apiKey { + name + } + } + } + """ + ) + + response = self.gql(mutation, variable_values={}) + key: str = str(response["createAnonymousEntity"]["apiKey"]["name"]) + return key + + @staticmethod + def file_current(fname: str, md5: B64MD5) -> bool: + """Checksum a file and compare the md5 with the known md5.""" + return os.path.isfile(fname) and md5_file_b64(fname) == md5 + + @normalize_exceptions + def pull( + self, project: str, run: Optional[str] = None, entity: Optional[str] = None + ) -> "List[requests.Response]": + """Download files from W&B. + + Arguments: + project (str): The project to download + run (str, optional): The run to upload to + entity (str, optional): The entity to scope this project to. Defaults to wandb models + + Returns: + The `requests` library response object + """ + project, run = self.parse_slug(project, run=run) + urls = self.download_urls(project, run, entity) + responses = [] + for filename in urls: + _, response = self.download_write_file(urls[filename]) + if response: + responses.append(response) + + return responses + + def get_project(self) -> str: + project: str = self.default_settings.get("project") or self.settings("project") + return project + + @normalize_exceptions + def push( + self, + files: Union[List[str], Dict[str, IO]], + run: Optional[str] = None, + entity: Optional[str] = None, + project: Optional[str] = None, + description: Optional[str] = None, + force: bool = True, + progress: Union[TextIO, bool] = False, + ) -> "List[Optional[requests.Response]]": + """Uploads multiple files to W&B. + + Arguments: + files (list or dict): The filenames to upload, when dict the values are open files + run (str, optional): The run to upload to + entity (str, optional): The entity to scope this project to. Defaults to wandb models + project (str, optional): The name of the project to upload to. Defaults to the one in settings. 
+ description (str, optional): The description of the changes + force (bool, optional): Whether to prevent push if git has uncommitted changes + progress (callable, or stream): If callable, will be called with (chunk_bytes, + total_bytes) as argument else if True, renders a progress bar to stream. + + Returns: + A list of `requests.Response` objects + """ + if project is None: + project = self.get_project() + if project is None: + raise CommError("No project configured.") + if run is None: + run = self.current_run_id + + # TODO(adrian): we use a retriable version of self.upload_file() so + # will never retry self.upload_urls() here. Instead, maybe we should + # make push itself retriable. + _, upload_headers, result = self.upload_urls( + project, + files, + run, + entity, + ) + extra_headers = {} + for upload_header in upload_headers: + key, val = upload_header.split(":", 1) + extra_headers[key] = val + responses = [] + for file_name, file_info in result.items(): + file_url = file_info["uploadUrl"] + + # If the upload URL is relative, fill it in with the base URL, + # since it's a proxied file store like the on-prem VM. + if file_url.startswith("/"): + file_url = f"{self.api_url}{file_url}" + + try: + # To handle Windows paths + # TODO: this doesn't handle absolute paths... + normal_name = os.path.join(*file_name.split("/")) + open_file = ( + files[file_name] + if isinstance(files, dict) + else open(normal_name, "rb") + ) + except OSError: + print(f"{file_name} does not exist") + continue + if progress is False: + responses.append( + self.upload_file_retry( + file_info["uploadUrl"], open_file, extra_headers=extra_headers + ) + ) + else: + if callable(progress): + responses.append( # type: ignore + self.upload_file_retry( + file_url, open_file, progress, extra_headers=extra_headers + ) + ) + else: + length = os.fstat(open_file.fileno()).st_size + with click.progressbar( + file=progress, # type: ignore + length=length, + label=f"Uploading file: {file_name}", + fill_char=click.style("&", fg="green"), + ) as bar: + responses.append( + self.upload_file_retry( + file_url, + open_file, + lambda bites, _: bar.update(bites), + extra_headers=extra_headers, + ) + ) + open_file.close() + return responses + + def link_artifact( + self, + client_id: str, + server_id: str, + portfolio_name: str, + entity: str, + project: str, + aliases: Sequence[str], + ) -> Dict[str, Any]: + template = """ + mutation LinkArtifact( + $artifactPortfolioName: String!, + $entityName: String!, + $projectName: String!, + $aliases: [ArtifactAliasInput!], + ID_TYPE + ) { + linkArtifact(input: { + artifactPortfolioName: $artifactPortfolioName, + entityName: $entityName, + projectName: $projectName, + aliases: $aliases, + ID_VALUE + }) { + versionIndex + } + } + """ + + def replace(a: str, b: str) -> None: + nonlocal template + template = template.replace(a, b) + + if server_id: + replace("ID_TYPE", "$artifactID: ID") + replace("ID_VALUE", "artifactID: $artifactID") + elif client_id: + replace("ID_TYPE", "$clientID: ID") + replace("ID_VALUE", "clientID: $clientID") + + variable_values = { + "clientID": client_id, + "artifactID": server_id, + "artifactPortfolioName": portfolio_name, + "entityName": entity, + "projectName": project, + "aliases": [ + {"alias": alias, "artifactCollectionName": portfolio_name} + for alias in aliases + ], + } + + mutation = gql(template) + response = self.gql(mutation, variable_values=variable_values) + link_artifact: Dict[str, Any] = response["linkArtifact"] + return link_artifact + + def 
use_artifact( + self, + artifact_id: str, + entity_name: Optional[str] = None, + project_name: Optional[str] = None, + run_name: Optional[str] = None, + use_as: Optional[str] = None, + ) -> Optional[Dict[str, Any]]: + query_template = """ + mutation UseArtifact( + $entityName: String!, + $projectName: String!, + $runName: String!, + $artifactID: ID!, + _USED_AS_TYPE_ + ) { + useArtifact(input: { + entityName: $entityName, + projectName: $projectName, + runName: $runName, + artifactID: $artifactID, + _USED_AS_VALUE_ + }) { + artifact { + id + digest + description + state + createdAt + metadata + } + } + } + """ + + artifact_types = self.server_use_artifact_input_introspection() + if "usedAs" in artifact_types: + query_template = query_template.replace( + "_USED_AS_TYPE_", "$usedAs: String" + ).replace("_USED_AS_VALUE_", "usedAs: $usedAs") + else: + query_template = query_template.replace("_USED_AS_TYPE_", "").replace( + "_USED_AS_VALUE_", "" + ) + + query = gql(query_template) + + entity_name = entity_name or self.settings("entity") + project_name = project_name or self.settings("project") + run_name = run_name or self.current_run_id + + response = self.gql( + query, + variable_values={ + "entityName": entity_name, + "projectName": project_name, + "runName": run_name, + "artifactID": artifact_id, + "usedAs": use_as, + }, + ) + + if response["useArtifact"]["artifact"]: + artifact: Dict[str, Any] = response["useArtifact"]["artifact"] + return artifact + return None + + def create_artifact_type( + self, + artifact_type_name: str, + entity_name: Optional[str] = None, + project_name: Optional[str] = None, + description: Optional[str] = None, + ) -> Optional[str]: + mutation = gql( + """ + mutation CreateArtifactType( + $entityName: String!, + $projectName: String!, + $artifactTypeName: String!, + $description: String + ) { + createArtifactType(input: { + entityName: $entityName, + projectName: $projectName, + name: $artifactTypeName, + description: $description + }) { + artifactType { + id + } + } + } + """ + ) + entity_name = entity_name or self.settings("entity") + project_name = project_name or self.settings("project") + response = self.gql( + mutation, + variable_values={ + "entityName": entity_name, + "projectName": project_name, + "artifactTypeName": artifact_type_name, + "description": description, + }, + ) + _id: Optional[str] = response["createArtifactType"]["artifactType"]["id"] + return _id + + def server_artifact_introspection(self) -> List[str]: + query_string = """ + query ProbeServerArtifact { + ArtifactInfoType: __type(name:"Artifact") { + fields { + name + } + } + } + """ + + if self.server_artifact_fields_info is None: + query = gql(query_string) + res = self.gql(query) + input_fields = res.get("ArtifactInfoType", {}).get("fields", [{}]) + self.server_artifact_fields_info = [ + field["name"] for field in input_fields if "name" in field + ] + + return self.server_artifact_fields_info + + def server_create_artifact_introspection(self) -> List[str]: + query_string = """ + query ProbeServerCreateArtifactInput { + CreateArtifactInputInfoType: __type(name:"CreateArtifactInput") { + inputFields{ + name + } + } + } + """ + + if self.server_create_artifact_input_info is None: + query = gql(query_string) + res = self.gql(query) + input_fields = res.get("CreateArtifactInputInfoType", {}).get( + "inputFields", [{}] + ) + self.server_create_artifact_input_info = [ + field["name"] for field in input_fields if "name" in field + ] + + return self.server_create_artifact_input_info + + def 
_get_create_artifact_mutation( + self, + fields: List, + history_step: Optional[int], + distributed_id: Optional[str], + ) -> str: + types = "" + values = "" + + if "historyStep" in fields and history_step not in [0, None]: + types += "$historyStep: Int64!," + values += "historyStep: $historyStep," + + if distributed_id: + types += "$distributedID: String," + values += "distributedID: $distributedID," + + if "clientID" in fields: + types += "$clientID: ID," + values += "clientID: $clientID," + + if "sequenceClientID" in fields: + types += "$sequenceClientID: ID," + values += "sequenceClientID: $sequenceClientID," + + if "enableDigestDeduplication" in fields: + values += "enableDigestDeduplication: true," + + if "ttlDurationSeconds" in fields: + types += "$ttlDurationSeconds: Int64," + values += "ttlDurationSeconds: $ttlDurationSeconds," + + if "tags" in fields: + types += "$tags: [TagInput!]," + values += "tags: $tags," + + query_template = """ + mutation CreateArtifact( + $artifactTypeName: String!, + $artifactCollectionNames: [String!], + $entityName: String!, + $projectName: String!, + $runName: String, + $description: String, + $digest: String!, + $aliases: [ArtifactAliasInput!], + $metadata: JSONString, + _CREATE_ARTIFACT_ADDITIONAL_TYPE_ + ) { + createArtifact(input: { + artifactTypeName: $artifactTypeName, + artifactCollectionNames: $artifactCollectionNames, + entityName: $entityName, + projectName: $projectName, + runName: $runName, + description: $description, + digest: $digest, + digestAlgorithm: MANIFEST_MD5, + aliases: $aliases, + metadata: $metadata, + _CREATE_ARTIFACT_ADDITIONAL_VALUE_ + }) { + artifact { + id + state + artifactSequence { + id + latestArtifact { + id + versionIndex + } + } + } + } + } + """ + + return query_template.replace( + "_CREATE_ARTIFACT_ADDITIONAL_TYPE_", types + ).replace("_CREATE_ARTIFACT_ADDITIONAL_VALUE_", values) + + def create_artifact( + self, + artifact_type_name: str, + artifact_collection_name: str, + digest: str, + client_id: Optional[str] = None, + sequence_client_id: Optional[str] = None, + entity_name: Optional[str] = None, + project_name: Optional[str] = None, + run_name: Optional[str] = None, + description: Optional[str] = None, + metadata: Optional[Dict] = None, + ttl_duration_seconds: Optional[int] = None, + aliases: Optional[List[Dict[str, str]]] = None, + tags: Optional[List[Dict[str, str]]] = None, + distributed_id: Optional[str] = None, + is_user_created: Optional[bool] = False, + history_step: Optional[int] = None, + ) -> Tuple[Dict, Dict]: + fields = self.server_create_artifact_introspection() + artifact_fields = self.server_artifact_introspection() + if ("ttlIsInherited" not in artifact_fields) and ttl_duration_seconds: + wandb.termwarn( + "Server not compatible with setting Artifact TTLs, please upgrade the server to use Artifact TTL" + ) + # ttlDurationSeconds is only usable if ttlIsInherited is also present + ttl_duration_seconds = None + if ("tags" not in artifact_fields) and tags: + wandb.termwarn( + "Server not compatible with Artifact tags. " + "To use Artifact tags, please upgrade the server to v0.85 or higher." 
+ ) + + query_template = self._get_create_artifact_mutation( + fields, history_step, distributed_id + ) + + entity_name = entity_name or self.settings("entity") + project_name = project_name or self.settings("project") + if not is_user_created: + run_name = run_name or self.current_run_id + + mutation = gql(query_template) + response = self.gql( + mutation, + variable_values={ + "entityName": entity_name, + "projectName": project_name, + "runName": run_name, + "artifactTypeName": artifact_type_name, + "artifactCollectionNames": [artifact_collection_name], + "clientID": client_id, + "sequenceClientID": sequence_client_id, + "digest": digest, + "description": description, + "aliases": list(aliases or []), + "tags": list(tags or []), + "metadata": json.dumps(util.make_safe_for_json(metadata)) + if metadata + else None, + "ttlDurationSeconds": ttl_duration_seconds, + "distributedID": distributed_id, + "historyStep": history_step, + }, + ) + av = response["createArtifact"]["artifact"] + latest = response["createArtifact"]["artifact"]["artifactSequence"].get( + "latestArtifact" + ) + return av, latest + + def commit_artifact(self, artifact_id: str) -> "_Response": + mutation = gql( + """ + mutation CommitArtifact( + $artifactID: ID!, + ) { + commitArtifact(input: { + artifactID: $artifactID, + }) { + artifact { + id + digest + } + } + } + """ + ) + + response: _Response = self.gql( + mutation, + variable_values={"artifactID": artifact_id}, + timeout=60, + ) + return response + + def complete_multipart_upload_artifact( + self, + artifact_id: str, + storage_path: str, + completed_parts: List[Dict[str, Any]], + upload_id: Optional[str], + complete_multipart_action: str = "Complete", + ) -> Optional[str]: + mutation = gql( + """ + mutation CompleteMultipartUploadArtifact( + $completeMultipartAction: CompleteMultipartAction!, + $completedParts: [UploadPartsInput!]!, + $artifactID: ID! + $storagePath: String! + $uploadID: String! 
+ ) { + completeMultipartUploadArtifact( + input: { + completeMultipartAction: $completeMultipartAction, + completedParts: $completedParts, + artifactID: $artifactID, + storagePath: $storagePath + uploadID: $uploadID + } + ) { + digest + } + } + """ + ) + response = self.gql( + mutation, + variable_values={ + "completeMultipartAction": complete_multipart_action, + "artifactID": artifact_id, + "storagePath": storage_path, + "completedParts": completed_parts, + "uploadID": upload_id, + }, + ) + digest: Optional[str] = response["completeMultipartUploadArtifact"]["digest"] + return digest + + def create_artifact_manifest( + self, + name: str, + digest: str, + artifact_id: Optional[str], + base_artifact_id: Optional[str] = None, + entity: Optional[str] = None, + project: Optional[str] = None, + run: Optional[str] = None, + include_upload: bool = True, + type: str = "FULL", + ) -> Tuple[str, Dict[str, Any]]: + mutation = gql( + """ + mutation CreateArtifactManifest( + $name: String!, + $digest: String!, + $artifactID: ID!, + $baseArtifactID: ID, + $entityName: String!, + $projectName: String!, + $runName: String!, + $includeUpload: Boolean!, + {} + ) {{ + createArtifactManifest(input: {{ + name: $name, + digest: $digest, + artifactID: $artifactID, + baseArtifactID: $baseArtifactID, + entityName: $entityName, + projectName: $projectName, + runName: $runName, + {} + }}) {{ + artifactManifest {{ + id + file {{ + id + name + displayName + uploadUrl @include(if: $includeUpload) + uploadHeaders @include(if: $includeUpload) + }} + }} + }} + }} + """.format( + "$type: ArtifactManifestType = FULL" if type != "FULL" else "", + "type: $type" if type != "FULL" else "", + ) + ) + + entity_name = entity or self.settings("entity") + project_name = project or self.settings("project") + run_name = run or self.current_run_id + + response = self.gql( + mutation, + variable_values={ + "name": name, + "digest": digest, + "artifactID": artifact_id, + "baseArtifactID": base_artifact_id, + "entityName": entity_name, + "projectName": project_name, + "runName": run_name, + "includeUpload": include_upload, + "type": type, + }, + ) + return ( + response["createArtifactManifest"]["artifactManifest"]["id"], + response["createArtifactManifest"]["artifactManifest"]["file"], + ) + + def update_artifact_manifest( + self, + artifact_manifest_id: str, + base_artifact_id: Optional[str] = None, + digest: Optional[str] = None, + include_upload: Optional[bool] = True, + ) -> Tuple[str, Dict[str, Any]]: + mutation = gql( + """ + mutation UpdateArtifactManifest( + $artifactManifestID: ID!, + $digest: String, + $baseArtifactID: ID, + $includeUpload: Boolean!, + ) { + updateArtifactManifest(input: { + artifactManifestID: $artifactManifestID, + digest: $digest, + baseArtifactID: $baseArtifactID, + }) { + artifactManifest { + id + file { + id + name + displayName + uploadUrl @include(if: $includeUpload) + uploadHeaders @include(if: $includeUpload) + } + } + } + } + """ + ) + + response = self.gql( + mutation, + variable_values={ + "artifactManifestID": artifact_manifest_id, + "digest": digest, + "baseArtifactID": base_artifact_id, + "includeUpload": include_upload, + }, + ) + + return ( + response["updateArtifactManifest"]["artifactManifest"]["id"], + response["updateArtifactManifest"]["artifactManifest"]["file"], + ) + + def update_artifact_metadata( + self, artifact_id: str, metadata: Dict[str, Any] + ) -> Dict[str, Any]: + """Set the metadata of the given artifact version.""" + mutation = gql( + """ + mutation UpdateArtifact( + 
$artifactID: ID!, + $metadata: JSONString, + ) { + updateArtifact(input: { + artifactID: $artifactID, + metadata: $metadata, + }) { + artifact { + id + } + } + } + """ + ) + response = self.gql( + mutation, + variable_values={ + "artifactID": artifact_id, + "metadata": json.dumps(metadata), + }, + ) + return response["updateArtifact"]["artifact"] + + def _resolve_client_id( + self, + client_id: str, + ) -> Optional[str]: + if client_id in self._client_id_mapping: + return self._client_id_mapping[client_id] + + query = gql( + """ + query ClientIDMapping($clientID: ID!) { + clientIDMapping(clientID: $clientID) { + serverID + } + } + """ + ) + response = self.gql( + query, + variable_values={ + "clientID": client_id, + }, + ) + server_id = None + if response is not None: + client_id_mapping = response.get("clientIDMapping") + if client_id_mapping is not None: + server_id = client_id_mapping.get("serverID") + if server_id is not None: + self._client_id_mapping[client_id] = server_id + return server_id + + def server_create_artifact_file_spec_input_introspection(self) -> List: + query_string = """ + query ProbeServerCreateArtifactFileSpecInput { + CreateArtifactFileSpecInputInfoType: __type(name:"CreateArtifactFileSpecInput") { + inputFields{ + name + } + } + } + """ + + query = gql(query_string) + res = self.gql(query) + create_artifact_file_spec_input_info = [ + field.get("name", "") + for field in res.get("CreateArtifactFileSpecInputInfoType", {}).get( + "inputFields", [{}] + ) + ] + return create_artifact_file_spec_input_info + + @normalize_exceptions + def create_artifact_files( + self, artifact_files: Iterable["CreateArtifactFileSpecInput"] + ) -> Mapping[str, "CreateArtifactFilesResponseFile"]: + query_template = """ + mutation CreateArtifactFiles( + $storageLayout: ArtifactStorageLayout! + $artifactFiles: [CreateArtifactFileSpecInput!]! + ) { + createArtifactFiles(input: { + artifactFiles: $artifactFiles, + storageLayout: $storageLayout, + }) { + files { + edges { + node { + id + name + displayName + uploadUrl + uploadHeaders + _MULTIPART_UPLOAD_FIELDS_ + artifact { + id + } + } + } + } + } + } + """ + multipart_upload_url_query = """ + storagePath + uploadMultipartUrls { + uploadID + uploadUrlParts { + partNumber + uploadUrl + } + } + """ + + # TODO: we should use constants here from interface/artifacts.py + # but probably don't want the dependency. We're going to remove + # this setting in a future release, so I'm just hard-coding the strings. 
+ storage_layout = "V2" + if env.get_use_v1_artifacts(): + storage_layout = "V1" + + create_artifact_file_spec_input_fields = ( + self.server_create_artifact_file_spec_input_introspection() + ) + if "uploadPartsInput" in create_artifact_file_spec_input_fields: + query_template = query_template.replace( + "_MULTIPART_UPLOAD_FIELDS_", multipart_upload_url_query + ) + else: + query_template = query_template.replace("_MULTIPART_UPLOAD_FIELDS_", "") + + mutation = gql(query_template) + response = self.gql( + mutation, + variable_values={ + "storageLayout": storage_layout, + "artifactFiles": [af for af in artifact_files], + }, + ) + + result = {} + for edge in response["createArtifactFiles"]["files"]["edges"]: + node = edge["node"] + result[node["displayName"]] = node + return result + + @normalize_exceptions + def notify_scriptable_run_alert( + self, + title: str, + text: str, + level: Optional[str] = None, + wait_duration: Optional["Number"] = None, + ) -> bool: + mutation = gql( + """ + mutation NotifyScriptableRunAlert( + $entityName: String!, + $projectName: String!, + $runName: String!, + $title: String!, + $text: String!, + $severity: AlertSeverity = INFO, + $waitDuration: Duration + ) { + notifyScriptableRunAlert(input: { + entityName: $entityName, + projectName: $projectName, + runName: $runName, + title: $title, + text: $text, + severity: $severity, + waitDuration: $waitDuration + }) { + success + } + } + """ + ) + + response = self.gql( + mutation, + variable_values={ + "entityName": self.settings("entity"), + "projectName": self.settings("project"), + "runName": self.current_run_id, + "title": title, + "text": text, + "severity": level, + "waitDuration": wait_duration, + }, + ) + success: bool = response["notifyScriptableRunAlert"]["success"] + return success + + def get_sweep_state( + self, sweep: str, entity: Optional[str] = None, project: Optional[str] = None + ) -> "SweepState": + state: SweepState = self.sweep( + sweep=sweep, entity=entity, project=project, specs="{}" + )["state"] + return state + + def set_sweep_state( + self, + sweep: str, + state: "SweepState", + entity: Optional[str] = None, + project: Optional[str] = None, + ) -> None: + assert state in ("RUNNING", "PAUSED", "CANCELED", "FINISHED") + s = self.sweep(sweep=sweep, entity=entity, project=project, specs="{}") + curr_state = s["state"].upper() + if state == "PAUSED" and curr_state not in ("PAUSED", "RUNNING"): + raise Exception("Cannot pause {} sweep.".format(curr_state.lower())) + elif state != "RUNNING" and curr_state not in ("RUNNING", "PAUSED", "PENDING"): + raise Exception("Sweep already {}.".format(curr_state.lower())) + sweep_id = s["id"] + mutation = gql( + """ + mutation UpsertSweep( + $id: ID, + $state: String, + $entityName: String, + $projectName: String + ) { + upsertSweep(input: { + id: $id, + state: $state, + entityName: $entityName, + projectName: $projectName + }){ + sweep { + name + } + } + } + """ + ) + self.gql( + mutation, + variable_values={ + "id": sweep_id, + "state": state, + "entityName": entity or self.settings("entity"), + "projectName": project or self.settings("project"), + }, + ) + + def stop_sweep( + self, + sweep: str, + entity: Optional[str] = None, + project: Optional[str] = None, + ) -> None: + """Finish the sweep to stop running new runs and let currently running runs finish.""" + self.set_sweep_state( + sweep=sweep, state="FINISHED", entity=entity, project=project + ) + + def cancel_sweep( + self, + sweep: str, + entity: Optional[str] = None, + project: Optional[str] = None, 
+ ) -> None: + """Cancel the sweep to kill all running runs and stop running new runs.""" + self.set_sweep_state( + sweep=sweep, state="CANCELED", entity=entity, project=project + ) + + def pause_sweep( + self, + sweep: str, + entity: Optional[str] = None, + project: Optional[str] = None, + ) -> None: + """Pause the sweep to temporarily stop running new runs.""" + self.set_sweep_state( + sweep=sweep, state="PAUSED", entity=entity, project=project + ) + + def resume_sweep( + self, + sweep: str, + entity: Optional[str] = None, + project: Optional[str] = None, + ) -> None: + """Resume the sweep to continue running new runs.""" + self.set_sweep_state( + sweep=sweep, state="RUNNING", entity=entity, project=project + ) + + def _status_request(self, url: str, length: int) -> requests.Response: + """Ask google how much we've uploaded.""" + check_httpclient_logger_handler() + return requests.put( + url=url, + headers={"Content-Length": "0", "Content-Range": "bytes */%i" % length}, + ) + + def _flatten_edges(self, response: "_Response") -> List[Dict]: + """Return an array from the nested graphql relay structure.""" + return [node["node"] for node in response["edges"]] + + @normalize_exceptions + def stop_run( + self, + run_id: str, + ) -> bool: + mutation = gql( + """ + mutation stopRun($id: ID!) { + stopRun(input: { + id: $id + }) { + clientMutationId + success + } + } + """ + ) + + response = self.gql( + mutation, + variable_values={ + "id": run_id, + }, + ) + + success: bool = response["stopRun"].get("success") + + return success diff --git a/parrot/lib/python3.10/site-packages/wandb/sdk/internal/job_builder.py b/parrot/lib/python3.10/site-packages/wandb/sdk/internal/job_builder.py new file mode 100644 index 0000000000000000000000000000000000000000..b135acf2f7c514b3e7c6a928319bd46f18e40a47 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/wandb/sdk/internal/job_builder.py @@ -0,0 +1,629 @@ +"""job builder.""" + +import json +import logging +import os +import re +import sys +from typing import TYPE_CHECKING, Any, Callable, Dict, List, Optional, Tuple, Union + +import wandb +from wandb.sdk.artifacts.artifact import Artifact +from wandb.sdk.data_types._dtypes import TypeRegistry +from wandb.sdk.internal.internal_api import Api +from wandb.sdk.lib.filenames import DIFF_FNAME, METADATA_FNAME, REQUIREMENTS_FNAME +from wandb.util import make_artifact_name_safe + +from .settings_static import SettingsStatic + +if sys.version_info >= (3, 8): + from typing import Literal, TypedDict +else: + from typing_extensions import Literal, TypedDict + +_logger = logging.getLogger(__name__) + +if TYPE_CHECKING: + from wandb.proto.wandb_internal_pb2 import ArtifactRecord + +FROZEN_REQUIREMENTS_FNAME = "requirements.frozen.txt" +JOB_FNAME = "wandb-job.json" +JOB_ARTIFACT_TYPE = "job" + +LOG_LEVEL = Literal["log", "warn", "error"] + + +class Version: + def __init__(self, major: int, minor: int, patch: int): + self._major = major + self._minor = minor + self._patch = patch + + def __repr__(self) -> str: + return f"{self._major}.{self._minor}.{self._patch}" + + def __lt__(self, other: "Version") -> bool: + if self._major < other._major: + return True + elif self._major == other._major: + if self._minor < other._minor: + return True + elif self._minor == other._minor: + if self._patch < other._patch: + return True + return False + + def __eq__(self, other: object) -> bool: + if not isinstance(other, Version): + return NotImplemented + return ( + self._major == other._major + and self._minor == other._minor + and 
self._patch == other._patch + ) + + +# Minimum supported wandb version for keys in the source dict of wandb-job.json +SOURCE_KEYS_MIN_SUPPORTED_VERSION = { + "dockerfile": Version(0, 17, 0), + "build_context": Version(0, 17, 0), +} + + +class GitInfo(TypedDict): + remote: str + commit: str + + +class GitSourceDict(TypedDict): + git: GitInfo + entrypoint: List[str] + notebook: bool + build_context: Optional[str] + dockerfile: Optional[str] + + +class ArtifactSourceDict(TypedDict): + artifact: str + entrypoint: List[str] + notebook: bool + build_context: Optional[str] + dockerfile: Optional[str] + + +class ImageSourceDict(TypedDict): + image: str + + +class JobSourceDict(TypedDict, total=False): + _version: str + source_type: str + source: Union[GitSourceDict, ArtifactSourceDict, ImageSourceDict] + input_types: Dict[str, Any] + output_types: Dict[str, Any] + runtime: Optional[str] + + +class ArtifactInfoForJob(TypedDict): + id: str + name: str + + +def get_min_supported_for_source_dict( + source: Union[GitSourceDict, ArtifactSourceDict, ImageSourceDict], +) -> Optional[Version]: + """Get the minimum supported wandb version the source dict of wandb-job.json.""" + min_seen = None + for key in source: + new_ver = SOURCE_KEYS_MIN_SUPPORTED_VERSION.get(key) + if new_ver: + if min_seen is None or new_ver < min_seen: + min_seen = new_ver + return min_seen + + +class JobArtifact(Artifact): + def __init__(self, name: str, *args: Any, **kwargs: Any): + super().__init__(name, "placeholder", *args, **kwargs) + self._type = JOB_ARTIFACT_TYPE # Get around type restriction. + + +class JobBuilder: + _settings: SettingsStatic + _metadatafile_path: Optional[str] + _requirements_path: Optional[str] + _config: Optional[Dict[str, Any]] + _summary: Optional[Dict[str, Any]] + _logged_code_artifact: Optional[ArtifactInfoForJob] + _disable: bool + _partial_source_id: Optional[str] # Partial job source artifact id. 
+ _aliases: List[str] + _job_seq_id: Optional[str] + _job_version_alias: Optional[str] + _is_notebook_run: bool + _verbose: bool + + def __init__(self, settings: SettingsStatic, verbose: bool = False): + self._settings = settings + self._metadatafile_path = None + self._requirements_path = None + self._config = None + self._summary = None + self._logged_code_artifact = None + self._job_seq_id = None + self._job_version_alias = None + self._disable = settings.disable_job_creation + self._partial_source_id = None + self._aliases = [] + self._source_type: Optional[Literal["repo", "artifact", "image"]] = ( + settings.job_source # type: ignore[assignment] + ) + self._is_notebook_run = self._get_is_notebook_run() + self._verbose = verbose + self._partial = False + + def set_config(self, config: Dict[str, Any]) -> None: + self._config = config + + def set_summary(self, summary: Dict[str, Any]) -> None: + self._summary = summary + + @property + def disable(self) -> bool: + return self._disable + + @disable.setter + def disable(self, val: bool) -> None: + self._disable = val + + @property + def input_types(self) -> Dict[str, Any]: + return TypeRegistry.type_of(self._config).to_json() + + @property + def output_types(self) -> Dict[str, Any]: + return TypeRegistry.type_of(self._summary).to_json() + + def set_partial_source_id(self, source_id: str) -> None: + self._partial_source_id = source_id + + def _handle_server_artifact( + self, res: Optional[Dict], artifact: "ArtifactRecord" + ) -> None: + if artifact.type == "job" and res is not None: + try: + if res["artifactSequence"]["latestArtifact"] is None: + self._job_version_alias = "v0" + elif res["artifactSequence"]["latestArtifact"]["id"] == res["id"]: + self._job_version_alias = ( + f"v{res['artifactSequence']['latestArtifact']['versionIndex']}" + ) + else: + self._job_version_alias = f"v{res['artifactSequence']['latestArtifact']['versionIndex'] + 1}" + self._job_seq_id = res["artifactSequence"]["id"] + except KeyError as e: + _logger.info(f"Malformed response from ArtifactSaver.save {e}") + if artifact.type == "code" and res is not None: + self._logged_code_artifact = ArtifactInfoForJob( + { + "id": res["id"], + "name": artifact.name, + } + ) + + def _build_repo_job_source( + self, + program_relpath: str, + metadata: Dict[str, Any], + ) -> Tuple[Optional[GitSourceDict], Optional[str]]: + git_info: Dict[str, str] = metadata.get("git", {}) + remote = git_info.get("remote") + commit = git_info.get("commit") + root = metadata.get("root") + assert remote is not None + assert commit is not None + if self._is_notebook_run: + if not os.path.exists( + os.path.join(os.getcwd(), os.path.basename(program_relpath)) + ): + return None, None + + if root is None or self._settings._jupyter_root is None: + _logger.info("target path does not exist, exiting") + return None, None + assert self._settings._jupyter_root is not None + # git notebooks set the root to the git root, + # jupyter_root contains the path where the jupyter notebook was started + # program_relpath contains the path from jupyter_root to the file + # full program path here is actually the relpath from the program to the git root + full_program_path = os.path.join( + os.path.relpath(str(self._settings._jupyter_root), root), + program_relpath, + ) + full_program_path = os.path.normpath(full_program_path) + # if the notebook server is started above the git repo need to clear all the ..s + if full_program_path.startswith(".."): + split_path = full_program_path.split("/") + count_dots = 0 + for p in 
split_path: + if p == "..": + count_dots += 1 + full_program_path = "/".join(split_path[2 * count_dots :]) + else: + full_program_path = program_relpath + + entrypoint = self._get_entrypoint(full_program_path, metadata) + # TODO: update executable to a method that supports pex + source: GitSourceDict = { + "git": {"remote": remote, "commit": commit}, + "entrypoint": entrypoint, + "notebook": self._is_notebook_run, + "build_context": metadata.get("build_context"), + "dockerfile": metadata.get("dockerfile"), + } + name = self._make_job_name(f"{remote}_{program_relpath}") + + return source, name + + def _log_if_verbose(self, message: str, level: LOG_LEVEL) -> None: + log_func: Optional[Union[Callable[[Any], None], Callable[[Any], None]]] = None + if level == "log": + _logger.info(message) + log_func = wandb.termlog + elif level == "warn": + _logger.warning(message) + log_func = wandb.termwarn + elif level == "error": + _logger.error(message) + log_func = wandb.termerror + + if self._verbose and log_func is not None: + log_func(message) + + def _build_artifact_job_source( + self, + program_relpath: str, + metadata: Dict[str, Any], + ) -> Tuple[Optional[ArtifactSourceDict], Optional[str]]: + assert isinstance(self._logged_code_artifact, dict) + # TODO: should we just always exit early if the path doesn't exist? + if self._is_notebook_run and not self._is_colab_run(): + full_program_relpath = os.path.relpath(program_relpath, os.getcwd()) + # if the resolved path doesn't exist, then we shouldn't make a job because it will fail + if not os.path.exists(full_program_relpath): + # when users call log code in a notebook the code artifact starts + # at the directory the notebook is in instead of the jupyter core + if not os.path.exists(os.path.basename(program_relpath)): + _logger.info("target path does not exist, exiting") + self._log_if_verbose( + "No program path found when generating artifact job source for a non-colab notebook run. See https://docs.wandb.ai/guides/launch/create-job", + "warn", + ) + return None, None + full_program_relpath = os.path.basename(program_relpath) + else: + full_program_relpath = program_relpath + + entrypoint = self._get_entrypoint(full_program_relpath, metadata) + # TODO: update executable to a method that supports pex + source: ArtifactSourceDict = { + "entrypoint": entrypoint, + "notebook": self._is_notebook_run, + "artifact": f"wandb-artifact://_id/{self._logged_code_artifact['id']}", + "build_context": metadata.get("build_context"), + "dockerfile": metadata.get("dockerfile"), + } + name = self._make_job_name(self._logged_code_artifact["name"]) + + return source, name + + def _build_image_job_source( + self, metadata: Dict[str, Any] + ) -> Tuple[ImageSourceDict, str]: + image_name = metadata.get("docker") + assert isinstance(image_name, str) + + raw_image_name = image_name + if ":" in image_name: + tag = image_name.split(":")[-1] + + # if tag looks properly formatted, assume its a tag + # regex: alphanumeric and "_" "-" "." 
+ if re.fullmatch(r"([a-zA-Z0-9_\-\.]+)", tag): + raw_image_name = raw_image_name.replace(f":{tag}", "") + self._aliases += [tag] + + source: ImageSourceDict = { + "image": image_name, + } + name = self._make_job_name(raw_image_name) + + return source, name + + def _make_job_name(self, input_str: str) -> str: + """Use job name from settings if provided, else use programmatic name.""" + if self._settings.job_name: + return self._settings.job_name + + return make_artifact_name_safe(f"job-{input_str}") + + def _get_entrypoint( + self, + program_relpath: str, + metadata: Dict[str, Any], + ) -> List[str]: + # if building a partial job from CLI, overwrite entrypoint and notebook + # should already be in metadata from create_job + if self._partial: + if metadata.get("entrypoint"): + entrypoint: List[str] = metadata["entrypoint"] + return entrypoint + # job is being built from a run + entrypoint = [os.path.basename(sys.executable), program_relpath] + + return entrypoint + + def _get_is_notebook_run(self) -> bool: + return hasattr(self._settings, "_jupyter") and bool(self._settings._jupyter) + + def _is_colab_run(self) -> bool: + return hasattr(self._settings, "_colab") and bool(self._settings._colab) + + def _build_job_source( + self, + source_type: str, + program_relpath: Optional[str], + metadata: Dict[str, Any], + ) -> Tuple[ + Union[GitSourceDict, ArtifactSourceDict, ImageSourceDict, None], + Optional[str], + ]: + """Construct a job source dict and name from the current run. + + Arguments: + source_type (str): The type of source to build the job from. One of + "repo", "artifact", or "image". + """ + source: Union[ + GitSourceDict, + ArtifactSourceDict, + ImageSourceDict, + None, + ] = None + + if source_type == "repo": + source, name = self._build_repo_job_source( + program_relpath or "", + metadata, + ) + elif source_type == "artifact": + source, name = self._build_artifact_job_source( + program_relpath or "", + metadata, + ) + elif source_type == "image" and self._has_image_job_ingredients(metadata): + source, name = self._build_image_job_source(metadata) + else: + source = None + + if source is None: + if source_type: + self._log_if_verbose( + f"Source type is set to '{source_type}' but some required information is missing " + "from the environment. A job will not be created from this run. See " + "https://docs.wandb.ai/guides/launch/create-job", + "warn", + ) + return None, None + + return source, name + + def build( + self, + api: Api, + build_context: Optional[str] = None, + dockerfile: Optional[str] = None, + base_image: Optional[str] = None, + ) -> Optional[Artifact]: + """Build a job artifact from the current run. + + Arguments: + api (Api): The API object to use to create the job artifact. + build_context (Optional[str]): Path within the job source code to + the image build context. Saved as part of the job for future + builds. + dockerfile (Optional[str]): Path within the build context the + Dockerfile. Saved as part of the job for future builds. + base_image (Optional[str]): The base image used to run the job code. + + Returns: + Optional[Artifact]: The job artifact if it was successfully built, + otherwise None. + """ + _logger.info("Attempting to build job artifact") + + # If a partial job was used, write the input/output types to the metadata + # rather than building a new job version. 
+ if self._partial_source_id is not None: + new_metadata = { + "input_types": {"@wandb.config": self.input_types}, + "output_types": self.output_types, + } + api.update_artifact_metadata( + self._partial_source_id, + new_metadata, + ) + return None + + if not os.path.exists( + os.path.join(self._settings.files_dir, REQUIREMENTS_FNAME) + ): + self._log_if_verbose( + "No requirements.txt found, not creating job artifact. See https://docs.wandb.ai/guides/launch/create-job", + "warn", + ) + return None + metadata = self._handle_metadata_file() + if metadata is None: + self._log_if_verbose( + f"Ensure read and write access to run files dir: {self._settings.files_dir}, control this via the WANDB_DIR env var. See https://docs.wandb.ai/guides/track/environment-variables", + "warn", + ) + return None + + runtime: Optional[str] = metadata.get("python") + # can't build a job without a python version + if runtime is None: + self._log_if_verbose( + "No python version found in metadata, not creating job artifact. See https://docs.wandb.ai/guides/launch/create-job", + "warn", + ) + return None + + input_types = TypeRegistry.type_of(self._config).to_json() + output_types = TypeRegistry.type_of(self._summary).to_json() + + name: Optional[str] = None + source_info: Optional[JobSourceDict] = None + + # configure job from environment + source_type = self._get_source_type(metadata) + if not source_type: + # if source_type is None, then we don't have enough information to build a job + # if the user intended to create a job, warn. + if ( + self._settings.job_name + or self._settings.job_source + or self._source_type + ): + self._log_if_verbose( + "No source type found, not creating job artifact", "warn" + ) + return None + + program_relpath = self._get_program_relpath(source_type, metadata) + if not self._partial and source_type != "image" and not program_relpath: + self._log_if_verbose( + "No program path found, not creating job artifact. See https://docs.wandb.ai/guides/launch/create-job", + "warn", + ) + return None + + source, name = self._build_job_source( + source_type, + program_relpath, + metadata, + ) + if source is None: + return None + + if build_context: + source["build_context"] = build_context # type: ignore[typeddict-item] + if dockerfile: + source["dockerfile"] = dockerfile # type: ignore[typeddict-item] + if base_image: + source["base_image"] = base_image # type: ignore[typeddict-item] + + # Pop any keys that are initialized to None. The current TypedDict + # system for source dicts requires all keys to be present, but we + # don't want to include keys that are None in the final dict. 
+ for key in list(source.keys()): + if source[key] is None: # type: ignore[literal-required] + source.pop(key) # type: ignore[literal-require,misc] + + source_info = { + "_version": str(get_min_supported_for_source_dict(source) or "v0"), + "source_type": source_type, + "source": source, + "input_types": input_types, + "output_types": output_types, + "runtime": runtime, + } + + assert source_info is not None + assert name is not None + + artifact = JobArtifact(name) + + _logger.info("adding wandb-job metadata file") + with artifact.new_file("wandb-job.json") as f: + f.write(json.dumps(source_info, indent=4)) + + artifact.add_file( + os.path.join(self._settings.files_dir, REQUIREMENTS_FNAME), + name=FROZEN_REQUIREMENTS_FNAME, + ) + + if source_type == "repo": + # add diff + if os.path.exists(os.path.join(self._settings.files_dir, DIFF_FNAME)): + artifact.add_file( + os.path.join(self._settings.files_dir, DIFF_FNAME), + name=DIFF_FNAME, + ) + + return artifact + + def _get_source_type(self, metadata: Dict[str, Any]) -> Optional[str]: + if self._source_type: + return self._source_type + + if self._has_git_job_ingredients(metadata): + _logger.info("is repo sourced job") + return "repo" + + if self._has_artifact_job_ingredients(): + _logger.info("is artifact sourced job") + return "artifact" + + if self._has_image_job_ingredients(metadata): + _logger.info("is image sourced job") + return "image" + + _logger.info("no source found") + return None + + def _get_program_relpath( + self, source_type: str, metadata: Dict[str, Any] + ) -> Optional[str]: + if self._is_notebook_run: + _logger.info("run is notebook based run") + program = metadata.get("program") + + if not program: + self._log_if_verbose( + "Notebook 'program' path not found in metadata. See https://docs.wandb.ai/guides/launch/create-job", + "warn", + ) + + return program + + if source_type == "artifact" or self._settings.job_source == "artifact": + # if the job is set to be an artifact, use relpath guaranteed + # to be correct. 
'codePath' uses the root path when in git repo + # fallback to codePath if strictly local relpath not present + return metadata.get("codePathLocal") or metadata.get("codePath") + + return metadata.get("codePath") + + def _handle_metadata_file( + self, + ) -> Optional[Dict]: + if os.path.exists(os.path.join(self._settings.files_dir, METADATA_FNAME)): + with open(os.path.join(self._settings.files_dir, METADATA_FNAME)) as f: + metadata: Dict = json.load(f) + return metadata + + return None + + def _has_git_job_ingredients(self, metadata: Dict[str, Any]) -> bool: + git_info: Dict[str, str] = metadata.get("git", {}) + if self._is_notebook_run and metadata.get("root") is None: + return False + return git_info.get("remote") is not None and git_info.get("commit") is not None + + def _has_artifact_job_ingredients(self) -> bool: + return self._logged_code_artifact is not None + + def _has_image_job_ingredients(self, metadata: Dict[str, Any]) -> bool: + return metadata.get("docker") is not None diff --git a/parrot/lib/python3.10/site-packages/wandb/sdk/internal/progress.py b/parrot/lib/python3.10/site-packages/wandb/sdk/internal/progress.py new file mode 100644 index 0000000000000000000000000000000000000000..a0848a404d0a498850ba18e06e90c90ac09eda3a --- /dev/null +++ b/parrot/lib/python3.10/site-packages/wandb/sdk/internal/progress.py @@ -0,0 +1,83 @@ +"""progress.""" + +import os +import sys +from typing import IO, TYPE_CHECKING, Optional + +from wandb.errors import CommError + +if TYPE_CHECKING: + if sys.version_info >= (3, 8): + from typing import Protocol + else: + from typing_extensions import Protocol + + class ProgressFn(Protocol): + def __call__(self, new_bytes: int, total_bytes: int) -> None: + pass + + +class Progress: + """A helper class for displaying progress.""" + + ITER_BYTES = 1024 * 1024 + + def __init__( + self, file: IO[bytes], callback: Optional["ProgressFn"] = None + ) -> None: + self.file = file + if callback is None: + + def callback_(new_bytes: int, total_bytes: int) -> None: + pass + + callback = callback_ + + self.callback: ProgressFn = callback + self.bytes_read = 0 + self.len = os.fstat(file.fileno()).st_size + + def read(self, size=-1): + """Read bytes and call the callback.""" + bites = self.file.read(size) + self.bytes_read += len(bites) + if not bites and self.bytes_read < self.len: + # Files shrinking during uploads causes request timeouts. Maybe + # we could avoid those by updating the self.len in real-time, but + # files getting truncated while uploading seems like something + # that shouldn't really be happening anyway. + raise CommError( + "File {} size shrank from {} to {} while it was being uploaded.".format( + self.file.name, self.len, self.bytes_read + ) + ) + # Growing files are also likely to be bad, but our code didn't break + # on those in the past, so it's riskier to make that an error now. 
+ self.callback(len(bites), self.bytes_read) + return bites + + def rewind(self) -> None: + self.callback(-self.bytes_read, 0) + self.bytes_read = 0 + self.file.seek(0) + + def __getattr__(self, name): + """Fallback to the file object for attrs not defined here.""" + if hasattr(self.file, name): + return getattr(self.file, name) + else: + raise AttributeError + + def __iter__(self): + return self + + def __next__(self): + bites = self.read(self.ITER_BYTES) + if len(bites) == 0: + raise StopIteration + return bites + + def __len__(self): + return self.len + + next = __next__ diff --git a/parrot/lib/python3.10/site-packages/wandb/sdk/internal/thread_local_settings.py b/parrot/lib/python3.10/site-packages/wandb/sdk/internal/thread_local_settings.py new file mode 100644 index 0000000000000000000000000000000000000000..2ee0e74cc44151617cb90e7eb45996e0d1a3859d --- /dev/null +++ b/parrot/lib/python3.10/site-packages/wandb/sdk/internal/thread_local_settings.py @@ -0,0 +1,18 @@ +import threading +from typing import Dict, Optional + + +# Context variable for setting API settings (api keys, etc.) for internal and public apis thread-locally +# TODO: move this into actual settings +class _ThreadLocalApiSettings(threading.local): + api_key: Optional[str] + cookies: Optional[Dict] + headers: Optional[Dict] + + def __init__(self) -> None: + self.api_key = None + self.cookies = None + self.headers = None + + +_thread_local_api_settings: _ThreadLocalApiSettings = _ThreadLocalApiSettings() diff --git a/parrot/lib/python3.10/site-packages/wandb/sdk/internal/update.py b/parrot/lib/python3.10/site-packages/wandb/sdk/internal/update.py new file mode 100644 index 0000000000000000000000000000000000000000..0af8e41076d24901830cf4fba13d09ef94bec261 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/wandb/sdk/internal/update.py @@ -0,0 +1,113 @@ +from typing import Dict, Optional, Tuple + +import requests + +import wandb + + +def _find_available( + current_version: str, +) -> Optional[Tuple[str, bool, bool, bool, Optional[str]]]: + from wandb.util import parse_version + + pypi_url = "https://pypi.org/pypi/wandb/json" + + yanked_dict = {} + try: + # raise Exception("test") + async_requests_get = wandb.util.async_call(requests.get, timeout=5) + data, thread = async_requests_get(pypi_url, timeout=3) + if not data or isinstance(data, Exception): + return None + data = data.json() + latest_version = data["info"]["version"] + release_list = data["releases"].keys() + for version, fields in data["releases"].items(): + for item in fields: + yanked = item.get("yanked") + yanked_reason = item.get("yanked_reason") + if yanked: + yanked_dict[version] = yanked_reason + except Exception: + # Any issues whatsoever, just skip the latest version check. 
+ return None + + # Return if no update is available + pip_prerelease = False + deleted = False + yanked = False + yanked_reason = None + parsed_current_version = parse_version(current_version) + + # Check if current version has been yanked or deleted + # NOTE: we will not return yanked or deleted if there is nothing to upgrade to + if current_version in release_list: + yanked = current_version in yanked_dict + yanked_reason = yanked_dict.get(current_version) + else: + deleted = True + + # Check pre-releases + if parse_version(latest_version) <= parsed_current_version: + # pre-releases are not included in latest_version + # so if we are currently running a pre-release we check more + if not parsed_current_version.is_prerelease: + return None + # Candidates are pre-releases with the same base_version + release_list = map(parse_version, release_list) + release_list = filter(lambda v: v.is_prerelease, release_list) + release_list = filter( + lambda v: v.base_version == parsed_current_version.base_version, + release_list, + ) + release_list = sorted(release_list) + if not release_list: + return None + + parsed_latest_version = release_list[-1] + if parsed_latest_version <= parsed_current_version: + return None + latest_version = str(parsed_latest_version) + pip_prerelease = True + + return latest_version, pip_prerelease, deleted, yanked, yanked_reason + + +def check_available(current_version: str) -> Optional[Dict[str, Optional[str]]]: + package_info = _find_available(current_version) + if not package_info: + return None + + wandb_module_name = "wandb" + + latest_version, pip_prerelease, deleted, yanked, yanked_reason = package_info + upgrade_message = ( + "{} version {} is available! To upgrade, please run:\n" + " $ pip install {} --upgrade{}".format( + wandb_module_name, + latest_version, + wandb_module_name, + " --pre" if pip_prerelease else "", + ) + ) + delete_message = None + if deleted: + delete_message = "{} version {} has been retired! Please upgrade.".format( + wandb_module_name, + current_version, + ) + yank_message = None + if yanked: + reason_message = "({}) ".format(yanked_reason) if yanked_reason else "" + yank_message = "{} version {} has been recalled! {}Please upgrade.".format( + wandb_module_name, + current_version, + reason_message, + ) + + # A new version is available! 
+ return { + "upgrade_message": upgrade_message, + "yank_message": yank_message, + "delete_message": delete_message, + } diff --git a/parrot/lib/python3.10/site-packages/wandb/sdk/launch/__pycache__/__init__.cpython-310.pyc b/parrot/lib/python3.10/site-packages/wandb/sdk/launch/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5b0c5cc1a1c26002521d8ba797d86928d1eefb33 Binary files /dev/null and b/parrot/lib/python3.10/site-packages/wandb/sdk/launch/__pycache__/__init__.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/wandb/sdk/launch/__pycache__/_launch.cpython-310.pyc b/parrot/lib/python3.10/site-packages/wandb/sdk/launch/__pycache__/_launch.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..595c495ad09cdec03e637d6931aa6a1c242717c5 Binary files /dev/null and b/parrot/lib/python3.10/site-packages/wandb/sdk/launch/__pycache__/_launch.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/wandb/sdk/launch/__pycache__/_launch_add.cpython-310.pyc b/parrot/lib/python3.10/site-packages/wandb/sdk/launch/__pycache__/_launch_add.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c5799a63c5db9abd0015ef68cf91b26c02831378 Binary files /dev/null and b/parrot/lib/python3.10/site-packages/wandb/sdk/launch/__pycache__/_launch_add.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/wandb/sdk/launch/__pycache__/_project_spec.cpython-310.pyc b/parrot/lib/python3.10/site-packages/wandb/sdk/launch/__pycache__/_project_spec.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..596f6aaf32b2e03c8fabcc13e25966516576d575 Binary files /dev/null and b/parrot/lib/python3.10/site-packages/wandb/sdk/launch/__pycache__/_project_spec.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/wandb/sdk/launch/__pycache__/create_job.cpython-310.pyc b/parrot/lib/python3.10/site-packages/wandb/sdk/launch/__pycache__/create_job.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3df96c0a247b2cdc459ed5c0e9ed9b507b1f1d56 Binary files /dev/null and b/parrot/lib/python3.10/site-packages/wandb/sdk/launch/__pycache__/create_job.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/wandb/sdk/launch/__pycache__/errors.cpython-310.pyc b/parrot/lib/python3.10/site-packages/wandb/sdk/launch/__pycache__/errors.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..01b7e44347be2bf4fc9ad19ede6f36b667a313b2 Binary files /dev/null and b/parrot/lib/python3.10/site-packages/wandb/sdk/launch/__pycache__/errors.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/wandb/sdk/launch/__pycache__/git_reference.cpython-310.pyc b/parrot/lib/python3.10/site-packages/wandb/sdk/launch/__pycache__/git_reference.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..02596f67934b7f00f93eea7510688280b4faa4e0 Binary files /dev/null and b/parrot/lib/python3.10/site-packages/wandb/sdk/launch/__pycache__/git_reference.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/wandb/sdk/launch/__pycache__/loader.cpython-310.pyc b/parrot/lib/python3.10/site-packages/wandb/sdk/launch/__pycache__/loader.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a348696f7166d40426d2d033ed5498684b0707ac Binary files /dev/null and 
b/parrot/lib/python3.10/site-packages/wandb/sdk/launch/__pycache__/loader.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/wandb/sdk/launch/__pycache__/utils.cpython-310.pyc b/parrot/lib/python3.10/site-packages/wandb/sdk/launch/__pycache__/utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7734d6b864106142a18279f3f21cce88c4f98b3c Binary files /dev/null and b/parrot/lib/python3.10/site-packages/wandb/sdk/launch/__pycache__/utils.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/wandb/sdk/launch/__pycache__/wandb_reference.cpython-310.pyc b/parrot/lib/python3.10/site-packages/wandb/sdk/launch/__pycache__/wandb_reference.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b20249eda5145661aa39eb42f304c4938e898323 Binary files /dev/null and b/parrot/lib/python3.10/site-packages/wandb/sdk/launch/__pycache__/wandb_reference.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/wandb/sdk/launch/builder/__pycache__/__init__.cpython-310.pyc b/parrot/lib/python3.10/site-packages/wandb/sdk/launch/builder/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b6820467fb94298b80ac69ab155089e5da837de8 Binary files /dev/null and b/parrot/lib/python3.10/site-packages/wandb/sdk/launch/builder/__pycache__/__init__.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/wandb/sdk/launch/builder/__pycache__/docker_builder.cpython-310.pyc b/parrot/lib/python3.10/site-packages/wandb/sdk/launch/builder/__pycache__/docker_builder.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b888dccb5bf5f17fb4977336392b9ccebf51ffdd Binary files /dev/null and b/parrot/lib/python3.10/site-packages/wandb/sdk/launch/builder/__pycache__/docker_builder.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/wandb/sdk/launch/builder/abstract.py b/parrot/lib/python3.10/site-packages/wandb/sdk/launch/builder/abstract.py new file mode 100644 index 0000000000000000000000000000000000000000..6a9d168725531777e2f870283aaaff5d20512b23 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/wandb/sdk/launch/builder/abstract.py @@ -0,0 +1,156 @@ +"""Abstract plugin class defining the interface needed to build container images for W&B Launch.""" + +from abc import ABC, abstractmethod +from typing import TYPE_CHECKING, Any, Dict, Optional + +from wandb.sdk.launch.environment.abstract import AbstractEnvironment +from wandb.sdk.launch.registry.abstract import AbstractRegistry + +from .._project_spec import EntryPoint, LaunchProject +from ..registry.anon import AnonynmousRegistry +from ..utils import ( + AZURE_CONTAINER_REGISTRY_URI_REGEX, + ELASTIC_CONTAINER_REGISTRY_URI_REGEX, + GCP_ARTIFACT_REGISTRY_URI_REGEX, +) + +if TYPE_CHECKING: + from wandb.sdk.launch.agent.job_status_tracker import JobAndRunStatusTracker + + +class AbstractBuilder(ABC): + """Abstract plugin class defining the interface needed to build container images for W&B Launch.""" + + builder_type: str + environment: AbstractEnvironment + registry: AbstractRegistry + builder_config: Dict[str, Any] + + @abstractmethod + def __init__( + self, + environment: AbstractEnvironment, + registry: AbstractRegistry, + verify: bool = True, + ) -> None: + """Initialize a builder. + + Arguments: + builder_config: The builder config. + registry: The registry to use. + verify: Whether to verify the functionality of the builder. 
+ + Raises: + LaunchError: If the builder cannot be initialized or verified. + """ + raise NotImplementedError + + @classmethod + @abstractmethod + def from_config( + cls, + config: dict, + environment: AbstractEnvironment, + registry: AbstractRegistry, + ) -> "AbstractBuilder": + """Create a builder from a config dictionary. + + Arguments: + config: The config dictionary. + environment: The environment to use. + registry: The registry to use. + verify: Whether to verify the functionality of the builder. + login: Whether to login to the registry immediately. + + Returns: + The builder. + """ + raise NotImplementedError + + @abstractmethod + async def build_image( + self, + launch_project: LaunchProject, + entrypoint: EntryPoint, + job_tracker: Optional["JobAndRunStatusTracker"] = None, + ) -> str: + """Build the image for the given project. + + Arguments: + launch_project: The project to build. + entrypoint: The entry point to run in the built image. + job_tracker: Optional job status tracker. + + Returns: + The image name. + """ + raise NotImplementedError + + @abstractmethod + async def verify(self) -> None: + """Verify that the builder can be used to build images. + + Raises: + LaunchError: If the builder cannot be used to build images. + """ + raise NotImplementedError + + +def registry_from_uri(uri: str) -> AbstractRegistry: + """Create a registry helper object from a uri. + + This function parses the URI and determines which supported registry it + belongs to. It then creates a registry helper object for that registry. + The supported remote registry types are: + - Azure Container Registry + - Google Artifact Registry + - AWS Elastic Container Registry + + The format of the URI is as follows: + - Azure Container Registry: <registry-name>.azurecr.io/<repository>/<image> + - Google Artifact Registry: <region>-docker.pkg.dev/<project>/<repository>/<image> + - AWS Elastic Container Registry: <account-id>.dkr.ecr.<region>.amazonaws.com/<repository>/<image> + + Our classification of the registry is based on the domain name. For example, + if the uri contains `.azurecr.io`, we classify it as an Azure + Container Registry. If the uri contains `.dkr.ecr`, we classify + it as an AWS Elastic Container Registry. If the uri contains + `-docker.pkg.dev`, we classify it as a Google Artifact Registry. + + This function will attempt to load the appropriate cloud helpers for the given URI. + + The `https://` prefix is optional for all of the above. + + Arguments: + uri: The uri to create a registry from. + + Returns: + The registry. + + Raises: + LaunchError: If the registry helper cannot be loaded for the given URI.
+ """ + if uri.startswith("https://"): + uri = uri[len("https://") :] + + if AZURE_CONTAINER_REGISTRY_URI_REGEX.match(uri) is not None: + from wandb.sdk.launch.registry.azure_container_registry import ( + AzureContainerRegistry, + ) + + return AzureContainerRegistry(uri=uri) + + elif GCP_ARTIFACT_REGISTRY_URI_REGEX.match(uri) is not None: + from wandb.sdk.launch.registry.google_artifact_registry import ( + GoogleArtifactRegistry, + ) + + return GoogleArtifactRegistry(uri=uri) + + elif ELASTIC_CONTAINER_REGISTRY_URI_REGEX.match(uri) is not None: + from wandb.sdk.launch.registry.elastic_container_registry import ( + ElasticContainerRegistry, + ) + + return ElasticContainerRegistry(uri=uri) + + return AnonynmousRegistry(uri=uri) diff --git a/parrot/lib/python3.10/site-packages/wandb/sdk/launch/builder/context_manager.py b/parrot/lib/python3.10/site-packages/wandb/sdk/launch/builder/context_manager.py new file mode 100644 index 0000000000000000000000000000000000000000..c1ceb3a21ce4627e7468339653343364ceac8b1b --- /dev/null +++ b/parrot/lib/python3.10/site-packages/wandb/sdk/launch/builder/context_manager.py @@ -0,0 +1,235 @@ +import logging +import os +import shutil +import tempfile +from typing import Tuple + +from wandb.sdk.launch._project_spec import LaunchProject +from wandb.sdk.launch.builder.build import image_tag_from_dockerfile_and_source +from wandb.sdk.launch.errors import LaunchError +from wandb.sdk.launch.utils import get_current_python_version + +from .build import ( + _WANDB_DOCKERFILE_NAME, + get_base_setup, + get_docker_user, + get_entrypoint_setup, + get_requirements_section, + get_user_setup, +) +from .templates.dockerfile import DOCKERFILE_TEMPLATE + +_logger = logging.getLogger(__name__) + + +class BuildContextManager: + """Creates a build context for a container image from job source code. + + The dockerfile and build context may be specified by the job itself. If not, + the behavior for creating the build context is as follows: + + - If a Dockerfile.wandb is found adjacent to the entrypoint, the directory + containing the entrypoint is used as the build context and Dockerfile.wandb + is used as the Dockerfile. + + - If `override_dockerfile` is set on the LaunchProject, the directory + containing the Dockerfile is used as the build context and the Dockerfile + is used as the Dockerfile. `override_dockerfile` can be set in a launch + spec via the `-D` flag to `wandb launch` or in the `overrides` section + of the launch drawer. + + - If no dockerfile is set, a Dockerfile is generated from the job's + requirements and entrypoint. + """ + + def __init__(self, launch_project: LaunchProject): + """Initialize a BuildContextManager. + + Arguments: + launch_project: The launch project. + """ + self._launch_project = launch_project + assert self._launch_project.project_dir is not None + self._directory = tempfile.mkdtemp() + + def _generate_dockerfile(self, builder_type: str) -> str: + """Generate a Dockerfile for the container image. + + Arguments: + builder_type: The type of builder to use. One of "docker" or "kaniko". + + Returns: + The contents of the Dockerfile. 
+ """ + launch_project = self._launch_project + entry_point = ( + launch_project.override_entrypoint or launch_project.get_job_entry_point() + ) + + # get python versions truncated to major.minor to ensure image availability + if launch_project.python_version: + spl = launch_project.python_version.split(".")[:2] + py_version, py_major = (".".join(spl), spl[0]) + else: + py_version, py_major = get_current_python_version() + + python_build_image = ( + f"python:{py_version}" # use full python image for package installation + ) + requirements_section = get_requirements_section( + launch_project, self._directory, builder_type + ) + # ----- stage 2: base ----- + python_base_setup = get_base_setup(launch_project, py_version, py_major) + + # set up user info + username, userid = get_docker_user(launch_project, launch_project.resource) + user_setup = get_user_setup(username, userid, launch_project.resource) + workdir = f"/home/{username}" + + assert entry_point is not None + entrypoint_section = get_entrypoint_setup(entry_point) + + dockerfile_contents = DOCKERFILE_TEMPLATE.format( + py_build_image=python_build_image, + requirements_section=requirements_section, + base_setup=python_base_setup, + uid=userid, + user_setup=user_setup, + workdir=workdir, + entrypoint_section=entrypoint_section, + ) + return dockerfile_contents + + def create_build_context(self, builder_type: str) -> Tuple[str, str]: + """Create the build context for the container image. + + Returns: + A pair of str: the path to the build context locally and the image + tag computed from the Dockerfile. + """ + entrypoint = ( + self._launch_project.get_job_entry_point() + or self._launch_project.override_entrypoint + ) + assert entrypoint is not None + assert entrypoint.name is not None + assert self._launch_project.project_dir is not None + + # we use that as the build context. + build_context_root_dir = self._launch_project.project_dir + job_build_context = self._launch_project.job_build_context + if job_build_context: + full_path = os.path.join(build_context_root_dir, job_build_context) + if not os.path.exists(full_path): + raise LaunchError(f"Build context does not exist at {full_path}") + build_context_root_dir = full_path + + # This is the case where the user specifies a Dockerfile to use. + # We use the directory containing the Dockerfile as the build context. + override_dockerfile = self._launch_project.override_dockerfile + if override_dockerfile: + full_path = os.path.join( + build_context_root_dir, + override_dockerfile, + ) + if not os.path.exists(full_path): + raise LaunchError(f"Dockerfile does not exist at {full_path}") + shutil.copytree( + build_context_root_dir, + self._directory, + symlinks=True, + dirs_exist_ok=True, + ignore=shutil.ignore_patterns("fsmonitor--daemon.ipc"), + ) + shutil.copy( + full_path, + os.path.join(self._directory, _WANDB_DOCKERFILE_NAME), + ) + return self._directory, image_tag_from_dockerfile_and_source( + self._launch_project, open(full_path).read() + ) + + # If the job specifies a Dockerfile, we use that as the Dockerfile. 
+ job_dockerfile = self._launch_project.job_dockerfile + if job_dockerfile: + dockerfile_path = os.path.join(build_context_root_dir, job_dockerfile) + if not os.path.exists(dockerfile_path): + raise LaunchError(f"Dockerfile does not exist at {dockerfile_path}") + shutil.copytree( + build_context_root_dir, + self._directory, + symlinks=True, + dirs_exist_ok=True, + ignore=shutil.ignore_patterns("fsmonitor--daemon.ipc"), + ) + shutil.copy( + dockerfile_path, + os.path.join(self._directory, _WANDB_DOCKERFILE_NAME), + ) + return self._directory, image_tag_from_dockerfile_and_source( + self._launch_project, open(dockerfile_path).read() + ) + + # This is the case where we find Dockerfile.wandb adjacent to the + # entrypoint. We use the entrypoint directory as the build context. + entrypoint_dir = os.path.dirname(entrypoint.name) + if entrypoint_dir: + path = os.path.join( + build_context_root_dir, + entrypoint_dir, + _WANDB_DOCKERFILE_NAME, + ) + else: + path = os.path.join(build_context_root_dir, _WANDB_DOCKERFILE_NAME) + if os.path.exists( + path + ): # We found a Dockerfile.wandb adjacent to the entrypoint. + shutil.copytree( + os.path.dirname(path), + self._directory, + symlinks=True, + dirs_exist_ok=True, + ignore=shutil.ignore_patterns("fsmonitor--daemon.ipc"), + ) + # TODO: remove this once we make things more explicit for users + if entrypoint_dir: + new_path = os.path.basename(entrypoint.name) + entrypoint = self._launch_project.get_job_entry_point() + if entrypoint is not None: + entrypoint.update_entrypoint_path(new_path) + with open(path) as f: + docker_file_contents = f.read() + return self._directory, image_tag_from_dockerfile_and_source( + self._launch_project, docker_file_contents + ) + + # This is the case where we use our own Dockerfile template. We move + # the user code into a src directory in the build context. 
+ dst_path = os.path.join(self._directory, "src") + assert self._launch_project.project_dir is not None + shutil.copytree( + src=self._launch_project.project_dir, + dst=dst_path, + symlinks=True, + ignore=shutil.ignore_patterns("fsmonitor--daemon.ipc"), + ) + shutil.copy( + os.path.join(os.path.dirname(__file__), "templates", "_wandb_bootstrap.py"), + os.path.join(self._directory), + ) + if self._launch_project.python_version: + runtime_path = os.path.join(dst_path, "runtime.txt") + with open(runtime_path, "w") as fp: + fp.write(f"python-{self._launch_project.python_version}") + + # TODO: we likely don't need to pass the whole git repo into the container + # with open(os.path.join(directory, ".dockerignore"), "w") as f: + # f.write("**/.git") + with open(os.path.join(self._directory, _WANDB_DOCKERFILE_NAME), "w") as handle: + docker_file_contents = self._generate_dockerfile(builder_type=builder_type) + handle.write(docker_file_contents) + image_tag = image_tag_from_dockerfile_and_source( + self._launch_project, docker_file_contents + ) + return self._directory, image_tag diff --git a/parrot/lib/python3.10/site-packages/wandb/sdk/launch/builder/kaniko_builder.py b/parrot/lib/python3.10/site-packages/wandb/sdk/launch/builder/kaniko_builder.py new file mode 100644 index 0000000000000000000000000000000000000000..648dee815c8d80bbfb259864fa3081f49c401956 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/wandb/sdk/launch/builder/kaniko_builder.py @@ -0,0 +1,595 @@ +import asyncio +import base64 +import copy +import json +import logging +import os +import shutil +import tarfile +import tempfile +import time +import traceback +from typing import Any, Dict, Optional + +import wandb +from wandb.sdk.launch.agent.job_status_tracker import JobAndRunStatusTracker +from wandb.sdk.launch.builder.abstract import AbstractBuilder, registry_from_uri +from wandb.sdk.launch.environment.abstract import AbstractEnvironment +from wandb.sdk.launch.environment.azure_environment import AzureEnvironment +from wandb.sdk.launch.registry.abstract import AbstractRegistry +from wandb.sdk.launch.registry.azure_container_registry import AzureContainerRegistry +from wandb.sdk.launch.registry.elastic_container_registry import ( + ElasticContainerRegistry, +) +from wandb.sdk.launch.registry.google_artifact_registry import GoogleArtifactRegistry +from wandb.util import get_module + +from .._project_spec import EntryPoint, LaunchProject +from ..errors import LaunchError +from ..utils import ( + LOG_PREFIX, + get_kube_context_and_api_client, + warn_failed_packages_from_build_logs, +) +from .build import _WANDB_DOCKERFILE_NAME +from .context_manager import BuildContextManager + +get_module( + "kubernetes_asyncio", + required="Kaniko builder requires the kubernetes_asyncio package. 
Please install it with `pip install wandb[launch]`.", +) + +import kubernetes_asyncio as kubernetes # type: ignore # noqa: E402 +from kubernetes_asyncio import client # noqa: E402 + +_logger = logging.getLogger(__name__) + +_DEFAULT_BUILD_TIMEOUT_SECS = 1800 # 30 minute build timeout + +SERVICE_ACCOUNT_NAME = os.environ.get("WANDB_LAUNCH_SERVICE_ACCOUNT_NAME", "default") +PVC_NAME = os.environ.get("WANDB_LAUNCH_KANIKO_PVC_NAME") +PVC_MOUNT_PATH = ( + os.environ.get("WANDB_LAUNCH_KANIKO_PVC_MOUNT_PATH", "/kaniko").rstrip("/") + if PVC_NAME + else None +) +DOCKER_CONFIG_SECRET = os.environ.get("WANDB_LAUNCH_KANIKO_AUTH_SECRET") + + +if os.path.exists("/var/run/secrets/kubernetes.io/serviceaccount/namespace"): + with open("/var/run/secrets/kubernetes.io/serviceaccount/namespace") as f: + NAMESPACE = f.read().strip() +else: + NAMESPACE = "wandb" + + +def get_pod_name_safe(job: client.V1Job): + try: + return job.spec.template.metadata.name + except AttributeError: + return None + + +async def _wait_for_completion( + batch_client: client.BatchV1Api, job_name: str, deadline_secs: Optional[int] = None +) -> bool: + start_time = time.time() + while True: + job = await batch_client.read_namespaced_job_status(job_name, NAMESPACE) + if job.status.succeeded is not None and job.status.succeeded >= 1: + return True + elif job.status.failed is not None and job.status.failed >= 1: + wandb.termerror(f"{LOG_PREFIX}Build job {job.status.failed} failed {job}") + return False + wandb.termlog(f"{LOG_PREFIX}Waiting for build job to complete...") + if deadline_secs is not None and time.time() - start_time > deadline_secs: + return False + + await asyncio.sleep(5) + + +class KanikoBuilder(AbstractBuilder): + """Builds a docker image for a project using Kaniko.""" + + type = "kaniko" + + build_job_name: str + build_context_store: str + secret_name: Optional[str] + secret_key: Optional[str] + image: str + + def __init__( + self, + environment: AbstractEnvironment, + registry: AbstractRegistry, + build_job_name: str = "wandb-launch-container-build", + build_context_store: str = "", + secret_name: str = "", + secret_key: str = "", + image: str = "gcr.io/kaniko-project/executor:v1.11.0", + config: Optional[dict] = None, + ): + """Initialize a KanikoBuilder. + + Arguments: + environment (AbstractEnvironment): The environment to use. + registry (AbstractRegistry): The registry to use. + build_job_name (str, optional): The name of the build job. + build_context_store (str, optional): The name of the build context store. + secret_name (str, optional): The name of the secret to use for the registry. + secret_key (str, optional): The key of the secret to use for the registry. + verify (bool, optional): Whether to verify the functionality of the builder. + Defaults to True. + """ + self.environment = environment + self.registry = registry + self.build_job_name = build_job_name + self.build_context_store = build_context_store.rstrip("/") + self.secret_name = secret_name + self.secret_key = secret_key + self.image = image + self.kaniko_config = config or {} + + @classmethod + def from_config( + cls, + config: dict, + environment: AbstractEnvironment, + registry: AbstractRegistry, + verify: bool = True, + login: bool = True, + ) -> "AbstractBuilder": + """Create a KanikoBuilder from a config dict. + + Arguments: + config: A dict containing the builder config. Must contain a "type" key + with value "kaniko". + environment: The environment to use for the build. + registry: The registry to use for the build. 
+ verify: Whether to verify the builder config. + + Returns: + A KanikoBuilder instance. + """ + if config.get("type") != "kaniko": + raise LaunchError( + "Builder config must include 'type':'kaniko' to create a KanikoBuilder." + ) + build_context_store = config.get("build-context-store", "") + if build_context_store is None: + if not PVC_MOUNT_PATH: + raise LaunchError( + "You must specify a build context store for kaniko builds. " + "You can set builder.build-context-store in your agent config " + "to a valid s3, gcs, or azure blob storage URI. Or, configure " + "a persistent volume claim through the agent helm chart: " + "https://github.com/wandb/helm-charts/tree/main/charts/launch-agent" + ) + build_job_name = config.get("build-job-name", "wandb-launch-container-build") + secret_name = config.get("secret-name", "") + secret_key = config.get("secret-key", "") + kaniko_image = config.get( + "kaniko-image", "gcr.io/kaniko-project/executor:v1.11.0" + ) + image_uri = config.get("destination") + if image_uri is not None: + registry = registry_from_uri(image_uri) + kaniko_config = config.get("kaniko-config", {}) + + return cls( + environment, + registry, + build_context_store=build_context_store, + build_job_name=build_job_name, + secret_name=secret_name, + secret_key=secret_key, + image=kaniko_image, + config=kaniko_config, + ) + + async def verify(self) -> None: + """Verify that the builder config is valid. + + Raises: + LaunchError: If the builder config is invalid. + """ + if self.build_context_store: + await self.environment.verify_storage_uri(self.build_context_store) + + def login(self) -> None: + """Login to the registry.""" + pass + + async def _create_docker_ecr_config_map( + self, job_name: str, corev1_client: client.CoreV1Api, repository: str + ) -> None: + username, password = await self.registry.get_username_password() + encoded = base64.b64encode(f"{username}:{password}".encode()).decode("utf-8") + ecr_config_map = client.V1ConfigMap( + api_version="v1", + kind="ConfigMap", + metadata=client.V1ObjectMeta( + name=f"docker-config-{job_name}", + namespace=NAMESPACE, + ), + data={ + "config.json": json.dumps( + { + "auths": { + f"{await self.registry.get_repo_uri()}": {"auth": encoded} + } + } + ) + }, + immutable=True, + ) + await corev1_client.create_namespaced_config_map(NAMESPACE, ecr_config_map) + + async def _delete_docker_ecr_config_map( + self, job_name: str, client: client.CoreV1Api + ) -> None: + if self.secret_name: + await client.delete_namespaced_config_map( + f"docker-config-{job_name}", NAMESPACE + ) + + async def _upload_build_context(self, run_id: str, context_path: str) -> str: + # create a tar archive of the build context and upload it to s3 + context_file = tempfile.NamedTemporaryFile(delete=False) + with tarfile.TarFile.open(fileobj=context_file, mode="w:gz") as context_tgz: + context_tgz.add(context_path, arcname=".") + context_file.close() + if PVC_MOUNT_PATH is None: + destination = f"{self.build_context_store}/{run_id}.tgz" + if self.environment is None: + raise LaunchError("No environment specified for Kaniko build.") + await self.environment.upload_file(context_file.name, destination) + return destination + else: + destination = f"{PVC_MOUNT_PATH}/{run_id}.tgz" + try: + shutil.copy(context_file.name, destination) + except Exception as e: + raise LaunchError( + f"Error copying build context to PVC mounted at {PVC_MOUNT_PATH}: {e}" + ) from e + return f"tar:///context/{run_id}.tgz" + + async def build_image( + self, + launch_project: LaunchProject, + 
entrypoint: EntryPoint, + job_tracker: Optional[JobAndRunStatusTracker] = None, + ) -> str: + await self.verify() + + build_context_manager = BuildContextManager(launch_project=launch_project) + context_path, image_tag = build_context_manager.create_build_context("kaniko") + run_id = launch_project.run_id + repo_uri = await self.registry.get_repo_uri() + image_uri = repo_uri + ":" + image_tag + + # The DOCKER_CONFIG_SECRET option is mutually exclusive with the + # registry classes, so we must skip the check for image existence in + # that case. + if not launch_project.build_required(): + if DOCKER_CONFIG_SECRET: + wandb.termlog( + f"Skipping check for existing image {image_uri} due to custom dockerconfig." + ) + else: + if await self.registry.check_image_exists(image_uri): + return image_uri + + _logger.info(f"Building image {image_uri}...") + _, api_client = await get_kube_context_and_api_client( + kubernetes, launch_project.resource_args + ) + # TODO: use same client as kubernetes_runner.py + batch_v1 = client.BatchV1Api(api_client) + core_v1 = client.CoreV1Api(api_client) + + build_job_name = f"{self.build_job_name}-{run_id}" + + build_context = await self._upload_build_context(run_id, context_path) + build_job = await self._create_kaniko_job( + build_job_name, repo_uri, image_uri, build_context, core_v1, api_client + ) + wandb.termlog(f"{LOG_PREFIX}Created kaniko job {build_job_name}") + + try: + # DOCKER_CONFIG_SECRET is a user provided dockerconfigjson. Skip our + # dockerconfig handling if it's set. + if ( + isinstance(self.registry, AzureContainerRegistry) + and not DOCKER_CONFIG_SECRET + ): + dockerfile_config_map = client.V1ConfigMap( + metadata=client.V1ObjectMeta( + name=f"docker-config-{build_job_name}" + ), + data={ + "config.json": json.dumps( + { + "credHelpers": { + f"{self.registry.registry_name}.azurecr.io": "acr-env" + } + } + ) + }, + ) + await core_v1.create_namespaced_config_map( + "wandb", dockerfile_config_map + ) + if self.secret_name: + await self._create_docker_ecr_config_map( + build_job_name, core_v1, repo_uri + ) + k8s_job = await batch_v1.create_namespaced_job(NAMESPACE, build_job) + # wait for up to three times the build timeout since the job might take time to schedule + if not await _wait_for_completion( + batch_v1, build_job_name, 3 * _DEFAULT_BUILD_TIMEOUT_SECS + ): + if job_tracker: + job_tracker.set_err_stage("build") + msg = f"Failed to build image in kaniko for job {run_id}." + pod_name = get_pod_name_safe(k8s_job) + if pod_name: + msg += f" View logs with `kubectl logs -n {NAMESPACE} {pod_name}`." 
+ raise Exception(msg) + try: + pods_from_job = await core_v1.list_namespaced_pod( + namespace=NAMESPACE, label_selector=f"job-name={build_job_name}" + ) + if len(pods_from_job.items) != 1: + raise Exception( + f"Expected 1 pod for job {build_job_name}, found {len(pods_from_job.items)}" + ) + pod_name = pods_from_job.items[0].metadata.name + logs = await core_v1.read_namespaced_pod_log(pod_name, NAMESPACE) + warn_failed_packages_from_build_logs( + logs, image_uri, launch_project.api, job_tracker + ) + except Exception as e: + wandb.termwarn( + f"{LOG_PREFIX}Failed to get logs for kaniko job {build_job_name}: {e}" + ) + except Exception as e: + wandb.termerror( + f"{LOG_PREFIX}Exception when creating Kubernetes resources: {e}\n" + ) + raise e + finally: + wandb.termlog(f"{LOG_PREFIX}Cleaning up resources") + try: + if ( + isinstance(self.registry, AzureContainerRegistry) + and not DOCKER_CONFIG_SECRET + ): + await core_v1.delete_namespaced_config_map( + f"docker-config-{build_job_name}", "wandb" + ) + if self.secret_name: + await self._delete_docker_ecr_config_map(build_job_name, core_v1) + await batch_v1.delete_namespaced_job(build_job_name, NAMESPACE) + except Exception as e: + traceback.print_exc() + raise LaunchError( + f"Exception during Kubernetes resource clean up {e}" + ) from e + return image_uri + + async def _create_kaniko_job( + self, + job_name: str, + repository: str, + image_tag: str, + build_context_path: str, + core_client: client.CoreV1Api, + api_client, + ) -> Dict[str, Any]: + job = copy.deepcopy(self.kaniko_config) + job_metadata = job.get("metadata", {}) + job_labels = job_metadata.get("labels", {}) + job_spec = job.get("spec", {}) + pod_template = job_spec.get("template", {}) + pod_metadata = pod_template.get("metadata", {}) + pod_labels = pod_metadata.get("labels", {}) + pod_spec = pod_template.get("spec", {}) + volumes = pod_spec.get("volumes", []) + containers = pod_spec.get("containers") or [{}] + if len(containers) > 1: + raise LaunchError( + "Multiple container configs not supported for kaniko builder." + ) + container = containers[0] + volume_mounts = container.get("volumeMounts", []) + env = container.get("env", []) + custom_args = container.get("args", []) + + if PVC_MOUNT_PATH: + volumes.append( + {"name": "kaniko-pvc", "persistentVolumeClaim": {"claimName": PVC_NAME}} + ) + volume_mounts.append({"name": "kaniko-pvc", "mountPath": "/context"}) + + if bool(self.secret_name) != bool(self.secret_key): + raise LaunchError( + "Both secret_name and secret_key or neither must be specified " + "for kaniko build. You provided only one of them." + ) + if isinstance(self.registry, ElasticContainerRegistry): + env.append( + { + "name": "AWS_REGION", + "value": self.registry.region, + } + ) + # TODO(ben): Refactor all of this environment/registry + # specific stuff into methods of those classes. + if isinstance(self.environment, AzureEnvironment): + # Use the core api to check if the secret exists + try: + await core_client.read_namespaced_secret( + "azure-storage-access-key", + "wandb", + ) + except Exception as e: + raise LaunchError( + "Secret azure-storage-access-key does not exist in " + "namespace wandb. Please create it with the key password " + "set to your azure storage access key." 
+ ) from e + env.append( + { + "name": "AZURE_STORAGE_ACCESS_KEY", + "valueFrom": { + "secretKeyRef": { + "name": "azure-storage-access-key", + "key": "password", + } + }, + } + ) + if DOCKER_CONFIG_SECRET: + volumes.append( + { + "name": "kaniko-docker-config", + "secret": { + "secretName": DOCKER_CONFIG_SECRET, + "items": [ + { + "key": ".dockerconfigjson", + "path": "config.json", + } + ], + }, + } + ) + volume_mounts.append( + {"name": "kaniko-docker-config", "mountPath": "/kaniko/.docker"} + ) + elif self.secret_name and self.secret_key: + volumes.append( + { + "name": "docker-config", + "configMap": {"name": f"docker-config-{job_name}"}, + } + ) + volume_mounts.append( + {"name": "docker-config", "mountPath": "/kaniko/.docker"} + ) + # TODO(ben): I don't like conditioning on the registry type here. As a + # future change I want the registry and environment classes to provide + # a list of environment variables and volume mounts that need to be + # added to the job. The environment class provides credentials for + # build context access, and the registry class provides credentials + # for pushing the image. This way we can have separate secrets for + # each and support build contexts and registries that require + # different credentials. + if isinstance(self.registry, ElasticContainerRegistry): + mount_path = "/root/.aws" + key = "credentials" + elif isinstance(self.registry, GoogleArtifactRegistry): + mount_path = "/kaniko/.config/gcloud" + key = "config.json" + env.append( + { + "name": "GOOGLE_APPLICATION_CREDENTIALS", + "value": "/kaniko/.config/gcloud/config.json", + } + ) + else: + wandb.termwarn( + f"{LOG_PREFIX}Automatic credential handling is not supported for registry type {type(self.registry)}. Build job: {self.build_job_name}" + ) + volumes.append( + { + "name": self.secret_name, + "secret": { + "secretName": self.secret_name, + "items": [{"key": self.secret_key, "path": key}], + }, + } + ) + volume_mounts.append( + { + "name": self.secret_name, + "mountPath": mount_path, + "readOnly": True, + } + ) + if ( + isinstance(self.registry, AzureContainerRegistry) + and not DOCKER_CONFIG_SECRET + ): + # Add the docker config map + volumes.append( + { + "name": "docker-config", + "configMap": {"name": f"docker-config-{job_name}"}, + } + ) + volume_mounts.append( + {"name": "docker-config", "mountPath": "/kaniko/.docker/"} + ) + # Kaniko doesn't want https:// at the beginning of the image tag. + destination = image_tag + if destination.startswith("https://"): + destination = destination.replace("https://", "") + args = { + "--context": build_context_path, + "--dockerfile": _WANDB_DOCKERFILE_NAME, + "--destination": destination, + "--cache": "true", + "--cache-repo": repository.replace("https://", ""), + "--snapshot-mode": "redo", + "--compressed-caching": "false", + } + for custom_arg in custom_args: + arg_name, arg_value = custom_arg.split("=", 1) + args[arg_name] = arg_value + parsed_args = [ + f"{arg_name}={arg_value}" for arg_name, arg_value in args.items() + ] + container["args"] = parsed_args + + # Apply the rest of our defaults + pod_labels["wandb"] = "launch" + # This annotation is required to enable azure workload identity. + # Don't add this label if using a docker config secret for auth. 
+ if ( + isinstance(self.registry, AzureContainerRegistry) + and not DOCKER_CONFIG_SECRET + ): + pod_labels["azure.workload.identity/use"] = "true" + pod_spec["restartPolicy"] = pod_spec.get("restartPolicy", "Never") + pod_spec["activeDeadlineSeconds"] = pod_spec.get( + "activeDeadlineSeconds", _DEFAULT_BUILD_TIMEOUT_SECS + ) + pod_spec["serviceAccountName"] = pod_spec.get( + "serviceAccountName", SERVICE_ACCOUNT_NAME + ) + job_spec["backoffLimit"] = job_spec.get("backoffLimit", 0) + job_labels["wandb"] = "launch" + job_metadata["namespace"] = job_metadata.get("namespace", NAMESPACE) + job_metadata["name"] = job_metadata.get("name", job_name) + job["apiVersion"] = "batch/v1" + job["kind"] = "Job" + + # Apply all nested configs from the bottom up + pod_metadata["labels"] = pod_labels + pod_template["metadata"] = pod_metadata + container["name"] = container.get("name", "wandb-container-build") + container["image"] = container.get("image", self.image) + container["volumeMounts"] = volume_mounts + container["env"] = env + pod_spec["containers"] = [container] + pod_spec["volumes"] = volumes + pod_template["spec"] = pod_spec + job_spec["template"] = pod_template + job_metadata["labels"] = job_labels + job["metadata"] = job_metadata + job["spec"] = job_spec + return job diff --git a/parrot/lib/python3.10/site-packages/wandb/sdk/launch/builder/noop.py b/parrot/lib/python3.10/site-packages/wandb/sdk/launch/builder/noop.py new file mode 100644 index 0000000000000000000000000000000000000000..52a64cf5e17dab77bf1792a540421f99e22b5c55 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/wandb/sdk/launch/builder/noop.py @@ -0,0 +1,58 @@ +"""NoOp builder implementation.""" + +from typing import Any, Dict, Optional + +from wandb.sdk.launch.builder.abstract import AbstractBuilder +from wandb.sdk.launch.environment.abstract import AbstractEnvironment +from wandb.sdk.launch.errors import LaunchError +from wandb.sdk.launch.registry.abstract import AbstractRegistry + +from .._project_spec import EntryPoint, LaunchProject +from ..agent.job_status_tracker import JobAndRunStatusTracker + + +class NoOpBuilder(AbstractBuilder): + """NoOp builder.""" + + type = "noop" + + def __init__( + self, + builder_config: Dict[str, Any], + environment: AbstractEnvironment, + registry: AbstractRegistry, + ) -> None: + """Initialize a NoOpBuilder.""" + self.environment = environment + self.registry = registry + + @classmethod + def from_config( + cls, + config: dict, + environment: AbstractEnvironment, + registry: AbstractRegistry, + verify: bool = True, + ) -> "AbstractBuilder": + """Create a noop builder from a config.""" + return cls(config, environment, registry) + + async def verify(self) -> None: + """Verify the builder.""" + raise LaunchError("Attempted to verify noop builder.") + + async def build_image( + self, + launch_project: LaunchProject, + entrypoint: EntryPoint, + job_tracker: Optional[JobAndRunStatusTracker] = None, + ) -> str: + """Build the image. + + For this we raise a launch error since it can't build. + """ + raise LaunchError( + "Attempted build with noop builder. Specify a builder in your launch config at ~/.config/wandb/launch-config.yaml.\n" + "Note: Jobs sourced from git repos and code artifacts require a builder, while jobs sourced from Docker images do not.\n" + "See https://docs.wandb.ai/guides/launch/create-job." 
+ ) diff --git a/parrot/lib/python3.10/site-packages/wandb/sdk/launch/builder/templates/__pycache__/_wandb_bootstrap.cpython-310.pyc b/parrot/lib/python3.10/site-packages/wandb/sdk/launch/builder/templates/__pycache__/_wandb_bootstrap.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..49869ec2fc77dd3b4be8278e3080dcec76ab24f7 Binary files /dev/null and b/parrot/lib/python3.10/site-packages/wandb/sdk/launch/builder/templates/__pycache__/_wandb_bootstrap.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/wandb/sdk/launch/builder/templates/_wandb_bootstrap.py b/parrot/lib/python3.10/site-packages/wandb/sdk/launch/builder/templates/_wandb_bootstrap.py new file mode 100644 index 0000000000000000000000000000000000000000..81e9197775943aa057f74cf2e07a876f1a9452e4 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/wandb/sdk/launch/builder/templates/_wandb_bootstrap.py @@ -0,0 +1,188 @@ +import json +import os +import re +import subprocess +import sys +from typing import List, Optional, Set + +FAILED_PACKAGES_PREFIX = "ERROR: Failed to install: " +FAILED_PACKAGES_POSTFIX = ". During automated build process." +ONLY_INCLUDE = {x for x in os.getenv("WANDB_ONLY_INCLUDE", "").split(",") if x != ""} +OPTS = [] +# If the builder doesn't support buildx no need to use the cache +if os.getenv("WANDB_DISABLE_CACHE"): + OPTS.append("--no-cache-dir") +# When installing all packages from requirements.frozen.txt no need to resolve deps +if len(ONLY_INCLUDE) == 0: + OPTS.append("--no-deps") +# When installing the intersection of requirements.frozen.txt and requirements.txt +# force the frozen versions +else: + OPTS.append("--force") + +TORCH_DEP_REGEX = r"torch(vision|audio)?==\d+\.\d+\.\d+(\+(?:cu[\d]{2,3})|(?:\+cpu))?" + + +def install_deps( + deps: List[str], + failed: Optional[Set[str]] = None, + extra_index: Optional[str] = None, + opts: Optional[List[str]] = None, +) -> Optional[Set[str]]: + """Install pip dependencies. 
+ + Arguments: + deps {List[str]} -- List of dependencies to install + failed (set, None): The libraries that failed to install + + Returns: + deps (str[], None): The dependencies that failed to install + """ + try: + subprocess.check_output(["pip", "install", "uv"], stderr=subprocess.STDOUT) + # Include only uri if @ is present + clean_deps = [d.split("@")[-1].strip() if "@" in d else d for d in deps] + index_args = ["--extra-index-url", extra_index] if extra_index else [] + print("installing {}...".format(", ".join(clean_deps))) + opts = opts or [] + args = ["uv", "pip", "install"] + opts + clean_deps + index_args + sys.stdout.flush() + subprocess.check_output(args, stderr=subprocess.STDOUT) + return failed + except subprocess.CalledProcessError as e: + if failed is None: + failed = set() + num_failed = len(failed) + current_pkg = None + for line in e.output.decode("utf8").splitlines(): + # Since the name of the package might not be on the same line as + # the error msg, keep track of the currently installing package + current_pkg = get_current_package(line, clean_deps, current_pkg) + + if "error: subprocess-exited-with-error" in line: + if current_pkg is not None: + failed.add(current_pkg) + elif line.startswith("ERROR:"): + clean_dep = find_package_in_error_string(clean_deps, line) + if clean_dep is not None: + if clean_dep in deps: + failed.add(clean_dep) + else: + for d in deps: + if clean_dep in d: + failed.add(d.replace(" ", "")) + break + if len(set(clean_deps) - failed) == 0: + return failed + elif len(failed) > num_failed: + return install_deps( + list(set(clean_deps) - failed), + failed, + extra_index=extra_index, + opts=opts, + ) + else: + return failed + + +def main() -> None: + """Install deps in requirements.frozen.txt.""" + extra_index = None + torch_reqs = [] + if os.path.exists("requirements.frozen.txt"): + with open("requirements.frozen.txt") as f: + print("Installing frozen dependencies...") + reqs = [] + for req in f: + if ( + len(ONLY_INCLUDE) == 0 + or req in ONLY_INCLUDE + or req.split("=")[0].lower() in ONLY_INCLUDE + ): + # can't pip install wandb==0.*.*.dev1 through pip. Lets just install wandb for now + if req.startswith("wandb==") and "dev1" in req: + req = "wandb" + match = re.match( + TORCH_DEP_REGEX, + req, + ) + if match: + variant = match.group(2) + if variant: + extra_index = ( + f"https://download.pytorch.org/whl/{variant[1:]}" + ) + torch_reqs.append(req.strip().replace(" ", "")) + else: + reqs.append(req.strip().replace(" ", "")) + else: + print(f"Ignoring requirement: {req} from frozen requirements") + failed = install_deps(reqs, opts=OPTS) or set() + with open("_wandb_bootstrap_errors.json", "w") as f: + f.write(json.dumps({"pip": list(failed)})) + if len(failed) > 0: + sys.stderr.write( + FAILED_PACKAGES_PREFIX + ",".join(failed) + FAILED_PACKAGES_POSTFIX + ) + sys.stderr.flush() + install_deps(torch_reqs, extra_index=extra_index) + else: + print("No frozen requirements found") + + +def add_version_to_package_name(deps: List[str], package: str) -> Optional[str]: + """Add the associated version to a package name. + + For example: `my-package` -> `my-package==1.0.0` + """ + for dep in deps: + if dep.split("==")[0] == package: + return dep + return None + + +def get_current_package( + line: str, deps: List[str], current_pkg: Optional[str] +) -> Optional[str]: + """Tries to pull a package name from the line. 
+ + Used to keep track of what the currently-installing package is, + in case an error message isn't on the same line as the package + """ + # "Collecting my-package==1.0.0" + if line.startswith("Collecting"): + return line.split(" ")[1] + # "Building wheel for my-package (pyproject.toml): finished with status 'error'" + elif line.strip().startswith("Building wheel") and line.strip().endswith( + "finished with status 'error'" + ): + return add_version_to_package_name(deps, line.strip().split(" ")[3]) + # "Running setup.py install for my-package: finished with status 'error'" + elif line.strip().startswith("Running setup.py install") and line.strip().endswith( + "finished with status 'error'" + ): + return add_version_to_package_name(deps, line.strip().split(" ")[4][:-1]) + return current_pkg + + +# hacky way to get the name of the requirement that failed +# attempt last word which is the name of the package often +# fall back to checking all words in the line for the package name +def find_package_in_error_string(deps: List[str], line: str) -> Optional[str]: + # if the last word in the error string is in the list of deps, return it + last_word = line.split(" ")[-1] + if last_word in deps: + return last_word + # if the last word is not in the list of deps, check all words + # TODO: this could report the wrong package if the error string + # contains a reference to another package in the deps + # before the package that failed to install + for word in line.split(" "): + if word.strip(",") in deps: + return word + # if we can't find the package, return None + return None + + +if __name__ == "__main__": + main() diff --git a/parrot/lib/python3.10/site-packages/wandb/sdk/launch/inputs/__pycache__/manage.cpython-310.pyc b/parrot/lib/python3.10/site-packages/wandb/sdk/launch/inputs/__pycache__/manage.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..270b8dace280014ff27c2b0d1445c1ffe64faf2b Binary files /dev/null and b/parrot/lib/python3.10/site-packages/wandb/sdk/launch/inputs/__pycache__/manage.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/wandb/sdk/launch/inputs/__pycache__/schema.cpython-310.pyc b/parrot/lib/python3.10/site-packages/wandb/sdk/launch/inputs/__pycache__/schema.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f3c8df0ac7b59fecd6950b5ceb9628712234bb20 Binary files /dev/null and b/parrot/lib/python3.10/site-packages/wandb/sdk/launch/inputs/__pycache__/schema.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/wandb/sdk/launch/inputs/files.py b/parrot/lib/python3.10/site-packages/wandb/sdk/launch/inputs/files.py new file mode 100644 index 0000000000000000000000000000000000000000..e0fb790a0e19eae4dea6974d5a3e878da3352fee --- /dev/null +++ b/parrot/lib/python3.10/site-packages/wandb/sdk/launch/inputs/files.py @@ -0,0 +1,148 @@ +import json +import os +from typing import Any, Dict + +import yaml + +from ..errors import LaunchError + +FILE_OVERRIDE_ENV_VAR = "WANDB_LAUNCH_FILE_OVERRIDES" + + +class FileOverrides: + """Singleton that read file overrides json from environment variables.""" + + _instance = None + + def __new__(cls): + if cls._instance is None: + cls._instance = object.__new__(cls) + cls._instance.overrides = {} + cls._instance.load() + return cls._instance + + def load(self) -> None: + """Load overrides from an environment variable.""" + overrides = os.environ.get(FILE_OVERRIDE_ENV_VAR) + if overrides is None: + if f"{FILE_OVERRIDE_ENV_VAR}_0" in os.environ: 
+ overrides = "" + idx = 0 + while f"{FILE_OVERRIDE_ENV_VAR}_{idx}" in os.environ: + overrides += os.environ[f"{FILE_OVERRIDE_ENV_VAR}_{idx}"] + idx += 1 + if overrides: + try: + contents = json.loads(overrides) + if not isinstance(contents, dict): + raise LaunchError(f"Invalid JSON in {FILE_OVERRIDE_ENV_VAR}") + self.overrides = contents + except json.JSONDecodeError: + raise LaunchError(f"Invalid JSON in {FILE_OVERRIDE_ENV_VAR}") + + +def config_path_is_valid(path: str) -> None: + """Validate a config file path. + + This function checks if a given config file path is valid. A valid path + should meet the following criteria: + + - The path must be expressed as a relative path without any upwards path + traversal, e.g. `../config.json`. + - The file specified by the path must exist. + - The file must have a supported extension (`.json`, `.yaml`, or `.yml`). + + Args: + path (str): The path to validate. + + Raises: + LaunchError: If the path is not valid. + """ + if os.path.isabs(path): + raise LaunchError( + f"Invalid config path: {path}. Please provide a relative path." + ) + if ".." in path: + raise LaunchError( + f"Invalid config path: {path}. Please provide a relative path " + "without any upward path traversal, e.g. `../config.json`." + ) + path = os.path.normpath(path) + if not os.path.exists(path): + raise LaunchError(f"Invalid config path: {path}. File does not exist.") + if not any(path.endswith(ext) for ext in [".json", ".yaml", ".yml"]): + raise LaunchError( + f"Invalid config path: {path}. Only JSON and YAML files are supported." + ) + + +def override_file(path: str) -> None: + """Check for file overrides in the environment and apply them if found.""" + file_overrides = FileOverrides() + if path in file_overrides.overrides: + overrides = file_overrides.overrides.get(path) + if overrides is not None: + config = _read_config_file(path) + _update_dict(config, overrides) + _write_config_file(path, config) + + +def _write_config_file(path: str, config: Any) -> None: + """Write a config file to disk. + + Args: + path (str): The path to the config file. + config (Any): The contents of the config file as a Python object. + + Raises: + LaunchError: If the file extension is not supported. + """ + _, ext = os.path.splitext(path) + if ext == ".json": + with open(path, "w") as f: + json.dump(config, f, indent=2) + elif ext in [".yaml", ".yml"]: + with open(path, "w") as f: + yaml.safe_dump(config, f) + else: + raise LaunchError(f"Unsupported file extension: {ext}") + + +def _read_config_file(path: str) -> Any: + """Read a config file from disk. + + Args: + path (str): The path to the config file. + + Returns: + Any: The contents of the config file as a Python object. + """ + _, ext = os.path.splitext(path) + if ext == ".json": + with open( + path, + ) as f: + return json.load(f) + elif ext in [".yaml", ".yml"]: + with open( + path, + ) as f: + return yaml.safe_load(f) + else: + raise LaunchError(f"Unsupported file extension: {ext}") + + +def _update_dict(target: Dict, source: Dict) -> None: + """Update a dictionary with the contents of another dictionary. + + Args: + target (Dict): The dictionary to update. + source (Dict): The dictionary to update from. 
+ """ + for key, value in source.items(): + if isinstance(value, dict): + if key not in target: + target[key] = {} + _update_dict(target[key], value) + else: + target[key] = value diff --git a/parrot/lib/python3.10/site-packages/wandb/sdk/launch/inputs/internal.py b/parrot/lib/python3.10/site-packages/wandb/sdk/launch/inputs/internal.py new file mode 100644 index 0000000000000000000000000000000000000000..9d52fedfcf3cac1cb7c5fc6d7183feb9db169c9a --- /dev/null +++ b/parrot/lib/python3.10/site-packages/wandb/sdk/launch/inputs/internal.py @@ -0,0 +1,315 @@ +"""The layer between launch sdk user code and the wandb internal process. + +If there is an active run this communication is done through the wandb run's +backend interface. + +If there is no active run, the messages are staged on the StagedLaunchInputs +singleton and sent when a run is created. +""" + +import os +import pathlib +import shutil +import tempfile +from typing import Any, Dict, List, Optional + +import wandb +import wandb.data_types +from wandb.sdk.launch.errors import LaunchError +from wandb.sdk.launch.inputs.schema import META_SCHEMA +from wandb.sdk.wandb_run import Run +from wandb.util import get_module + +from .files import config_path_is_valid, override_file + +PERIOD = "." +BACKSLASH = "\\" +LAUNCH_MANAGED_CONFIGS_DIR = "_wandb_configs" + + +class ConfigTmpDir: + """Singleton for managing temporary directories for configuration files. + + Any configuration files designated as inputs to a launch job are copied to + a temporary directory. This singleton manages the temporary directory and + provides paths to the configuration files. + """ + + _instance = None + + def __new__(cls): + if cls._instance is None: + cls._instance = object.__new__(cls) + return cls._instance + + def __init__(self): + if not hasattr(self, "_tmp_dir"): + self._tmp_dir = tempfile.mkdtemp() + self._configs_dir = os.path.join(self._tmp_dir, LAUNCH_MANAGED_CONFIGS_DIR) + os.mkdir(self._configs_dir) + + @property + def tmp_dir(self): + return pathlib.Path(self._tmp_dir) + + @property + def configs_dir(self): + return pathlib.Path(self._configs_dir) + + +class JobInputArguments: + """Arguments for the publish_job_input of Interface.""" + + def __init__( + self, + include: Optional[List[str]] = None, + exclude: Optional[List[str]] = None, + schema: Optional[dict] = None, + file_path: Optional[str] = None, + run_config: Optional[bool] = None, + ): + self.include = include + self.exclude = exclude + self.schema = schema + self.file_path = file_path + self.run_config = run_config + + +class StagedLaunchInputs: + _instance = None + + def __new__(cls): + if cls._instance is None: + cls._instance = object.__new__(cls) + return cls._instance + + def __init__(self) -> None: + if not hasattr(self, "_staged_inputs"): + self._staged_inputs: List[JobInputArguments] = [] + + def add_staged_input( + self, + input_arguments: JobInputArguments, + ): + self._staged_inputs.append(input_arguments) + + def apply(self, run: Run): + """Apply the staged inputs to the given run.""" + for input in self._staged_inputs: + _publish_job_input(input, run) + + +def _publish_job_input( + input: JobInputArguments, + run: Run, +) -> None: + """Publish a job input to the backend interface of the given run. + + Arguments: + input (JobInputArguments): The arguments for the job input. + run (Run): The run to publish the job input to. 
+ """ + assert run._backend is not None + assert run._backend.interface is not None + assert input.run_config is not None + + interface = run._backend.interface + if input.file_path: + config_dir = ConfigTmpDir() + dest = os.path.join(config_dir.configs_dir, input.file_path) + run.save(dest, base_path=config_dir.tmp_dir) + interface.publish_job_input( + include_paths=[_split_on_unesc_dot(path) for path in input.include] + if input.include + else [], + exclude_paths=[_split_on_unesc_dot(path) for path in input.exclude] + if input.exclude + else [], + input_schema=input.schema, + run_config=input.run_config, + file_path=input.file_path or "", + ) + + +def _replace_refs_and_allofs(schema: dict, defs: Optional[dict]) -> dict: + """Recursively fix JSON schemas with common issues. + + 1. Replaces any instances of $ref with their associated definition in defs + 2. Removes any "allOf" lists that only have one item, "lifting" the item up + See test_internal.py for examples + """ + ret: Dict[str, Any] = {} + if "$ref" in schema and defs: + # Reference found, replace it with its definition + def_key = schema.pop("$ref").split("#/$defs/")[1] + # Also run recursive replacement in case a ref contains more refs + ret = _replace_refs_and_allofs(defs.pop(def_key), defs) + for key, val in schema.items(): + if isinstance(val, dict): + # Step into dicts recursively + new_val_dict = _replace_refs_and_allofs(val, defs) + ret[key] = new_val_dict + elif isinstance(val, list): + # Step into each item in the list + new_val_list = [] + for item in val: + if isinstance(item, dict): + new_val_list.append(_replace_refs_and_allofs(item, defs)) + else: + new_val_list.append(item) + # Lift up allOf blocks with only one item + if ( + key == "allOf" + and len(new_val_list) == 1 + and isinstance(new_val_list[0], dict) + ): + ret.update(new_val_list[0]) + else: + ret[key] = new_val_list + else: + # For anything else (str, int, etc) keep it as-is + ret[key] = val + return ret + + +def _validate_schema(schema: dict) -> None: + jsonschema = get_module( + "jsonschema", + required="Setting job schema requires the jsonschema package. Please install it with `pip install 'wandb[launch]'`.", + lazy=False, + ) + validator = jsonschema.Draft202012Validator(META_SCHEMA) + errs = sorted(validator.iter_errors(schema), key=str) + if errs: + wandb.termwarn(f"Schema includes unhandled or invalid configurations:\n{errs}") + + +def handle_config_file_input( + path: str, + include: Optional[List[str]] = None, + exclude: Optional[List[str]] = None, + schema: Optional[Any] = None, +): + """Declare an overridable configuration file for a launch job. + + The configuration file is copied to a temporary directory and the path to + the copy is sent to the backend interface of the active run and used to + configure the job builder. + + If there is no active run, the configuration file is staged and sent when a + run is created. + """ + config_path_is_valid(path) + override_file(path) + tmp_dir = ConfigTmpDir() + dest = os.path.join(tmp_dir.configs_dir, path) + dest_dir = os.path.dirname(dest) + if not os.path.exists(dest_dir): + os.makedirs(dest_dir) + shutil.copy( + path, + dest, + ) + if schema: + # This supports both an instance of a pydantic BaseModel class (e.g. schema=MySchema(...)) + # or the BaseModel class itself (e.g. 
schema=MySchema) + if hasattr(schema, "model_json_schema") and callable( + schema.model_json_schema # type: ignore + ): + schema = schema.model_json_schema() + if not isinstance(schema, dict): + raise LaunchError( + "schema must be a dict, Pydantic model instance, or Pydantic model class." + ) + defs = schema.pop("$defs", None) + schema = _replace_refs_and_allofs(schema, defs) + _validate_schema(schema) + arguments = JobInputArguments( + include=include, + exclude=exclude, + schema=schema, + file_path=path, + run_config=False, + ) + if wandb.run is not None: + _publish_job_input(arguments, wandb.run) + else: + staged_inputs = StagedLaunchInputs() + staged_inputs.add_staged_input(arguments) + + +def handle_run_config_input( + include: Optional[List[str]] = None, + exclude: Optional[List[str]] = None, + schema: Optional[Any] = None, +): + """Declare wandb.config as an overridable configuration for a launch job. + + The include and exclude paths are sent to the backend interface of the + active run and used to configure the job builder. + + If there is no active run, the include and exclude paths are staged and sent + when a run is created. + """ + if schema: + # This supports both an instance of a pydantic BaseModel class (e.g. schema=MySchema(...)) + # or the BaseModel class itself (e.g. schema=MySchema) + if hasattr(schema, "model_json_schema") and callable( + schema.model_json_schema # type: ignore + ): + schema = schema.model_json_schema() + if not isinstance(schema, dict): + raise LaunchError( + "schema must be a dict, Pydantic model instance, or Pydantic model class." + ) + defs = schema.pop("$defs", None) + schema = _replace_refs_and_allofs(schema, defs) + _validate_schema(schema) + arguments = JobInputArguments( + include=include, + exclude=exclude, + schema=schema, + run_config=True, + file_path=None, + ) + if wandb.run is not None: + _publish_job_input(arguments, wandb.run) + else: + stage_inputs = StagedLaunchInputs() + stage_inputs.add_staged_input(arguments) + + +def _split_on_unesc_dot(path: str) -> List[str]: + r"""Split a string on unescaped dots. + + Arguments: + path (str): The string to split. + + Raises: + ValueError: If the path has a trailing escape character. + + Returns: + List[str]: The split string. + """ + parts = [] + part = "" + i = 0 + while i < len(path): + if path[i] == BACKSLASH: + if i == len(path) - 1: + raise LaunchError( + f"Invalid config path {path}: trailing {BACKSLASH}.", + ) + if path[i + 1] == PERIOD: + part += PERIOD + i += 2 + elif path[i] == PERIOD: + parts.append(part) + part = "" + i += 1 + else: + part += path[i] + i += 1 + if part: + parts.append(part) + return parts diff --git a/parrot/lib/python3.10/site-packages/wandb/sdk/launch/inputs/manage.py b/parrot/lib/python3.10/site-packages/wandb/sdk/launch/inputs/manage.py new file mode 100644 index 0000000000000000000000000000000000000000..91104eeae4e17dfdbc5fbc8f3e2f016873abffd2 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/wandb/sdk/launch/inputs/manage.py @@ -0,0 +1,113 @@ +"""Functions for declaring overridable configuration for launch jobs.""" + +from typing import Any, List, Optional + + +def manage_config_file( + path: str, + include: Optional[List[str]] = None, + exclude: Optional[List[str]] = None, + schema: Optional[Any] = None, +): + r"""Declare an overridable configuration file for a launch job. + + If a new job version is created from the active run, the configuration file + will be added to the job's inputs. 
If the job is launched and overrides + have been provided for the configuration file, this function will detect + the overrides from the environment and update the configuration file on disk. + Note that these overrides will only be applied in ephemeral containers. + `include` and `exclude` are lists of dot separated paths with the config. + The paths are used to filter subtrees of the configuration file out of the + job's inputs. + + For example, given the following configuration file: + ```yaml + model: + name: resnet + layers: 18 + training: + epochs: 10 + batch_size: 32 + ``` + + Passing `include=['model']` will only include the `model` subtree in the + job's inputs. Passing `exclude=['model.layers']` will exclude the `layers` + key from the `model` subtree. Note that `exclude` takes precedence over + `include`. + + `.` is used as a separator for nested keys. If a key contains a `.`, it + should be escaped with a backslash, e.g. `include=[r'model\.layers']`. Note + the use of `r` to denote a raw string when using escape chars. + + Args: + path (str): The path to the configuration file. This path must be + relative and must not contain backwards traversal, i.e. `..`. + include (List[str]): A list of keys to include in the configuration file. + exclude (List[str]): A list of keys to exclude from the configuration file. + schema (dict | Pydantic model): A JSON Schema or Pydantic model describing + describing which attributes will be editable from the Launch drawer. + Accepts both an instance of a Pydantic BaseModel class or the BaseModel + class itself. + + Raises: + LaunchError: If the path is not valid, or if there is no active run. + """ + # note: schema's Any type is because in the case where a BaseModel class is + # provided, its type is a pydantic internal type that we don't want our typing + # to depend on. schema's type should be considered + # "Optional[dict | ]" + from .internal import handle_config_file_input + + return handle_config_file_input(path, include, exclude, schema) + + +def manage_wandb_config( + include: Optional[List[str]] = None, + exclude: Optional[List[str]] = None, + schema: Optional[Any] = None, +): + r"""Declare wandb.config as an overridable configuration for a launch job. + + If a new job version is created from the active run, the run config + (wandb.config) will become an overridable input of the job. If the job is + launched and overrides have been provided for the run config, the overrides + will be applied to the run config when `wandb.init` is called. + `include` and `exclude` are lists of dot separated paths with the config. + The paths are used to filter subtrees of the configuration file out of the + job's inputs. + + For example, given the following run config contents: + ```yaml + model: + name: resnet + layers: 18 + training: + epochs: 10 + batch_size: 32 + ``` + Passing `include=['model']` will only include the `model` subtree in the + job's inputs. Passing `exclude=['model.layers']` will exclude the `layers` + key from the `model` subtree. Note that `exclude` takes precedence over + `include`. + `.` is used as a separator for nested keys. If a key contains a `.`, it + should be escaped with a backslash, e.g. `include=[r'model\.layers']`. Note + the use of `r` to denote a raw string when using escape chars. + + Args: + include (List[str]): A list of subtrees to include in the configuration. + exclude (List[str]): A list of subtrees to exclude from the configuration. 
+ schema (dict | Pydantic model): A JSON Schema or Pydantic model describing + describing which attributes will be editable from the Launch drawer. + Accepts both an instance of a Pydantic BaseModel class or the BaseModel + class itself. + + Raises: + LaunchError: If there is no active run. + """ + # note: schema's Any type is because in the case where a BaseModel class is + # provided, its type is a pydantic internal type that we don't want our typing + # to depend on. schema's type should be considered + # "Optional[dict | ]" + from .internal import handle_run_config_input + + handle_run_config_input(include, exclude, schema) diff --git a/parrot/lib/python3.10/site-packages/wandb/sdk/launch/inputs/schema.py b/parrot/lib/python3.10/site-packages/wandb/sdk/launch/inputs/schema.py new file mode 100644 index 0000000000000000000000000000000000000000..519eb76275796d738ff7f32c6279e85ded8ae635 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/wandb/sdk/launch/inputs/schema.py @@ -0,0 +1,39 @@ +META_SCHEMA = { + "type": "object", + "properties": { + "type": { + "type": "string", + "enum": ["boolean", "integer", "number", "string", "object"], + }, + "title": {"type": "string"}, + "description": {"type": "string"}, + "enum": {"type": "array", "items": {"type": ["integer", "number", "string"]}}, + "properties": {"type": "object", "patternProperties": {".*": {"$ref": "#"}}}, + "allOf": {"type": "array", "items": {"$ref": "#"}}, + }, + "allOf": [ + { + "if": {"properties": {"type": {"const": "number"}}}, + "then": { + "properties": { + "minimum": {"type": ["integer", "number"]}, + "maximum": {"type": ["integer", "number"]}, + "exclusiveMinimum": {"type": ["integer", "number"]}, + "exclusiveMaximum": {"type": ["integer", "number"]}, + } + }, + }, + { + "if": {"properties": {"type": {"const": "integer"}}}, + "then": { + "properties": { + "minimum": {"type": "integer"}, + "maximum": {"type": "integer"}, + "exclusiveMinimum": {"type": "integer"}, + "exclusiveMaximum": {"type": "integer"}, + } + }, + }, + ], + "unevaluatedProperties": False, +} diff --git a/parrot/lib/python3.10/site-packages/wandb/sdk/launch/runner/__pycache__/__init__.cpython-310.pyc b/parrot/lib/python3.10/site-packages/wandb/sdk/launch/runner/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7a0d0558b5c92232872cddae76a9587eb22a56ea Binary files /dev/null and b/parrot/lib/python3.10/site-packages/wandb/sdk/launch/runner/__pycache__/__init__.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/wandb/sdk/launch/runner/__pycache__/abstract.cpython-310.pyc b/parrot/lib/python3.10/site-packages/wandb/sdk/launch/runner/__pycache__/abstract.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e004dfaac79efbf4e15bab2565c8cc31ce937ad3 Binary files /dev/null and b/parrot/lib/python3.10/site-packages/wandb/sdk/launch/runner/__pycache__/abstract.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/wandb/sdk/launch/runner/__pycache__/kubernetes_monitor.cpython-310.pyc b/parrot/lib/python3.10/site-packages/wandb/sdk/launch/runner/__pycache__/kubernetes_monitor.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..484b35de95ea55e76a42b9fdc6f223cd6548c4be Binary files /dev/null and b/parrot/lib/python3.10/site-packages/wandb/sdk/launch/runner/__pycache__/kubernetes_monitor.cpython-310.pyc differ diff --git 
a/parrot/lib/python3.10/site-packages/wandb/sdk/launch/runner/__pycache__/kubernetes_runner.cpython-310.pyc b/parrot/lib/python3.10/site-packages/wandb/sdk/launch/runner/__pycache__/kubernetes_runner.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..63359b30d2963ad457fdbaaa106e9f1d52f7fc14 Binary files /dev/null and b/parrot/lib/python3.10/site-packages/wandb/sdk/launch/runner/__pycache__/kubernetes_runner.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/wandb/sdk/launch/runner/__pycache__/local_container.cpython-310.pyc b/parrot/lib/python3.10/site-packages/wandb/sdk/launch/runner/__pycache__/local_container.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ae7f2b73ad7765bf8a8d86fef0c1798e6edd1e42 Binary files /dev/null and b/parrot/lib/python3.10/site-packages/wandb/sdk/launch/runner/__pycache__/local_container.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/wandb/sdk/launch/runner/__pycache__/local_process.cpython-310.pyc b/parrot/lib/python3.10/site-packages/wandb/sdk/launch/runner/__pycache__/local_process.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..425853d4f8e25397c68343ecd5532cbec0e0471d Binary files /dev/null and b/parrot/lib/python3.10/site-packages/wandb/sdk/launch/runner/__pycache__/local_process.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/wandb/sdk/launch/runner/__pycache__/sagemaker_runner.cpython-310.pyc b/parrot/lib/python3.10/site-packages/wandb/sdk/launch/runner/__pycache__/sagemaker_runner.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6ff49fbccb083e2a23d0ae1e48c1e53922e407b8 Binary files /dev/null and b/parrot/lib/python3.10/site-packages/wandb/sdk/launch/runner/__pycache__/sagemaker_runner.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/wandb/sdk/launch/runner/__pycache__/vertex_runner.cpython-310.pyc b/parrot/lib/python3.10/site-packages/wandb/sdk/launch/runner/__pycache__/vertex_runner.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5cf68f1efcec5c30fa54f115da77694c983295f4 Binary files /dev/null and b/parrot/lib/python3.10/site-packages/wandb/sdk/launch/runner/__pycache__/vertex_runner.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/wandb/sdk/launch/runner/local_process.py b/parrot/lib/python3.10/site-packages/wandb/sdk/launch/runner/local_process.py new file mode 100644 index 0000000000000000000000000000000000000000..5b04361dc2501862f93eea3b8ef7c68807dcbf9f --- /dev/null +++ b/parrot/lib/python3.10/site-packages/wandb/sdk/launch/runner/local_process.py @@ -0,0 +1,78 @@ +import logging +import shlex +from typing import Any, List, Optional + +import wandb + +from .._project_spec import LaunchProject +from ..errors import LaunchError +from ..utils import ( + LOG_PREFIX, + MAX_ENV_LENGTHS, + PROJECT_SYNCHRONOUS, + sanitize_wandb_api_key, + validate_wandb_python_deps, +) +from .abstract import AbstractRun, AbstractRunner +from .local_container import _run_entry_point + +_logger = logging.getLogger(__name__) + + +class LocalProcessRunner(AbstractRunner): + """Runner class, uses a project to create a LocallySubmittedRun. + + LocalProcessRunner is very similar to a LocalContainerRunner, except it does not + run the command inside a docker container. Instead, it runs the + command specified as a process directly on the bare metal machine. 
+ + """ + + async def run( # type: ignore + self, + launch_project: LaunchProject, + *args, + **kwargs, + ) -> Optional[AbstractRun]: + if args is not None: + _msg = f"{LOG_PREFIX}LocalProcessRunner.run received unused args {args}" + _logger.warning(_msg) + if kwargs is not None: + _msg = f"{LOG_PREFIX}LocalProcessRunner.run received unused kwargs {kwargs}" + _logger.warning(_msg) + + synchronous: bool = self.backend_config[PROJECT_SYNCHRONOUS] + entry_point = ( + launch_project.override_entrypoint or launch_project.get_job_entry_point() + ) + + cmd: List[Any] = [] + + if launch_project.project_dir is None: + raise LaunchError("Launch LocalProcessRunner received empty project dir") + + if launch_project.job: + assert launch_project._job_artifact is not None + try: + validate_wandb_python_deps( + "requirements.frozen.txt", + launch_project.project_dir, + ) + except Exception: + wandb.termwarn("Unable to validate python dependencies") + env_vars = launch_project.get_env_vars_dict( + self._api, MAX_ENV_LENGTHS[self.__class__.__name__] + ) + for env_key, env_value in env_vars.items(): + cmd += [f"{shlex.quote(env_key)}={shlex.quote(env_value)}"] + if entry_point is not None: + cmd += entry_point.command + cmd += launch_project.override_args + + command_str = " ".join(cmd).strip() + _msg = f"{LOG_PREFIX}Launching run as a local-process with command {sanitize_wandb_api_key(command_str)}" + wandb.termlog(_msg) + run = _run_entry_point(command_str, launch_project.project_dir) + if synchronous: + await run.wait() + return run diff --git a/parrot/lib/python3.10/site-packages/wandb/sdk/launch/runner/sagemaker_runner.py b/parrot/lib/python3.10/site-packages/wandb/sdk/launch/runner/sagemaker_runner.py new file mode 100644 index 0000000000000000000000000000000000000000..e77a4fcf423fdd1a2180b5bfa3f19900694a84b7 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/wandb/sdk/launch/runner/sagemaker_runner.py @@ -0,0 +1,426 @@ +"""Implementation of the SageMakerRunner class.""" + +import asyncio +import logging +from typing import Any, Dict, List, Optional, cast + +if False: + import boto3 # type: ignore + +import wandb +from wandb.apis.internal import Api +from wandb.sdk.launch.environment.aws_environment import AwsEnvironment +from wandb.sdk.launch.errors import LaunchError + +from .._project_spec import EntryPoint, LaunchProject +from ..registry.abstract import AbstractRegistry +from ..utils import ( + LOG_PREFIX, + MAX_ENV_LENGTHS, + PROJECT_SYNCHRONOUS, + event_loop_thread_exec, + to_camel_case, +) +from .abstract import AbstractRun, AbstractRunner, Status + +_logger = logging.getLogger(__name__) + + +class SagemakerSubmittedRun(AbstractRun): + """Instance of ``AbstractRun`` corresponding to a subprocess launched to run an entry point command on aws sagemaker.""" + + def __init__( + self, + training_job_name: str, + client: "boto3.Client", + log_client: Optional["boto3.Client"] = None, + ) -> None: + super().__init__() + self.client = client + self.log_client = log_client + self.training_job_name = training_job_name + self._status = Status("running") + + @property + def id(self) -> str: + return f"sagemaker-{self.training_job_name}" + + async def get_logs(self) -> Optional[str]: + if self.log_client is None: + return None + try: + describe_log_streams = event_loop_thread_exec( + self.log_client.describe_log_streams + ) + describe_res = await describe_log_streams( + logGroupName="/aws/sagemaker/TrainingJobs", + logStreamNamePrefix=self.training_job_name, + ) + if len(describe_res["logStreams"]) == 0: 
+ wandb.termwarn( + f"Failed to get logs for training job: {self.training_job_name}" + ) + return None + log_name = describe_res["logStreams"][0]["logStreamName"] + get_log_events = event_loop_thread_exec(self.log_client.get_log_events) + res = await get_log_events( + logGroupName="/aws/sagemaker/TrainingJobs", + logStreamName=log_name, + ) + assert "events" in res + return "\n".join( + [f'{event["timestamp"]}:{event["message"]}' for event in res["events"]] + ) + except self.log_client.exceptions.ResourceNotFoundException: + wandb.termwarn( + f"Failed to get logs for training job: {self.training_job_name}" + ) + return None + except Exception as e: + wandb.termwarn( + f"Failed to handle logs for training job: {self.training_job_name} with error {str(e)}" + ) + return None + + async def wait(self) -> bool: + while True: + status_state = (await self.get_status()).state + wandb.termlog( + f"{LOG_PREFIX}Training job {self.training_job_name} status: {status_state}" + ) + if status_state in ["stopped", "failed", "finished"]: + break + await asyncio.sleep(5) + return status_state == "finished" + + async def cancel(self) -> None: + # Interrupt child process if it hasn't already exited + status = await self.get_status() + if status.state == "running": + self.client.stop_training_job(TrainingJobName=self.training_job_name) + await self.wait() + + async def get_status(self) -> Status: + describe_training_job = event_loop_thread_exec( + self.client.describe_training_job + ) + job_status = ( + await describe_training_job(TrainingJobName=self.training_job_name) + )["TrainingJobStatus"] + if job_status == "Completed" or job_status == "Stopped": + self._status = Status("finished") + elif job_status == "Failed": + self._status = Status("failed") + elif job_status == "Stopping": + self._status = Status("stopping") + elif job_status == "InProgress": + self._status = Status("running") + return self._status + + +class SageMakerRunner(AbstractRunner): + """Runner class, uses a project to create a SagemakerSubmittedRun.""" + + def __init__( + self, + api: Api, + backend_config: Dict[str, Any], + environment: AwsEnvironment, + registry: AbstractRegistry, + ) -> None: + """Initialize the SagemakerRunner. + + Arguments: + api (Api): The API instance. + backend_config (Dict[str, Any]): The backend configuration. + environment (AwsEnvironment): The AWS environment. + + Raises: + LaunchError: If the runner cannot be initialized. + """ + super().__init__(api, backend_config) + self.environment = environment + self.registry = registry + + async def run( + self, + launch_project: LaunchProject, + image_uri: str, + ) -> Optional[AbstractRun]: + """Run a project on Amazon Sagemaker. + + Arguments: + launch_project (LaunchProject): The project to run. + + Returns: + Optional[AbstractRun]: The run instance. + + Raises: + LaunchError: If the launch is unsuccessful. + """ + _logger.info("using AWSSagemakerRunner") + + given_sagemaker_args = launch_project.resource_args.get("sagemaker") + if given_sagemaker_args is None: + raise LaunchError( + "No sagemaker args specified. 
Specify sagemaker args in resource_args" + ) + + default_output_path = self.backend_config.get("runner", {}).get( + "s3_output_path" + ) + if default_output_path is not None and not default_output_path.startswith( + "s3://" + ): + default_output_path = f"s3://{default_output_path}" + + session = await self.environment.get_session() + client = await event_loop_thread_exec(session.client)("sts") + caller_id = client.get_caller_identity() + account_id = caller_id["Account"] + _logger.info(f"Using account ID {account_id}") + partition = await self.environment.get_partition() + role_arn = get_role_arn( + given_sagemaker_args, self.backend_config, account_id, partition + ) + + # Create a sagemaker client to launch the job. + sagemaker_client = session.client("sagemaker") + log_client = None + try: + log_client = session.client("logs") + except Exception as e: + wandb.termwarn( + f"Failed to connect to cloudwatch logs with error {str(e)}, logs will not be available" + ) + + # if the user provided the image they want to use, use that, but warn it won't have swappable artifacts + if ( + given_sagemaker_args.get("AlgorithmSpecification", {}).get("TrainingImage") + is not None + ): + sagemaker_args = build_sagemaker_args( + launch_project, + self._api, + role_arn, + launch_project.override_entrypoint, + launch_project.override_args, + MAX_ENV_LENGTHS[self.__class__.__name__], + given_sagemaker_args.get("AlgorithmSpecification", {}).get( + "TrainingImage" + ), + default_output_path, + ) + _logger.info( + f"Launching sagemaker job on user supplied image with args: {sagemaker_args}" + ) + run = await launch_sagemaker_job( + launch_project, sagemaker_args, sagemaker_client, log_client + ) + if self.backend_config[PROJECT_SYNCHRONOUS]: + await run.wait() + return run + + _logger.info("Connecting to sagemaker client") + entry_point = ( + launch_project.override_entrypoint or launch_project.get_job_entry_point() + ) + command_args = [] + if entry_point is not None: + command_args += entry_point.command + command_args += launch_project.override_args + if command_args: + command_str = " ".join(command_args) + wandb.termlog( + f"{LOG_PREFIX}Launching run on sagemaker with entrypoint: {command_str}" + ) + else: + wandb.termlog( + f"{LOG_PREFIX}Launching run on sagemaker with user-provided entrypoint in image" + ) + sagemaker_args = build_sagemaker_args( + launch_project, + self._api, + role_arn, + entry_point, + launch_project.override_args, + MAX_ENV_LENGTHS[self.__class__.__name__], + image_uri, + default_output_path, + ) + _logger.info(f"Launching sagemaker job with args: {sagemaker_args}") + run = await launch_sagemaker_job( + launch_project, sagemaker_args, sagemaker_client, log_client + ) + if self.backend_config[PROJECT_SYNCHRONOUS]: + await run.wait() + return run + + +def merge_image_uri_with_algorithm_specification( + algorithm_specification: Optional[Dict[str, Any]], + image_uri: Optional[str], + entrypoint_command: List[str], + args: Optional[List[str]], +) -> Dict[str, Any]: + """Create an AWS AlgorithmSpecification. + + AWS Sagemaker algorithms require a training image and an input mode. If the user + does not specify the specification themselves, define the spec minimally using these + two fields. Otherwise, if they specify the AlgorithmSpecification set the training + image if it is not set. 
+ """ + if algorithm_specification is None: + algorithm_specification = { + "TrainingImage": image_uri, + "TrainingInputMode": "File", + } + else: + if image_uri: + algorithm_specification["TrainingImage"] = image_uri + if entrypoint_command: + algorithm_specification["ContainerEntrypoint"] = entrypoint_command + if args: + algorithm_specification["ContainerArguments"] = args + + if algorithm_specification["TrainingImage"] is None: + raise LaunchError("Failed determine tag for training image") + return algorithm_specification + + +def build_sagemaker_args( + launch_project: LaunchProject, + api: Api, + role_arn: str, + entry_point: Optional[EntryPoint], + args: Optional[List[str]], + max_env_length: int, + image_uri: str, + default_output_path: Optional[str] = None, +) -> Dict[str, Any]: + sagemaker_args: Dict[str, Any] = {} + resource_args = launch_project.fill_macros(image_uri) + given_sagemaker_args: Optional[Dict[str, Any]] = resource_args.get("sagemaker") + + if given_sagemaker_args is None: + raise LaunchError( + "No sagemaker args specified. Specify sagemaker args in resource_args" + ) + if ( + given_sagemaker_args.get("OutputDataConfig") is None + and default_output_path is not None + ): + sagemaker_args["OutputDataConfig"] = {"S3OutputPath": default_output_path} + else: + sagemaker_args["OutputDataConfig"] = given_sagemaker_args.get( + "OutputDataConfig" + ) + + if sagemaker_args.get("OutputDataConfig") is None: + raise LaunchError( + "Sagemaker launcher requires an OutputDataConfig Sagemaker resource argument" + ) + training_job_name = cast( + str, (given_sagemaker_args.get("TrainingJobName") or launch_project.run_id) + ) + sagemaker_args["TrainingJobName"] = training_job_name + entry_cmd = entry_point.command if entry_point else [] + + sagemaker_args["AlgorithmSpecification"] = ( + merge_image_uri_with_algorithm_specification( + given_sagemaker_args.get( + "AlgorithmSpecification", + given_sagemaker_args.get("algorithm_specification"), + ), + image_uri, + entry_cmd, + args, + ) + ) + + sagemaker_args["RoleArn"] = role_arn + + camel_case_args = { + to_camel_case(key): item for key, item in given_sagemaker_args.items() + } + sagemaker_args = { + **camel_case_args, + **sagemaker_args, + } + + if sagemaker_args.get("ResourceConfig") is None: + raise LaunchError( + "Sagemaker launcher requires a ResourceConfig resource argument" + ) + + if sagemaker_args.get("StoppingCondition") is None: + raise LaunchError( + "Sagemaker launcher requires a StoppingCondition resource argument" + ) + + given_env = given_sagemaker_args.get( + "Environment", sagemaker_args.get("environment", {}) + ) + calced_env = launch_project.get_env_vars_dict(api, max_env_length) + total_env = {**calced_env, **given_env} + sagemaker_args["Environment"] = total_env + + # Add wandb tag + tags = sagemaker_args.get("Tags", []) + tags.append({"Key": "WandbRunId", "Value": launch_project.run_id}) + sagemaker_args["Tags"] = tags + + # remove args that were passed in for launch but not passed to sagemaker + sagemaker_args.pop("EcrRepoName", None) + sagemaker_args.pop("region", None) + sagemaker_args.pop("profile", None) + + # clear the args that are None so they are not passed + filtered_args = {k: v for k, v in sagemaker_args.items() if v is not None} + + return filtered_args + + +async def launch_sagemaker_job( + launch_project: LaunchProject, + sagemaker_args: Dict[str, Any], + sagemaker_client: "boto3.Client", + log_client: Optional["boto3.Client"] = None, +) -> SagemakerSubmittedRun: + training_job_name = 
sagemaker_args.get("TrainingJobName") or launch_project.run_id + create_training_job = event_loop_thread_exec(sagemaker_client.create_training_job) + resp = await create_training_job(**sagemaker_args) + + if resp.get("TrainingJobArn") is None: + raise LaunchError("Failed to create training job when submitting to SageMaker") + + run = SagemakerSubmittedRun(training_job_name, sagemaker_client, log_client) + wandb.termlog( + f"{LOG_PREFIX}Run job submitted with arn: {resp.get('TrainingJobArn')}" + ) + url = "https://{region}.console.aws.amazon.com/sagemaker/home?region={region}#/jobs/{job_name}".format( + region=sagemaker_client.meta.region_name, job_name=training_job_name + ) + wandb.termlog(f"{LOG_PREFIX}See training job status at: {url}") + return run + + +def get_role_arn( + sagemaker_args: Dict[str, Any], + backend_config: Dict[str, Any], + account_id: str, + partition: str, +) -> str: + """Get the role arn from the sagemaker args or the backend config.""" + role_arn = sagemaker_args.get("RoleArn") or sagemaker_args.get("role_arn") + if role_arn is None: + role_arn = backend_config.get("runner", {}).get("role_arn") + if role_arn is None or not isinstance(role_arn, str): + raise LaunchError( + "AWS sagemaker require a string RoleArn set this by adding a `RoleArn` key to the sagemaker" + "field of resource_args" + ) + if role_arn.startswith(f"arn:{partition}:iam::"): + return role_arn # type: ignore + + return f"arn:{partition}:iam::{account_id}:role/{role_arn}" diff --git a/parrot/lib/python3.10/site-packages/wandb/sdk/launch/runner/vertex_runner.py b/parrot/lib/python3.10/site-packages/wandb/sdk/launch/runner/vertex_runner.py new file mode 100644 index 0000000000000000000000000000000000000000..f2a1d7718bcf68ec3ac3afece06755d2e8e12304 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/wandb/sdk/launch/runner/vertex_runner.py @@ -0,0 +1,230 @@ +import asyncio +import logging +from typing import Any, Dict, Optional + +if False: + from google.cloud import aiplatform # type: ignore # noqa: F401 + +from wandb.apis.internal import Api +from wandb.util import get_module + +from .._project_spec import LaunchProject +from ..environment.gcp_environment import GcpEnvironment +from ..errors import LaunchError +from ..registry.abstract import AbstractRegistry +from ..utils import MAX_ENV_LENGTHS, PROJECT_SYNCHRONOUS, event_loop_thread_exec +from .abstract import AbstractRun, AbstractRunner, Status + +GCP_CONSOLE_URI = "https://console.cloud.google.com" + +_logger = logging.getLogger(__name__) + + +WANDB_RUN_ID_KEY = "wandb-run-id" + + +class VertexSubmittedRun(AbstractRun): + def __init__(self, job: Any) -> None: + self._job = job + + @property + def id(self) -> str: + # numeric ID of the custom training job + return self._job.name # type: ignore + + async def get_logs(self) -> Optional[str]: + # TODO: implement + return None + + @property + def name(self) -> str: + return self._job.display_name # type: ignore + + @property + def gcp_region(self) -> str: + return self._job.location # type: ignore + + @property + def gcp_project(self) -> str: + return self._job.project # type: ignore + + def get_page_link(self) -> str: + return "{console_uri}/vertex-ai/locations/{region}/training/{job_id}?project={project}".format( + console_uri=GCP_CONSOLE_URI, + region=self.gcp_region, + job_id=self.id, + project=self.gcp_project, + ) + + async def wait(self) -> bool: + # TODO: run this in a separate thread. 
+ await self._job.wait() + return (await self.get_status()).state == "finished" + + async def get_status(self) -> Status: + job_state = str(self._job.state) # extract from type PipelineState + if job_state == "JobState.JOB_STATE_SUCCEEDED": + return Status("finished") + if job_state == "JobState.JOB_STATE_FAILED": + return Status("failed") + if job_state == "JobState.JOB_STATE_RUNNING": + return Status("running") + if job_state == "JobState.JOB_STATE_PENDING": + return Status("starting") + return Status("unknown") + + async def cancel(self) -> None: + self._job.cancel() + + +class VertexRunner(AbstractRunner): + """Runner class, uses a project to create a VertexSubmittedRun.""" + + def __init__( + self, + api: Api, + backend_config: Dict[str, Any], + environment: GcpEnvironment, + registry: AbstractRegistry, + ) -> None: + """Initialize a VertexRunner instance.""" + super().__init__(api, backend_config) + self.environment = environment + self.registry = registry + + async def run( + self, launch_project: LaunchProject, image_uri: str + ) -> Optional[AbstractRun]: + """Run a Vertex job.""" + full_resource_args = launch_project.fill_macros(image_uri) + resource_args = full_resource_args.get("vertex") + # We support setting under gcp-vertex for historical reasons. + if not resource_args: + resource_args = full_resource_args.get("gcp-vertex") + if not resource_args: + raise LaunchError( + "No Vertex resource args specified. Specify args via --resource-args with a JSON file or string under top-level key gcp_vertex" + ) + + spec_args = resource_args.get("spec", {}) + run_args = resource_args.get("run", {}) + + synchronous: bool = self.backend_config[PROJECT_SYNCHRONOUS] + + entry_point = ( + launch_project.override_entrypoint or launch_project.get_job_entry_point() + ) + + # TODO: Set entrypoint in each container + entry_cmd = [] + if entry_point is not None: + entry_cmd += entry_point.command + entry_cmd += launch_project.override_args + + env_vars = launch_project.get_env_vars_dict( + api=self._api, + max_env_length=MAX_ENV_LENGTHS[self.__class__.__name__], + ) + + worker_specs = spec_args.get("worker_pool_specs", []) + if not worker_specs: + raise LaunchError( + "Vertex requires at least one worker pool spec. Please specify " + "a worker pool spec in resource arguments under the key " + "`vertex.spec.worker_pool_specs`." + ) + + # TODO: Add entrypoint + args to each worker pool spec + for spec in worker_specs: + if not spec.get("container_spec"): + raise LaunchError( + "Vertex requires a container spec for each worker pool spec. " + "Please specify a container spec in resource arguments under " + "the key `vertex.spec.worker_pool_specs[].container_spec`." + ) + spec["container_spec"]["command"] = entry_cmd + + # Add our env vars to user supplied env vars + env = spec["container_spec"].get("env", []) + env.extend( + [{"name": key, "value": value} for key, value in env_vars.items()] + ) + spec["container_spec"]["env"] = env + + if not spec_args.get("staging_bucket"): + raise LaunchError( + "Vertex requires a staging bucket. Please specify a staging bucket " + "in resource arguments under the key `vertex.spec.staging_bucket`." 
+ ) + + _logger.info("Launching Vertex job...") + submitted_run = await launch_vertex_job( + launch_project, + spec_args, + run_args, + self.environment, + synchronous, + ) + return submitted_run + + +async def launch_vertex_job( + launch_project: LaunchProject, + spec_args: Dict[str, Any], + run_args: Dict[str, Any], + environment: GcpEnvironment, + synchronous: bool = False, +) -> VertexSubmittedRun: + try: + await environment.verify() + aiplatform = get_module( # noqa: F811 + "google.cloud.aiplatform", + "VertexRunner requires google.cloud.aiplatform to be installed", + ) + init = event_loop_thread_exec(aiplatform.init) + await init( + project=environment.project, + location=environment.region, + staging_bucket=spec_args.get("staging_bucket"), + credentials=await environment.get_credentials(), + ) + labels = spec_args.get("labels", {}) + labels[WANDB_RUN_ID_KEY] = launch_project.run_id + job = aiplatform.CustomJob( + display_name=launch_project.name, + worker_pool_specs=spec_args.get("worker_pool_specs"), + base_output_dir=spec_args.get("base_output_dir"), + encryption_spec_key_name=spec_args.get("encryption_spec_key_name"), + labels=labels, + ) + execution_kwargs = dict( + timeout=run_args.get("timeout"), + service_account=run_args.get("service_account"), + network=run_args.get("network"), + enable_web_access=run_args.get("enable_web_access", False), + experiment=run_args.get("experiment"), + experiment_run=run_args.get("experiment_run"), + tensorboard=run_args.get("tensorboard"), + restart_job_on_worker_restart=run_args.get( + "restart_job_on_worker_restart", False + ), + ) + # Unclear if there are exceptions that can be thrown where we should + # retry instead of erroring. For now, just catch all exceptions and they + # go to the UI for the user to interpret. 
+ except Exception as e: + raise LaunchError(f"Failed to create Vertex job: {e}") + + if synchronous: + run = event_loop_thread_exec(job.run) + await run(**execution_kwargs, sync=True) + else: + submit = event_loop_thread_exec(job.submit) + await submit(**execution_kwargs) + submitted_run = VertexSubmittedRun(job) + interval = 1 + while not getattr(job._gca_resource, "name", None): + # give time for the gcp job object to be created and named, this should only loop a couple times max + await asyncio.sleep(interval) + interval = min(30, interval * 2) + return submitted_run diff --git a/parrot/lib/python3.10/site-packages/wandb/sdk/launch/sweeps/__pycache__/__init__.cpython-310.pyc b/parrot/lib/python3.10/site-packages/wandb/sdk/launch/sweeps/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..10bdea3723caaaa1214b0c6773a1a74ca16944f1 Binary files /dev/null and b/parrot/lib/python3.10/site-packages/wandb/sdk/launch/sweeps/__pycache__/__init__.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/wandb/sdk/launch/sweeps/__pycache__/scheduler.cpython-310.pyc b/parrot/lib/python3.10/site-packages/wandb/sdk/launch/sweeps/__pycache__/scheduler.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3bb29e476d09b28f106ae3d4c9054b358bd61083 Binary files /dev/null and b/parrot/lib/python3.10/site-packages/wandb/sdk/launch/sweeps/__pycache__/scheduler.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/wandb/sdk/launch/sweeps/__pycache__/scheduler_sweep.cpython-310.pyc b/parrot/lib/python3.10/site-packages/wandb/sdk/launch/sweeps/__pycache__/scheduler_sweep.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7df8920af8d2da77d2629f6a6f0f15065cce6cb2 Binary files /dev/null and b/parrot/lib/python3.10/site-packages/wandb/sdk/launch/sweeps/__pycache__/scheduler_sweep.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/wandb/sdk/launch/sweeps/__pycache__/utils.cpython-310.pyc b/parrot/lib/python3.10/site-packages/wandb/sdk/launch/sweeps/__pycache__/utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6de3e432b37bb249c4eb40649e34fe1f62785c64 Binary files /dev/null and b/parrot/lib/python3.10/site-packages/wandb/sdk/launch/sweeps/__pycache__/utils.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/wandb/vendor/pygments/lexers/__pycache__/_php_builtins.cpython-310.pyc b/parrot/lib/python3.10/site-packages/wandb/vendor/pygments/lexers/__pycache__/_php_builtins.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a7323d8ba755d45b16c6fe5a0fc9d9170d7b7cbd --- /dev/null +++ b/parrot/lib/python3.10/site-packages/wandb/vendor/pygments/lexers/__pycache__/_php_builtins.cpython-310.pyc @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:04d6250079a6bbdb363d853e62f8756e4c573418d7a85603a494cb15027b1bae +size 102531