ZTWHHH committed on
Commit
ddcdd58
·
verified ·
1 Parent(s): 981ae05

Add files using upload-large-folder tool

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. parrot/lib/python3.10/site-packages/wandb/sdk/data_types/__pycache__/__init__.cpython-310.pyc +0 -0
  2. parrot/lib/python3.10/site-packages/wandb/sdk/data_types/__pycache__/_dtypes.cpython-310.pyc +0 -0
  3. parrot/lib/python3.10/site-packages/wandb/sdk/data_types/__pycache__/_private.cpython-310.pyc +0 -0
  4. parrot/lib/python3.10/site-packages/wandb/sdk/data_types/__pycache__/audio.cpython-310.pyc +0 -0
  5. parrot/lib/python3.10/site-packages/wandb/sdk/data_types/__pycache__/bokeh.cpython-310.pyc +0 -0
  6. parrot/lib/python3.10/site-packages/wandb/sdk/data_types/__pycache__/graph.cpython-310.pyc +0 -0
  7. parrot/lib/python3.10/site-packages/wandb/sdk/data_types/__pycache__/histogram.cpython-310.pyc +0 -0
  8. parrot/lib/python3.10/site-packages/wandb/sdk/data_types/__pycache__/html.cpython-310.pyc +0 -0
  9. parrot/lib/python3.10/site-packages/wandb/sdk/data_types/__pycache__/image.cpython-310.pyc +0 -0
  10. parrot/lib/python3.10/site-packages/wandb/sdk/data_types/__pycache__/molecule.cpython-310.pyc +0 -0
  11. parrot/lib/python3.10/site-packages/wandb/sdk/data_types/__pycache__/object_3d.cpython-310.pyc +0 -0
  12. parrot/lib/python3.10/site-packages/wandb/sdk/data_types/__pycache__/plotly.cpython-310.pyc +0 -0
  13. parrot/lib/python3.10/site-packages/wandb/sdk/data_types/__pycache__/saved_model.cpython-310.pyc +0 -0
  14. parrot/lib/python3.10/site-packages/wandb/sdk/data_types/__pycache__/table.cpython-310.pyc +0 -0
  15. parrot/lib/python3.10/site-packages/wandb/sdk/data_types/__pycache__/trace_tree.cpython-310.pyc +0 -0
  16. parrot/lib/python3.10/site-packages/wandb/sdk/data_types/__pycache__/utils.cpython-310.pyc +0 -0
  17. parrot/lib/python3.10/site-packages/wandb/sdk/data_types/__pycache__/video.cpython-310.pyc +0 -0
  18. parrot/lib/python3.10/site-packages/wandb/sdk/data_types/base_types/__init__.py +0 -0
  19. parrot/lib/python3.10/site-packages/wandb/sdk/data_types/base_types/__pycache__/__init__.cpython-310.pyc +0 -0
  20. parrot/lib/python3.10/site-packages/wandb/sdk/data_types/base_types/__pycache__/json_metadata.cpython-310.pyc +0 -0
  21. parrot/lib/python3.10/site-packages/wandb/sdk/data_types/base_types/__pycache__/media.cpython-310.pyc +0 -0
  22. parrot/lib/python3.10/site-packages/wandb/sdk/data_types/base_types/__pycache__/wb_value.cpython-310.pyc +0 -0
  23. parrot/lib/python3.10/site-packages/wandb/sdk/data_types/base_types/json_metadata.py +55 -0
  24. parrot/lib/python3.10/site-packages/wandb/sdk/data_types/base_types/media.py +315 -0
  25. parrot/lib/python3.10/site-packages/wandb/sdk/data_types/base_types/wb_value.py +274 -0
  26. parrot/lib/python3.10/site-packages/wandb/sdk/data_types/helper_types/__pycache__/__init__.cpython-310.pyc +0 -0
  27. parrot/lib/python3.10/site-packages/wandb/sdk/data_types/helper_types/__pycache__/classes.cpython-310.pyc +0 -0
  28. parrot/lib/python3.10/site-packages/wandb/sdk/data_types/helper_types/__pycache__/image_mask.cpython-310.pyc +0 -0
  29. parrot/lib/python3.10/site-packages/wandb/sdk/data_types/helper_types/classes.py +159 -0
  30. parrot/lib/python3.10/site-packages/wandb/sdk/launch/__init__.py +14 -0
  31. parrot/lib/python3.10/site-packages/wandb/sdk/launch/_launch.py +330 -0
  32. parrot/lib/python3.10/site-packages/wandb/sdk/launch/_launch_add.py +255 -0
  33. parrot/lib/python3.10/site-packages/wandb/sdk/launch/_project_spec.py +566 -0
  34. parrot/lib/python3.10/site-packages/wandb/sdk/launch/agent/__init__.py +5 -0
  35. parrot/lib/python3.10/site-packages/wandb/sdk/launch/agent/__pycache__/__init__.cpython-310.pyc +0 -0
  36. parrot/lib/python3.10/site-packages/wandb/sdk/launch/agent/__pycache__/agent.cpython-310.pyc +0 -0
  37. parrot/lib/python3.10/site-packages/wandb/sdk/launch/agent/__pycache__/config.cpython-310.pyc +0 -0
  38. parrot/lib/python3.10/site-packages/wandb/sdk/launch/agent/__pycache__/job_status_tracker.cpython-310.pyc +0 -0
  39. parrot/lib/python3.10/site-packages/wandb/sdk/launch/agent/__pycache__/run_queue_item_file_saver.cpython-310.pyc +0 -0
  40. parrot/lib/python3.10/site-packages/wandb/sdk/launch/agent/agent.py +924 -0
  41. parrot/lib/python3.10/site-packages/wandb/sdk/launch/agent/config.py +296 -0
  42. parrot/lib/python3.10/site-packages/wandb/sdk/launch/agent/job_status_tracker.py +53 -0
  43. parrot/lib/python3.10/site-packages/wandb/sdk/launch/agent/run_queue_item_file_saver.py +45 -0
  44. parrot/lib/python3.10/site-packages/wandb/sdk/launch/create_job.py +528 -0
  45. parrot/lib/python3.10/site-packages/wandb/sdk/launch/environment/__pycache__/abstract.cpython-310.pyc +0 -0
  46. parrot/lib/python3.10/site-packages/wandb/sdk/launch/environment/__pycache__/aws_environment.cpython-310.pyc +0 -0
  47. parrot/lib/python3.10/site-packages/wandb/sdk/launch/environment/__pycache__/azure_environment.cpython-310.pyc +0 -0
  48. parrot/lib/python3.10/site-packages/wandb/sdk/launch/environment/__pycache__/gcp_environment.cpython-310.pyc +0 -0
  49. parrot/lib/python3.10/site-packages/wandb/sdk/launch/environment/__pycache__/local_environment.cpython-310.pyc +0 -0
  50. parrot/lib/python3.10/site-packages/wandb/sdk/launch/environment/abstract.py +29 -0
parrot/lib/python3.10/site-packages/wandb/sdk/data_types/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (172 Bytes). View file
 
parrot/lib/python3.10/site-packages/wandb/sdk/data_types/__pycache__/_dtypes.cpython-310.pyc ADDED
Binary file (25.4 kB). View file
 
parrot/lib/python3.10/site-packages/wandb/sdk/data_types/__pycache__/_private.cpython-310.pyc ADDED
Binary file (439 Bytes). View file
 
parrot/lib/python3.10/site-packages/wandb/sdk/data_types/__pycache__/audio.cpython-310.pyc ADDED
Binary file (5.67 kB). View file
 
parrot/lib/python3.10/site-packages/wandb/sdk/data_types/__pycache__/bokeh.cpython-310.pyc ADDED
Binary file (2.6 kB). View file
 
parrot/lib/python3.10/site-packages/wandb/sdk/data_types/__pycache__/graph.cpython-310.pyc ADDED
Binary file (10.6 kB). View file
 
parrot/lib/python3.10/site-packages/wandb/sdk/data_types/__pycache__/histogram.cpython-310.pyc ADDED
Binary file (3.14 kB). View file
 
parrot/lib/python3.10/site-packages/wandb/sdk/data_types/__pycache__/html.cpython-310.pyc ADDED
Binary file (3.92 kB). View file
 
parrot/lib/python3.10/site-packages/wandb/sdk/data_types/__pycache__/image.cpython-310.pyc ADDED
Binary file (22.1 kB). View file
 
parrot/lib/python3.10/site-packages/wandb/sdk/data_types/__pycache__/molecule.cpython-310.pyc ADDED
Binary file (6.95 kB). View file
 
parrot/lib/python3.10/site-packages/wandb/sdk/data_types/__pycache__/object_3d.cpython-310.pyc ADDED
Binary file (13 kB). View file
 
parrot/lib/python3.10/site-packages/wandb/sdk/data_types/__pycache__/plotly.cpython-310.pyc ADDED
Binary file (2.93 kB). View file
 
parrot/lib/python3.10/site-packages/wandb/sdk/data_types/__pycache__/saved_model.cpython-310.pyc ADDED
Binary file (12.4 kB). View file
 
parrot/lib/python3.10/site-packages/wandb/sdk/data_types/__pycache__/table.cpython-310.pyc ADDED
Binary file (34.4 kB). View file
 
parrot/lib/python3.10/site-packages/wandb/sdk/data_types/__pycache__/trace_tree.cpython-310.pyc ADDED
Binary file (14.7 kB). View file
 
parrot/lib/python3.10/site-packages/wandb/sdk/data_types/__pycache__/utils.cpython-310.pyc ADDED
Binary file (4.91 kB). View file
 
parrot/lib/python3.10/site-packages/wandb/sdk/data_types/__pycache__/video.cpython-310.pyc ADDED
Binary file (7.4 kB). View file
 
parrot/lib/python3.10/site-packages/wandb/sdk/data_types/base_types/__init__.py ADDED
File without changes
parrot/lib/python3.10/site-packages/wandb/sdk/data_types/base_types/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (183 Bytes). View file
 
parrot/lib/python3.10/site-packages/wandb/sdk/data_types/base_types/__pycache__/json_metadata.cpython-310.pyc ADDED
Binary file (2.12 kB). View file
 
parrot/lib/python3.10/site-packages/wandb/sdk/data_types/base_types/__pycache__/media.cpython-310.pyc ADDED
Binary file (9.04 kB). View file
 
parrot/lib/python3.10/site-packages/wandb/sdk/data_types/base_types/__pycache__/wb_value.cpython-310.pyc ADDED
Binary file (8.28 kB). View file
 
parrot/lib/python3.10/site-packages/wandb/sdk/data_types/base_types/json_metadata.py ADDED
@@ -0,0 +1,55 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import codecs
2
+ import os
3
+ from typing import TYPE_CHECKING, Type, Union
4
+
5
+ from wandb import util
6
+ from wandb.sdk.lib import runid
7
+
8
+ from .._private import MEDIA_TMP
9
+ from .media import Media
10
+
11
+ if TYPE_CHECKING: # pragma: no cover
12
+ from wandb.sdk.artifacts.artifact import Artifact
13
+
14
+ from ...wandb_run import Run as LocalRun
15
+
16
+
17
+ # Allows encoding of arbitrary JSON structures
18
+ # as a file
19
+ #
20
+ # This class should be used as an abstract class
21
+ # extended to have validation methods
22
+
23
+
24
class JSONMetadata(Media):
    """Media subtype that persists an arbitrary JSON-serializable dict as a file.

    Intended to be used as an abstract base: subclasses override ``type_name``
    and ``validate`` to specialize the metadata they accept.
    """

    def __init__(self, val: dict) -> None:
        super().__init__()

        self.validate(val)
        self._val = val

        extension = "." + self.type_name() + ".json"
        file_path = os.path.join(MEDIA_TMP.name, runid.generate_id() + extension)
        with codecs.open(file_path, "w", encoding="utf-8") as handle:
            util.json_dump_uncompressed(self._val, handle)
        self._set_file(file_path, is_tmp=True, extension=extension)

    @classmethod
    def get_media_subdir(cls: Type["JSONMetadata"]) -> str:
        # Files land under media/metadata/<type_name> inside the run directory.
        return os.path.join("media", "metadata", cls.type_name())

    def to_json(self, run_or_artifact: Union["LocalRun", "Artifact"]) -> dict:
        serialized = super().to_json(run_or_artifact)
        serialized["_type"] = self.type_name()

        return serialized

    # Hooks meant to be overridden in child classes.
    @classmethod
    def type_name(cls) -> str:
        return "metadata"

    def validate(self, val: dict) -> bool:
        return True
parrot/lib/python3.10/site-packages/wandb/sdk/data_types/base_types/media.py ADDED
@@ -0,0 +1,315 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import hashlib
2
+ import os
3
+ import platform
4
+ import re
5
+ import shutil
6
+ from typing import TYPE_CHECKING, Optional, Sequence, Type, Union, cast
7
+
8
+ import wandb
9
+ from wandb import util
10
+ from wandb._globals import _datatypes_callback
11
+ from wandb.sdk.lib import filesystem
12
+ from wandb.sdk.lib.paths import LogicalPath
13
+
14
+ from .wb_value import WBValue
15
+
16
+ if TYPE_CHECKING: # pragma: no cover
17
+ import numpy as np
18
+
19
+ from wandb.sdk.artifacts.artifact import Artifact
20
+
21
+ from ...wandb_run import Run as LocalRun
22
+
23
+
24
+ SYS_PLATFORM = platform.system()
25
+
26
+
27
+ def _wb_filename(
28
+ key: Union[str, int], step: Union[str, int], id: Union[str, int], extension: str
29
+ ) -> str:
30
+ return f"{str(key)}_{str(step)}_{str(id)}{extension}"
31
+
32
+
33
class Media(WBValue):
    """A WBValue stored as a file outside JSON that can be rendered in a media panel.

    If necessary, we move or copy the file into the Run's media directory so that it
    gets uploaded.
    """

    # Backing file path on disk, set by _set_file.
    _path: Optional[str]
    # The run this object is bound to (via bind_to_run), if any.
    _run: Optional["LocalRun"]
    _caption: Optional[str]
    # True when the backing file lives in the temp media dir and may be moved.
    _is_tmp: Optional[bool]
    _extension: Optional[str]
    # SHA-256 hex digest of the file contents, used for dedup and naming.
    _sha256: Optional[str]
    _size: Optional[int]

    def __init__(self, caption: Optional[str] = None) -> None:
        super().__init__()
        self._path = None
        # The run under which this object is bound, if any.
        self._run = None
        self._caption = caption

    def _set_file(
        self, path: str, is_tmp: bool = False, extension: Optional[str] = None
    ) -> None:
        """Record the backing file and compute its SHA-256 digest and size."""
        self._path = path
        self._is_tmp = is_tmp
        self._extension = extension
        assert extension is None or path.endswith(
            extension
        ), f'Media file extension "{extension}" must occur at the end of path "{path}".'

        with open(self._path, "rb") as f:
            self._sha256 = hashlib.sha256(f.read()).hexdigest()
        self._size = os.path.getsize(self._path)

    @classmethod
    def get_media_subdir(cls: Type["Media"]) -> str:
        # Subclasses define where their files live under the media directory.
        raise NotImplementedError

    @staticmethod
    def captions(
        media_items: Sequence["Media"],
    ) -> Union[bool, Sequence[Optional[str]]]:
        # Returns the list of captions, or False when the first item has none.
        # NOTE(review): only the first item is checked; assumes captions are
        # all-or-nothing across the batch.
        if media_items[0]._caption is not None:
            return [m._caption for m in media_items]
        else:
            return False

    def is_bound(self) -> bool:
        """Return True once bind_to_run has attached this object to a run."""
        return self._run is not None

    def file_is_set(self) -> bool:
        """Return True once _set_file has recorded a path and its digest."""
        return self._path is not None and self._sha256 is not None

    def bind_to_run(
        self,
        run: "LocalRun",
        key: Union[int, str],
        step: Union[int, str],
        id_: Optional[Union[int, str]] = None,
        ignore_copy_err: Optional[bool] = None,
    ) -> None:
        """Bind this object to a particular Run.

        Calling this function is necessary so that we have somewhere specific to put the
        file associated with this object, from which other Runs can refer to it.
        """
        assert self.file_is_set(), "bind_to_run called before _set_file"

        if SYS_PLATFORM == "Windows" and not util.check_windows_valid_filename(key):
            raise ValueError(
                f"Media {key} is invalid. Please remove invalid filename characters"
            )

        # The following two assertions are guaranteed to pass
        # by definition file_is_set, but are needed for
        # mypy to understand that these are strings below.
        assert isinstance(self._path, str)
        assert isinstance(self._sha256, str)

        assert run is not None, 'Argument "run" must not be None.'
        self._run = run

        if self._extension is None:
            _, extension = os.path.splitext(os.path.basename(self._path))
        else:
            extension = self._extension

        # Default the file id to a prefix of the content hash so identical
        # content maps to the same name.
        if id_ is None:
            id_ = self._sha256[:20]

        file_path = _wb_filename(key, step, id_, extension)
        media_path = os.path.join(self.get_media_subdir(), file_path)
        new_path = os.path.join(self._run.dir, media_path)
        filesystem.mkdir_exists_ok(os.path.dirname(new_path))

        if self._is_tmp:
            # Temp files can be moved (not copied) into the run directory.
            shutil.move(self._path, new_path)
            self._path = new_path
            self._is_tmp = False
            _datatypes_callback(media_path)
        else:
            try:
                shutil.copy(self._path, new_path)
            except shutil.SameFileError as e:
                if not ignore_copy_err:
                    raise e
            self._path = new_path
            _datatypes_callback(media_path)

    def to_json(self, run: Union["LocalRun", "Artifact"]) -> dict:
        """Serialize the object into a JSON blob.

        Uses run or artifact to store additional data. If `run_or_artifact` is a
        wandb.Run then `self.bind_to_run()` must have been previously been called.

        Args:
            run_or_artifact (wandb.Run | wandb.Artifact): the Run or Artifact for which
                this object should be generating JSON for - this is useful to store
                additional data if needed.

        Returns:
            dict: JSON representation
        """
        # NOTE: uses of Audio in this class are a temporary hack -- when Ref support moves up
        # into Media itself we should get rid of them
        from wandb import Image
        from wandb.data_types import Audio
        from wandb.sdk.wandb_run import Run

        json_obj = {}

        if isinstance(run, Run):
            json_obj.update(
                {
                    "_type": "file",  # TODO(adrian): This isn't (yet) a real media type we support on the frontend.
                    "sha256": self._sha256,
                    "size": self._size,
                }
            )
            artifact_entry_url = self._get_artifact_entry_ref_url()
            if artifact_entry_url is not None:
                json_obj["artifact_path"] = artifact_entry_url
            artifact_entry_latest_url = self._get_artifact_entry_latest_ref_url()
            if artifact_entry_latest_url is not None:
                json_obj["_latest_artifact_path"] = artifact_entry_latest_url

            # A relative path is only emitted when the object is bound; an
            # unbound object is acceptable only if an artifact ref exists.
            if artifact_entry_url is None or self.is_bound():
                assert self.is_bound(), "Value of type {} must be bound to a run with bind_to_run() before being serialized to JSON.".format(
                    type(self).__name__
                )

                assert (
                    self._run is run
                ), "We don't support referring to media files across runs."

                # The following assertion is guaranteed to pass
                # by definition is_bound, but is needed for
                # mypy to understand that this is a string below.
                assert isinstance(self._path, str)
                json_obj["path"] = LogicalPath(
                    os.path.relpath(self._path, self._run.dir)
                )

        elif isinstance(run, wandb.Artifact):
            if self.file_is_set():
                # The following two assertions are guaranteed to pass
                # by definition of the call above, but are needed for
                # mypy to understand that these are strings below.
                assert isinstance(self._path, str)
                assert isinstance(self._sha256, str)
                artifact = run  # Checks if the concrete image has already been added to this artifact
                name = artifact.get_added_local_path_name(self._path)
                if name is None:
                    if self._is_tmp:
                        name = os.path.join(
                            self.get_media_subdir(), os.path.basename(self._path)
                        )
                    else:
                        # If the files is not temporary, include the first 8 characters of the file's SHA256 to
                        # avoid name collisions. This way, if there are two images `dir1/img.png` and `dir2/img.png`
                        # we end up with a unique path for each.
                        name = os.path.join(
                            self.get_media_subdir(),
                            self._sha256[:20],
                            os.path.basename(self._path),
                        )

                    # if not, check to see if there is a source artifact for this object
                    if (
                        self._artifact_source is not None
                        # and self._artifact_source.artifact != artifact
                    ):
                        default_root = self._artifact_source.artifact._default_root()
                        # if there is, get the name of the entry (this might make sense to move to a helper off artifact)
                        if self._path.startswith(default_root):
                            name = self._path[len(default_root) :]
                            name = name.lstrip(os.sep)

                        # Add this image as a reference
                        path = self._artifact_source.artifact.get_entry(name)
                        artifact.add_reference(path.ref_url(), name=name)
                    elif (
                        isinstance(self, Audio) or isinstance(self, Image)
                    ) and self.path_is_reference(self._path):
                        artifact.add_reference(self._path, name=name)
                    else:
                        entry = artifact.add_file(
                            self._path, name=name, is_tmp=self._is_tmp
                        )
                        name = entry.path

                json_obj["path"] = name
                json_obj["sha256"] = self._sha256
        json_obj["_type"] = self._log_type
        return json_obj

    @classmethod
    def from_json(
        cls: Type["Media"], json_obj: dict, source_artifact: "Artifact"
    ) -> "Media":
        """Likely will need to override for any more complicated media objects."""
        return cls(source_artifact.get_entry(json_obj["path"]).download())

    def __eq__(self, other: object) -> bool:
        """Likely will need to override for any more complicated media objects."""
        # Equality is content-based: same class and same SHA-256 digest.
        return (
            isinstance(other, self.__class__)
            and hasattr(self, "_sha256")
            and hasattr(other, "_sha256")
            and self._sha256 == other._sha256
        )

    @staticmethod
    def path_is_reference(path: Optional[str]) -> bool:
        """Return True for gs://, s3://, http:// or https:// reference paths."""
        return bool(path and re.match(r"^(gs|s3|https?)://", path))
270
+
271
+
272
class BatchableMedia(Media):
    """Base class for media logged in batches, e.g. images and thumbnails.

    Outside of images, batching is mostly a way to keep files organized by
    name inside the media directory.
    """

    def __init__(self) -> None:
        super().__init__()

    @classmethod
    def seq_to_json(
        cls: Type["BatchableMedia"],
        seq: Sequence["BatchableMedia"],
        run: "LocalRun",
        key: str,
        step: Union[int, str],
    ) -> dict:
        # Subclasses must serialize a whole batch in one call.
        raise NotImplementedError
291
+
292
+
293
def _numpy_arrays_to_lists(
    payload: Union[dict, Sequence, "np.ndarray"],
) -> Union[Sequence, dict, str, int, float, bool]:
    """Recursively replace numpy arrays in *payload* with plain lists.

    Keeps payloads JSON-friendly so arrays are not converted to histograms;
    primarily used for Plotly figures.
    """
    if isinstance(payload, dict):
        return {k: _numpy_arrays_to_lists(v) for k, v in payload.items()}
    if isinstance(payload, Sequence) and not isinstance(payload, str):
        return [_numpy_arrays_to_lists(item) for item in payload]
    if util.is_numpy_array(payload):
        if TYPE_CHECKING:
            payload = cast("np.ndarray", payload)
        # A 0-d array cannot be iterated; wrap its scalar .tolist() result.
        items = payload.tolist() if payload.ndim > 0 else [payload.tolist()]
        return [_numpy_arrays_to_lists(item) for item in items]
    # Protects against logging non-serializable objects.
    if isinstance(payload, Media):
        return str(payload.__class__.__name__)
    return payload  # type: ignore
parrot/lib/python3.10/site-packages/wandb/sdk/data_types/base_types/wb_value.py ADDED
@@ -0,0 +1,274 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Type, Union
2
+
3
+ from wandb import util
4
+
5
+ if TYPE_CHECKING: # pragma: no cover
6
+ from wandb.sdk.artifacts.artifact import Artifact
7
+
8
+ from ...wandb_run import Run as LocalRun
9
+
10
+ TypeMappingType = Dict[str, Type["WBValue"]]
11
+
12
+
13
+ def _server_accepts_client_ids() -> bool:
14
+ from wandb.util import parse_version
15
+
16
+ # First, if we are offline, assume the backend server cannot
17
+ # accept client IDs. Unfortunately, this is the best we can do
18
+ # until we are sure that all local versions are > "0.11.0" max_cli_version.
19
+ # The practical implication is that tables logged in offline mode
20
+ # will not show up in the workspace (but will still show up in artifacts). This
21
+ # means we never lose data, and we can still view using weave. If we decided
22
+ # to use client ids in offline mode, then the manifests and artifact data
23
+ # would never be resolvable and would lead to failed uploads. Our position
24
+ # is to never lose data - and instead take the tradeoff in the UI.
25
+ if util._is_offline():
26
+ return False
27
+
28
+ # If the script is online, request the max_cli_version and ensure the server
29
+ # is of a high enough version.
30
+ max_cli_version = util._get_max_cli_version()
31
+ if max_cli_version is None:
32
+ return False
33
+ accepts_client_ids: bool = parse_version("0.11.0") <= parse_version(max_cli_version)
34
+ return accepts_client_ids
35
+
36
+
37
+ class _WBValueArtifactSource:
38
+ artifact: "Artifact"
39
+ name: Optional[str]
40
+
41
+ def __init__(self, artifact: "Artifact", name: Optional[str] = None) -> None:
42
+ self.artifact = artifact
43
+ self.name = name
44
+
45
+
46
+ class _WBValueArtifactTarget:
47
+ artifact: "Artifact"
48
+ name: Optional[str]
49
+
50
+ def __init__(self, artifact: "Artifact", name: Optional[str] = None) -> None:
51
+ self.artifact = artifact
52
+ self.name = name
53
+
54
+
55
+ class WBValue:
56
+ """Typed objects that can be logged with `wandb.log()` and visualized by wandb.
57
+
58
+ The objects will be serialized as JSON and always have a _type attribute that
59
+ indicates how to interpret the other fields.
60
+ """
61
+
62
+ # Class Attributes
63
+ _type_mapping: ClassVar[Optional["TypeMappingType"]] = None
64
+ # override _log_type to indicate the type which the subclass deserializes
65
+ _log_type: ClassVar[Optional[str]] = None
66
+
67
+ # Instance Attributes
68
+ _artifact_source: Optional[_WBValueArtifactSource]
69
+ _artifact_target: Optional[_WBValueArtifactTarget]
70
+
71
+ def __init__(self) -> None:
72
+ self._artifact_source = None
73
+ self._artifact_target = None
74
+
75
+ def to_json(self, run_or_artifact: Union["LocalRun", "Artifact"]) -> dict:
76
+ """Serialize the object into a JSON blob.
77
+
78
+ Uses current run or artifact to store additional data.
79
+
80
+ Args:
81
+ run_or_artifact (wandb.Run | wandb.Artifact): the Run or Artifact for which
82
+ this object should be generating JSON for - this is useful to to store
83
+ additional data if needed.
84
+
85
+ Returns:
86
+ dict: JSON representation
87
+ """
88
+ raise NotImplementedError
89
+
90
+ @classmethod
91
+ def from_json(
92
+ cls: Type["WBValue"], json_obj: dict, source_artifact: "Artifact"
93
+ ) -> "WBValue":
94
+ """Deserialize a `json_obj` into it's class representation.
95
+
96
+ If additional resources were stored in the `run_or_artifact` artifact during the
97
+ `to_json` call, then those resources should be in the `source_artifact`.
98
+
99
+ Args:
100
+ json_obj (dict): A JSON dictionary to deserialize source_artifact
101
+ (wandb.Artifact): An artifact which will hold any additional
102
+ resources which were stored during the `to_json` function.
103
+ """
104
+ raise NotImplementedError
105
+
106
+ @classmethod
107
+ def with_suffix(cls: Type["WBValue"], name: str, filetype: str = "json") -> str:
108
+ """Get the name with the appropriate suffix.
109
+
110
+ Args:
111
+ name (str): the name of the file
112
+ filetype (str, optional): the filetype to use. Defaults to "json".
113
+
114
+ Returns:
115
+ str: a filename which is suffixed with it's `_log_type` followed by the
116
+ filetype.
117
+ """
118
+ if cls._log_type is not None:
119
+ suffix = cls._log_type + "." + filetype
120
+ else:
121
+ suffix = filetype
122
+ if not name.endswith(suffix):
123
+ return name + "." + suffix
124
+ return name
125
+
126
+ @staticmethod
127
+ def init_from_json(
128
+ json_obj: dict, source_artifact: "Artifact"
129
+ ) -> Optional["WBValue"]:
130
+ """Initialize a `WBValue` from a JSON blob based on the class that creatd it.
131
+
132
+ Looks through all subclasses and tries to match the json obj with the class
133
+ which created it. It will then call that subclass' `from_json` method.
134
+ Importantly, this function will set the return object's `source_artifact`
135
+ attribute to the passed in source artifact. This is critical for artifact
136
+ bookkeeping. If you choose to create a wandb.Value via it's `from_json` method,
137
+ make sure to properly set this `artifact_source` to avoid data duplication.
138
+
139
+ Args:
140
+ json_obj (dict): A JSON dictionary to deserialize. It must contain a `_type`
141
+ key. This is used to lookup the correct subclass to use.
142
+ source_artifact (wandb.Artifact): An artifact which will hold any additional
143
+ resources which were stored during the `to_json` function.
144
+
145
+ Returns:
146
+ wandb.Value: a newly created instance of a subclass of wandb.Value
147
+ """
148
+ class_option = WBValue.type_mapping().get(json_obj["_type"])
149
+ if class_option is not None:
150
+ obj = class_option.from_json(json_obj, source_artifact)
151
+ obj._set_artifact_source(source_artifact)
152
+ return obj
153
+
154
+ return None
155
+
156
+ @staticmethod
157
+ def type_mapping() -> "TypeMappingType":
158
+ """Return a map from `_log_type` to subclass. Used to lookup correct types for deserialization.
159
+
160
+ Returns:
161
+ dict: dictionary of str:class
162
+ """
163
+ if WBValue._type_mapping is None:
164
+ WBValue._type_mapping = {}
165
+ frontier = [WBValue]
166
+ explored = set()
167
+ while len(frontier) > 0:
168
+ class_option = frontier.pop()
169
+ explored.add(class_option)
170
+ if class_option._log_type is not None:
171
+ WBValue._type_mapping[class_option._log_type] = class_option
172
+ for subclass in class_option.__subclasses__():
173
+ if subclass not in explored:
174
+ frontier.append(subclass)
175
+ return WBValue._type_mapping
176
+
177
+ def __eq__(self, other: object) -> bool:
178
+ return id(self) == id(other)
179
+
180
+ def __ne__(self, other: object) -> bool:
181
+ return not self.__eq__(other)
182
+
183
+ def to_data_array(self) -> List[Any]:
184
+ """Convert the object to a list of primitives representing the underlying data."""
185
+ raise NotImplementedError
186
+
187
+ def _set_artifact_source(
188
+ self, artifact: "Artifact", name: Optional[str] = None
189
+ ) -> None:
190
+ assert (
191
+ self._artifact_source is None
192
+ ), "Cannot update artifact_source. Existing source: {}/{}".format(
193
+ self._artifact_source.artifact, self._artifact_source.name
194
+ )
195
+ self._artifact_source = _WBValueArtifactSource(artifact, name)
196
+
197
+ def _set_artifact_target(
198
+ self, artifact: "Artifact", name: Optional[str] = None
199
+ ) -> None:
200
+ assert (
201
+ self._artifact_target is None
202
+ ), "Cannot update artifact_target. Existing target: {}/{}".format(
203
+ self._artifact_target.artifact, self._artifact_target.name
204
+ )
205
+ self._artifact_target = _WBValueArtifactTarget(artifact, name)
206
+
207
+ def _get_artifact_entry_ref_url(self) -> Optional[str]:
208
+ # If the object is coming from another artifact
209
+ if self._artifact_source and self._artifact_source.name:
210
+ ref_entry = self._artifact_source.artifact.get_entry(
211
+ type(self).with_suffix(self._artifact_source.name)
212
+ )
213
+ return str(ref_entry.ref_url())
214
+ # Else, if the object is destined for another artifact and we support client IDs
215
+ elif (
216
+ self._artifact_target
217
+ and self._artifact_target.name
218
+ and self._artifact_target.artifact._client_id is not None
219
+ and self._artifact_target.artifact._final
220
+ and _server_accepts_client_ids()
221
+ ):
222
+ return "wandb-client-artifact://{}/{}".format(
223
+ self._artifact_target.artifact._client_id,
224
+ type(self).with_suffix(self._artifact_target.name),
225
+ )
226
+ # Else if we do not support client IDs, but online, then block on upload
227
+ # Note: this is old behavior just to stay backwards compatible
228
+ # with older server versions. This code path should be removed
229
+ # once those versions are no longer supported. This path uses a .wait
230
+ # which blocks the user process on artifact upload.
231
+ elif (
232
+ self._artifact_target
233
+ and self._artifact_target.name
234
+ and self._artifact_target.artifact._is_draft_save_started()
235
+ and not util._is_offline()
236
+ and not _server_accepts_client_ids()
237
+ ):
238
+ self._artifact_target.artifact.wait()
239
+ ref_entry = self._artifact_target.artifact.get_entry(
240
+ type(self).with_suffix(self._artifact_target.name)
241
+ )
242
+ return str(ref_entry.ref_url())
243
+ return None
244
+
245
+ def _get_artifact_entry_latest_ref_url(self) -> Optional[str]:
246
+ if (
247
+ self._artifact_target
248
+ and self._artifact_target.name
249
+ and self._artifact_target.artifact._client_id is not None
250
+ and self._artifact_target.artifact._final
251
+ and _server_accepts_client_ids()
252
+ ):
253
+ return "wandb-client-artifact://{}:latest/{}".format(
254
+ self._artifact_target.artifact._sequence_client_id,
255
+ type(self).with_suffix(self._artifact_target.name),
256
+ )
257
+ # Else if we do not support client IDs, then block on upload
258
+ # Note: this is old behavior just to stay backwards compatible
259
+ # with older server versions. This code path should be removed
260
+ # once those versions are no longer supported. This path uses a .wait
261
+ # which blocks the user process on artifact upload.
262
+ elif (
263
+ self._artifact_target
264
+ and self._artifact_target.name
265
+ and self._artifact_target.artifact._is_draft_save_started()
266
+ and not util._is_offline()
267
+ and not _server_accepts_client_ids()
268
+ ):
269
+ self._artifact_target.artifact.wait()
270
+ ref_entry = self._artifact_target.artifact.get_entry(
271
+ type(self).with_suffix(self._artifact_target.name)
272
+ )
273
+ return str(ref_entry.ref_url())
274
+ return None
parrot/lib/python3.10/site-packages/wandb/sdk/data_types/helper_types/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (185 Bytes). View file
 
parrot/lib/python3.10/site-packages/wandb/sdk/data_types/helper_types/__pycache__/classes.cpython-310.pyc ADDED
Binary file (5.1 kB). View file
 
parrot/lib/python3.10/site-packages/wandb/sdk/data_types/helper_types/__pycache__/image_mask.cpython-310.pyc ADDED
Binary file (8.28 kB). View file
 
parrot/lib/python3.10/site-packages/wandb/sdk/data_types/helper_types/classes.py ADDED
@@ -0,0 +1,159 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ from typing import TYPE_CHECKING, Any, Dict, Optional, Sequence, Type, Union
3
+
4
+ from .. import _dtypes
5
+ from ..base_types.media import Media
6
+
7
+ if TYPE_CHECKING: # pragma: no cover
8
+ from wandb.sdk.artifacts.artifact import Artifact
9
+
10
+ from ...wandb_run import Run as LocalRun
11
+
12
+
13
+ class Classes(Media):
14
+ _log_type = "classes"
15
+
16
+ _class_set: Sequence[dict]
17
+
18
+ def __init__(self, class_set: Sequence[dict]) -> None:
19
+ """Classes is holds class metadata intended to be used in concert with other objects when visualizing artifacts.
20
+
21
+ Args:
22
+ class_set (list): list of dicts in the form of {"id":int|str, "name":str}
23
+ """
24
+ super().__init__()
25
+ for class_obj in class_set:
26
+ assert "id" in class_obj and "name" in class_obj
27
+ self._class_set = class_set
28
+
29
+ @classmethod
30
+ def from_json(
31
+ cls: Type["Classes"],
32
+ json_obj: dict,
33
+ source_artifact: Optional["Artifact"],
34
+ ) -> "Classes":
35
+ return cls(json_obj.get("class_set")) # type: ignore
36
+
37
+ def to_json(self, run_or_artifact: Optional[Union["LocalRun", "Artifact"]]) -> dict:
38
+ json_obj = {}
39
+ # This is a bit of a hack to allow _ClassesIdType to
40
+ # be able to operate fully without an artifact in play.
41
+ # In all other cases, artifact should be a true artifact.
42
+ if run_or_artifact is not None:
43
+ json_obj = super().to_json(run_or_artifact)
44
+ json_obj["_type"] = Classes._log_type
45
+ json_obj["class_set"] = self._class_set
46
+ return json_obj
47
+
48
+ def get_type(self) -> "_ClassesIdType":
49
+ return _ClassesIdType(self)
50
+
51
+ def __ne__(self, other: object) -> bool:
52
+ return not self.__eq__(other)
53
+
54
+ def __eq__(self, other: object) -> bool:
55
+ if isinstance(other, Classes):
56
+ return self._class_set == other._class_set
57
+ else:
58
+ return False
59
+
60
+
61
+ class _ClassesIdType(_dtypes.Type):
62
+ name = "classesId"
63
+ legacy_names = ["wandb.Classes_id"]
64
+ types = [Classes]
65
+
66
+ def __init__(
67
+ self,
68
+ classes_obj: Optional[Classes] = None,
69
+ valid_ids: Optional["_dtypes.UnionType"] = None,
70
+ ):
71
+ if valid_ids is None:
72
+ valid_ids = _dtypes.UnionType()
73
+ elif isinstance(valid_ids, list):
74
+ valid_ids = _dtypes.UnionType(
75
+ [_dtypes.ConstType(item) for item in valid_ids]
76
+ )
77
+ elif isinstance(valid_ids, _dtypes.UnionType):
78
+ valid_ids = valid_ids
79
+ else:
80
+ raise TypeError("valid_ids must be None, list, or UnionType")
81
+
82
+ if classes_obj is None:
83
+ classes_obj = Classes(
84
+ [
85
+ {"id": _id.params["val"], "name": str(_id.params["val"])}
86
+ for _id in valid_ids.params["allowed_types"]
87
+ ]
88
+ )
89
+ elif not isinstance(classes_obj, Classes):
90
+ raise TypeError("valid_ids must be None, or instance of Classes")
91
+ else:
92
+ valid_ids = _dtypes.UnionType(
93
+ [
94
+ _dtypes.ConstType(class_obj["id"])
95
+ for class_obj in classes_obj._class_set
96
+ ]
97
+ )
98
+
99
+ self.wb_classes_obj_ref = classes_obj
100
+ self.params.update({"valid_ids": valid_ids})
101
+
102
+ def assign(self, py_obj: Optional[Any] = None) -> "_dtypes.Type":
103
+ return self.assign_type(_dtypes.ConstType(py_obj))
104
+
105
+ def assign_type(self, wb_type: "_dtypes.Type") -> "_dtypes.Type":
106
+ valid_ids = self.params["valid_ids"].assign_type(wb_type)
107
+ if not isinstance(valid_ids, _dtypes.InvalidType):
108
+ return self
109
+
110
+ return _dtypes.InvalidType()
111
+
112
+ @classmethod
113
+ def from_obj(cls, py_obj: Optional[Any] = None) -> "_dtypes.Type":
114
+ return cls(py_obj)
115
+
116
+ def to_json(self, artifact: Optional["Artifact"] = None) -> Dict[str, Any]:
117
+ cl_dict = super().to_json(artifact)
118
+ # TODO (tss): Refactor this block with the similar one in wandb.Image.
119
+ # This is a bit of a smell that the classes object does not follow
120
+ # the same file-pattern as other media types.
121
+ if artifact is not None:
122
+ class_name = os.path.join("media", "cls")
123
+ classes_entry = artifact.add(self.wb_classes_obj_ref, class_name)
124
+ cl_dict["params"]["classes_obj"] = {
125
+ "type": "classes-file",
126
+ "path": classes_entry.path,
127
+ "digest": classes_entry.digest, # is this needed really?
128
+ }
129
+ else:
130
+ cl_dict["params"]["classes_obj"] = self.wb_classes_obj_ref.to_json(artifact)
131
+ return cl_dict
132
+
133
+ @classmethod
134
+ def from_json(
135
+ cls,
136
+ json_dict: Dict[str, Any],
137
+ artifact: Optional["Artifact"] = None,
138
+ ) -> "_dtypes.Type":
139
+ classes_obj = None
140
+ if (
141
+ json_dict.get("params", {}).get("classes_obj", {}).get("type")
142
+ == "classes-file"
143
+ ):
144
+ if artifact is not None:
145
+ classes_obj = artifact.get(
146
+ json_dict.get("params", {}).get("classes_obj", {}).get("path")
147
+ )
148
+ assert classes_obj is None or isinstance(classes_obj, Classes)
149
+ else:
150
+ raise RuntimeError("Expected artifact to be non-null.")
151
+ else:
152
+ classes_obj = Classes.from_json(
153
+ json_dict["params"]["classes_obj"], artifact
154
+ )
155
+
156
+ return cls(classes_obj)
157
+
158
+
159
+ _dtypes.TypeRegistry.add(_ClassesIdType)
parrot/lib/python3.10/site-packages/wandb/sdk/launch/__init__.py ADDED
@@ -0,0 +1,14 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from ._launch import launch
2
+ from ._launch_add import launch_add
3
+ from .agent.agent import LaunchAgent
4
+ from .inputs.manage import manage_config_file, manage_wandb_config
5
+ from .utils import load_wandb_config
6
+
7
+ __all__ = [
8
+ "LaunchAgent",
9
+ "launch",
10
+ "launch_add",
11
+ "load_wandb_config",
12
+ "manage_config_file",
13
+ "manage_wandb_config",
14
+ ]
parrot/lib/python3.10/site-packages/wandb/sdk/launch/_launch.py ADDED
@@ -0,0 +1,330 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import asyncio
2
+ import logging
3
+ import os
4
+ import sys
5
+ from typing import Any, Dict, List, Optional, Tuple
6
+
7
+ import yaml
8
+
9
+ import wandb
10
+ from wandb.apis.internal import Api
11
+
12
+ from . import loader
13
+ from ._project_spec import LaunchProject
14
+ from .agent import LaunchAgent
15
+ from .agent.agent import construct_agent_configs
16
+ from .environment.local_environment import LocalEnvironment
17
+ from .errors import ExecutionError, LaunchError
18
+ from .runner.abstract import AbstractRun
19
+ from .utils import (
20
+ LAUNCH_CONFIG_FILE,
21
+ PROJECT_SYNCHRONOUS,
22
+ construct_launch_spec,
23
+ validate_launch_spec_source,
24
+ )
25
+
26
+ _logger = logging.getLogger(__name__)
27
+
28
+
29
+ def set_launch_logfile(logfile: str) -> None:
30
+ """Set the logfile for the launch agent."""
31
+ # Get logger of parent module
32
+ _launch_logger = logging.getLogger("wandb.sdk.launch")
33
+ if logfile == "-":
34
+ logfile_stream = sys.stdout
35
+ else:
36
+ try:
37
+ logfile_stream = open(logfile, "w")
38
+ # check if file is writable
39
+ except Exception as e:
40
+ wandb.termerror(
41
+ f"Could not open {logfile} for writing logs. Please check "
42
+ f"the path and permissions.\nError: {e}"
43
+ )
44
+ return
45
+
46
+ wandb.termlog(
47
+ f"Internal agent logs printing to {'stdout' if logfile == '-' else logfile}. "
48
+ )
49
+ handler = logging.StreamHandler(logfile_stream)
50
+ handler.formatter = logging.Formatter(
51
+ "%(asctime)s %(levelname)-7s %(threadName)-10s:%(process)d "
52
+ "[%(filename)s:%(funcName)s():%(lineno)s] %(message)s"
53
+ )
54
+ _launch_logger.addHandler(handler)
55
+ _launch_logger.log(logging.INFO, "Internal agent logs printing to %s", logfile)
56
+
57
+
58
+ def resolve_agent_config( # noqa: C901
59
+ entity: Optional[str],
60
+ max_jobs: Optional[int],
61
+ queues: Optional[Tuple[str]],
62
+ config: Optional[str],
63
+ verbosity: Optional[int],
64
+ ) -> Tuple[Dict[str, Any], Api]:
65
+ """Resolve the agent config.
66
+
67
+ Arguments:
68
+ api (Api): The api.
69
+ entity (str): The entity.
70
+ max_jobs (int): The max number of jobs.
71
+ queues (Tuple[str]): The queues.
72
+ config (str): The config.
73
+ verbosity (int): How verbose to print, 0 or None = default, 1 = print status every 20 seconds, 2 = also print debugging information
74
+
75
+ Returns:
76
+ Tuple[Dict[str, Any], Api]: The resolved config and api.
77
+ """
78
+ defaults = {
79
+ "max_jobs": 1,
80
+ "max_schedulers": 1,
81
+ "queues": [],
82
+ "registry": {},
83
+ "builder": {},
84
+ "verbosity": 0,
85
+ }
86
+ resolved_config: Dict[str, Any] = defaults
87
+ config_path = config or os.path.expanduser(LAUNCH_CONFIG_FILE)
88
+ if os.path.isfile(config_path):
89
+ launch_config = {}
90
+ with open(config_path) as f:
91
+ try:
92
+ launch_config = yaml.safe_load(f)
93
+ # This is considered unreachable by mypy, but it's not.
94
+ if launch_config is None:
95
+ launch_config = {} # type: ignore
96
+ except yaml.YAMLError as e:
97
+ raise LaunchError(f"Invalid launch agent config: {e}")
98
+ resolved_config.update(launch_config.items())
99
+ elif config is not None:
100
+ raise LaunchError(
101
+ f"Could not find use specified launch config file: {config_path}"
102
+ )
103
+ if os.environ.get("WANDB_ENTITY") is not None:
104
+ resolved_config.update({"entity": os.environ.get("WANDB_ENTITY")})
105
+ if os.environ.get("WANDB_LAUNCH_MAX_JOBS") is not None:
106
+ resolved_config.update(
107
+ {"max_jobs": int(os.environ.get("WANDB_LAUNCH_MAX_JOBS", 1))}
108
+ )
109
+
110
+ if entity is not None:
111
+ resolved_config.update({"entity": entity})
112
+ if max_jobs is not None:
113
+ resolved_config.update({"max_jobs": int(max_jobs)})
114
+ if queues:
115
+ resolved_config.update({"queues": list(queues)})
116
+ if verbosity:
117
+ resolved_config.update({"verbosity": int(verbosity)})
118
+ # queue -> queues
119
+ if resolved_config.get("queue"):
120
+ if isinstance(resolved_config.get("queue"), str):
121
+ resolved_config["queues"].append(resolved_config["queue"])
122
+ else:
123
+ raise LaunchError(
124
+ f"Invalid launch agent config for key 'queue' with type: {type(resolved_config.get('queue'))}"
125
+ + " (expected str). Specify multiple queues with the 'queues' key"
126
+ )
127
+
128
+ keys = ["entity"]
129
+ settings = {
130
+ k: resolved_config.get(k) for k in keys if resolved_config.get(k) is not None
131
+ }
132
+
133
+ api = Api(default_settings=settings)
134
+
135
+ if resolved_config.get("entity") is None:
136
+ resolved_config.update({"entity": api.default_entity})
137
+
138
+ return resolved_config, api
139
+
140
+
141
+ def create_and_run_agent(
142
+ api: Api,
143
+ config: Dict[str, Any],
144
+ ) -> None:
145
+ try:
146
+ from wandb.sdk.launch.agent import config as agent_config
147
+ except ModuleNotFoundError:
148
+ raise LaunchError(
149
+ "wandb launch-agent requires pydantic to be installed. "
150
+ "Please install with `pip install wandb[launch]`"
151
+ )
152
+ try:
153
+ agent_config.AgentConfig(**config)
154
+ except agent_config.ValidationError as e:
155
+ errors = e.errors()
156
+ for error in errors:
157
+ loc = ".".join([str(x) for x in error.get("loc", [])])
158
+ msg = f"Agent config error in field {loc}"
159
+ value = error.get("input")
160
+ if not isinstance(value, dict):
161
+ msg += f" (value: {value})"
162
+ msg += f": {error['msg']}"
163
+ wandb.termerror(msg)
164
+ raise LaunchError("Invalid launch agent config")
165
+ agent = LaunchAgent(api, config)
166
+ try:
167
+ asyncio.run(agent.loop())
168
+ except asyncio.CancelledError:
169
+ pass
170
+
171
+
172
+ async def _launch(
173
+ api: Api,
174
+ job: Optional[str] = None,
175
+ name: Optional[str] = None,
176
+ project: Optional[str] = None,
177
+ entity: Optional[str] = None,
178
+ docker_image: Optional[str] = None,
179
+ entry_point: Optional[List[str]] = None,
180
+ version: Optional[str] = None,
181
+ resource: Optional[str] = None,
182
+ resource_args: Optional[Dict[str, Any]] = None,
183
+ launch_config: Optional[Dict[str, Any]] = None,
184
+ synchronous: Optional[bool] = None,
185
+ run_id: Optional[str] = None,
186
+ repository: Optional[str] = None,
187
+ ) -> AbstractRun:
188
+ """Helper that delegates to the project-running method corresponding to the passed-in backend."""
189
+ if launch_config is None:
190
+ launch_config = {}
191
+ if resource is None:
192
+ resource = "local-container"
193
+ launch_spec = construct_launch_spec(
194
+ None,
195
+ job,
196
+ api,
197
+ name,
198
+ project,
199
+ entity,
200
+ docker_image,
201
+ resource,
202
+ entry_point,
203
+ version,
204
+ resource_args,
205
+ launch_config,
206
+ run_id,
207
+ repository,
208
+ author=None,
209
+ )
210
+ validate_launch_spec_source(launch_spec)
211
+ launch_project = LaunchProject.from_spec(launch_spec, api)
212
+ launch_project.fetch_and_validate_project()
213
+ entrypoint = launch_project.get_job_entry_point()
214
+ image_uri = (
215
+ launch_project.docker_image or launch_project.job_base_image
216
+ ) # Either set by user or None.
217
+
218
+ # construct runner config.
219
+ runner_config: Dict[str, Any] = {}
220
+ runner_config[PROJECT_SYNCHRONOUS] = synchronous
221
+
222
+ config = launch_config or {}
223
+ environment_config, build_config, registry_config = construct_agent_configs(config)
224
+ environment = loader.environment_from_config(environment_config)
225
+ if environment is not None and not isinstance(environment, LocalEnvironment):
226
+ await environment.verify()
227
+ registry = loader.registry_from_config(registry_config, environment)
228
+ builder = loader.builder_from_config(build_config, environment, registry)
229
+ if not (launch_project.docker_image or launch_project.job_base_image):
230
+ assert entrypoint
231
+ image_uri = await builder.build_image(launch_project, entrypoint, None)
232
+ backend = loader.runner_from_config(
233
+ resource, api, runner_config, environment, registry
234
+ )
235
+ if backend:
236
+ assert image_uri
237
+ submitted_run = await backend.run(launch_project, image_uri)
238
+ # this check will always pass, run is only optional in the agent case where
239
+ # a run queue id is present on the backend config
240
+ assert submitted_run
241
+ return submitted_run
242
+ else:
243
+ raise ExecutionError(
244
+ f"Unavailable backend {resource}, available backends: {', '.join(loader.WANDB_RUNNERS)}"
245
+ )
246
+
247
+
248
+ def launch(
249
+ api: Api,
250
+ job: Optional[str] = None,
251
+ entry_point: Optional[List[str]] = None,
252
+ version: Optional[str] = None,
253
+ name: Optional[str] = None,
254
+ resource: Optional[str] = None,
255
+ resource_args: Optional[Dict[str, Any]] = None,
256
+ project: Optional[str] = None,
257
+ entity: Optional[str] = None,
258
+ docker_image: Optional[str] = None,
259
+ config: Optional[Dict[str, Any]] = None,
260
+ synchronous: Optional[bool] = True,
261
+ run_id: Optional[str] = None,
262
+ repository: Optional[str] = None,
263
+ ) -> AbstractRun:
264
+ """Launch a W&B launch experiment.
265
+
266
+ Arguments:
267
+ job: string reference to a wandb.Job eg: wandb/test/my-job:latest
268
+ api: An instance of a wandb Api from wandb.apis.internal.
269
+ entry_point: Entry point to run within the project. Defaults to using the entry point used
270
+ in the original run for wandb URIs, or main.py for git repository URIs.
271
+ version: For Git-based projects, either a commit hash or a branch name.
272
+ name: Name run under which to launch the run.
273
+ resource: Execution backend for the run.
274
+ resource_args: Resource related arguments for launching runs onto a remote backend.
275
+ Will be stored on the constructed launch config under ``resource_args``.
276
+ project: Target project to send launched run to
277
+ entity: Target entity to send launched run to
278
+ config: A dictionary containing the configuration for the run. May also contain
279
+ resource specific arguments under the key "resource_args".
280
+ synchronous: Whether to block while waiting for a run to complete. Defaults to True.
281
+ Note that if ``synchronous`` is False and ``backend`` is "local-container", this
282
+ method will return, but the current process will block when exiting until
283
+ the local run completes. If the current process is interrupted, any
284
+ asynchronous runs launched via this method will be terminated. If
285
+ ``synchronous`` is True and the run fails, the current process will
286
+ error out as well.
287
+ run_id: ID for the run (To ultimately replace the :name: field)
288
+ repository: string name of repository path for remote registry
289
+
290
+ Example:
291
+ ```python
292
+ from wandb.sdk.launch import launch
293
+
294
+ job = "wandb/jobs/Hello World:latest"
295
+ params = {"epochs": 5}
296
+ # Run W&B project and create a reproducible docker environment
297
+ # on a local host
298
+ api = wandb.apis.internal.Api()
299
+ launch(api, job, parameters=params)
300
+ ```
301
+
302
+
303
+ Returns:
304
+ an instance of`wandb.launch.SubmittedRun` exposing information (e.g. run ID)
305
+ about the launched run.
306
+
307
+ Raises:
308
+ `wandb.exceptions.ExecutionError` If a run launched in blocking mode
309
+ is unsuccessful.
310
+ """
311
+ submitted_run_obj = asyncio.run(
312
+ _launch(
313
+ job=job,
314
+ name=name,
315
+ project=project,
316
+ entity=entity,
317
+ docker_image=docker_image,
318
+ entry_point=entry_point,
319
+ version=version,
320
+ resource=resource,
321
+ resource_args=resource_args,
322
+ launch_config=config,
323
+ synchronous=synchronous,
324
+ api=api,
325
+ run_id=run_id,
326
+ repository=repository,
327
+ )
328
+ )
329
+
330
+ return submitted_run_obj
parrot/lib/python3.10/site-packages/wandb/sdk/launch/_launch_add.py ADDED
@@ -0,0 +1,255 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import asyncio
2
+ import pprint
3
+ from typing import Any, Dict, List, Optional, Union
4
+
5
+ import wandb
6
+ import wandb.apis.public as public
7
+ from wandb.apis.internal import Api
8
+ from wandb.errors import CommError
9
+ from wandb.sdk.launch.builder.build import build_image_from_project
10
+ from wandb.sdk.launch.errors import LaunchError
11
+ from wandb.sdk.launch.utils import (
12
+ LAUNCH_DEFAULT_PROJECT,
13
+ LOG_PREFIX,
14
+ construct_launch_spec,
15
+ validate_launch_spec_source,
16
+ )
17
+
18
+ from ._project_spec import LaunchProject
19
+
20
+
21
+ def push_to_queue(
22
+ api: Api,
23
+ queue_name: str,
24
+ launch_spec: Dict[str, Any],
25
+ template_variables: Optional[dict],
26
+ project_queue: str,
27
+ priority: Optional[int] = None,
28
+ ) -> Any:
29
+ return api.push_to_run_queue(
30
+ queue_name, launch_spec, template_variables, project_queue, priority
31
+ )
32
+
33
+
34
+ def launch_add(
35
+ uri: Optional[str] = None,
36
+ job: Optional[str] = None,
37
+ config: Optional[Dict[str, Any]] = None,
38
+ template_variables: Optional[Dict[str, Union[float, int, str]]] = None,
39
+ project: Optional[str] = None,
40
+ entity: Optional[str] = None,
41
+ queue_name: Optional[str] = None,
42
+ resource: Optional[str] = None,
43
+ entry_point: Optional[List[str]] = None,
44
+ name: Optional[str] = None,
45
+ version: Optional[str] = None,
46
+ docker_image: Optional[str] = None,
47
+ project_queue: Optional[str] = None,
48
+ resource_args: Optional[Dict[str, Any]] = None,
49
+ run_id: Optional[str] = None,
50
+ build: Optional[bool] = False,
51
+ repository: Optional[str] = None,
52
+ sweep_id: Optional[str] = None,
53
+ author: Optional[str] = None,
54
+ priority: Optional[int] = None,
55
+ ) -> "public.QueuedRun":
56
+ """Enqueue a W&B launch experiment. With either a source uri, job or docker_image.
57
+
58
+ Arguments:
59
+ uri: URI of experiment to run. A wandb run uri or a Git repository URI.
60
+ job: string reference to a wandb.Job eg: wandb/test/my-job:latest
61
+ config: A dictionary containing the configuration for the run. May also contain
62
+ resource specific arguments under the key "resource_args"
63
+ template_variables: A dictionary containing values of template variables for a run queue.
64
+ Expected format of {"VAR_NAME": VAR_VALUE}
65
+ project: Target project to send launched run to
66
+ entity: Target entity to send launched run to
67
+ queue: the name of the queue to enqueue the run to
68
+ priority: the priority level of the job, where 1 is the highest priority
69
+ resource: Execution backend for the run: W&B provides built-in support for "local-container" backend
70
+ entry_point: Entry point to run within the project. Defaults to using the entry point used
71
+ in the original run for wandb URIs, or main.py for git repository URIs.
72
+ name: Name run under which to launch the run.
73
+ version: For Git-based projects, either a commit hash or a branch name.
74
+ docker_image: The name of the docker image to use for the run.
75
+ resource_args: Resource related arguments for launching runs onto a remote backend.
76
+ Will be stored on the constructed launch config under ``resource_args``.
77
+ run_id: optional string indicating the id of the launched run
78
+ build: optional flag defaulting to false, requires queue to be set
79
+ if build, an image is created, creates a job artifact, pushes a reference
80
+ to that job artifact to queue
81
+ repository: optional string to control the name of the remote repository, used when
82
+ pushing images to a registry
83
+ project_queue: optional string to control the name of the project for the queue. Primarily used
84
+ for back compatibility with project scoped queues
85
+
86
+
87
+ Example:
88
+ ```python
89
+ from wandb.sdk.launch import launch_add
90
+
91
+ project_uri = "https://github.com/wandb/examples"
92
+ params = {"alpha": 0.5, "l1_ratio": 0.01}
93
+ # Run W&B project and create a reproducible docker environment
94
+ # on a local host
95
+ api = wandb.apis.internal.Api()
96
+ launch_add(uri=project_uri, parameters=params)
97
+ ```
98
+
99
+
100
+ Returns:
101
+ an instance of`wandb.api.public.QueuedRun` which gives information about the
102
+ queued run, or if `wait_until_started` or `wait_until_finished` are called, gives access
103
+ to the underlying Run information.
104
+
105
+ Raises:
106
+ `wandb.exceptions.LaunchError` if unsuccessful
107
+ """
108
+ api = Api()
109
+
110
+ return _launch_add(
111
+ api,
112
+ job,
113
+ config,
114
+ template_variables,
115
+ project,
116
+ entity,
117
+ queue_name,
118
+ resource,
119
+ entry_point,
120
+ name,
121
+ version,
122
+ docker_image,
123
+ project_queue,
124
+ resource_args,
125
+ run_id=run_id,
126
+ build=build,
127
+ repository=repository,
128
+ sweep_id=sweep_id,
129
+ author=author,
130
+ priority=priority,
131
+ )
132
+
133
+
134
+ def _launch_add(
135
+ api: Api,
136
+ job: Optional[str],
137
+ config: Optional[Dict[str, Any]],
138
+ template_variables: Optional[dict],
139
+ project: Optional[str],
140
+ entity: Optional[str],
141
+ queue_name: Optional[str],
142
+ resource: Optional[str],
143
+ entry_point: Optional[List[str]],
144
+ name: Optional[str],
145
+ version: Optional[str],
146
+ docker_image: Optional[str],
147
+ project_queue: Optional[str],
148
+ resource_args: Optional[Dict[str, Any]] = None,
149
+ run_id: Optional[str] = None,
150
+ build: Optional[bool] = False,
151
+ repository: Optional[str] = None,
152
+ sweep_id: Optional[str] = None,
153
+ author: Optional[str] = None,
154
+ priority: Optional[int] = None,
155
+ ) -> "public.QueuedRun":
156
+ launch_spec = construct_launch_spec(
157
+ None,
158
+ job,
159
+ api,
160
+ name,
161
+ project,
162
+ entity,
163
+ docker_image,
164
+ resource,
165
+ entry_point,
166
+ version,
167
+ resource_args,
168
+ config,
169
+ run_id,
170
+ repository,
171
+ author,
172
+ sweep_id,
173
+ )
174
+
175
+ if build:
176
+ if resource == "local-process":
177
+ raise LaunchError(
178
+ "Cannot build a docker image for the resource: local-process"
179
+ )
180
+
181
+ if launch_spec.get("job") is not None:
182
+ wandb.termwarn("Build doesn't support setting a job. Overwriting job.")
183
+ launch_spec["job"] = None
184
+
185
+ launch_project = LaunchProject.from_spec(launch_spec, api)
186
+ docker_image_uri = asyncio.run(
187
+ build_image_from_project(launch_project, api, config or {})
188
+ )
189
+ run = wandb.run or wandb.init(
190
+ project=launch_spec["project"],
191
+ entity=launch_spec["entity"],
192
+ job_type="launch_job",
193
+ )
194
+
195
+ job_artifact = run._log_job_artifact_with_image( # type: ignore
196
+ docker_image_uri, launch_project.override_args
197
+ )
198
+ job_name = job_artifact.wait().name
199
+
200
+ job = f"{launch_spec['entity']}/{launch_spec['project']}/{job_name}"
201
+ launch_spec["job"] = job
202
+ launch_spec["uri"] = None # Remove given URI --> now in job
203
+
204
+ if queue_name is None:
205
+ queue_name = "default"
206
+ if project_queue is None:
207
+ project_queue = LAUNCH_DEFAULT_PROJECT
208
+ spec_template_vars = launch_spec.get("template_variables")
209
+ if isinstance(spec_template_vars, dict):
210
+ launch_spec.pop("template_variables")
211
+ if template_variables is None:
212
+ template_variables = spec_template_vars
213
+ else:
214
+ template_variables = {
215
+ **spec_template_vars,
216
+ **template_variables,
217
+ }
218
+
219
+ validate_launch_spec_source(launch_spec)
220
+ res = push_to_queue(
221
+ api, queue_name, launch_spec, template_variables, project_queue, priority
222
+ )
223
+
224
+ if res is None or "runQueueItemId" not in res:
225
+ raise LaunchError("Error adding run to queue")
226
+
227
+ updated_spec = res.get("runSpec")
228
+ if updated_spec:
229
+ if updated_spec.get("resource_args"):
230
+ launch_spec["resource_args"] = updated_spec.get("resource_args")
231
+ if updated_spec.get("resource"):
232
+ launch_spec["resource"] = updated_spec.get("resource")
233
+
234
+ if project_queue == LAUNCH_DEFAULT_PROJECT:
235
+ wandb.termlog(f"{LOG_PREFIX}Added run to queue {queue_name}.")
236
+ else:
237
+ wandb.termlog(f"{LOG_PREFIX}Added run to queue {project_queue}/{queue_name}.")
238
+ wandb.termlog(f"{LOG_PREFIX}Launch spec:\n{pprint.pformat(launch_spec)}\n")
239
+
240
+ public_api = public.Api()
241
+ if job is not None:
242
+ try:
243
+ public_api.artifact(job, type="job")
244
+ except (ValueError, CommError) as e:
245
+ raise LaunchError(f"Unable to fetch job with name {job}: {e}")
246
+
247
+ queued_run = public_api.queued_run(
248
+ launch_spec["entity"],
249
+ launch_spec["project"],
250
+ queue_name,
251
+ res["runQueueItemId"],
252
+ project_queue,
253
+ priority,
254
+ )
255
+ return queued_run # type: ignore
parrot/lib/python3.10/site-packages/wandb/sdk/launch/_project_spec.py ADDED
@@ -0,0 +1,566 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """Convert launch arguments into a runnable wandb launch script.
2
+
3
+ Arguments can come from a launch spec or call to wandb launch.
4
+ """
5
+
6
+ import enum
7
+ import json
8
+ import logging
9
+ import os
10
+ import shutil
11
+ import tempfile
12
+ from copy import deepcopy
13
+ from typing import TYPE_CHECKING, Any, Dict, List, Optional, cast
14
+
15
+ from six.moves import shlex_quote
16
+
17
+ import wandb
18
+ from wandb.apis.internal import Api
19
+ from wandb.errors import CommError
20
+ from wandb.sdk.launch.utils import get_entrypoint_file
21
+ from wandb.sdk.lib.runid import generate_id
22
+
23
+ from .errors import LaunchError
24
+ from .utils import LOG_PREFIX, recursive_macro_sub
25
+
26
+ if TYPE_CHECKING:
27
+ from wandb.sdk.artifacts.artifact import Artifact
28
+
29
+ _logger = logging.getLogger(__name__)
30
+
31
+
32
+ # need to make user root for sagemaker, so users have access to /opt/ml directories
33
+ # that let users create artifacts and access input data
34
+ RESOURCE_UID_MAP = {"local": 1000, "sagemaker": 0}
35
+ IMAGE_TAG_MAX_LENGTH = 32
36
+
37
+
38
+ class LaunchSource(enum.IntEnum):
39
+ """Enumeration of possible sources for a launch project.
40
+
41
+ Attributes:
42
+ DOCKER: Source is a Docker image. This can happen if a user runs
43
+ `wandb launch -d <docker-image>`.
44
+ JOB: Source is a job. This is standard case.
45
+ SCHEDULER: Source is a wandb sweep scheduler command.
46
+ """
47
+
48
+ DOCKER: int = 1
49
+ JOB: int = 2
50
+ SCHEDULER: int = 3
51
+
52
+
53
class LaunchProject:
    """A launch project specification.

    The LaunchProject is initialized from a raw launch spec an internal API
    object. The project encapsulates logic for taking a launch spec and converting
    it into the executable code.

    The LaunchProject needs to ultimately produce a full container spec for
    execution in docker, k8s, sagemaker, or vertex. This container spec includes:
    - container image uri
    - environment variables for configuring wandb etc.
    - entrypoint command and arguments
    - additional arguments specific to the target resource (e.g. instance type, node selector)

    This class is stateful and certain methods can only be called after
    `LaunchProject.fetch_and_validate_project()` has been called.

    Notes on the entrypoint:
    - The entrypoint is the command that will be run inside the container.
    - The LaunchProject stores two entrypoints
        - The job entrypoint is the entrypoint specified in the job's config.
        - The override entrypoint is the entrypoint specified in the launch spec.
    - The override entrypoint takes precedence over the job entrypoint.
    """

    # This init is way to long, and there are too many attributes on this sucker.
    def __init__(
        self,
        uri: Optional[str],
        job: Optional[str],
        api: Api,
        launch_spec: Dict[str, Any],
        target_entity: str,
        target_project: str,
        name: Optional[str],
        docker_config: Dict[str, Any],
        git_info: Dict[str, str],
        overrides: Dict[str, Any],
        resource: str,
        resource_args: Dict[str, Any],
        run_id: Optional[str],
        sweep_id: Optional[str] = None,
    ):
        self.uri = uri
        self.job = job
        if job is not None:
            wandb.termlog(f"{LOG_PREFIX}Launching job: {job}")
        self._job_artifact: Optional[Artifact] = None
        self.api = api
        self.launch_spec = launch_spec
        self.target_entity = target_entity
        self.target_project = target_project.lower()
        self.name = name  # TODO: replace with run_id
        # the builder key can be passed in through the resource args
        # but these resource_args are then passed to the appropriate
        # runner, so we need to pop the builder key out
        resource_args_copy = deepcopy(resource_args)
        resource_args_build = resource_args_copy.get(resource, {}).pop("builder", {})
        self.resource = resource
        self.resource_args = resource_args_copy
        self.sweep_id = sweep_id
        self.author = launch_spec.get("author")
        self.python_version: Optional[str] = launch_spec.get("python_version")
        self._job_dockerfile: Optional[str] = None
        self._job_build_context: Optional[str] = None
        self._job_base_image: Optional[str] = None
        # An accelerator base image may come from either the "accelerator"
        # key or the legacy "cuda" key of the builder config.
        self.accelerator_base_image: Optional[str] = resource_args_build.get(
            "accelerator", {}
        ).get("base_image") or resource_args_build.get("cuda", {}).get("base_image")
        self.docker_image: Optional[str] = docker_config.get(
            "docker_image"
        ) or launch_spec.get("image_uri")
        self.docker_user_id = docker_config.get("user_id", 1000)
        self._entry_point: Optional[EntryPoint] = (
            None  # todo: keep multiple entrypoint support?
        )
        self.init_overrides(overrides)
        self.init_source()
        self.init_git(git_info)
        self.deps_type: Optional[str] = None
        self._runtime: Optional[str] = None
        self.run_id = run_id or generate_id()
        self._queue_name: Optional[str] = None
        self._queue_entity: Optional[str] = None
        self._run_queue_item_id: Optional[str] = None

    def init_source(self) -> None:
        """Set self.source and self.project_dir from the spec's image/job/uri.

        NOTE(review): if none of the three branches match, self.source and
        self.project_dir are left unset — confirm callers always supply one
        of docker_image, job, or a placeholder uri.
        """
        if self.docker_image is not None:
            self.source = LaunchSource.DOCKER
            self.project_dir = None
        elif self.job is not None:
            self.source = LaunchSource.JOB
            self.project_dir = tempfile.mkdtemp()
        elif self.uri and self.uri.startswith("placeholder"):
            self.source = LaunchSource.SCHEDULER
            self.project_dir = os.getcwd()
            self._entry_point = self.override_entrypoint

    def change_project_dir(self, new_dir: str) -> None:
        """Change the project directory to a new directory."""
        # Copy the contents of the old project dir to the new project dir.
        old_dir = self.project_dir
        if old_dir is not None:
            shutil.copytree(
                old_dir,
                new_dir,
                symlinks=True,
                dirs_exist_ok=True,
                ignore=shutil.ignore_patterns("fsmonitor--daemon.ipc", ".git"),
            )
            shutil.rmtree(old_dir)
        self.project_dir = new_dir

    def init_git(self, git_info: Dict[str, str]) -> None:
        """Record the git version and repo from the launch spec's git info."""
        self.git_version = git_info.get("version")
        self.git_repo = git_info.get("repo")

    def init_overrides(self, overrides: Dict[str, Any]) -> None:
        """Initialize override attributes for a launch project."""
        self.overrides = overrides
        self.override_args: List[str] = overrides.get("args", [])
        self.override_config: Dict[str, Any] = overrides.get("run_config", {})
        self.override_artifacts: Dict[str, Any] = overrides.get("artifacts", {})
        self.override_files: Dict[str, Any] = overrides.get("files", {})
        self.override_entrypoint: Optional[EntryPoint] = None
        self.override_dockerfile: Optional[str] = overrides.get("dockerfile")
        override_entrypoint = overrides.get("entry_point")
        if override_entrypoint:
            _logger.info("Adding override entry point")
            self.override_entrypoint = EntryPoint(
                name=get_entrypoint_file(override_entrypoint),
                command=override_entrypoint,
            )

    def __repr__(self) -> str:
        """String representation of LaunchProject."""
        if self.source == LaunchSource.JOB:
            return f"{self.job}"
        return f"{self.uri}"

    @classmethod
    def from_spec(cls, launch_spec: Dict[str, Any], api: Api) -> "LaunchProject":
        """Constructs a LaunchProject instance using a launch spec.

        Arguments:
            launch_spec: Dictionary representation of launch spec
            api: Instance of wandb.apis.internal Api

        Returns:
            An initialized `LaunchProject` object
        """
        name: Optional[str] = None
        if launch_spec.get("name"):
            name = launch_spec["name"]
        return cls(
            launch_spec.get("uri"),
            launch_spec.get("job"),
            api,
            launch_spec,
            launch_spec["entity"],
            launch_spec["project"],
            name,
            launch_spec.get("docker", {}),
            launch_spec.get("git", {}),
            launch_spec.get("overrides", {}),
            launch_spec.get("resource", None),
            launch_spec.get("resource_args", {}),
            launch_spec.get("run_id", None),
            # Fix: sweep_id is Optional[str]; the previous default of {}
            # passed an empty dict whenever the key was absent.
            launch_spec.get("sweep_id"),
        )

    @property
    def job_dockerfile(self) -> Optional[str]:
        return self._job_dockerfile

    @property
    def job_build_context(self) -> Optional[str]:
        return self._job_build_context

    @property
    def job_base_image(self) -> Optional[str]:
        return self._job_base_image

    def set_job_dockerfile(self, dockerfile: str) -> None:
        self._job_dockerfile = dockerfile

    def set_job_build_context(self, build_context: str) -> None:
        self._job_build_context = build_context

    def set_job_base_image(self, base_image: str) -> None:
        self._job_base_image = base_image

    @property
    def image_name(self) -> str:
        """Image name for this project: base image, docker image, or derived from uri/job."""
        if self.job_base_image is not None:
            return self.job_base_image
        if self.docker_image is not None:
            return self.docker_image
        elif self.uri is not None:
            cleaned_uri = self.uri.replace("https://", "/")
            first_sep = cleaned_uri.find("/")
            shortened_uri = cleaned_uri[first_sep:]
            return wandb.util.make_docker_image_name_safe(shortened_uri)
        else:
            # this will always pass since one of these 3 is required
            assert self.job is not None
            return wandb.util.make_docker_image_name_safe(self.job.split(":")[0])

    @property
    def queue_name(self) -> Optional[str]:
        return self._queue_name

    @queue_name.setter
    def queue_name(self, value: str) -> None:
        self._queue_name = value

    @property
    def queue_entity(self) -> Optional[str]:
        return self._queue_entity

    @queue_entity.setter
    def queue_entity(self, value: str) -> None:
        self._queue_entity = value

    @property
    def run_queue_item_id(self) -> Optional[str]:
        return self._run_queue_item_id

    @run_queue_item_id.setter
    def run_queue_item_id(self, value: str) -> None:
        self._run_queue_item_id = value

    def fill_macros(self, image: str) -> Dict[str, Any]:
        """Substitute values for macros in resource arguments.

        Certain macros can be used in resource args. These macros allow the
        user to set resource args dynamically in the context of the
        run being launched. The macros are given in the ${macro} format. The
        following macros are currently supported:

        ${project_name} - the name of the project the run is being launched to.
        ${entity_name} - the owner of the project the run being launched to.
        ${run_id} - the id of the run being launched.
        ${run_name} - the name of the run that is launching.
        ${image_uri} - the URI of the container image for this run.

        Additionally, you may use ${<ENV-VAR-NAME>} to refer to the value of any
        environment variables that you plan to set in the environment of any
        agents that will receive these resource args.

        Calling this method will overwrite the contents of self.resource_args
        with the substituted values.

        Args:
            image (str): The image name to fill in for ${wandb-image}.

        Returns:
            Dict[str, Any]: The resource args with all macros filled in.
        """
        update_dict = {
            "project_name": self.target_project,
            "entity_name": self.target_entity,
            "run_id": self.run_id,
            "run_name": self.name,
            "image_uri": image,
            "author": self.author,
        }
        update_dict.update(os.environ)
        result = recursive_macro_sub(self.resource_args, update_dict)
        # recursive_macro_sub given a dict returns a dict with the same keys
        # but with other input types behaves differently. The cast is for mypy.
        return cast(Dict[str, Any], result)

    def build_required(self) -> bool:
        """Checks the source to see if a build is required."""
        # A pre-built base image or a non-job source never needs a build.
        if self.job_base_image is not None:
            return False
        if self.source != LaunchSource.JOB:
            return True
        return False

    @property
    def docker_image(self) -> Optional[str]:
        """Returns the Docker image associated with this LaunchProject.

        This will only be set if an image_uri is being run outside a job.

        Returns:
            Optional[str]: The Docker image or None if not specified.
        """
        if self._docker_image:
            return self._docker_image
        return None

    @docker_image.setter
    def docker_image(self, value: str) -> None:
        """Sets the Docker image for the project.

        Args:
            value (str): The Docker image to set.

        Raises:
            LaunchError: If the resource is local-process (via the check below).
        """
        self._docker_image = value
        self._ensure_not_docker_image_and_local_process()

    def get_job_entry_point(self) -> Optional["EntryPoint"]:
        """Returns the job entrypoint for the project."""
        # assuming project only has 1 entry point, pull that out
        # tmp fn until we figure out if we want to support multiple entry points or not
        if not self._entry_point:
            if not self.docker_image and not self.job_base_image:
                raise LaunchError(
                    "Project must have at least one entry point unless docker image is specified."
                )
            return None
        return self._entry_point

    def set_job_entry_point(self, command: List[str]) -> "EntryPoint":
        """Set job entrypoint for the project."""
        assert (
            self._entry_point is None
        ), "Cannot set entry point twice. Use LaunchProject.override_entrypoint"
        new_entrypoint = EntryPoint(name=command[-1], command=command)
        self._entry_point = new_entrypoint
        return new_entrypoint

    def fetch_and_validate_project(self) -> None:
        """Fetches a project into a local directory, adds the config values to the directory, and validates the first entrypoint for the project.

        Arguments:
            launch_project: LaunchProject to fetch and validate.
            api: Instance of wandb.apis.internal Api

        Returns:
            A validated `LaunchProject` object.

        """
        if self.source == LaunchSource.DOCKER:
            return
        elif self.source == LaunchSource.JOB:
            self._fetch_job()
        assert self.project_dir is not None

    # Let's make sure we document this very clearly.
    def get_image_source_string(self) -> str:
        """Returns a unique string identifying the source of an image."""
        if self.source == LaunchSource.JOB:
            assert self._job_artifact is not None
            return f"{self._job_artifact.name}:v{self._job_artifact.version}"
        elif self.source == LaunchSource.DOCKER:
            assert isinstance(self.docker_image, str)
            return self.docker_image
        else:
            raise LaunchError(
                "Unknown source type when determining image source string"
            )

    def _ensure_not_docker_image_and_local_process(self) -> None:
        """Ensure that docker image is not specified with local-process resource runner.

        Raises:
            LaunchError: If docker image is specified with local-process resource runner.
        """
        if self.docker_image is not None and self.resource == "local-process":
            raise LaunchError(
                "Cannot specify docker image with local-process resource runner"
            )

    def _fetch_job(self) -> None:
        """Fetches the job details from the public API and configures the launch project.

        Raises:
            LaunchError: If there is an error accessing the job.
        """
        public_api = wandb.apis.public.Api()
        job_dir = tempfile.mkdtemp()
        try:
            job = public_api.job(self.job, path=job_dir)
        except CommError as e:
            msg = e.message
            raise LaunchError(
                f"Error accessing job {self.job}: {msg} on {public_api.settings.get('base_url')}"
            )
        job.configure_launch_project(self)  # Why is this a method of the job?
        self._job_artifact = job._job_artifact

    def get_env_vars_dict(self, api: Api, max_env_length: int) -> Dict[str, str]:
        """Generate environment variables for the project.

        Arguments:
            api: Instance of wandb.apis.internal Api.
            max_env_length: Maximum length of a single env var value; longer
                config/file-override payloads are chunked across several vars.

        Returns:
            Dictionary of environment variables.
        """
        env_vars = {}
        env_vars["WANDB_BASE_URL"] = api.settings("base_url")
        override_api_key = self.launch_spec.get("_wandb_api_key")
        env_vars["WANDB_API_KEY"] = override_api_key or api.api_key
        if self.target_project:
            env_vars["WANDB_PROJECT"] = self.target_project
        env_vars["WANDB_ENTITY"] = self.target_entity
        env_vars["WANDB_LAUNCH"] = "True"
        env_vars["WANDB_RUN_ID"] = self.run_id
        if self.docker_image:
            env_vars["WANDB_DOCKER"] = self.docker_image
        if self.name is not None:
            env_vars["WANDB_NAME"] = self.name
        if "author" in self.launch_spec and not override_api_key:
            env_vars["WANDB_USERNAME"] = self.launch_spec["author"]
        if self.sweep_id:
            env_vars["WANDB_SWEEP_ID"] = self.sweep_id
        if self.launch_spec.get("_resume_count", 0) > 0:
            env_vars["WANDB_RESUME"] = "allow"
        if self.queue_name:
            env_vars[wandb.env.LAUNCH_QUEUE_NAME] = self.queue_name
        if self.queue_entity:
            env_vars[wandb.env.LAUNCH_QUEUE_ENTITY] = self.queue_entity
        if self.run_queue_item_id:
            env_vars[wandb.env.LAUNCH_TRACE_ID] = self.run_queue_item_id

        _inject_wandb_config_env_vars(self.override_config, env_vars, max_env_length)
        _inject_file_overrides_env_vars(self.override_files, env_vars, max_env_length)

        artifacts = {}
        # if we're spinning up a launch process from a job
        # we should tell the run to use that artifact
        if self.job:
            artifacts = {wandb.util.LAUNCH_JOB_ARTIFACT_SLOT_NAME: self.job}
        env_vars["WANDB_ARTIFACTS"] = json.dumps(
            {**artifacts, **self.override_artifacts}
        )
        return env_vars

    def parse_existing_requirements(self) -> str:
        """Build a 'WANDB_ONLY_INCLUDE=...' prefix from the project's requirements.txt.

        Returns an empty string when the project has no requirements.txt.
        """
        import pkg_resources

        requirements_line = ""
        assert self.project_dir is not None
        base_requirements = os.path.join(self.project_dir, "requirements.txt")
        if os.path.exists(base_requirements):
            include_only = set()
            with open(base_requirements) as f:
                # Renamed from `iter` to avoid shadowing the builtin.
                req_iter = pkg_resources.parse_requirements(f)
                while True:
                    try:
                        pkg = next(req_iter)
                        if hasattr(pkg, "name"):
                            name = pkg.name.lower()
                        else:
                            name = str(pkg)
                        include_only.add(shlex_quote(name))
                    except StopIteration:
                        break
                    # Different versions of pkg_resources throw different errors
                    # just catch them all and ignore packages we can't parse
                    except Exception as e:
                        # Logger.warn is a deprecated alias of warning().
                        _logger.warning(f"Unable to parse requirements.txt: {e}")
                        continue
            requirements_line += "WANDB_ONLY_INCLUDE={} ".format(",".join(include_only))
            if "wandb" not in requirements_line:
                wandb.termwarn(f"{LOG_PREFIX}wandb is not present in requirements.txt.")
        return requirements_line
518
+
519
+
520
class EntryPoint:
    """An entry point into a wandb launch specification."""

    def __init__(self, name: Optional[str], command: List[str]):
        self.name = name
        self.command = command

    def update_entrypoint_path(self, new_path: str) -> None:
        """Updates the entrypoint path to a new path."""
        # Only rewrite the simple "<interpreter> <script>" form, where the
        # interpreter is bash or some python variant; anything else is left
        # untouched.
        if len(self.command) != 2:
            return
        interpreter = self.command[0]
        if interpreter == "bash" or interpreter.startswith("python"):
            self.command[1] = new_path
533
+
534
+
535
+ def _inject_wandb_config_env_vars(
536
+ config: Dict[str, Any], env_dict: Dict[str, Any], maximum_env_length: int
537
+ ) -> None:
538
+ str_config = json.dumps(config)
539
+ if len(str_config) <= maximum_env_length:
540
+ env_dict["WANDB_CONFIG"] = str_config
541
+ return
542
+
543
+ chunks = [
544
+ str_config[i : i + maximum_env_length]
545
+ for i in range(0, len(str_config), maximum_env_length)
546
+ ]
547
+ config_chunks_dict = {f"WANDB_CONFIG_{i}": chunk for i, chunk in enumerate(chunks)}
548
+ env_dict.update(config_chunks_dict)
549
+
550
+
551
+ def _inject_file_overrides_env_vars(
552
+ overrides: Dict[str, Any], env_dict: Dict[str, Any], maximum_env_length: int
553
+ ) -> None:
554
+ str_overrides = json.dumps(overrides)
555
+ if len(str_overrides) <= maximum_env_length:
556
+ env_dict["WANDB_LAUNCH_FILE_OVERRIDES"] = str_overrides
557
+ return
558
+
559
+ chunks = [
560
+ str_overrides[i : i + maximum_env_length]
561
+ for i in range(0, len(str_overrides), maximum_env_length)
562
+ ]
563
+ overrides_chunks_dict = {
564
+ f"WANDB_LAUNCH_FILE_OVERRIDES_{i}": chunk for i, chunk in enumerate(chunks)
565
+ }
566
+ env_dict.update(overrides_chunks_dict)
parrot/lib/python3.10/site-packages/wandb/sdk/launch/agent/__init__.py ADDED
@@ -0,0 +1,5 @@
 
 
 
 
 
 
1
from .agent import LaunchAgent

# Self-assignment marks LaunchAgent as an explicit re-export for static
# analyzers (equivalent to `from .agent import LaunchAgent as LaunchAgent`).
LaunchAgent = LaunchAgent

__all__ = ["LaunchAgent"]
parrot/lib/python3.10/site-packages/wandb/sdk/launch/agent/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (243 Bytes). View file
 
parrot/lib/python3.10/site-packages/wandb/sdk/launch/agent/__pycache__/agent.cpython-310.pyc ADDED
Binary file (24.6 kB). View file
 
parrot/lib/python3.10/site-packages/wandb/sdk/launch/agent/__pycache__/config.cpython-310.pyc ADDED
Binary file (9.26 kB). View file
 
parrot/lib/python3.10/site-packages/wandb/sdk/launch/agent/__pycache__/job_status_tracker.cpython-310.pyc ADDED
Binary file (2.42 kB). View file
 
parrot/lib/python3.10/site-packages/wandb/sdk/launch/agent/__pycache__/run_queue_item_file_saver.cpython-310.pyc ADDED
Binary file (1.73 kB). View file
 
parrot/lib/python3.10/site-packages/wandb/sdk/launch/agent/agent.py ADDED
@@ -0,0 +1,924 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """Implementation of launch agent."""
2
+
3
+ import asyncio
4
+ import logging
5
+ import os
6
+ import pprint
7
+ import threading
8
+ import time
9
+ import traceback
10
+ from dataclasses import dataclass
11
+ from multiprocessing import Event
12
+ from typing import Any, Dict, List, Optional, Tuple, Union
13
+
14
+ import yaml
15
+
16
+ import wandb
17
+ from wandb.apis.internal import Api
18
+ from wandb.errors import CommError
19
+ from wandb.sdk.launch._launch_add import launch_add
20
+ from wandb.sdk.launch.runner.local_container import LocalSubmittedRun
21
+ from wandb.sdk.launch.runner.local_process import LocalProcessRunner
22
+ from wandb.sdk.launch.sweeps.scheduler import Scheduler
23
+ from wandb.sdk.launch.utils import LAUNCH_CONFIG_FILE, resolve_build_and_registry_config
24
+ from wandb.sdk.lib import runid
25
+
26
+ from .. import loader
27
+ from .._project_spec import LaunchProject
28
+ from ..errors import LaunchDockerError, LaunchError
29
+ from ..utils import (
30
+ LAUNCH_DEFAULT_PROJECT,
31
+ LOG_PREFIX,
32
+ PROJECT_SYNCHRONOUS,
33
+ event_loop_thread_exec,
34
+ )
35
+ from .job_status_tracker import JobAndRunStatusTracker
36
+ from .run_queue_item_file_saver import RunQueueItemFileSaver
37
+
38
# Seconds between run-queue polls when the agent has no known pending jobs.
AGENT_POLLING_INTERVAL = 10
RECEIVED_JOB_POLLING_INTERVAL = 0.0  # more frequent when we know we have jobs

# Agent status strings reported to the backend.
AGENT_POLLING = "POLLING"
AGENT_RUNNING = "RUNNING"
AGENT_KILLED = "KILLED"

# job_type assigned to the agent's own wandb run (see _init_agent_run);
# presumably keeps it out of normal run listings — confirm against UI behavior.
HIDDEN_AGENT_RUN_TYPE = "sweep-controller"

# Maximum resume count — usage not visible in this chunk; presumably caps
# how many times a failed run is re-queued before giving up.
MAX_RESUME_COUNT = 5

# Grace period (seconds) related to fetching run info after launch —
# usage not visible in this chunk.
RUN_INFO_GRACE_PERIOD = 60

# Default seconds to wait on a stopped run (overridable via the
# "stopped_run_timeout" agent config key — see LaunchAgent.__init__).
DEFAULT_STOPPED_RUN_TIMEOUT = 60

# Status-print intervals (seconds) for normal and verbose modes.
DEFAULT_PRINT_INTERVAL = 5 * 60
VERBOSE_PRINT_INTERVAL = 20

# The run-start timeout may be overridden via environment variable; an
# unparsable value fails fast at import time.
_env_timeout = os.environ.get("WANDB_LAUNCH_START_TIMEOUT")
if _env_timeout:
    try:
        RUN_START_TIMEOUT = float(_env_timeout)
    except ValueError:
        raise LaunchError(
            f"Invalid value for WANDB_LAUNCH_START_TIMEOUT: {_env_timeout}"
        )
else:
    RUN_START_TIMEOUT = 60 * 30  # default 30 minutes

_logger = logging.getLogger(__name__)
68
+
69
+
70
@dataclass
class JobSpecAndQueue:
    """Pairs a run-queue job spec with the name of the queue it came from."""

    # Raw job/run spec dictionary popped from the run queue.
    job: Dict[str, Any]
    # Name of the queue the job was popped from.
    queue: str
74
+
75
+
76
+ def _convert_access(access: str) -> str:
77
+ """Convert access string to a value accepted by wandb."""
78
+ access = access.upper()
79
+ assert (
80
+ access == "PROJECT" or access == "USER"
81
+ ), "Queue access must be either project or user"
82
+ return access
83
+
84
+
85
+ def _max_from_config(
86
+ config: Dict[str, Any], key: str, default: int = 1
87
+ ) -> Union[int, float]:
88
+ """Get an integer from the config, or float.inf if -1.
89
+
90
+ Utility for parsing integers from the agent config with a default, infinity
91
+ handling, and integer parsing. Raises more informative error if parse error.
92
+ """
93
+ try:
94
+ val = config.get(key)
95
+ if val is None:
96
+ val = default
97
+ max_from_config = int(val)
98
+ except ValueError as e:
99
+ raise LaunchError(
100
+ f"Error when parsing LaunchAgent config key: ['{key}': "
101
+ f"{config.get(key)}]. Error: {str(e)}"
102
+ )
103
+ if max_from_config == -1:
104
+ return float("inf")
105
+
106
+ if max_from_config < 0:
107
+ raise LaunchError(
108
+ f"Error when parsing LaunchAgent config key: ['{key}': "
109
+ f"{config.get(key)}]. Error: negative value."
110
+ )
111
+ return max_from_config
112
+
113
+
114
class InternalAgentLogger:
    """Logger for agent-internal messages.

    Always writes to the module logger; additionally echoes each message to
    the user's terminal when verbosity is 2 or higher.
    """

    def __init__(self, verbosity: int = 0):
        # Verbosity >= 2 means debug-level output should also go to the terminal.
        self._print_to_terminal = verbosity >= 2

    def error(self, message: str) -> None:
        if self._print_to_terminal:
            wandb.termerror(f"{LOG_PREFIX}{message}")
        _logger.error(f"{LOG_PREFIX}{message}")

    def warn(self, message: str) -> None:
        if self._print_to_terminal:
            wandb.termwarn(f"{LOG_PREFIX}{message}")
        # Fix: Logger.warn is a deprecated alias of Logger.warning.
        _logger.warning(f"{LOG_PREFIX}{message}")

    def info(self, message: str) -> None:
        if self._print_to_terminal:
            wandb.termlog(f"{LOG_PREFIX}{message}")
        _logger.info(f"{LOG_PREFIX}{message}")

    def debug(self, message: str) -> None:
        if self._print_to_terminal:
            wandb.termlog(f"{LOG_PREFIX}{message}")
        _logger.debug(f"{LOG_PREFIX}{message}")
137
+
138
+
139
def construct_agent_configs(
    launch_config: Optional[Dict] = None,
    build_config: Optional[Dict] = None,
) -> Tuple[Optional[Dict[str, Any]], Dict[str, Any], Dict[str, Any]]:
    """Resolve environment, builder, and registry configs for the agent.

    Values taken from ``launch_config`` replace the ``build_config`` argument,
    then both are merged with defaults read from ``LAUNCH_CONFIG_FILE`` (if it
    exists) by ``resolve_build_and_registry_config``.

    Returns:
        Tuple of (environment_config, build_config, registry_config).
    """
    registry_config = None
    environment_config = None
    if launch_config is not None:
        # NOTE(review): this overwrites the build_config argument even when
        # launch_config has no "builder" key (setting it to None) — presumably
        # intentional precedence; confirm with callers.
        build_config = launch_config.get("builder")
        registry_config = launch_config.get("registry")

    default_launch_config = None
    if os.path.exists(os.path.expanduser(LAUNCH_CONFIG_FILE)):
        with open(os.path.expanduser(LAUNCH_CONFIG_FILE)) as f:
            default_launch_config = (
                yaml.safe_load(f) or {}
            )  # In case the config is empty, we want it to be {} instead of None.
            # The environment config is only read from the on-disk defaults
            # file — it is never taken from the launch_config argument.
            environment_config = default_launch_config.get("environment")

    build_config, registry_config = resolve_build_and_registry_config(
        default_launch_config, build_config, registry_config
    )

    return environment_config, build_config, registry_config
162
+
163
+
164
+ class LaunchAgent:
165
+ """Launch agent class which polls run given run queues and launches runs for wandb launch."""
166
+
167
+ _instance = None
168
+
169
+ def __new__(cls, *args: Any, **kwargs: Any) -> "LaunchAgent":
170
+ """Create a new instance of the LaunchAgent.
171
+
172
+ This method ensures that only one instance of the LaunchAgent is created.
173
+ This is done so that information about the agent can be accessed from
174
+ elsewhere in the library.
175
+ """
176
+ if cls._instance is None:
177
+ cls._instance = super().__new__(cls)
178
+ return cls._instance
179
+
180
+ @classmethod
181
+ def name(cls) -> str:
182
+ """Return the name of the agent."""
183
+ if cls._instance is None:
184
+ raise LaunchError("LaunchAgent has not been initialized")
185
+ name = cls._instance._name
186
+ if isinstance(name, str):
187
+ return name
188
+ raise LaunchError(f"Found invalid name for agent {name}")
189
+
190
+ @classmethod
191
+ def initialized(cls) -> bool:
192
+ """Return whether the agent is initialized."""
193
+ return cls._instance is not None
194
+
195
+ def __init__(self, api: Api, config: Dict[str, Any]):
196
+ """Initialize a launch agent.
197
+
198
+ Arguments:
199
+ api: Api object to use for making requests to the backend.
200
+ config: Config dictionary for the agent.
201
+ """
202
+ self._entity = config["entity"]
203
+ self._project = LAUNCH_DEFAULT_PROJECT
204
+ self._api = api
205
+ self._base_url = self._api.settings().get("base_url")
206
+ self._ticks = 0
207
+ self._jobs: Dict[int, JobAndRunStatusTracker] = {}
208
+ self._jobs_lock = threading.Lock()
209
+ self._jobs_event = Event()
210
+ self._jobs_event.set()
211
+ self._cwd = os.getcwd()
212
+ self._namespace = runid.generate_id()
213
+ self._access = _convert_access("project")
214
+ self._max_jobs = _max_from_config(config, "max_jobs")
215
+ self._max_schedulers = _max_from_config(config, "max_schedulers")
216
+ self._secure_mode = config.get("secure_mode", False)
217
+ self._verbosity = config.get("verbosity", 0)
218
+ self._internal_logger = InternalAgentLogger(verbosity=self._verbosity)
219
+ self._last_status_print_time = 0.0
220
+ self.default_config: Dict[str, Any] = config
221
+ self._stopped_run_timeout = config.get(
222
+ "stopped_run_timeout", DEFAULT_STOPPED_RUN_TIMEOUT
223
+ )
224
+ self._known_warnings: List[str] = []
225
+
226
+ # Get agent version from env var if present, otherwise wandb version
227
+ self.version: str = "wandb@" + wandb.__version__
228
+ env_agent_version = os.environ.get("WANDB_AGENT_VERSION")
229
+ if env_agent_version and env_agent_version != "wandb-launch-agent":
230
+ self.version = env_agent_version
231
+
232
+ # serverside creation
233
+ self.gorilla_supports_agents = (
234
+ self._api.launch_agent_introspection() is not None
235
+ )
236
+ self._gorilla_supports_fail_run_queue_items = (
237
+ self._api.fail_run_queue_item_introspection()
238
+ )
239
+
240
+ self._queues: List[str] = config.get("queues", ["default"])
241
+
242
+ # remove project field from agent config before sending to back end
243
+ # because otherwise it shows up in the config in the UI and confuses users
244
+ sent_config = config.copy()
245
+ if "project" in sent_config:
246
+ del sent_config["project"]
247
+
248
+ create_response = self._api.create_launch_agent(
249
+ self._entity,
250
+ self._project,
251
+ self._queues,
252
+ sent_config,
253
+ self.version,
254
+ self.gorilla_supports_agents,
255
+ )
256
+ self._id = create_response["launchAgentId"]
257
+ if self._api.entity_is_team(self._entity):
258
+ wandb.termwarn(
259
+ f"{LOG_PREFIX}Agent is running on team entity ({self._entity}). Members of this team will be able to run code on this device."
260
+ )
261
+
262
+ agent_response = self._api.get_launch_agent(
263
+ self._id, self.gorilla_supports_agents
264
+ )
265
+ self._name = agent_response["name"]
266
+ self._init_agent_run()
267
+
268
+ def _is_scheduler_job(self, run_spec: Dict[str, Any]) -> bool:
269
+ """Determine whether a job/runSpec is a sweep scheduler."""
270
+ if not run_spec:
271
+ self._internal_logger.debug(
272
+ "Received runSpec in _is_scheduler_job that was empty"
273
+ )
274
+
275
+ if run_spec.get("uri") != Scheduler.PLACEHOLDER_URI:
276
+ return False
277
+
278
+ if run_spec.get("resource") == "local-process":
279
+ # Any job pushed to a run queue that has a scheduler uri is
280
+ # allowed to use local-process
281
+ if run_spec.get("job"):
282
+ return True
283
+
284
+ # If a scheduler is local-process and run through CLI, also
285
+ # confirm command is in format: [wandb scheduler <sweep>]
286
+ cmd = run_spec.get("overrides", {}).get("entry_point", [])
287
+ if len(cmd) < 3:
288
+ return False
289
+
290
+ if cmd[:2] != ["wandb", "scheduler"]:
291
+ return False
292
+
293
+ return True
294
+
295
+ async def fail_run_queue_item(
296
+ self,
297
+ run_queue_item_id: str,
298
+ message: str,
299
+ phase: str,
300
+ files: Optional[List[str]] = None,
301
+ ) -> None:
302
+ if self._gorilla_supports_fail_run_queue_items:
303
+ fail_rqi = event_loop_thread_exec(self._api.fail_run_queue_item)
304
+ await fail_rqi(run_queue_item_id, message, phase, files)
305
+
306
+ def _init_agent_run(self) -> None:
307
+ # TODO: has it been long enough that all backends support agents?
308
+ self._wandb_run = None
309
+
310
+ if self.gorilla_supports_agents:
311
+ settings = wandb.Settings(
312
+ silent=True, disable_git=True, disable_job_creation=True
313
+ )
314
+ self._wandb_run = wandb.init(
315
+ project=self._project,
316
+ entity=self._entity,
317
+ settings=settings,
318
+ id=self._name,
319
+ job_type=HIDDEN_AGENT_RUN_TYPE,
320
+ )
321
+
322
+ @property
323
+ def thread_ids(self) -> List[int]:
324
+ """Returns a list of keys running thread ids for the agent."""
325
+ with self._jobs_lock:
326
+ return list(self._jobs.keys())
327
+
328
+ @property
329
+ def num_running_schedulers(self) -> int:
330
+ """Return just the number of schedulers."""
331
+ with self._jobs_lock:
332
+ return len([x for x in self._jobs if self._jobs[x].is_scheduler])
333
+
334
+ @property
335
+ def num_running_jobs(self) -> int:
336
+ """Return the number of jobs not including schedulers."""
337
+ with self._jobs_lock:
338
+ return len([x for x in self._jobs if not self._jobs[x].is_scheduler])
339
+
340
+ async def pop_from_queue(self, queue: str) -> Any:
341
+ """Pops an item off the runqueue to run as a job.
342
+
343
+ Arguments:
344
+ queue: Queue to pop from.
345
+
346
+ Returns:
347
+ Item popped off the queue.
348
+
349
+ Raises:
350
+ Exception: if there is an error popping from the queue.
351
+ """
352
+ try:
353
+ pop = event_loop_thread_exec(self._api.pop_from_run_queue)
354
+ ups = await pop(
355
+ queue,
356
+ entity=self._entity,
357
+ project=self._project,
358
+ agent_id=self._id,
359
+ )
360
+ return ups
361
+ except Exception as e:
362
+ print("Exception:", e)
363
+ return None
364
+
365
+ def print_status(self) -> None:
366
+ """Prints the current status of the agent."""
367
+ self._last_status_print_time = time.time()
368
+ output_str = "agent "
369
+ if self._name:
370
+ output_str += f"{self._name} "
371
+ if self.num_running_jobs < self._max_jobs:
372
+ output_str += f"polling on queues {','.join(self._queues)}, "
373
+ output_str += (
374
+ f"running {self.num_running_jobs} out of a maximum of {self._max_jobs} jobs"
375
+ )
376
+
377
+ wandb.termlog(f"{LOG_PREFIX}{output_str}")
378
+ if self.num_running_jobs > 0:
379
+ output_str += f": {','.join(str(job_id) for job_id in self.thread_ids)}"
380
+
381
+ _logger.info(output_str)
382
+
383
+ async def update_status(self, status: str) -> None:
384
+ """Update the status of the agent.
385
+
386
+ Arguments:
387
+ status: Status to update the agent to.
388
+ """
389
+ _update_status = event_loop_thread_exec(self._api.update_launch_agent_status)
390
+ update_ret = await _update_status(
391
+ self._id, status, self.gorilla_supports_agents
392
+ )
393
+ if not update_ret["success"]:
394
+ wandb.termerror(f"{LOG_PREFIX}Failed to update agent status to {status}")
395
+
396
+ def _check_run_exists_and_inited(
397
+ self, entity: str, project: str, run_id: str, rqi_id: str
398
+ ) -> bool:
399
+ """Checks the stateof the run to ensure it has been inited. Note this will not behave well with resuming."""
400
+ # Checks the _wandb key in the run config for the run queue item id. If it exists, the
401
+ # submitted run definitely called init. Falls back to checking state of run.
402
+ # TODO: handle resuming runs
403
+
404
+ # Sweep runs exist but are in pending state, normal launch runs won't exist
405
+ # so will raise a CommError.
406
+ try:
407
+ run_state = self._api.get_run_state(entity, project, run_id)
408
+ if run_state.lower() != "pending":
409
+ return True
410
+ except CommError:
411
+ self._internal_logger.info(
412
+ f"Run {entity}/{project}/{run_id} with rqi id: {rqi_id} did not have associated run",
413
+ )
414
+ return False
415
+
416
    async def finish_thread_id(
        self,
        thread_id: int,
        exception: Optional[Union[Exception, LaunchDockerError]] = None,
    ) -> None:
        """Removes the job from our list for now.

        Arguments:
            thread_id: Key of the tracker in self._jobs (the run queue item id).
            exception: Exception raised while running the job, if any.
        """
        with self._jobs_lock:
            job_and_run_status = self._jobs[thread_id]
        if (
            job_and_run_status.entity is not None
            and job_and_run_status.entity != self._entity
        ):
            # Runs on a different entity cannot be checked with this agent's API.
            self._internal_logger.info(
                "Skipping check for completed run status because run is on a different entity than agent",
            )
        elif exception is not None:
            # The job raised: persist the traceback and fail the queue item.
            tb_str = traceback.format_exception(
                type(exception), value=exception, tb=exception.__traceback__
            )
            fnames = job_and_run_status.saver.save_contents(
                "".join(tb_str), "error.log", "error"
            )
            await self.fail_run_queue_item(
                job_and_run_status.run_queue_item_id,
                str(exception),
                job_and_run_status.err_stage,
                fnames,
            )
        elif job_and_run_status.project is None or job_and_run_status.run_id is None:
            # Tracker never received run info; treat as an agent-phase error.
            self._internal_logger.info(
                f"called finish_thread_id on thread whose tracker has no project or run id. RunQueueItemID: {job_and_run_status.run_queue_item_id}",
            )
            wandb.termerror(
                "Missing project or run id on thread called finish thread id"
            )
            await self.fail_run_queue_item(
                job_and_run_status.run_queue_item_id,
                "submitted job was finished without assigned project or run id",
                "agent",
            )
        elif job_and_run_status.run is not None:
            called_init = False
            # We do some weird stuff here getting run info to check for a
            # created in run in W&B.
            #
            # We retry for 60 seconds with an exponential backoff in case
            # upsert run is taking a while.
            logs = None
            interval = 1
            while True:
                called_init = self._check_run_exists_and_inited(
                    self._entity,
                    job_and_run_status.project,
                    job_and_run_status.run_id,
                    job_and_run_status.run_queue_item_id,
                )
                if called_init or interval > RUN_INFO_GRACE_PERIOD:
                    break
                if not called_init:
                    # Fetch the logs now if we don't get run info on the
                    # first try, in case the logs are cleaned from the runner
                    # environment (e.g. k8s) during the run info grace period.
                    if interval == 1:
                        logs = await job_and_run_status.run.get_logs()
                    await asyncio.sleep(interval)
                    interval *= 2
            if not called_init:
                # The job never registered a run with W&B: fail the queue item,
                # attaching runner logs when we managed to capture them.
                fnames = None
                if job_and_run_status.completed_status == "finished":
                    _msg = "The submitted job exited successfully but failed to call wandb.init"
                else:
                    _msg = "The submitted run was not successfully started"
                if logs:
                    fnames = job_and_run_status.saver.save_contents(
                        logs, "error.log", "error"
                    )
                await self.fail_run_queue_item(
                    job_and_run_status.run_queue_item_id, _msg, "run", fnames
                )
        else:
            # No exception and no run: an agent bug worth reporting to sentry.
            self._internal_logger.info(
                f"Finish thread id {thread_id} had no exception and no run"
            )
            wandb._sentry.exception(
                "launch agent called finish thread id on thread without run or exception"
            )

        # TODO: keep logs or something for the finished jobs
        with self._jobs_lock:
            del self._jobs[thread_id]

        # update status back to polling if no jobs are running
        if len(self.thread_ids) == 0:
            await self.update_status(AGENT_POLLING)
510
+
511
    async def run_job(
        self, job: Dict[str, Any], queue: str, file_saver: RunQueueItemFileSaver
    ) -> None:
        """Set up project and run the job.

        Arguments:
            job: Job to run.
            queue: Name of the queue the job was popped from.
            file_saver: Saver used to attach error files to the run queue item.
        """
        _msg = f"{LOG_PREFIX}Launch agent received job:\n{pprint.pformat(job)}\n"
        wandb.termlog(_msg)
        _logger.info(_msg)
        # update agent status
        await self.update_status(AGENT_RUNNING)

        # parse job
        self._internal_logger.info("Parsing launch spec")
        launch_spec = job["runSpec"]

        # Abort if this job attempts to override secure mode
        self._assert_secure(launch_spec)
        job_tracker = JobAndRunStatusTracker(job["runQueueItemId"], queue, file_saver)

        # NOTE(review): the created Task is not stored; asyncio keeps only weak
        # references to tasks, so confirm something else keeps it alive for the
        # duration of the job.
        asyncio.create_task(
            self.task_run_job(
                launch_spec,
                job,
                self.default_config,
                self._api,
                job_tracker,
            )
        )
542
+
543
+ def _assert_secure(self, launch_spec: Dict[str, Any]) -> None:
544
+ """If secure mode is set, make sure no vulnerable keys are overridden."""
545
+ if not self._secure_mode:
546
+ return
547
+ k8s_config = launch_spec.get("resource_args", {}).get("kubernetes", {})
548
+
549
+ pod_secure_keys = ["hostPID", "hostIPC", "hostNetwork", "initContainers"]
550
+ pod_spec = k8s_config.get("spec", {}).get("template", {}).get("spec", {})
551
+ for key in pod_secure_keys:
552
+ if key in pod_spec:
553
+ raise ValueError(
554
+ f'This agent is configured to lock "{key}" in pod spec '
555
+ "but the job specification attempts to override it."
556
+ )
557
+
558
+ container_specs = pod_spec.get("containers", [])
559
+ for container_spec in container_specs:
560
+ if "command" in container_spec:
561
+ raise ValueError(
562
+ 'This agent is configured to lock "command" in container spec '
563
+ "but the job specification attempts to override it."
564
+ )
565
+
566
+ if launch_spec.get("overrides", {}).get("entry_point"):
567
+ raise ValueError(
568
+ 'This agent is configured to lock the "entrypoint" override '
569
+ "but the job specification attempts to override it."
570
+ )
571
+
572
    async def loop(self) -> None:
        """Loop infinitely to poll for jobs and run them.

        Raises:
            KeyboardInterrupt: if the agent is requested to stop.
        """
        self.print_status()
        if self._verbosity == 0:
            print_interval = DEFAULT_PRINT_INTERVAL
        else:
            print_interval = VERBOSE_PRINT_INTERVAL
        try:
            while True:
                job = None
                self._ticks += 1
                # Poll backend for agent state; the UI can request shutdown.
                agent_response = self._api.get_launch_agent(
                    self._id, self.gorilla_supports_agents
                )
                if agent_response["stopPolling"]:
                    # shutdown process and all jobs if requested from ui
                    raise KeyboardInterrupt
                if self.num_running_jobs < self._max_jobs:
                    # only check for new jobs if we're not at max
                    job_and_queue = await self.get_job_and_queue()
                    # these will either both be None, or neither will be None
                    if job_and_queue is not None:
                        job = job_and_queue.job
                        queue = job_and_queue.queue
                        try:
                            file_saver = RunQueueItemFileSaver(
                                self._wandb_run, job["runQueueItemId"]
                            )
                            if self._is_scheduler_job(job.get("runSpec", {})):
                                # If job is a scheduler, and we are already at the cap, ignore,
                                # don't ack, and it will be pushed back onto the queue in 1 min
                                if self.num_running_schedulers >= self._max_schedulers:
                                    wandb.termwarn(
                                        f"{LOG_PREFIX}Agent already running the maximum number "
                                        f"of sweep schedulers: {self._max_schedulers}. To set "
                                        "this value use `max_schedulers` key in the agent config"
                                    )
                                    continue
                            await self.run_job(job, queue, file_saver)
                        except Exception as e:
                            wandb.termerror(
                                f"{LOG_PREFIX}Error running job: {traceback.format_exc()}"
                            )
                            wandb._sentry.exception(e)

                            # always the first phase, because we only enter phase 2 within the thread
                            files = file_saver.save_contents(
                                contents=traceback.format_exc(),
                                fname="error.log",
                                file_sub_type="error",
                            )
                            await self.fail_run_queue_item(
                                run_queue_item_id=job["runQueueItemId"],
                                message=str(e),
                                phase="agent",
                                files=files,
                            )

                # Every other tick, refresh agent status and maybe re-print.
                if self._ticks % 2 == 0:
                    if len(self.thread_ids) == 0:
                        await self.update_status(AGENT_POLLING)
                    else:
                        await self.update_status(AGENT_RUNNING)
                    if time.time() - self._last_status_print_time > print_interval:
                        self.print_status()

                if self.num_running_jobs == self._max_jobs or job is None:
                    # all threads busy or did not receive job
                    await asyncio.sleep(AGENT_POLLING_INTERVAL)
                else:
                    await asyncio.sleep(RECEIVED_JOB_POLLING_INTERVAL)

        except KeyboardInterrupt:
            await self.update_status(AGENT_KILLED)
            wandb.termlog(f"{LOG_PREFIX}Shutting down, active jobs:")
            self.print_status()
        finally:
            # Signal job watch loops (see _task_run_job) to wind down.
            self._jobs_event.clear()
654
+
655
+ # Threaded functions
656
    async def task_run_job(
        self,
        launch_spec: Dict[str, Any],
        job: Dict[str, Any],
        default_config: Dict[str, Any],
        api: Api,
        job_tracker: JobAndRunStatusTracker,
    ) -> None:
        """Run one queued job to completion and always finalize its tracker.

        Registers the tracker under the run queue item id, delegates to
        _task_run_job, and passes any raised exception to finish_thread_id so
        the queue item can be failed appropriately.
        """
        rqi_id = job["runQueueItemId"]
        assert rqi_id
        exception: Optional[Union[LaunchDockerError, Exception]] = None
        try:
            with self._jobs_lock:
                self._jobs[rqi_id] = job_tracker
            await self._task_run_job(
                launch_spec, job, default_config, api, rqi_id, job_tracker
            )
        except LaunchDockerError as e:
            # Docker errors already printed details; keep the message short.
            wandb.termerror(
                f"{LOG_PREFIX}agent {self._name} encountered an issue while starting Docker, see above output for details."
            )
            exception = e
            wandb._sentry.exception(e)
        except LaunchError as e:
            wandb.termerror(f"{LOG_PREFIX}Error running job: {e}")
            exception = e
            wandb._sentry.exception(e)
        except Exception as e:
            wandb.termerror(f"{LOG_PREFIX}Error running job: {traceback.format_exc()}")
            exception = e
            wandb._sentry.exception(e)
        finally:
            # Always remove the tracker and (if needed) fail the queue item.
            await self.finish_thread_id(rqi_id, exception)
689
+
690
    async def _task_run_job(
        self,
        launch_spec: Dict[str, Any],
        job: Dict[str, Any],
        default_config: Dict[str, Any],
        api: Api,
        thread_id: int,
        job_tracker: JobAndRunStatusTracker,
    ) -> None:
        """Build (if needed), submit, and watch a single launch job.

        Acks the queue item, validates sweep health, resolves environment /
        registry / builder / runner from config, then polls the submitted run
        until it finishes or the agent shuts down.
        """
        project = LaunchProject.from_spec(launch_spec, api)
        self._set_queue_and_rqi_in_project(project, job, job_tracker.queue)
        ack = event_loop_thread_exec(api.ack_run_queue_item)
        await ack(job["runQueueItemId"], project.run_id)
        # don't launch sweep runs if the sweep isn't healthy
        await self.check_sweep_state(launch_spec, api)

        job_tracker.update_run_info(project)
        self._internal_logger.info("Fetching and validating project...")
        project.fetch_and_validate_project()
        self._internal_logger.info("Fetching resource...")
        resource = launch_spec.get("resource") or "local-container"
        backend_config: Dict[str, Any] = {
            PROJECT_SYNCHRONOUS: False,  # agent always runs async
        }
        self._internal_logger.info("Loading backend")
        override_build_config = launch_spec.get("builder")

        _, build_config, registry_config = construct_agent_configs(
            default_config, override_build_config
        )
        image_uri = project.docker_image or project.job_base_image
        entrypoint = project.get_job_entry_point()
        environment = loader.environment_from_config(
            default_config.get("environment", {})
        )
        registry = loader.registry_from_config(registry_config, environment)
        builder = loader.builder_from_config(build_config, environment, registry)
        backend = loader.runner_from_config(
            resource, api, backend_config, environment, registry
        )
        # Only build an image when none was supplied and the runner needs one.
        if not (
            project.docker_image
            or project.job_base_image
            or isinstance(backend, LocalProcessRunner)
        ):
            assert entrypoint is not None
            image_uri = await builder.build_image(project, entrypoint, job_tracker)

        self._internal_logger.info("Backend loaded...")
        if isinstance(backend, LocalProcessRunner):
            # Local-process runs may have no image at all.
            run = await backend.run(project, image_uri)
        else:
            assert image_uri
            run = await backend.run(project, image_uri)
        if self._is_scheduler_job(launch_spec):
            with self._jobs_lock:
                self._jobs[thread_id].is_scheduler = True
            wandb.termlog(
                f"{LOG_PREFIX}Preparing to run sweep scheduler "
                f"({self.num_running_schedulers}/{self._max_schedulers})"
            )

        if not run:
            with self._jobs_lock:
                job_tracker.failed_to_start = True
            return
        with self._jobs_lock:
            job_tracker.run = run
        start_time = time.time()
        stopped_time: Optional[float] = None
        # Poll while the agent is alive (_jobs_event is cleared on shutdown).
        while self._jobs_event.is_set():
            # If run has failed to start before timeout, kill it
            state = (await run.get_status()).state
            if state == "starting" and RUN_START_TIMEOUT > 0:
                if time.time() - start_time > RUN_START_TIMEOUT:
                    await run.cancel()
                    raise LaunchError(
                        f"Run failed to start within {RUN_START_TIMEOUT} seconds. "
                        "If you want to increase this timeout, set WANDB_LAUNCH_START_TIMEOUT "
                        "to a larger value."
                    )
            if await self._check_run_finished(job_tracker, launch_spec):
                return
            if await job_tracker.check_wandb_run_stopped(self._api):
                # Grace period between a stop request and a forced cancel.
                if stopped_time is None:
                    stopped_time = time.time()
                else:
                    if time.time() - stopped_time > self._stopped_run_timeout:
                        await run.cancel()
            await asyncio.sleep(AGENT_POLLING_INTERVAL)

        # temp: for local, kill all jobs. we don't yet have good handling for different
        # types of runners in general
        if isinstance(run, LocalSubmittedRun) and run._command_proc is not None:
            run._command_proc.kill()
785
+
786
+ async def check_sweep_state(self, launch_spec: Dict[str, Any], api: Api) -> None:
787
+ """Check the state of a sweep before launching a run for the sweep."""
788
+ if launch_spec.get("sweep_id"):
789
+ try:
790
+ get_sweep_state = event_loop_thread_exec(api.get_sweep_state)
791
+ state = await get_sweep_state(
792
+ sweep=launch_spec["sweep_id"],
793
+ entity=launch_spec["entity"],
794
+ project=launch_spec["project"],
795
+ )
796
+ except Exception as e:
797
+ self._internal_logger.debug(f"Fetch sweep state error: {e}")
798
+ state = None
799
+
800
+ if state != "RUNNING" and state != "PAUSED":
801
+ raise LaunchError(
802
+ f"Launch agent picked up sweep job, but sweep ({launch_spec['sweep_id']}) was in a terminal state ({state})"
803
+ )
804
+
805
    async def _check_run_finished(
        self, job_tracker: JobAndRunStatusTracker, launch_spec: Dict[str, Any]
    ) -> bool:
        """Return True when the tracked run has reached a terminal state.

        Also forwards runner warnings to the backend, requeues preempted runs,
        and cancels the sweep when a scheduler run fails.
        """
        if job_tracker.completed_status:
            return True

        # the run can be done before the run has started
        # but can also be none if the run failed to start
        # so if there is no run, either the run hasn't started yet
        # or it has failed
        if job_tracker.run is None:
            if job_tracker.failed_to_start:
                return True
            return False

        known_error = False
        try:
            run = job_tracker.run
            status = await run.get_status()
            state = status.state

            # Push any new warnings to the backend, de-duplicated locally; on
            # failure the warning is forgotten so it can be retried next poll.
            for warning in status.messages:
                if warning not in self._known_warnings:
                    self._known_warnings.append(warning)
                    success = self._api.update_run_queue_item_warning(
                        job_tracker.run_queue_item_id,
                        warning,
                        "Kubernetes",
                        [],
                    )
                    if not success:
                        _logger.warning(
                            f"Error adding warning {warning} to run queue item {job_tracker.run_queue_item_id}"
                        )
                        self._known_warnings.remove(warning)

            # Preempted runs owned by this agent's entity are requeued (up to
            # MAX_RESUME_COUNT times).
            if state == "preempted" and job_tracker.entity == self._entity:
                config = launch_spec.copy()
                config["run_id"] = job_tracker.run_id
                config["_resume_count"] = config.get("_resume_count", 0) + 1
                with self._jobs_lock:
                    job_tracker.completed_status = state
                if config["_resume_count"] > MAX_RESUME_COUNT:
                    wandb.termlog(
                        f"{LOG_PREFIX}Run {job_tracker.run_id} has already resumed {MAX_RESUME_COUNT} times."
                    )
                    return True
                wandb.termlog(
                    f"{LOG_PREFIX}Run {job_tracker.run_id} was preempted, requeueing..."
                )

                if "sweep_id" in config:
                    # allow resumed runs from sweeps that have already completed by removing
                    # the sweep id before pushing to queue
                    del config["sweep_id"]

                launch_add(
                    config=config,
                    project_queue=self._project,
                    queue_name=job_tracker.queue,
                )
                return True
            # TODO change these statuses to an enum
            if state in ["stopped", "failed", "finished", "preempted"]:
                if job_tracker.is_scheduler:
                    wandb.termlog(f"{LOG_PREFIX}Scheduler finished with ID: {run.id}")
                    if state == "failed":
                        # on fail, update sweep state. scheduler run_id should == sweep_id
                        try:
                            self._api.set_sweep_state(
                                sweep=job_tracker.run_id,
                                entity=job_tracker.entity,
                                project=job_tracker.project,
                                state="CANCELED",
                            )
                        except Exception as e:
                            raise LaunchError(f"Failed to update sweep state: {e}")
                else:
                    wandb.termlog(f"{LOG_PREFIX}Job finished with ID: {run.id}")
                with self._jobs_lock:
                    job_tracker.completed_status = state
                return True

            return False
        except LaunchError as e:
            wandb.termerror(
                f"{LOG_PREFIX}Terminating job {run.id} because it failed to start: {str(e)}"
            )
            known_error = True
            with self._jobs_lock:
                job_tracker.failed_to_start = True
        # TODO: make get_status robust to errors for each runner, and handle them
        except Exception as e:
            wandb.termerror(f"{LOG_PREFIX}Error getting status for job {run.id}")
            wandb.termerror(traceback.format_exc())
            _logger.info("---")
            _logger.info("Caught exception while getting status.")
            _logger.info(f"Job ID: {run.id}")
            _logger.info(traceback.format_exc())
            _logger.info("---")
            wandb._sentry.exception(e)
        return known_error
907
+
908
+ async def get_job_and_queue(self) -> Optional[JobSpecAndQueue]:
909
+ for queue in self._queues:
910
+ job = await self.pop_from_queue(queue)
911
+ if job is not None:
912
+ self._queues.remove(queue)
913
+ self._queues.append(queue)
914
+ return JobSpecAndQueue(job, queue)
915
+ return None
916
+
917
+ def _set_queue_and_rqi_in_project(
918
+ self, project: LaunchProject, job: Dict[str, Any], queue: str
919
+ ) -> None:
920
+ project.queue_name = queue
921
+
922
+ # queue entity currently always matches the agent
923
+ project.queue_entity = self._entity
924
+ project.run_queue_item_id = job["runQueueItemId"]
parrot/lib/python3.10/site-packages/wandb/sdk/launch/agent/config.py ADDED
@@ -0,0 +1,296 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """Definition of the config object used by the Launch agent."""
2
+
3
+ from enum import Enum
4
+ from typing import List, Optional
5
+
6
+ # ValidationError is imported for exception type checking purposes only.
7
+ from pydantic import ( # type: ignore
8
+ BaseModel,
9
+ Field,
10
+ ValidationError, # noqa: F401
11
+ root_validator,
12
+ validator,
13
+ )
14
+
15
+ import wandb
16
+ from wandb.sdk.launch.utils import (
17
+ AZURE_BLOB_REGEX,
18
+ AZURE_CONTAINER_REGISTRY_URI_REGEX,
19
+ ELASTIC_CONTAINER_REGISTRY_URI_REGEX,
20
+ GCP_ARTIFACT_REGISTRY_URI_REGEX,
21
+ GCS_URI_RE,
22
+ S3_URI_RE,
23
+ )
24
+
25
+ __all__ = [
26
+ "ValidationError",
27
+ "AgentConfig",
28
+ ]
29
+
30
+
31
class EnvironmentType(str, Enum):
    """Enum of valid environment types."""

    # Cloud provider backing the agent's environment integration.
    aws = "aws"
    gcp = "gcp"
    azure = "azure"
37
+
38
+
39
class RegistryType(str, Enum):
    """Enum of valid registry types."""

    ecr = "ecr"  # AWS Elastic Container Registry
    acr = "acr"  # Azure Container Registry
    gcr = "gcr"  # Google Container/Artifact Registry
45
+
46
+
47
class BuilderType(str, Enum):
    """Enum of valid builder types."""

    docker = "docker"
    kaniko = "kaniko"
    # "noop" disables image building entirely.
    noop = "noop"
53
+
54
+
55
class TargetPlatform(str, Enum):
    """Enum of valid target platforms."""

    # Values follow the Docker platform string convention (os/arch).
    linux_amd64 = "linux/amd64"
    linux_arm64 = "linux/arm64"
60
+
61
+
62
class RegistryConfig(BaseModel):
    """Configuration for registry block.

    Note that we don't forbid extra fields here because:
    - We want to allow all fields supported by each registry
    - We will perform validation on the registry object itself later
    - Registry block is being deprecated in favor of destination field in builder
    """

    type: Optional[RegistryType] = Field(
        None,
        description="The type of registry to use.",
    )
    uri: Optional[str] = Field(
        None,
        description="The URI of the registry.",
    )

    @validator("uri")  # type: ignore
    @classmethod
    def validate_uri(cls, uri: str) -> str:
        """Validate/normalize the registry URI via validate_registry_uri.

        NOTE(review): the field is Optional but the validator is annotated
        str; an explicit uri=None would reach validate_registry_uri — confirm
        callers never pass None explicitly.
        """
        return validate_registry_uri(uri)
84
+
85
+
86
class EnvironmentConfig(BaseModel):
    """Configuration for the environment block."""

    type: Optional[EnvironmentType] = Field(
        None,
        description="The type of environment to use.",
    )
    # NOTE(review): Field(...) marks region as required even though the
    # annotation is Optional (None is accepted but the key must be present) —
    # confirm this is intentional.
    region: Optional[str] = Field(..., description="The region to use.")

    class Config:
        # Extra fields are kept (providers take provider-specific keys);
        # unknown keys only trigger the warning below.
        extra = "allow"

    @root_validator(pre=True)  # type: ignore
    @classmethod
    def check_extra_fields(cls, values: dict) -> dict:
        """Check for extra fields and print a warning."""
        for key in values:
            if key not in ["type", "region"]:
                wandb.termwarn(
                    f"Unrecognized field {key} in environment block. Please check your config file."
                )
        return values
108
+
109
+
110
class BuilderConfig(BaseModel):
    """Configuration for the builder block of the agent config."""

    type: Optional[BuilderType] = Field(
        None,
        description="The type of builder to use.",
    )
    destination: Optional[str] = Field(
        None,
        description="The destination to use for the built image. If not provided, "
        "the image will be pushed to the registry.",
    )

    platform: Optional[TargetPlatform] = Field(
        None,
        description="The platform to use for the built image. If not provided, "
        "the platform will be detected automatically.",
    )

    # The aliased fields below accept kebab-case keys in the YAML config.
    build_context_store: Optional[str] = Field(
        None,
        description="The build context store to use. Required for kaniko builds.",
        alias="build-context-store",
    )
    build_job_name: Optional[str] = Field(
        "wandb-launch-container-build",
        description="Name prefix of the build job.",
        alias="build-job-name",
    )
    secret_name: Optional[str] = Field(
        None,
        description="The name of the secret to use for the build job.",
        alias="secret-name",
    )
    secret_key: Optional[str] = Field(
        None,
        description="The key of the secret to use for the build job.",
        alias="secret-key",
    )
    kaniko_image: Optional[str] = Field(
        "gcr.io/kaniko-project/executor:latest",
        description="The image to use for the kaniko executor.",
        alias="kaniko-image",
    )

    @validator("build_context_store")  # type: ignore
    @classmethod
    def validate_build_context_store(
        cls, build_context_store: Optional[str]
    ) -> Optional[str]:
        """Validate that the build context store is a valid container registry URI."""
        if build_context_store is None:
            return None
        # Accept S3, GCS, or Azure blob URIs.
        for regex in [
            S3_URI_RE,
            GCS_URI_RE,
            AZURE_BLOB_REGEX,
        ]:
            if regex.match(build_context_store):
                return build_context_store
        raise ValueError(
            "Invalid build context store. Build context store must be a URI for an "
            "S3 bucket, GCS bucket, or Azure blob."
        )

    @root_validator(pre=True)  # type: ignore
    @classmethod
    def validate_docker(cls, values: dict) -> dict:
        """Right now there are no required fields for docker builds."""
        return values

    @validator("destination")  # type: ignore
    @classmethod
    def validate_destination(cls, destination: Optional[str]) -> Optional[str]:
        """Validate that the destination is a valid container registry URI."""
        if destination is None:
            return None
        return validate_registry_uri(destination)
186
+
187
+
188
class AgentConfig(BaseModel):
    """Configuration for the Launch agent."""

    # NOTE: pydantic deep-copies field defaults per instance, so the mutable
    # [] default here is safe (unlike a plain function default).
    queues: List[str] = Field(
        default=[],
        description="The queues to use for this agent.",
    )
    entity: Optional[str] = Field(
        description="The W&B entity to use for this agent.",
    )
    max_jobs: Optional[int] = Field(
        1,
        description="The maximum number of jobs to run concurrently.",
    )
    max_schedulers: Optional[int] = Field(
        1,
        description="The maximum number of sweep schedulers to run concurrently.",
    )
    secure_mode: Optional[bool] = Field(
        False,
        description="Whether to use secure mode for this agent. If True, the "
        "agent will reject runs that attempt to override the entrypoint or image.",
    )
    registry: Optional[RegistryConfig] = Field(
        None,
        description="The registry to use.",
    )
    environment: Optional[EnvironmentConfig] = Field(
        None,
        description="The environment to use.",
    )
    builder: Optional[BuilderConfig] = Field(
        None,
        description="The builder to use.",
    )
    verbosity: Optional[int] = Field(
        0,
        description="How verbose to print, 0 = default, 1 = verbose, 2 = very verbose",
    )
    stopped_run_timeout: Optional[int] = Field(
        60,
        description="How many seconds to wait after receiving the stop command before forcibly cancelling a run.",
    )

    class Config:
        # Unknown top-level keys are rejected (unlike the environment block).
        extra = "forbid"
234
+
235
+
236
def validate_registry_uri(uri: str) -> str:
    """Validate that the registry URI is a valid container registry URI.

    The URI should resolve to an image name in a container registry. The recognized
    formats are for ECR, ACR, and GCP Artifact Registry. If the URI does not match
    any of these formats, a warning is printed indicating the registry type is not
    recognized and the agent can't guarantee that images can be pushed.

    If the format is recognized but does not resolve to an image name, an
    error is raised. For example, if the URI is an ECR URI but does not include
    an image name or includes a tag as well as an image name, an error is raised.
    """
    tag_msg = (
        "Destination for built images may not include a tag, but the URI provided "
        "includes the suffix '{tag}'. Please remove the tag and try again. The agent "
        "will automatically tag each image with a unique hash of the source code."
    )
    if uri.startswith("https://"):
        uri = uri[8:]

    # (regex, required named group, error when that group is missing)
    recognized_registries = [
        (
            GCP_ARTIFACT_REGISTRY_URI_REGEX,
            "image_name",
            "An image name must be specified in the URI for a GCP Artifact Registry. "
            "Please provide a uri with the format "
            "'https://<region>-docker.pkg.dev/<project>/<repository>/<image>'.",
        ),
        (
            AZURE_CONTAINER_REGISTRY_URI_REGEX,
            "repository",
            "A repository name must be specified in the URI for an "
            "Azure Container Registry. Please provide a uri with the format "
            "'https://<registry-name>.azurecr.io/<repository>'.",
        ),
        (
            ELASTIC_CONTAINER_REGISTRY_URI_REGEX,
            "repository",
            "A repository name must be specified in the URI for an "
            "Elastic Container Registry. Please provide a uri with the format "
            "'https://<account-id>.dkr.ecr.<region>.amazonaws.com/<repository>'.",
        ),
    ]
    for regex, required_group, missing_msg in recognized_registries:
        match = regex.match(uri)
        if not match:
            continue
        if match.group("tag"):
            raise ValueError(tag_msg.format(tag=match.group("tag")))
        if not match.group(required_group):
            raise ValueError(missing_msg)
        return uri

    wandb.termwarn(
        f"Unable to recognize registry type in URI {uri}. You are responsible "
        "for ensuring the agent can push images to this registry."
    )
    return uri
parrot/lib/python3.10/site-packages/wandb/sdk/launch/agent/job_status_tracker.py ADDED
@@ -0,0 +1,53 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import logging
2
+ from dataclasses import dataclass
3
+ from typing import Optional
4
+
5
+ from wandb.apis.internal import Api
6
+ from wandb.errors import CommError
7
+ from wandb.sdk.launch._project_spec import LaunchProject
8
+
9
+ from ..runner.abstract import AbstractRun
10
+ from ..utils import event_loop_thread_exec
11
+ from .run_queue_item_file_saver import RunQueueItemFileSaver
12
+
13
+ _logger = logging.getLogger(__name__)
14
+
15
+
16
@dataclass
class JobAndRunStatusTracker:
    """Mutable record of a run-queue item's state while the agent manages it."""

    run_queue_item_id: str
    queue: str
    saver: RunQueueItemFileSaver
    run_id: Optional[str] = None
    project: Optional[str] = None
    entity: Optional[str] = None
    run: Optional[AbstractRun] = None
    failed_to_start: bool = False
    completed_status: Optional[str] = None
    is_scheduler: bool = False
    err_stage: str = "agent"

    @property
    def job_completed(self) -> bool:
        """True once the job failed to start or reached a terminal status."""
        if self.failed_to_start:
            return True
        return self.completed_status is not None

    def update_run_info(self, launch_project: LaunchProject) -> None:
        """Copy run identifiers from the launch project onto this tracker."""
        self.run_id = launch_project.run_id
        self.project = launch_project.target_project
        self.entity = launch_project.target_entity

    def set_err_stage(self, stage: str) -> None:
        """Record which stage of the agent loop produced an error."""
        self.err_stage = stage

    async def check_wandb_run_stopped(self, api: Api) -> bool:
        """Return True if a stop was requested for the tracked wandb run.

        Requires run info to have been populated via update_run_info first.
        CommErrors are logged and treated as "not stopped".
        """
        assert (
            self.run_id is not None
            and self.project is not None
            and self.entity is not None
        ), "Job tracker does not contain run info. Update with run info before checking if run stopped"
        stop_requested = event_loop_thread_exec(api.api.check_stop_requested)
        try:
            result = await stop_requested(self.project, self.entity, self.run_id)
        except CommError as e:
            _logger.error(f"CommError when checking if wandb run stopped: {e}")
            return False
        return bool(result)
parrot/lib/python3.10/site-packages/wandb/sdk/launch/agent/run_queue_item_file_saver.py ADDED
@@ -0,0 +1,45 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """Implementation of the run queue item file saver class."""
2
+
3
+ import os
4
+ import sys
5
+ from typing import List, Optional
6
+
7
+ import wandb
8
+
9
+ if sys.version_info >= (3, 8):
10
+ from typing import Literal
11
+ else:
12
+ from typing_extensions import Literal
13
+
14
+ FileSubtypes = Literal["warning", "error"]
15
+
16
+
17
class RunQueueItemFileSaver:
    """Saves warning/error files for a run queue item via the agent's wandb run."""

    def __init__(
        self,
        agent_run: Optional["wandb.sdk.wandb_run.Run"],
        run_queue_item_id: str,
    ):
        self.run_queue_item_id = run_queue_item_id
        self.run = agent_run

    def save_contents(
        self, contents: str, fname: str, file_sub_type: FileSubtypes
    ) -> Optional[List[str]]:
        """Write ``contents`` under the run's files dir and upload it.

        Returns the run-relative saved path(s) on success, otherwise None.
        """
        # Without a real agent run there is nowhere to upload the file.
        if not isinstance(self.run, wandb.sdk.wandb_run.Run):
            wandb.termwarn("Not saving file contents because agent has no run")
            return None
        files_root = self.run._settings.files_dir
        relative_path = os.path.join(self.run_queue_item_id, file_sub_type, fname)
        destination = os.path.join(files_root, relative_path)
        os.makedirs(os.path.dirname(destination), exist_ok=True)
        with open(destination, "w") as fp:
            fp.write(contents)
        upload_result = self.run.save(destination, base_path=files_root, policy="now")
        # run.save returns a list of saved files on success.
        if not isinstance(upload_result, list):
            wandb.termwarn(
                f"Failed to save files for run queue item: {self.run_queue_item_id}"
            )
            return None
        return [relative_path]
parrot/lib/python3.10/site-packages/wandb/sdk/launch/create_job.py ADDED
@@ -0,0 +1,528 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import json
2
+ import logging
3
+ import os
4
+ import re
5
+ import sys
6
+ import tempfile
7
+ from typing import Any, Dict, List, Optional, Tuple
8
+
9
+ import wandb
10
+ from wandb.apis.internal import Api
11
+ from wandb.sdk.artifacts.artifact import Artifact
12
+ from wandb.sdk.internal.job_builder import JobBuilder
13
+ from wandb.sdk.launch.git_reference import GitReference
14
+ from wandb.sdk.launch.utils import (
15
+ _is_git_uri,
16
+ get_current_python_version,
17
+ get_entrypoint_file,
18
+ )
19
+ from wandb.sdk.lib import filesystem
20
+ from wandb.util import make_artifact_name_safe
21
+
22
+ logging.basicConfig(stream=sys.stdout, level=logging.INFO)
23
+ _logger = logging.getLogger("wandb")
24
+
25
+
26
+ CODE_ARTIFACT_EXCLUDE_PATHS = ["wandb", ".git"]
27
+
28
+
29
def create_job(
    path: str,
    job_type: str,
    entity: Optional[str] = None,
    project: Optional[str] = None,
    name: Optional[str] = None,
    description: Optional[str] = None,
    aliases: Optional[List[str]] = None,
    runtime: Optional[str] = None,
    entrypoint: Optional[str] = None,
    git_hash: Optional[str] = None,
    build_context: Optional[str] = None,
    dockerfile: Optional[str] = None,
) -> Optional[Artifact]:
    """Create a job from a path, not as the output of a run.

    Arguments:
        path (str): Path to the job directory.
        job_type (str): Type of the job. One of "git", "code", or "image".
        entity (Optional[str]): Entity to create the job under.
        project (Optional[str]): Project to create the job under.
        name (Optional[str]): Name of the job.
        description (Optional[str]): Description of the job.
        aliases (Optional[List[str]]): Aliases for the job.
        runtime (Optional[str]): Python runtime of the job, like 3.9.
        entrypoint (Optional[str]): Entrypoint of the job. If build_context is
            provided, path is relative to build_context.
        git_hash (Optional[str]): Git hash of a specific commit, when using git type jobs.
        build_context (Optional[str]): Path to the build context, when using image type jobs.
        dockerfile (Optional[str]): Path to the Dockerfile, when using image type jobs.
            If build_context is provided, path is relative to build_context.

    Returns:
        Optional[Artifact]: The job artifact, or None if job creation failed.

    Example:
        ```python
        artifact_job = wandb.create_job(
            job_type="code",
            path=".",
            entity="wandb",
            project="jobs",
            name="my-train-job",
            description="My training job",
            aliases=["train"],
            runtime="3.9",
            entrypoint="train.py",
        )
        # then run the newly created job
        artifact_job.call()
        ```
    """
    api = Api()

    # The action string and alias list are only needed by the CLI wrapper.
    job_artifact, _, _ = _create_job(
        api,
        job_type,
        path,
        entity=entity,
        project=project,
        name=name,
        description=description,
        aliases=aliases,
        runtime=runtime,
        entrypoint=entrypoint,
        git_hash=git_hash,
        build_context=build_context,
        dockerfile=dockerfile,
    )

    return job_artifact
101
+
102
+
103
def _create_job(
    api: Api,
    job_type: str,
    path: str,
    entity: Optional[str] = None,
    project: Optional[str] = None,
    name: Optional[str] = None,
    description: Optional[str] = None,
    aliases: Optional[List[str]] = None,
    runtime: Optional[str] = None,
    entrypoint: Optional[str] = None,
    git_hash: Optional[str] = None,
    build_context: Optional[str] = None,
    dockerfile: Optional[str] = None,
    base_image: Optional[str] = None,
) -> Tuple[Optional[Artifact], str, List[str]]:
    """Create a partial job artifact from a path.

    Validates inputs, manufactures job metadata, then uses a temporary hidden
    wandb run to log the job artifact before deleting that run.

    Returns:
        A tuple of (artifact or None on failure, action string for CLI
        printing, resolved alias list). On any failure returns (None, "", []).
    """
    wandb.termlog(f"Creating launch job of type: {job_type}...")

    # Reject names that the artifact backend would mangle.
    if name and name != make_artifact_name_safe(name):
        wandb.termerror(
            f"Artifact names may only contain alphanumeric characters, dashes, underscores, and dots. Did you mean: {make_artifact_name_safe(name)}"
        )
        return None, "", []

    # Runtime must look like "3.<minor>", e.g. 3.9 or 3.10.
    if runtime is not None:
        if not re.match(r"^3\.\d+$", runtime):
            wandb.termerror(
                f"Runtime (-r, --runtime) must be a minor version of Python 3, "
                f"e.g. 3.9 or 3.10, received {runtime}"
            )
            return None, "", []
    aliases = aliases or []
    tempdir = tempfile.TemporaryDirectory()
    try:
        metadata, requirements = _make_metadata_for_partial_job(
            job_type=job_type,
            tempdir=tempdir,
            git_hash=git_hash,
            runtime=runtime,
            path=path,
            entrypoint=entrypoint,
        )
        if not metadata:
            # Helper already printed the reason for failure.
            return None, "", []
    except Exception as e:
        wandb.termerror(f"Error creating job: {e}")
        return None, "", []

    # Write wandb-metadata.json and requirements.txt for the job builder.
    _dump_metadata_and_requirements(
        metadata=metadata,
        tmp_path=tempdir.name,
        requirements=requirements,
    )

    try:
        # init hidden wandb run with job building disabled (handled manually)
        run = wandb.init(
            dir=tempdir.name,
            settings={"silent": True, "disable_job_creation": True},
            entity=entity,
            project=project,
            job_type="cli_create_job",
        )
    except Exception:
        # Error printed by wandb.init
        return None, "", []

    job_builder = _configure_job_builder_for_partial(tempdir.name, job_source=job_type)
    if job_type == "code":
        assert entrypoint is not None
        # Code jobs first upload the source as a separate code artifact.
        job_name = _make_code_artifact(
            api=api,
            job_builder=job_builder,
            path=path,
            entrypoint=entrypoint,
            run=run,  # type: ignore
            entity=entity,
            project=project,
            name=name,
        )
        if not job_name:
            return None, "", []
        name = job_name

    # build job artifact, loads wandb-metadata and creates wandb-job.json here
    artifact = job_builder.build(
        api.api,
        dockerfile=dockerfile,
        build_context=build_context,
        base_image=base_image,
    )
    if not artifact:
        wandb.termerror("JobBuilder failed to build a job")
        _logger.debug("Failed to build job, check job source and metadata")
        return None, "", []

    if not name:
        name = artifact.name

    aliases += job_builder._aliases
    if "latest" not in aliases:
        aliases += ["latest"]

    # Register the artifact collection server-side before logging it.
    res, _ = api.create_artifact(
        artifact_type_name="job",
        artifact_collection_name=name,
        digest=artifact.digest,
        client_id=artifact._client_id,
        sequence_client_id=artifact._sequence_client_id,
        entity_name=entity,
        project_name=project,
        run_name=run.id,  # type: ignore # run will be deleted after creation
        description=description,
        metadata={"_partial": True},
        is_user_created=True,
        aliases=[{"artifactCollectionName": name, "alias": a} for a in aliases],
    )
    action = "No changes detected for"
    if not res.get("artifactSequence", {}).get("latestArtifact"):
        # When there is no latestArtifact, we are creating new
        action = "Created"
    elif res.get("state") == "PENDING":
        # updating an existing artifact, state is pending awaiting call to
        # log_artifact to upload and finalize artifact. If not pending, digest
        # is the same as latestArtifact, so no changes detected
        action = "Updated"

    run.log_artifact(artifact, aliases=aliases)  # type: ignore
    artifact.wait()
    run.finish()  # type: ignore

    # fetch, then delete hidden run
    _run = wandb.Api().run(f"{entity}/{project}/{run.id}")  # type: ignore
    _run.delete()

    return artifact, action, aliases
239
+
240
+
241
+ def _make_metadata_for_partial_job(
242
+ job_type: str,
243
+ tempdir: tempfile.TemporaryDirectory,
244
+ git_hash: Optional[str],
245
+ runtime: Optional[str],
246
+ path: str,
247
+ entrypoint: Optional[str],
248
+ ) -> Tuple[Optional[Dict[str, Any]], Optional[List[str]]]:
249
+ """Create metadata for partial jobs, return metadata and requirements."""
250
+ metadata = {}
251
+ if job_type == "git":
252
+ assert entrypoint is not None
253
+ repo_metadata = _create_repo_metadata(
254
+ path=path,
255
+ tempdir=tempdir.name,
256
+ entrypoint=entrypoint,
257
+ git_hash=git_hash,
258
+ runtime=runtime,
259
+ )
260
+ if not repo_metadata:
261
+ tempdir.cleanup() # otherwise git can pollute
262
+ return None, None
263
+ metadata.update(repo_metadata)
264
+ return metadata, None
265
+
266
+ if job_type == "code":
267
+ assert entrypoint is not None
268
+ artifact_metadata, requirements = _create_artifact_metadata(
269
+ path=path, entrypoint=entrypoint, runtime=runtime
270
+ )
271
+ if not artifact_metadata:
272
+ return None, None
273
+ metadata.update(artifact_metadata)
274
+ return metadata, requirements
275
+
276
+ if job_type == "image":
277
+ if runtime:
278
+ wandb.termwarn(
279
+ "Setting runtime is not supported for image jobs, ignoring runtime"
280
+ )
281
+ # TODO(gst): support entrypoint for image based jobs
282
+ if entrypoint:
283
+ wandb.termwarn(
284
+ "Setting an entrypoint is not currently supported for image jobs, ignoring entrypoint argument"
285
+ )
286
+ metadata.update({"python": runtime or "", "docker": path})
287
+ return metadata, None
288
+
289
+ wandb.termerror(f"Invalid job type: {job_type}")
290
+ return None, None
291
+
292
+
293
+ def _maybe_warn_python_no_executable(entrypoint: str):
294
+ entrypoint_list = entrypoint.split(" ")
295
+ if len(entrypoint_list) == 1 and entrypoint_list[0].endswith(".py"):
296
+ wandb.termwarn(
297
+ f"Entrypoint {entrypoint} is a python file without an executable, you may want to use `python {entrypoint}` as the entrypoint instead."
298
+ )
299
+
300
+
301
def _create_repo_metadata(
    path: str,
    tempdir: str,
    entrypoint: str,
    git_hash: Optional[str] = None,
    runtime: Optional[str] = None,
) -> Optional[Dict[str, Any]]:
    """Build job metadata for a git-sourced job.

    Fetches the repo referenced by ``path`` into ``tempdir``, resolves the
    commit hash and Python version, and returns the metadata dict used by the
    job builder. Prints an error and returns None on any failure.
    """
    # Make sure the entrypoint doesn't contain any backward path traversal
    if entrypoint and ".." in entrypoint:
        wandb.termerror("Entrypoint cannot contain backward path traversal")
        return None

    _maybe_warn_python_no_executable(entrypoint)

    if not _is_git_uri(path):
        wandb.termerror("Path must be a git URI")
        return None

    ref = GitReference(path, git_hash)
    if not ref:
        wandb.termerror("Could not parse git URI")
        return None

    ref.fetch(tempdir)

    # Note: the original code re-checked ref.commit_hash in a nested branch
    # after assigning it to `commit`; both reads are the same attribute, so a
    # single falsy check is equivalent and the re-assignment was unreachable.
    commit = ref.commit_hash
    if not commit:
        wandb.termerror("Could not find git commit hash")
        return None

    local_dir = os.path.join(tempdir, ref.path or "")

    # Resolve the Python version: explicit runtime arg first, then the repo's
    # pin files (runtime.txt / .python-version), then the running interpreter.
    python_version = runtime
    if not python_version:
        if os.path.exists(os.path.join(local_dir, "runtime.txt")):
            with open(os.path.join(local_dir, "runtime.txt")) as f:
                python_version = f.read().strip()
        elif os.path.exists(os.path.join(local_dir, ".python-version")):
            with open(os.path.join(local_dir, ".python-version")) as f:
                python_version = f.read().strip().splitlines()[0]
        else:
            python_version, _ = get_current_python_version()

    python_version = _clean_python_version(python_version)

    metadata = {
        "git": {
            "commit": commit,
            "remote": ref.url,
        },
        "entrypoint": entrypoint.split(" "),
        "python": python_version,  # used to build container
        "notebook": False,  # partial jobs from notebooks not supported
    }

    return metadata
358
+
359
+
360
def _create_artifact_metadata(
    path: str, entrypoint: str, runtime: Optional[str] = None
) -> Tuple[Optional[Dict[str, Any]], Optional[List[str]]]:
    """Build job metadata for a code-artifact job.

    Reads requirements.txt from ``path`` (if present) and resolves the Python
    version from ``runtime`` or the current interpreter.

    Returns:
        (metadata, requirements). On an invalid path returns ({}, []);
        callers treat the empty metadata dict as failure.
    """
    if not os.path.isdir(path):
        wandb.termerror("Path must be a valid file or directory")
        return {}, []

    _maybe_warn_python_no_executable(entrypoint)

    entrypoint_list = entrypoint.split(" ")
    entrypoint_file = get_entrypoint_file(entrypoint_list)

    # read local requirements.txt and dump to temp dir for builder
    requirements = []
    depspath = os.path.join(path, "requirements.txt")
    if os.path.exists(depspath):
        with open(depspath) as f:
            requirements = f.read().splitlines()

    # Generator expression avoids materializing a throwaway list (was
    # any([...])) while short-circuiting on the first match.
    if not any("wandb" in r for r in requirements):
        wandb.termwarn("wandb is not present in requirements.txt.")

    if runtime:
        python_version = _clean_python_version(runtime)
    else:
        python_version, _ = get_current_python_version()
        python_version = _clean_python_version(python_version)

    metadata = {
        "python": python_version,
        "codePath": entrypoint_file,
        "entrypoint": entrypoint_list,
    }
    return metadata, requirements
394
+
395
+
396
def _configure_job_builder_for_partial(tmpdir: str, job_source: str) -> JobBuilder:
    """Configure job builder with temp dir and job source."""
    # Map CLI job types onto the source names the job builder expects.
    source_aliases = {"git": "repo", "code": "artifact"}
    job_source = source_aliases.get(job_source, job_source)

    settings = wandb.Settings()
    settings.update({"files_dir": tmpdir, "job_source": job_source})
    builder = JobBuilder(
        settings=settings,  # type: ignore
        verbose=True,
    )
    builder._partial = True
    # never allow notebook runs
    builder._is_notebook_run = False
    # set run inputs and outputs to empty dicts
    builder.set_config({})
    builder.set_summary({})
    return builder
419
+
420
+
421
def _make_code_artifact(
    api: Api,
    job_builder: JobBuilder,
    run: "wandb.sdk.wandb_run.Run",
    path: str,
    entrypoint: str,
    entity: Optional[str],
    project: Optional[str],
    name: Optional[str],
) -> Optional[str]:
    """Helper for creating and logging code artifacts.

    Uploads the directory at ``path`` as a "code" type artifact attached to
    ``run`` and registers it with the job builder.

    Returns the name of the eventual job, or None on failure.
    """
    entrypoint_list = entrypoint.split(" ")
    # We no longer require the entrypoint to end in an existing file. But we
    # need something to use as the default job artifact name. In the future we
    # may require the user to provide a job name explicitly when calling
    # wandb job create.
    entrypoint_file = entrypoint_list[-1]
    artifact_name = _make_code_artifact_name(os.path.join(path, entrypoint_file), name)
    code_artifact = wandb.Artifact(
        name=artifact_name,
        type="code",
        description="Code artifact for job",
    )

    try:
        code_artifact.add_dir(path)
    except Exception as e:
        # add_dir fails on symlinked paths; give a targeted hint first.
        if os.path.islink(path):
            wandb.termerror(
                "Symlinks are not supported for code artifact jobs, please copy the code into a directory and try again"
            )
        wandb.termerror(f"Error adding to code artifact: {e}")
        return None

    # Remove paths we don't want to include, if present
    for item in CODE_ARTIFACT_EXCLUDE_PATHS:
        try:
            code_artifact.remove(item)
        except FileNotFoundError:
            pass

    # Register the collection server-side before logging the artifact.
    res, _ = api.create_artifact(
        artifact_type_name="code",
        artifact_collection_name=artifact_name,
        digest=code_artifact.digest,
        client_id=code_artifact._client_id,
        sequence_client_id=code_artifact._sequence_client_id,
        entity_name=entity,
        project_name=project,
        run_name=run.id,  # run will be deleted after creation
        description="Code artifact for job",
        metadata={"codePath": path, "entrypoint": entrypoint_file},
        is_user_created=True,
        aliases=[
            {"artifactCollectionName": artifact_name, "alias": a} for a in ["latest"]
        ],
    )
    run.log_artifact(code_artifact)
    code_artifact.wait()
    job_builder._handle_server_artifact(res, code_artifact)  # type: ignore

    # code artifacts have "code" prefix, remove it and alias
    if not name:
        name = code_artifact.name.replace("code", "job").split(":")[0]

    return name
490
+
491
+
492
+ def _make_code_artifact_name(path: str, name: Optional[str]) -> str:
493
+ """Make a code artifact name from a path and user provided name."""
494
+ if name:
495
+ return f"code-{name}"
496
+
497
+ clean_path = path.replace("./", "")
498
+ if clean_path[0] == "/":
499
+ clean_path = clean_path[1:]
500
+ if clean_path[-1] == "/":
501
+ clean_path = clean_path[:-1]
502
+
503
+ path_name = f"code-{make_artifact_name_safe(clean_path)}"
504
+ return path_name
505
+
506
+
507
def _dump_metadata_and_requirements(
    tmp_path: str, metadata: Dict[str, Any], requirements: Optional[List[str]]
) -> None:
    """Dump manufactured metadata and requirements.txt.

    File used by the job_builder to create a job from provided metadata.
    """
    filesystem.mkdir_exists_ok(tmp_path)

    metadata_file = os.path.join(tmp_path, "wandb-metadata.json")
    with open(metadata_file, "w") as meta_fp:
        json.dump(metadata, meta_fp)

    requirements_file = os.path.join(tmp_path, "requirements.txt")
    with open(requirements_file, "w") as reqs_fp:
        reqs_fp.write("\n".join(requirements or []))
521
+
522
+
523
+ def _clean_python_version(python_version: str) -> str:
524
+ # remove micro if present
525
+ if python_version.count(".") > 1:
526
+ python_version = ".".join(python_version.split(".")[:2])
527
+ _logger.debug(f"micro python version stripped. Now: {python_version}")
528
+ return python_version
parrot/lib/python3.10/site-packages/wandb/sdk/launch/environment/__pycache__/abstract.cpython-310.pyc ADDED
Binary file (1.45 kB). View file
 
parrot/lib/python3.10/site-packages/wandb/sdk/launch/environment/__pycache__/aws_environment.cpython-310.pyc ADDED
Binary file (10.7 kB). View file
 
parrot/lib/python3.10/site-packages/wandb/sdk/launch/environment/__pycache__/azure_environment.cpython-310.pyc ADDED
Binary file (4.28 kB). View file
 
parrot/lib/python3.10/site-packages/wandb/sdk/launch/environment/__pycache__/gcp_environment.cpython-310.pyc ADDED
Binary file (10.9 kB). View file
 
parrot/lib/python3.10/site-packages/wandb/sdk/launch/environment/__pycache__/local_environment.cpython-310.pyc ADDED
Binary file (3.04 kB). View file
 
parrot/lib/python3.10/site-packages/wandb/sdk/launch/environment/abstract.py ADDED
@@ -0,0 +1,29 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """Abstract base class for environments."""
2
+
3
+ from abc import ABC, abstractmethod
4
+
5
+
6
class AbstractEnvironment(ABC):
    """Abstract base class for environments.

    Concrete subclasses wrap a specific cloud provider and implement async
    verification and file/directory upload to that provider's storage.
    """

    # Provider region the environment operates in; set by concrete subclasses.
    region: str

    @abstractmethod
    async def verify(self) -> None:
        """Verify that the environment is configured correctly.

        Raises on misconfiguration; returns None when verification passes.
        """
        raise NotImplementedError

    @abstractmethod
    async def upload_file(self, source: str, destination: str) -> None:
        """Upload a file from the local filesystem to storage in the environment."""
        raise NotImplementedError

    @abstractmethod
    async def upload_dir(self, source: str, destination: str) -> None:
        """Upload the contents of a directory from the local filesystem to the environment."""
        raise NotImplementedError

    @abstractmethod
    async def verify_storage_uri(self, uri: str) -> None:
        """Verify that the storage URI is configured correctly."""
        raise NotImplementedError