ZTWHHH committed on
Commit
010198d
·
verified ·
1 Parent(s): 5480190

Add files using upload-large-folder tool

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. .gitattributes +9 -0
  2. deepseekvl2/lib/python3.10/__pycache__/__phello__.foo.cpython-310.pyc +0 -0
  3. deepseekvl2/lib/python3.10/__pycache__/_aix_support.cpython-310.pyc +0 -0
  4. deepseekvl2/lib/python3.10/__pycache__/_markupbase.cpython-310.pyc +0 -0
  5. deepseekvl2/lib/python3.10/__pycache__/_strptime.cpython-310.pyc +0 -0
  6. deepseekvl2/lib/python3.10/__pycache__/abc.cpython-310.pyc +0 -0
  7. deepseekvl2/lib/python3.10/__pycache__/argparse.cpython-310.pyc +0 -0
  8. deepseekvl2/lib/python3.10/__pycache__/asynchat.cpython-310.pyc +0 -0
  9. deepseekvl2/lib/python3.10/__pycache__/asyncore.cpython-310.pyc +0 -0
  10. deepseekvl2/lib/python3.10/__pycache__/cgitb.cpython-310.pyc +0 -0
  11. deepseekvl2/lib/python3.10/__pycache__/code.cpython-310.pyc +0 -0
  12. deepseekvl2/lib/python3.10/__pycache__/configparser.cpython-310.pyc +0 -0
  13. deepseekvl2/lib/python3.10/__pycache__/csv.cpython-310.pyc +0 -0
  14. deepseekvl2/lib/python3.10/__pycache__/doctest.cpython-310.pyc +0 -0
  15. deepseekvl2/lib/python3.10/__pycache__/gzip.cpython-310.pyc +0 -0
  16. deepseekvl2/lib/python3.10/__pycache__/imaplib.cpython-310.pyc +0 -0
  17. deepseekvl2/lib/python3.10/__pycache__/mimetypes.cpython-310.pyc +0 -0
  18. deepseekvl2/lib/python3.10/__pycache__/nntplib.cpython-310.pyc +0 -0
  19. deepseekvl2/lib/python3.10/__pycache__/opcode.cpython-310.pyc +0 -0
  20. deepseekvl2/lib/python3.10/__pycache__/plistlib.cpython-310.pyc +0 -0
  21. deepseekvl2/lib/python3.10/__pycache__/pydoc.cpython-310.pyc +0 -0
  22. deepseekvl2/lib/python3.10/__pycache__/rlcompleter.cpython-310.pyc +0 -0
  23. deepseekvl2/lib/python3.10/__pycache__/sre_constants.cpython-310.pyc +0 -0
  24. deepseekvl2/lib/python3.10/__pycache__/symtable.cpython-310.pyc +0 -0
  25. deepseekvl2/lib/python3.10/__pycache__/threading.cpython-310.pyc +0 -0
  26. deepseekvl2/lib/python3.10/__pycache__/traceback.cpython-310.pyc +0 -0
  27. deepseekvl2/lib/python3.10/__pycache__/types.cpython-310.pyc +0 -0
  28. deepseekvl2/lib/python3.10/__pycache__/warnings.cpython-310.pyc +0 -0
  29. deepseekvl2/lib/python3.10/__pycache__/wave.cpython-310.pyc +0 -0
  30. deepseekvl2/lib/python3.10/__pycache__/zipfile.cpython-310.pyc +0 -0
  31. deepseekvl2/lib/python3.10/lib-dynload/_codecs_hk.cpython-310-x86_64-linux-gnu.so +3 -0
  32. deepseekvl2/lib/python3.10/lib-dynload/_codecs_tw.cpython-310-x86_64-linux-gnu.so +3 -0
  33. deepseekvl2/lib/python3.10/lib-dynload/_lzma.cpython-310-x86_64-linux-gnu.so +3 -0
  34. deepseekvl2/lib/python3.10/lib-dynload/_socket.cpython-310-x86_64-linux-gnu.so +3 -0
  35. deepseekvl2/lib/python3.10/lib-dynload/_ssl.cpython-310-x86_64-linux-gnu.so +3 -0
  36. deepseekvl2/lib/python3.10/lib-dynload/_testclinic.cpython-310-x86_64-linux-gnu.so +3 -0
  37. deepseekvl2/lib/python3.10/lib-dynload/_xxsubinterpreters.cpython-310-x86_64-linux-gnu.so +3 -0
  38. deepseekvl2/lib/python3.10/lib-dynload/_zoneinfo.cpython-310-x86_64-linux-gnu.so +3 -0
  39. deepseekvl2/lib/python3.10/lib-dynload/readline.cpython-310-x86_64-linux-gnu.so +3 -0
  40. infer_4_33_0/lib/python3.10/site-packages/wandb/__pycache__/__main__.cpython-310.pyc +0 -0
  41. infer_4_33_0/lib/python3.10/site-packages/wandb/agents/__pycache__/__init__.cpython-310.pyc +0 -0
  42. infer_4_33_0/lib/python3.10/site-packages/wandb/agents/pyagent.py +363 -0
  43. infer_4_33_0/lib/python3.10/site-packages/wandb/docker/__pycache__/__init__.cpython-310.pyc +0 -0
  44. infer_4_33_0/lib/python3.10/site-packages/wandb/docker/wandb-entrypoint.sh +33 -0
  45. infer_4_33_0/lib/python3.10/site-packages/wandb/docker/www_authenticate.py +94 -0
  46. infer_4_33_0/lib/python3.10/site-packages/wandb/sdk/__init__.py +37 -0
  47. infer_4_33_0/lib/python3.10/site-packages/wandb/sdk/wandb_config.py +322 -0
  48. infer_4_33_0/lib/python3.10/site-packages/wandb/sdk/wandb_init.py +1304 -0
  49. infer_4_33_0/lib/python3.10/site-packages/wandb/sdk/wandb_settings.py +1278 -0
  50. infer_4_33_0/lib/python3.10/site-packages/wandb/sdk/wandb_summary.py +150 -0
.gitattributes CHANGED
@@ -745,3 +745,12 @@ deepseekvl2/lib/python3.10/lib-dynload/_curses.cpython-310-x86_64-linux-gnu.so f
745
  deepseekvl2/lib/python3.10/lib-dynload/_struct.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
746
  deepseekvl2/lib/python3.10/lib-dynload/audioop.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
747
  deepseekvl2/lib/python3.10/lib-dynload/_testcapi.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
 
 
 
 
 
 
 
 
 
 
745
  deepseekvl2/lib/python3.10/lib-dynload/_struct.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
746
  deepseekvl2/lib/python3.10/lib-dynload/audioop.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
747
  deepseekvl2/lib/python3.10/lib-dynload/_testcapi.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
748
+ deepseekvl2/lib/python3.10/lib-dynload/_codecs_hk.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
749
+ deepseekvl2/lib/python3.10/lib-dynload/_zoneinfo.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
750
+ deepseekvl2/lib/python3.10/lib-dynload/_xxsubinterpreters.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
751
+ deepseekvl2/lib/python3.10/lib-dynload/_ssl.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
752
+ deepseekvl2/lib/python3.10/lib-dynload/_testclinic.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
753
+ deepseekvl2/lib/python3.10/lib-dynload/_lzma.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
754
+ deepseekvl2/lib/python3.10/lib-dynload/readline.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
755
+ deepseekvl2/lib/python3.10/lib-dynload/_codecs_tw.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
756
+ deepseekvl2/lib/python3.10/lib-dynload/_socket.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
deepseekvl2/lib/python3.10/__pycache__/__phello__.foo.cpython-310.pyc ADDED
Binary file (384 Bytes). View file
 
deepseekvl2/lib/python3.10/__pycache__/_aix_support.cpython-310.pyc ADDED
Binary file (3.13 kB). View file
 
deepseekvl2/lib/python3.10/__pycache__/_markupbase.cpython-310.pyc ADDED
Binary file (7.57 kB). View file
 
deepseekvl2/lib/python3.10/__pycache__/_strptime.cpython-310.pyc ADDED
Binary file (15.9 kB). View file
 
deepseekvl2/lib/python3.10/__pycache__/abc.cpython-310.pyc ADDED
Binary file (6.75 kB). View file
 
deepseekvl2/lib/python3.10/__pycache__/argparse.cpython-310.pyc ADDED
Binary file (63.5 kB). View file
 
deepseekvl2/lib/python3.10/__pycache__/asynchat.cpython-310.pyc ADDED
Binary file (7.28 kB). View file
 
deepseekvl2/lib/python3.10/__pycache__/asyncore.cpython-310.pyc ADDED
Binary file (16.3 kB). View file
 
deepseekvl2/lib/python3.10/__pycache__/cgitb.cpython-310.pyc ADDED
Binary file (10.3 kB). View file
 
deepseekvl2/lib/python3.10/__pycache__/code.cpython-310.pyc ADDED
Binary file (10.2 kB). View file
 
deepseekvl2/lib/python3.10/__pycache__/configparser.cpython-310.pyc ADDED
Binary file (45.5 kB). View file
 
deepseekvl2/lib/python3.10/__pycache__/csv.cpython-310.pyc ADDED
Binary file (11.8 kB). View file
 
deepseekvl2/lib/python3.10/__pycache__/doctest.cpython-310.pyc ADDED
Binary file (76.4 kB). View file
 
deepseekvl2/lib/python3.10/__pycache__/gzip.cpython-310.pyc ADDED
Binary file (18.8 kB). View file
 
deepseekvl2/lib/python3.10/__pycache__/imaplib.cpython-310.pyc ADDED
Binary file (42.3 kB). View file
 
deepseekvl2/lib/python3.10/__pycache__/mimetypes.cpython-310.pyc ADDED
Binary file (17.9 kB). View file
 
deepseekvl2/lib/python3.10/__pycache__/nntplib.cpython-310.pyc ADDED
Binary file (31.9 kB). View file
 
deepseekvl2/lib/python3.10/__pycache__/opcode.cpython-310.pyc ADDED
Binary file (5.7 kB). View file
 
deepseekvl2/lib/python3.10/__pycache__/plistlib.cpython-310.pyc ADDED
Binary file (23.6 kB). View file
 
deepseekvl2/lib/python3.10/__pycache__/pydoc.cpython-310.pyc ADDED
Binary file (85.4 kB). View file
 
deepseekvl2/lib/python3.10/__pycache__/rlcompleter.cpython-310.pyc ADDED
Binary file (6.21 kB). View file
 
deepseekvl2/lib/python3.10/__pycache__/sre_constants.cpython-310.pyc ADDED
Binary file (6.35 kB). View file
 
deepseekvl2/lib/python3.10/__pycache__/symtable.cpython-310.pyc ADDED
Binary file (12.8 kB). View file
 
deepseekvl2/lib/python3.10/__pycache__/threading.cpython-310.pyc ADDED
Binary file (45.2 kB). View file
 
deepseekvl2/lib/python3.10/__pycache__/traceback.cpython-310.pyc ADDED
Binary file (22 kB). View file
 
deepseekvl2/lib/python3.10/__pycache__/types.cpython-310.pyc ADDED
Binary file (9.78 kB). View file
 
deepseekvl2/lib/python3.10/__pycache__/warnings.cpython-310.pyc ADDED
Binary file (13.6 kB). View file
 
deepseekvl2/lib/python3.10/__pycache__/wave.cpython-310.pyc ADDED
Binary file (17.8 kB). View file
 
deepseekvl2/lib/python3.10/__pycache__/zipfile.cpython-310.pyc ADDED
Binary file (61.8 kB). View file
 
deepseekvl2/lib/python3.10/lib-dynload/_codecs_hk.cpython-310-x86_64-linux-gnu.so ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:d5028632a8d7f167721ab2e0de42fb9e1c4ea5a80161005dabfb5c559f784dcb
3
+ size 194368
deepseekvl2/lib/python3.10/lib-dynload/_codecs_tw.cpython-310-x86_64-linux-gnu.so ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:148bfe2a46df83fdece3e335e31ff78e92a273a1bfe660764988c88682ea3066
3
+ size 143232
deepseekvl2/lib/python3.10/lib-dynload/_lzma.cpython-310-x86_64-linux-gnu.so ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:ebfdcab2537ab089e99839348e41c28e8ce2b63d58d095f94c9f80a753748b0b
3
+ size 145376
deepseekvl2/lib/python3.10/lib-dynload/_socket.cpython-310-x86_64-linux-gnu.so ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:457b4b1f4e960408aea7e2a1cbb92a8840ac68a682ecf5baf3450ee0e796fc00
3
+ size 283680
deepseekvl2/lib/python3.10/lib-dynload/_ssl.cpython-310-x86_64-linux-gnu.so ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:f93473eb2cfe128149c6d2973086402faa935d68856a103c752d0b513925432e
3
+ size 499736
deepseekvl2/lib/python3.10/lib-dynload/_testclinic.cpython-310-x86_64-linux-gnu.so ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:d21d17fa352f1e585afb8240dcbe49ad67b9bed71232b699c294b97820792559
3
+ size 205672
deepseekvl2/lib/python3.10/lib-dynload/_xxsubinterpreters.cpython-310-x86_64-linux-gnu.so ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:462ae2906ffebca406891ed2955376243f777336816acb18c6efc32aa1249e8d
3
+ size 149936
deepseekvl2/lib/python3.10/lib-dynload/_zoneinfo.cpython-310-x86_64-linux-gnu.so ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:eb38a8e751825aa95af436b17da44762c3f446a02c33ee193788d4ab02124017
3
+ size 160600
deepseekvl2/lib/python3.10/lib-dynload/readline.cpython-310-x86_64-linux-gnu.so ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:96bcc678065c5ffab0324a9b557a7511f7fda79d85d84fafb7f2012261f4cee7
3
+ size 112992
infer_4_33_0/lib/python3.10/site-packages/wandb/__pycache__/__main__.cpython-310.pyc ADDED
Binary file (244 Bytes). View file
 
infer_4_33_0/lib/python3.10/site-packages/wandb/agents/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (170 Bytes). View file
 
infer_4_33_0/lib/python3.10/site-packages/wandb/agents/pyagent.py ADDED
@@ -0,0 +1,363 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """Agent - Agent object.
2
+
3
+ Manage wandb agent.
4
+
5
+ """
6
+
7
+ import ctypes
8
+ import logging
9
+ import os
10
+ import queue
11
+ import socket
12
+ import threading
13
+ import time
14
+ import traceback
15
+
16
+ import wandb
17
+ from wandb.apis import InternalApi
18
+ from wandb.sdk.launch.sweeps import utils as sweep_utils
19
+
20
+ logger = logging.getLogger(__name__)
21
+
22
+
23
+ def _terminate_thread(thread):
24
+ if not thread.is_alive():
25
+ return
26
+ if hasattr(thread, "_terminated"):
27
+ return
28
+ thread._terminated = True
29
+ tid = getattr(thread, "_thread_id", None)
30
+ if tid is None:
31
+ for k, v in threading._active.items():
32
+ if v is thread:
33
+ tid = k
34
+ if tid is None:
35
+ # This should never happen
36
+ return
37
+ logger.debug(f"Terminating thread: {tid}")
38
+ res = ctypes.pythonapi.PyThreadState_SetAsyncExc(
39
+ ctypes.c_long(tid), ctypes.py_object(Exception)
40
+ )
41
+ if res == 0:
42
+ # This should never happen
43
+ return
44
+ elif res != 1:
45
+ # Revert
46
+ logger.debug(f"Termination failed for thread {tid}")
47
+ ctypes.pythonapi.PyThreadState_SetAsyncExc(ctypes.c_long(tid), None)
48
+
49
+
50
+ class Job:
51
+ def __init__(self, command):
52
+ self.command = command
53
+ job_type = command.get("type")
54
+ self.type = job_type
55
+ self.run_id = command.get("run_id")
56
+ self.config = command.get("args")
57
+
58
+ def __repr__(self):
59
+ if self.type == "run":
60
+ return f"Job({self.run_id},{self.config})"
61
+ elif self.type == "stop":
62
+ return f"stop({self.run_id})"
63
+ else:
64
+ return "exit"
65
+
66
+
67
+ class RunStatus:
68
+ QUEUED = "QUEUED"
69
+ RUNNING = "RUNNING"
70
+ STOPPED = "STOPPED"
71
+ ERRORED = "ERRORED"
72
+ DONE = "DONE"
73
+
74
+
75
+ class Agent:
76
+ FLAPPING_MAX_SECONDS = 60
77
+ FLAPPING_MAX_FAILURES = 3
78
+ MAX_INITIAL_FAILURES = 5
79
+
80
+ def __init__(
81
+ self, sweep_id=None, project=None, entity=None, function=None, count=None
82
+ ):
83
+ self._sweep_path = sweep_id
84
+ self._sweep_id = None
85
+ self._project = project
86
+ self._entity = entity
87
+ self._function = function
88
+ self._count = count
89
+ # glob_config = os.path.expanduser('~/.config/wandb/settings')
90
+ # loc_config = 'wandb/settings'
91
+ # files = (glob_config, loc_config)
92
+ self._api = InternalApi()
93
+ self._agent_id = None
94
+ self._max_initial_failures = wandb.env.get_agent_max_initial_failures(
95
+ self.MAX_INITIAL_FAILURES
96
+ )
97
+ # if the directory to log to is not set, set it
98
+ if os.environ.get(wandb.env.DIR) is None:
99
+ os.environ[wandb.env.DIR] = os.path.abspath(os.getcwd())
100
+
101
+ def _init(self):
102
+ # These are not in constructor so that Agent instance can be rerun
103
+ self._run_threads = {}
104
+ self._run_status = {}
105
+ self._queue = queue.Queue()
106
+ self._exit_flag = False
107
+ self._exceptions = {}
108
+ self._start_time = time.time()
109
+
110
+ def _register(self):
111
+ logger.debug("Agent._register()")
112
+ agent = self._api.register_agent(socket.gethostname(), sweep_id=self._sweep_id)
113
+ self._agent_id = agent["id"]
114
+ logger.debug(f"agent_id = {self._agent_id}")
115
+
116
+ def _setup(self):
117
+ logger.debug("Agent._setup()")
118
+ self._init()
119
+ parts = dict(entity=self._entity, project=self._project, name=self._sweep_path)
120
+ err = sweep_utils.parse_sweep_id(parts)
121
+ if err:
122
+ wandb.termerror(err)
123
+ return
124
+ entity = parts.get("entity") or self._entity
125
+ project = parts.get("project") or self._project
126
+ sweep_id = parts.get("name") or self._sweep_id
127
+ if sweep_id:
128
+ os.environ[wandb.env.SWEEP_ID] = sweep_id
129
+ if entity:
130
+ wandb.env.set_entity(entity)
131
+ if project:
132
+ wandb.env.set_project(project)
133
+ if sweep_id:
134
+ self._sweep_id = sweep_id
135
+ self._register()
136
+
137
+ def _stop_run(self, run_id):
138
+ logger.debug(f"Stopping run {run_id}.")
139
+ self._run_status[run_id] = RunStatus.STOPPED
140
+ thread = self._run_threads.get(run_id)
141
+ if thread:
142
+ _terminate_thread(thread)
143
+
144
+ def _stop_all_runs(self):
145
+ logger.debug("Stopping all runs.")
146
+ for run in list(self._run_threads.keys()):
147
+ self._stop_run(run)
148
+
149
+ def _exit(self):
150
+ self._stop_all_runs()
151
+ self._exit_flag = True
152
+ # _terminate_thread(self._main_thread)
153
+
154
+ def _heartbeat(self):
155
+ while True:
156
+ if self._exit_flag:
157
+ return
158
+ # if not self._main_thread.is_alive():
159
+ # return
160
+ run_status = {
161
+ run: True
162
+ for run, status in self._run_status.items()
163
+ if status in (RunStatus.QUEUED, RunStatus.RUNNING)
164
+ }
165
+ commands = self._api.agent_heartbeat(self._agent_id, {}, run_status)
166
+ if commands:
167
+ job = Job(commands[0])
168
+ logger.debug(f"Job received: {job}")
169
+ if job.type in ["run", "resume"]:
170
+ self._queue.put(job)
171
+ self._run_status[job.run_id] = RunStatus.QUEUED
172
+ elif job.type == "stop":
173
+ self._stop_run(job.run_id)
174
+ elif job.type == "exit":
175
+ self._exit()
176
+ return
177
+ time.sleep(5)
178
+
179
+ def _run_jobs_from_queue(self): # noqa:C901
180
+ global _INSTANCES
181
+ _INSTANCES += 1
182
+ try:
183
+ waiting = False
184
+ count = 0
185
+ while True:
186
+ if self._exit_flag:
187
+ return
188
+ try:
189
+ try:
190
+ job = self._queue.get(timeout=5)
191
+ if self._exit_flag:
192
+ logger.debug("Exiting main loop due to exit flag.")
193
+ wandb.termlog("Sweep Agent: Exiting.")
194
+ return
195
+ except queue.Empty:
196
+ if not waiting:
197
+ logger.debug("Paused.")
198
+ wandb.termlog("Sweep Agent: Waiting for job.")
199
+ waiting = True
200
+ time.sleep(5)
201
+ if self._exit_flag:
202
+ logger.debug("Exiting main loop due to exit flag.")
203
+ wandb.termlog("Sweep Agent: Exiting.")
204
+ return
205
+ continue
206
+ if waiting:
207
+ logger.debug("Resumed.")
208
+ wandb.termlog("Job received.")
209
+ waiting = False
210
+ count += 1
211
+ run_id = job.run_id
212
+ if self._run_status[run_id] == RunStatus.STOPPED:
213
+ continue
214
+ logger.debug(f"Spawning new thread for run {run_id}.")
215
+ thread = threading.Thread(target=self._run_job, args=(job,))
216
+ self._run_threads[run_id] = thread
217
+ thread.start()
218
+ self._run_status[run_id] = RunStatus.RUNNING
219
+ thread.join()
220
+ logger.debug(f"Thread joined for run {run_id}.")
221
+ if self._run_status[run_id] == RunStatus.RUNNING:
222
+ self._run_status[run_id] = RunStatus.DONE
223
+ elif self._run_status[run_id] == RunStatus.ERRORED:
224
+ exc = self._exceptions[run_id]
225
+ exc_type, exc_value, exc_traceback = (
226
+ exc.__class__,
227
+ exc,
228
+ exc.__traceback__,
229
+ )
230
+ exc_traceback_formatted = traceback.format_exception(
231
+ exc_type, exc_value, exc_traceback
232
+ )
233
+ exc_repr = "".join(exc_traceback_formatted)
234
+ logger.error(f"Run {run_id} errored:\n{exc_repr}")
235
+ wandb.termerror(f"Run {run_id} errored:\n{exc_repr}")
236
+ if os.getenv(wandb.env.AGENT_DISABLE_FLAPPING) == "true":
237
+ self._exit_flag = True
238
+ return
239
+ elif (
240
+ time.time() - self._start_time < self.FLAPPING_MAX_SECONDS
241
+ ) and (len(self._exceptions) >= self.FLAPPING_MAX_FAILURES):
242
+ msg = "Detected {} failed runs in the first {} seconds, killing sweep.".format(
243
+ self.FLAPPING_MAX_FAILURES, self.FLAPPING_MAX_SECONDS
244
+ )
245
+ logger.error(msg)
246
+ wandb.termerror(msg)
247
+ wandb.termlog(
248
+ "To disable this check set WANDB_AGENT_DISABLE_FLAPPING=true"
249
+ )
250
+ self._exit_flag = True
251
+ return
252
+ if (
253
+ self._max_initial_failures < len(self._exceptions)
254
+ and len(self._exceptions) >= count
255
+ ):
256
+ msg = "Detected {} failed runs in a row at start, killing sweep.".format(
257
+ self._max_initial_failures
258
+ )
259
+ logger.error(msg)
260
+ wandb.termerror(msg)
261
+ wandb.termlog(
262
+ "To change this value set WANDB_AGENT_MAX_INITIAL_FAILURES=val"
263
+ )
264
+ self._exit_flag = True
265
+ return
266
+ if self._count and self._count == count:
267
+ logger.debug("Exiting main loop because max count reached.")
268
+ self._exit_flag = True
269
+ return
270
+ except KeyboardInterrupt:
271
+ logger.debug("Ctrl + C detected. Stopping sweep.")
272
+ wandb.termlog("Ctrl + C detected. Stopping sweep.")
273
+ self._exit()
274
+ return
275
+ except Exception as e:
276
+ if self._exit_flag:
277
+ logger.debug("Exiting main loop due to exit flag.")
278
+ wandb.termlog("Sweep Agent: Killed.")
279
+ return
280
+ else:
281
+ raise e
282
+ finally:
283
+ _INSTANCES -= 1
284
+
285
+ def _run_job(self, job):
286
+ try:
287
+ run_id = job.run_id
288
+
289
+ config_file = os.path.join(
290
+ "wandb", "sweep-" + self._sweep_id, "config-" + run_id + ".yaml"
291
+ )
292
+ os.environ[wandb.env.RUN_ID] = run_id
293
+ base_dir = os.environ.get(wandb.env.DIR, "")
294
+ sweep_param_path = os.path.join(base_dir, config_file)
295
+ os.environ[wandb.env.SWEEP_PARAM_PATH] = sweep_param_path
296
+ wandb.wandb_lib.config_util.save_config_file_from_dict(
297
+ sweep_param_path, job.config
298
+ )
299
+ os.environ[wandb.env.SWEEP_ID] = self._sweep_id
300
+ wandb.teardown()
301
+
302
+ wandb.termlog(f"Agent Starting Run: {run_id} with config:")
303
+ for k, v in job.config.items():
304
+ wandb.termlog("\t{}: {}".format(k, v["value"]))
305
+
306
+ self._function()
307
+ wandb.finish()
308
+ except KeyboardInterrupt as ki:
309
+ raise ki
310
+ except Exception as e:
311
+ wandb.finish(exit_code=1)
312
+ if self._run_status[run_id] == RunStatus.RUNNING:
313
+ self._run_status[run_id] = RunStatus.ERRORED
314
+ self._exceptions[run_id] = e
315
+ finally:
316
+ # clean up the environment changes made
317
+ os.environ.pop(wandb.env.RUN_ID, None)
318
+ os.environ.pop(wandb.env.SWEEP_ID, None)
319
+ os.environ.pop(wandb.env.SWEEP_PARAM_PATH, None)
320
+
321
+ def run(self):
322
+ logger.info(
323
+ "Starting sweep agent: entity={}, project={}, count={}".format(
324
+ self._entity, self._project, self._count
325
+ )
326
+ )
327
+ self._setup()
328
+ # self._main_thread = threading.Thread(target=self._run_jobs_from_queue)
329
+ self._heartbeat_thread = threading.Thread(target=self._heartbeat)
330
+ self._heartbeat_thread.daemon = True
331
+ # self._main_thread.start()
332
+ self._heartbeat_thread.start()
333
+ # self._main_thread.join()
334
+ self._run_jobs_from_queue()
335
+
336
+
337
+ def pyagent(sweep_id, function, entity=None, project=None, count=None):
338
+ """Generic agent entrypoint, used for CLI or jupyter.
339
+
340
+ Args:
341
+ sweep_id (dict): Sweep ID generated by CLI or sweep API
342
+ function (func, optional): A function to call instead of the "program"
343
+ entity (str, optional): W&B Entity
344
+ project (str, optional): W&B Project
345
+ count (int, optional): the number of trials to run.
346
+ """
347
+ if not callable(function):
348
+ raise Exception("function parameter must be callable!")
349
+ agent = Agent(
350
+ sweep_id,
351
+ function=function,
352
+ entity=entity,
353
+ project=project,
354
+ count=count,
355
+ )
356
+ agent.run()
357
+
358
+
359
+ _INSTANCES = 0
360
+
361
+
362
+ def is_running():
363
+ return bool(_INSTANCES)
infer_4_33_0/lib/python3.10/site-packages/wandb/docker/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (9.1 kB). View file
 
infer_4_33_0/lib/python3.10/site-packages/wandb/docker/wandb-entrypoint.sh ADDED
@@ -0,0 +1,33 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/bin/sh
2
+ set -e
3
+
4
+ wandb="\x1b[34m\x1b[1mwandb\x1b[0m"
5
+ /bin/echo -e "${wandb}: Checking image for required packages."
6
+
7
+ if ! [ -x "$(command -v python)" ]; then
8
+ /bin/echo -e "${wandb}: python not installed, can't use wandb with this image."
9
+ exit 1
10
+ fi
11
+
12
+ if ! [ -x "$(command -v wandb)" ]; then
13
+ /bin/echo -e "${wandb}: wandb not installed, installing."
14
+ pip install wandb --upgrade
15
+ else
16
+ ver=$(wandb --version)
17
+ /bin/echo -e "${wandb}: Found $ver"
18
+ fi
19
+
20
+ if [ "$WANDB_ENSURE_JUPYTER" = "1" ]; then
21
+ if ! [ -x "$(command -v jupyter-lab)" ]; then
22
+ /bin/echo -e "${wandb}: jupyter not installed, installing."
23
+ pip install jupyterlab
24
+ /bin/echo -e "${wandb}: starting jupyter, you can access it at: http://127.0.0.1:8888"
25
+ fi
26
+ fi
27
+
28
+ if ! [ -z "$WANDB_COMMAND" ]; then
29
+ /bin/echo $WANDB_COMMAND >> ~/.bash_history
30
+ /bin/echo -e "${wandb}: Command added to history, press up arrow to access it."
31
+ /bin/echo -e "${wandb}: $WANDB_COMMAND"
32
+ fi
33
+ exec "$@"
infer_4_33_0/lib/python3.10/site-packages/wandb/docker/www_authenticate.py ADDED
@@ -0,0 +1,94 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Taken from: https://github.com/alexsdutton/www-authenticate
2
+ import re
3
+ from collections import OrderedDict
4
+ from typing import Any, Optional
5
+
6
+ _tokens = (
7
+ ("token", re.compile(r"""^([!#$%&'*+\-.^_`|~\w/]+(?:={1,2}$)?)""")),
8
+ ("token", re.compile(r'''^"((?:[^"\\]|\\\\|\\")+)"''')),
9
+ (None, re.compile(r"^\s+")),
10
+ ("equals", re.compile(r"^(=)")),
11
+ ("comma", re.compile(r"^(,)")),
12
+ )
13
+
14
+
15
+ def _casefold(value: str) -> str:
16
+ try:
17
+ return value.casefold()
18
+ except AttributeError:
19
+ return value.lower()
20
+
21
+
22
+ class CaseFoldedOrderedDict(OrderedDict):
23
+ def __getitem__(self, key: str) -> Any:
24
+ return super().__getitem__(_casefold(key))
25
+
26
+ def __setitem__(self, key: str, value: Any) -> None:
27
+ super().__setitem__(_casefold(key), value)
28
+
29
+ def __contains__(self, key: object) -> bool:
30
+ return super().__contains__(_casefold(key)) # type: ignore
31
+
32
+ def get(self, key: str, default: Optional[Any] = None) -> Any:
33
+ return super().get(_casefold(key), default)
34
+
35
+ def pop(self, key: str, default: Optional[Any] = None) -> Any:
36
+ return super().pop(_casefold(key), default)
37
+
38
+
39
+ def _group_pairs(tokens: list) -> None:
40
+ i = 0
41
+ while i < len(tokens) - 2:
42
+ if (
43
+ tokens[i][0] == "token"
44
+ and tokens[i + 1][0] == "equals"
45
+ and tokens[i + 2][0] == "token"
46
+ ):
47
+ tokens[i : i + 3] = [("pair", (tokens[i][1], tokens[i + 2][1]))]
48
+ i += 1
49
+
50
+
51
+ def _group_challenges(tokens: list) -> list:
52
+ challenges = []
53
+ while tokens:
54
+ j = 1
55
+ if len(tokens) == 1:
56
+ pass
57
+ elif tokens[1][0] == "comma":
58
+ pass
59
+ elif tokens[1][0] == "token":
60
+ j = 2
61
+ else:
62
+ while j < len(tokens) and tokens[j][0] == "pair":
63
+ j += 2
64
+ j -= 1
65
+ challenges.append((tokens[0][1], tokens[1:j]))
66
+ tokens[: j + 1] = []
67
+ return challenges
68
+
69
+
70
+ def parse(value: str) -> CaseFoldedOrderedDict:
71
+ tokens = []
72
+ while value:
73
+ for token_name, pattern in _tokens:
74
+ match = pattern.match(value)
75
+ if match:
76
+ value = value[match.end() :]
77
+ if token_name:
78
+ tokens.append((token_name, match.group(1)))
79
+ break
80
+ else:
81
+ raise ValueError("Failed to parse value")
82
+ _group_pairs(tokens)
83
+
84
+ challenges = CaseFoldedOrderedDict()
85
+ for name, tokens in _group_challenges(tokens): # noqa: B020
86
+ args, kwargs = [], {}
87
+ for token_name, value in tokens:
88
+ if token_name == "token":
89
+ args.append(value)
90
+ elif token_name == "pair":
91
+ kwargs[value[0]] = value[1]
92
+ challenges[name] = (args and args[0]) or kwargs or None
93
+
94
+ return challenges
infer_4_33_0/lib/python3.10/site-packages/wandb/sdk/__init__.py ADDED
@@ -0,0 +1,37 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """W&B SDK module."""
2
+
3
+ __all__ = (
4
+ "Config",
5
+ "Settings",
6
+ "Summary",
7
+ "Artifact",
8
+ "AlertLevel",
9
+ "init",
10
+ "setup",
11
+ "_attach",
12
+ "_sync",
13
+ "login",
14
+ "require",
15
+ "finish",
16
+ "teardown",
17
+ "_watch",
18
+ "_unwatch",
19
+ "sweep",
20
+ "controller",
21
+ "helper",
22
+ )
23
+
24
+ from . import wandb_helper as helper
25
+ from .artifacts.artifact import Artifact
26
+ from .wandb_alerts import AlertLevel
27
+ from .wandb_config import Config
28
+ from .wandb_init import _attach, init
29
+ from .wandb_login import login
30
+ from .wandb_require import require
31
+ from .wandb_run import finish
32
+ from .wandb_settings import Settings
33
+ from .wandb_setup import setup, teardown
34
+ from .wandb_summary import Summary
35
+ from .wandb_sweep import controller, sweep
36
+ from .wandb_sync import _sync
37
+ from .wandb_watch import _unwatch, _watch
infer_4_33_0/lib/python3.10/site-packages/wandb/sdk/wandb_config.py ADDED
@@ -0,0 +1,322 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """config."""
2
+
3
+ import logging
4
+ from typing import Optional
5
+
6
+ import wandb
7
+ from wandb.util import (
8
+ _is_artifact_representation,
9
+ check_dict_contains_nested_artifact,
10
+ json_friendly_val,
11
+ )
12
+
13
+ from . import wandb_helper
14
+ from .lib import config_util
15
+
16
+ logger = logging.getLogger("wandb")
17
+
18
+
19
+ # TODO(jhr): consider a callback for persisting changes?
20
+ # if this is done right we might make sure this is pickle-able
21
+ # we might be able to do this on other objects like Run?
22
class Config:
    """Config object.

    Config objects are intended to hold all of the hyperparameters associated with
    a wandb run and are saved with the run object when `wandb.init` is called.

    We recommend setting `wandb.config` once at the top of your training experiment or
    setting the config as a parameter to init, ie. `wandb.init(config=my_config_dict)`

    You can create a file called `config-defaults.yaml`, and it will automatically be
    loaded into `wandb.config`. See https://docs.wandb.com/guides/track/config#file-based-configs.

    You can also load a config YAML file with your custom name and pass the filename
    into `wandb.init(config="special_config.yaml")`.
    See https://docs.wandb.com/guides/track/config#file-based-configs.

    Examples:
        Basic usage
        ```
        wandb.config.epochs = 4
        wandb.init()
        for x in range(wandb.config.epochs):
            # train
        ```

        Using wandb.init to set config
        ```
        wandb.init(config={"epochs": 4, "batch_size": 32})
        for x in range(wandb.config.epochs):
            # train
        ```

        Nested configs
        ```
        wandb.config['train']['epochs'] = 4
        wandb.init()
        for x in range(wandb.config['train']['epochs']):
            # train
        ```

        Using absl flags
        ```
        flags.DEFINE_string("model", None, "model to run")  # name, default, help
        wandb.config.update(flags.FLAGS)  # adds all absl flags to config
        ```

        Argparse flags
        ```python
        wandb.init()
        wandb.config.epochs = 4

        parser = argparse.ArgumentParser()
        parser.add_argument(
            "-b",
            "--batch-size",
            type=int,
            default=8,
            metavar="N",
            help="input batch size for training (default: 8)",
        )
        args = parser.parse_args()
        wandb.config.update(args)
        ```

        Using TensorFlow flags (deprecated in tensorflow v2)
        ```python
        flags = tf.app.flags
        flags.DEFINE_string("data_dir", "/tmp/data")
        flags.DEFINE_integer("batch_size", 128, "Batch size.")
        wandb.config.update(flags.FLAGS)  # adds all of the tensorflow flags to config
        ```
    """

    def __init__(self):
        # Use object.__setattr__ for internal state because this class
        # aliases __setattr__ to __setitem__ below, which would route
        # normal attribute writes into the config dict itself.
        object.__setattr__(self, "_items", dict())
        object.__setattr__(self, "_locked", dict())
        object.__setattr__(self, "_users", dict())
        object.__setattr__(self, "_users_inv", dict())
        object.__setattr__(self, "_users_cnt", 0)
        object.__setattr__(self, "_callback", None)
        object.__setattr__(self, "_settings", None)
        object.__setattr__(self, "_artifact_callback", None)

        self._load_defaults()

    def _set_callback(self, cb):
        """Register a callback invoked whenever config values change."""
        object.__setattr__(self, "_callback", cb)

    def _set_artifact_callback(self, cb):
        """Register the callback used to resolve artifact values placed in the config."""
        object.__setattr__(self, "_artifact_callback", cb)

    def _set_settings(self, settings):
        """Attach run settings (used e.g. to relax value-change checks in Jupyter)."""
        object.__setattr__(self, "_settings", settings)

    def __repr__(self):
        return str(dict(self))

    def keys(self):
        # Keys starting with "_" are internal and hidden from the public view.
        return [k for k in self._items.keys() if not k.startswith("_")]

    def _as_dict(self):
        # Internal view: unlike dict(self)/keys(), includes "_"-prefixed keys.
        return self._items

    def as_dict(self):
        # TODO: add telemetry, deprecate, then remove
        return dict(self)

    def __getitem__(self, key):
        return self._items[key]

    def __iter__(self):
        return iter(self._items)

    def _check_locked(self, key, ignore_locked=False) -> bool:
        """Return True if `key` is locked, warning about the ignored update unless asked not to."""
        locked = self._locked.get(key)
        if locked is not None:
            locked_user = self._users_inv[locked]
            if not ignore_locked:
                wandb.termwarn(
                    f"Config item '{key}' was locked by '{locked_user}' (ignored update)."
                )
            return True
        return False

    def __setitem__(self, key, val):
        # Silently drop writes to locked keys (a warning is emitted above).
        if self._check_locked(key):
            return
        with wandb.sdk.lib.telemetry.context() as tel:
            tel.feature.set_config_item = True
        self._raise_value_error_on_nested_artifact(val, nested=True)
        key, val = self._sanitize(key, val)
        self._items[key] = val
        logger.info("config set %s = %s - %s", key, val, self._callback)
        if self._callback:
            self._callback(key=key, val=val)

    def items(self):
        # Public items: "_"-prefixed internal keys are filtered out.
        return [(k, v) for k, v in self._items.items() if not k.startswith("_")]

    # Attribute writes behave exactly like item writes (wandb.config.x = 1).
    __setattr__ = __setitem__

    def __getattr__(self, key):
        # Fall back to item access so wandb.config.x mirrors wandb.config["x"].
        try:
            return self.__getitem__(key)
        except KeyError as ke:
            raise AttributeError(
                f"{self.__class__!r} object has no attribute {key!r}"
            ) from ke

    def __contains__(self, key):
        return key in self._items

    def _update(self, d, allow_val_change=None, ignore_locked=None):
        """Sanitize and merge `d` into the config, skipping locked keys.

        Returns the sanitized dict that was actually applied.
        """
        parsed_dict = wandb_helper.parse_config(d)
        locked_keys = set()
        for key in list(parsed_dict):
            if self._check_locked(key, ignore_locked=ignore_locked):
                locked_keys.add(key)
        sanitized = self._sanitize_dict(
            parsed_dict, allow_val_change, ignore_keys=locked_keys
        )
        self._items.update(sanitized)
        return sanitized

    def update(self, d, allow_val_change=None):
        """Merge `d` into the config and notify the persistence callback."""
        sanitized = self._update(d, allow_val_change)
        if self._callback:
            self._callback(data=sanitized)

    def get(self, *args):
        return self._items.get(*args)

    def persist(self):
        """Call the callback if it's set."""
        if self._callback:
            self._callback(data=self._as_dict())

    def setdefaults(self, d):
        """Add only the keys of `d` that are not already configured."""
        d = wandb_helper.parse_config(d)
        # strip out keys already configured
        d = {k: v for k, v in d.items() if k not in self._items}
        d = self._sanitize_dict(d)
        self._items.update(d)
        if self._callback:
            self._callback(data=d)

    def _get_user_id(self, user) -> int:
        """Map `user` to a stable small integer id, assigning a new one if needed."""
        if user not in self._users:
            self._users[user] = self._users_cnt
            self._users_inv[self._users_cnt] = user
            object.__setattr__(self, "_users_cnt", self._users_cnt + 1)

        return self._users[user]

    def update_locked(self, d, user=None, _allow_val_change=None):
        """Shallow-update config with `d` and lock config updates on d's keys."""
        num = self._get_user_id(user)

        for k, v in d.items():
            k, v = self._sanitize(k, v, allow_val_change=_allow_val_change)
            self._locked[k] = num
            self._items[k] = v

        if self._callback:
            self._callback(data=d)

    def merge_locked(self, d, user=None, _allow_val_change=None):
        """Recursively merge-update config with `d` and lock config updates on d's keys."""
        num = self._get_user_id(user)
        callback_d = {}

        for k, v in d.items():
            k, v = self._sanitize(k, v, allow_val_change=_allow_val_change)
            self._locked[k] = num

            # Dict-on-dict updates merge recursively; anything else replaces.
            if (
                k in self._items
                and isinstance(self._items[k], dict)
                and isinstance(v, dict)
            ):
                self._items[k] = config_util.merge_dicts(self._items[k], v)
            else:
                self._items[k] = v

            callback_d[k] = self._items[k]

        if self._callback:
            self._callback(data=callback_d)

    def _load_defaults(self):
        """Load config-defaults.yaml from the working directory, if present."""
        conf_dict = config_util.dict_from_config_file("config-defaults.yaml")
        if conf_dict is not None:
            self.update(conf_dict)

    def _sanitize_dict(
        self,
        config_dict,
        allow_val_change=None,
        ignore_keys: Optional[set] = None,
    ):
        """Sanitize every key/value pair of `config_dict`, skipping `ignore_keys`."""
        sanitized = {}
        self._raise_value_error_on_nested_artifact(config_dict)
        for k, v in config_dict.items():
            if ignore_keys and k in ignore_keys:
                continue
            k, v = self._sanitize(k, v, allow_val_change)
            sanitized[k] = v
        return sanitized

    def _sanitize(self, key, val, allow_val_change=None):
        """Normalize a key/value pair and enforce value-change rules.

        Raises:
            ValueError: if `val` is a WBValue (unsupported in config).
            config_util.ConfigError: when changing an existing value without
                `allow_val_change`.
        """
        # TODO: enable WBValues in the config in the future
        # refuse all WBValues which is all Media and Histograms
        if isinstance(val, wandb.sdk.data_types.base_types.wb_value.WBValue):
            raise ValueError("WBValue objects cannot be added to the run config")
        # Let jupyter change config freely by default
        if self._settings and self._settings._jupyter and allow_val_change is None:
            allow_val_change = True
        # We always normalize keys by stripping '-'
        key = key.strip("-")
        if _is_artifact_representation(val):
            val = self._artifact_callback(key, val)
        # if the user inserts an artifact into the config
        if not isinstance(val, wandb.Artifact):
            val = json_friendly_val(val)
        if not allow_val_change:
            if key in self._items and val != self._items[key]:
                raise config_util.ConfigError(
                    f'Attempted to change value of key "{key}" '
                    f"from {self._items[key]} to {val}\n"
                    "If you really want to do this, pass"
                    " allow_val_change=True to config.update()"
                )
        return key, val

    def _raise_value_error_on_nested_artifact(self, v, nested=False):
        # we can't swap nested artifacts because their root key can be locked by other values
        # best if we don't allow nested artifacts until we can lock nested keys in the config
        if isinstance(v, dict) and check_dict_contains_nested_artifact(v, nested):
            raise ValueError(
                "Instances of wandb.Artifact can only be top level keys in wandb.config"
            )
303
+
304
+
305
class ConfigStatic:
    """Read-only snapshot of a config mapping.

    Copies the given config into the instance `__dict__` once, then rejects
    every later attribute or item assignment.
    """

    def __init__(self, config):
        # Populate via object.__setattr__ to bypass the read-only
        # __setattr__ defined below.
        object.__setattr__(self, "__dict__", dict(config))

    def __getitem__(self, key):
        return self.__dict__[key]

    def keys(self):
        return self.__dict__.keys()

    def __setattr__(self, name, value):
        raise AttributeError("Error: wandb.run.config_static is a readonly object")

    def __setitem__(self, key, val):
        raise AttributeError("Error: wandb.run.config_static is a readonly object")

    def __str__(self):
        return str(self.__dict__)
infer_4_33_0/lib/python3.10/site-packages/wandb/sdk/wandb_init.py ADDED
@@ -0,0 +1,1304 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """Defines wandb.init() and associated classes and methods.
2
+
3
+ `wandb.init()` indicates the beginning of a new run. In an ML training pipeline,
4
+ you could add `wandb.init()` to the beginning of your training script as well as
5
+ your evaluation script, and each step would be tracked as a run in W&B.
6
+
7
+ For more on using `wandb.init()`, including code snippets, check out our
8
+ [guide and FAQs](https://docs.wandb.ai/guides/track/launch).
9
+ """
10
+
11
+ from __future__ import annotations
12
+
13
+ import copy
14
+ import json
15
+ import logging
16
+ import os
17
+ import platform
18
+ import sys
19
+ import tempfile
20
+ import time
21
+ from typing import TYPE_CHECKING, Any, Literal, Sequence
22
+
23
+ if sys.version_info >= (3, 11):
24
+ from typing import Self
25
+ else:
26
+ from typing_extensions import Self
27
+
28
+ import wandb
29
+ import wandb.env
30
+ from wandb import trigger
31
+ from wandb.errors import CommError, Error, UsageError
32
+ from wandb.errors.links import url_registry
33
+ from wandb.errors.util import ProtobufErrorHandler
34
+ from wandb.integration import sagemaker
35
+ from wandb.sdk.lib import runid
36
+ from wandb.sdk.lib.paths import StrPath
37
+ from wandb.util import _is_artifact_representation
38
+
39
+ from . import wandb_login, wandb_setup
40
+ from .backend.backend import Backend
41
+ from .lib import SummaryDisabled, filesystem, module, printer, telemetry
42
+ from .lib.deprecate import Deprecated, deprecate
43
+ from .lib.mailbox import Mailbox, MailboxProgress
44
+ from .wandb_helper import parse_config
45
+ from .wandb_run import Run, TeardownHook, TeardownStage
46
+ from .wandb_settings import Settings
47
+
48
+ if TYPE_CHECKING:
49
+ from wandb.proto import wandb_internal_pb2 as pb
50
+
51
+ logger: wandb_setup.Logger | None = None # logger configured during wandb.init()
52
+
53
+
54
+ def _set_logger(log_object: wandb_setup.Logger | None) -> None:
55
+ """Configure module logger."""
56
+ global logger
57
+ logger = log_object
58
+
59
+
60
+ def _huggingface_version() -> str | None:
61
+ if "transformers" in sys.modules:
62
+ trans = wandb.util.get_module("transformers")
63
+ if hasattr(trans, "__version__"):
64
+ return str(trans.__version__)
65
+ return None
66
+
67
+
68
+ def _maybe_mp_process(backend: Backend) -> bool:
69
+ parent_process = getattr(
70
+ backend._multiprocessing, "parent_process", None
71
+ ) # New in version 3.8.
72
+ if parent_process:
73
+ return parent_process() is not None
74
+ process = backend._multiprocessing.current_process()
75
+ if process.name == "MainProcess":
76
+ return False
77
+ if process.name.startswith("Process-"):
78
+ return True
79
+ return False
80
+
81
+
82
+ def _handle_launch_config(settings: Settings) -> dict[str, Any]:
83
+ launch_run_config: dict[str, Any] = {}
84
+ if not settings.launch:
85
+ return launch_run_config
86
+ if os.environ.get("WANDB_CONFIG") is not None:
87
+ try:
88
+ launch_run_config = json.loads(os.environ.get("WANDB_CONFIG", "{}"))
89
+ except (ValueError, SyntaxError):
90
+ wandb.termwarn("Malformed WANDB_CONFIG, using original config")
91
+ elif settings.launch_config_path and os.path.exists(settings.launch_config_path):
92
+ with open(settings.launch_config_path) as fp:
93
+ launch_config = json.loads(fp.read())
94
+ launch_run_config = launch_config.get("overrides", {}).get("run_config")
95
+ else:
96
+ i = 0
97
+ chunks = []
98
+ while True:
99
+ key = f"WANDB_CONFIG_{i}"
100
+ if key in os.environ:
101
+ chunks.append(os.environ[key])
102
+ i += 1
103
+ else:
104
+ break
105
+ if len(chunks) > 0:
106
+ config_string = "".join(chunks)
107
+ try:
108
+ launch_run_config = json.loads(config_string)
109
+ except (ValueError, SyntaxError):
110
+ wandb.termwarn("Malformed WANDB_CONFIG, using original config")
111
+
112
+ return launch_run_config
113
+
114
+
115
+ class _WandbInit:
116
+ _init_telemetry_obj: telemetry.TelemetryRecord
117
+
118
    def __init__(self) -> None:
        """Initialize per-attempt state for a single wandb.init() call."""
        self.kwargs = None
        # Resolved settings; populated by setup().
        self.settings: Settings | None = None
        self.sweep_config: dict[str, Any] = {}
        self.launch_config: dict[str, Any] = {}
        self.config: dict[str, Any] = {}
        self.run: Run | None = None
        self.backend: Backend | None = None

        self._teardown_hooks: list[TeardownHook] = []
        # Process-level wandb singleton; created on first use.
        self._wl = wandb.setup()
        self.notebook: wandb.jupyter.Notebook | None = None  # type: ignore
        self.printer = printer.new_printer()

        self._init_telemetry_obj = telemetry.TelemetryRecord()

        # Maps deprecated feature name -> user-facing migration hint.
        self.deprecated_features_used: dict[str, str] = dict()
135
+
136
+ def warn_env_vars_change_after_setup(self) -> None:
137
+ """Warn if environment variables change after wandb singleton is initialized.
138
+
139
+ Any settings from environment variables set after the singleton is initialized
140
+ (via login/setup/etc.) will be ignored.
141
+ """
142
+ singleton = wandb_setup.singleton()
143
+ if singleton is None:
144
+ return
145
+
146
+ exclude_env_vars = {"WANDB_SERVICE", "WANDB_KUBEFLOW_URL"}
147
+ # check if environment variables have changed
148
+ singleton_env = {
149
+ k: v
150
+ for k, v in singleton._environ.items()
151
+ if k.startswith("WANDB_") and k not in exclude_env_vars
152
+ }
153
+ os_env = {
154
+ k: v
155
+ for k, v in os.environ.items()
156
+ if k.startswith("WANDB_") and k not in exclude_env_vars
157
+ }
158
+ if set(singleton_env.keys()) != set(os_env.keys()) or set(
159
+ singleton_env.values()
160
+ ) != set(os_env.values()):
161
+ line = (
162
+ "Changes to your `wandb` environment variables will be ignored "
163
+ "because your `wandb` session has already started. "
164
+ "For more information on how to modify your settings with "
165
+ "`wandb.init()` arguments, please refer to "
166
+ f"{self.printer.link(url_registry.url('wandb-init'), 'the W&B docs')}."
167
+ )
168
+ self.printer.display(line, level="warn")
169
+
170
+ def clear_run_path_if_sweep_or_launch(
171
+ self,
172
+ init_settings: Settings,
173
+ ) -> None:
174
+ """Clear project/entity/run_id keys if in a Sweep or a Launch context.
175
+
176
+ Args:
177
+ init_settings: Settings specified in the call to `wandb.init()`.
178
+ """
179
+ when_doing_thing = ""
180
+
181
+ if self._wl.settings.sweep_id:
182
+ when_doing_thing = "when running a sweep"
183
+ elif self._wl.settings.launch:
184
+ when_doing_thing = "when running from a wandb launch context"
185
+
186
+ if not when_doing_thing:
187
+ return
188
+
189
+ def warn(key: str, value: str) -> None:
190
+ self.printer.display(
191
+ f"Ignoring {key} {value!r} {when_doing_thing}.",
192
+ level="warn",
193
+ )
194
+
195
+ if init_settings.project is not None:
196
+ warn("project", init_settings.project)
197
+ init_settings.project = None
198
+ if init_settings.entity is not None:
199
+ warn("entity", init_settings.entity)
200
+ init_settings.entity = None
201
+ if init_settings.run_id is not None:
202
+ warn("run_id", init_settings.run_id)
203
+ init_settings.run_id = None
204
+
205
    def setup(  # noqa: C901
        self,
        init_settings: Settings,
        config: dict | str | None = None,
        config_exclude_keys: list[str] | None = None,
        config_include_keys: list[str] | None = None,
        allow_val_change: bool | None = None,
        monitor_gym: bool | None = None,
    ) -> None:
        """Complete setup for `wandb.init()`.

        This includes parsing all arguments, applying them with settings and enabling logging.
        """
        self.warn_env_vars_change_after_setup()

        _set_logger(self._wl._get_logger())
        assert logger

        self.clear_run_path_if_sweep_or_launch(init_settings)

        # Inherit global settings.
        settings = self._wl.settings.model_copy()

        # Apply settings from wandb.init() call.
        settings.update_from_settings(init_settings)

        # Infer the run ID from SageMaker.
        if not settings.sagemaker_disable and sagemaker.is_using_sagemaker():
            if sagemaker.set_run_id(settings):
                logger.info("set run ID and group based on SageMaker")
            with telemetry.context(obj=self._init_telemetry_obj) as tel:
                tel.feature.sagemaker = True

        # Record which init()-level knobs the user actually set.
        with telemetry.context(obj=self._init_telemetry_obj) as tel:
            if config is not None:
                tel.feature.set_init_config = True
            if settings.run_name is not None:
                tel.feature.set_init_name = True
            if settings.run_id is not None:
                tel.feature.set_init_id = True
            if settings.run_tags is not None:
                tel.feature.set_init_tags = True

        # TODO: remove this once officially deprecated
        if config_exclude_keys:
            self.deprecated_features_used["config_exclude_keys"] = (
                "Use `config=wandb.helper.parse_config(config_object, exclude=('key',))` instead."
            )
        if config_include_keys:
            self.deprecated_features_used["config_include_keys"] = (
                "Use `config=wandb.helper.parse_config(config_object, include=('key',))` instead."
            )
        config = parse_config(
            config or dict(),
            include=config_include_keys,
            exclude=config_exclude_keys,
        )

        # Construct the run's config, separating out artifact-valued entries.
        self.config = dict()
        self.init_artifact_config: dict[str, Any] = dict()

        if not settings.sagemaker_disable and sagemaker.is_using_sagemaker():
            sagemaker_config = sagemaker.parse_sm_config()
            self._split_artifacts_from_config(sagemaker_config, self.config)

            with telemetry.context(obj=self._init_telemetry_obj) as tel:
                tel.feature.sagemaker = True

        if self._wl._config:
            self._split_artifacts_from_config(self._wl._config, self.config)

        if config and isinstance(config, dict):
            self._split_artifacts_from_config(config, self.config)

        self.sweep_config = dict()
        sweep_config = self._wl._sweep_config or dict()
        if sweep_config:
            self._split_artifacts_from_config(sweep_config, self.sweep_config)

        # Optional integrations: gym monitoring and tensorboard syncing.
        if monitor_gym and len(wandb.patched["gym"]) == 0:
            wandb.gym.monitor()  # type: ignore

        if wandb.patched["tensorboard"]:
            with telemetry.context(obj=self._init_telemetry_obj) as tel:
                tel.feature.tensorboard_patch = True

        if settings.sync_tensorboard:
            if len(wandb.patched["tensorboard"]) == 0:
                wandb.tensorboard.patch()  # type: ignore
            with telemetry.context(obj=self._init_telemetry_obj) as tel:
                tel.feature.tensorboard_sync = True

        if not settings._offline and not settings._noop:
            wandb_login._login(
                anonymous=settings.anonymous,
                force=settings.force,
                _disable_warning=True,
                _silent=settings.quiet or settings.silent,
                _entity=settings.entity,
            )

        # apply updated global state after login was handled
        login_settings = {
            k: v
            for k, v in {
                "anonymous": self._wl.settings.anonymous,
                "api_key": self._wl.settings.api_key,
                "base_url": self._wl.settings.base_url,
                "force": self._wl.settings.force,
                "login_timeout": self._wl.settings.login_timeout,
            }.items()
            if v is not None
        }
        if login_settings:
            settings.update_from_dict(login_settings)

        # handle custom resume logic
        settings.handle_resume_logic()

        # get status of code saving before applying user settings
        save_code_pre_user_settings = settings.save_code
        if not settings._offline and not settings._noop:
            user_settings = self._wl._load_user_settings()
            if user_settings is not None:
                settings.update_from_dict(user_settings)

        # ensure that user settings don't set saving to true
        # if user explicitly set these to false in UI
        if save_code_pre_user_settings is False:
            settings.save_code = False

        # TODO: remove this once we refactor the client. This is a temporary
        # fix to make sure that we use the same project name for wandb-core.
        # The reason this is not going through the settings object is to
        # avoid failure cases in other parts of the code that will be
        # removed with the switch to wandb-core.
        if settings.project is None:
            settings.project = wandb.util.auto_project_name(settings.program)

        settings.x_start_time = time.time()

        if not settings._noop:
            self._log_setup(settings)

            if settings._jupyter:
                self._jupyter_setup(settings)

        launch_config = _handle_launch_config(settings)
        if launch_config:
            self._split_artifacts_from_config(launch_config, self.launch_config)

        self.settings = settings
357
+
358
+ def teardown(self) -> None:
359
+ # TODO: currently this is only called on failed wandb.init attempts
360
+ # normally this happens on the run object
361
+ assert logger
362
+ logger.info("tearing down wandb.init")
363
+ for hook in self._teardown_hooks:
364
+ hook.call()
365
+
366
+ def _split_artifacts_from_config(
367
+ self, config_source: dict, config_target: dict
368
+ ) -> None:
369
+ for k, v in config_source.items():
370
+ if _is_artifact_representation(v):
371
+ self.init_artifact_config[k] = v
372
+ else:
373
+ config_target.setdefault(k, v)
374
+
375
    def _enable_logging(self, log_fname: str) -> None:
        """Enable logging to the global debug log.

        This adds a run_id to the log, in case of multiple processes on the same machine.
        Currently, there is no way to disable logging after it's enabled.
        """
        handler = logging.FileHandler(log_fname)
        handler.setLevel(logging.INFO)

        formatter = logging.Formatter(
            "%(asctime)s %(levelname)-7s %(threadName)-10s:%(process)d "
            "[%(filename)s:%(funcName)s():%(lineno)s] %(message)s"
        )

        handler.setFormatter(formatter)
        # By this point the module `logger` must be a real logging.Logger
        # (not the early wandb_setup proxy) so a handler can be attached.
        assert isinstance(logger, logging.Logger)
        logger.propagate = False
        logger.addHandler(handler)
        # TODO: make me configurable
        logger.setLevel(logging.DEBUG)
        # Detach and close the handler late in teardown so logging keeps
        # working for the rest of the run.
        self._teardown_hooks.append(
            TeardownHook(
                lambda: (handler.close(), logger.removeHandler(handler)),  # type: ignore
                TeardownStage.LATE,
            )
        )
401
+
402
+ def _safe_symlink(
403
+ self, base: str, target: str, name: str, delete: bool = False
404
+ ) -> None:
405
+ # TODO(jhr): do this with relpaths, but i can't figure it out on no sleep
406
+ if not hasattr(os, "symlink"):
407
+ return
408
+
409
+ pid = os.getpid()
410
+ tmp_name = os.path.join(base, f"{name}.{pid}")
411
+
412
+ if delete:
413
+ try:
414
+ os.remove(os.path.join(base, name))
415
+ except OSError:
416
+ pass
417
+ target = os.path.relpath(target, base)
418
+ try:
419
+ os.symlink(target, tmp_name)
420
+ os.rename(tmp_name, os.path.join(base, name))
421
+ except OSError:
422
+ pass
423
+
424
+ def _pause_backend(self, *args: Any, **kwargs: Any) -> None: # noqa
425
+ if self.backend is None:
426
+ return None
427
+
428
+ # Attempt to save the code on every execution
429
+ if self.notebook.save_ipynb(): # type: ignore
430
+ assert self.run is not None
431
+ res = self.run.log_code(root=None)
432
+ logger.info("saved code: %s", res) # type: ignore
433
+ if self.backend.interface is not None:
434
+ logger.info("pausing backend") # type: ignore
435
+ self.backend.interface.publish_pause()
436
+
437
+ def _resume_backend(self, *args: Any, **kwargs: Any) -> None: # noqa
438
+ if self.backend is not None and self.backend.interface is not None:
439
+ logger.info("resuming backend") # type: ignore
440
+ self.backend.interface.publish_resume()
441
+
442
    def _jupyter_teardown(self) -> None:
        """Teardown hooks and display saving, called with wandb.finish."""
        assert self.notebook
        ipython = self.notebook.shell
        self.notebook.save_history()
        if self.notebook.save_ipynb():
            assert self.run is not None
            res = self.run.log_code(root=None)
            logger.info("saved code and history: %s", res)  # type: ignore
        logger.info("cleaning up jupyter logic")  # type: ignore
        # because of how we bind our methods we manually find them to unregister
        for hook in ipython.events.callbacks["pre_run_cell"]:
            if "_resume_backend" in hook.__name__:
                ipython.events.unregister("pre_run_cell", hook)
        for hook in ipython.events.callbacks["post_run_cell"]:
            if "_pause_backend" in hook.__name__:
                ipython.events.unregister("post_run_cell", hook)
        # Restore the display publisher that _jupyter_setup monkey-patched.
        ipython.display_pub.publish = ipython.display_pub._orig_publish
        del ipython.display_pub._orig_publish
461
+
462
    def _jupyter_setup(self, settings: Settings) -> None:
        """Add hooks, and session history saving."""
        self.notebook = wandb.jupyter.Notebook(settings)  # type: ignore
        ipython = self.notebook.shell

        # Monkey patch ipython publish to capture displayed outputs.
        # The _orig_publish sentinel also guards against double-registration.
        if not hasattr(ipython.display_pub, "_orig_publish"):
            logger.info("configuring jupyter hooks %s", self)  # type: ignore
            ipython.display_pub._orig_publish = ipython.display_pub.publish
            # Registering resume and pause hooks

            ipython.events.register("pre_run_cell", self._resume_backend)
            ipython.events.register("post_run_cell", self._pause_backend)
            self._teardown_hooks.append(
                TeardownHook(self._jupyter_teardown, TeardownStage.EARLY)
            )

        def publish(data, metadata=None, **kwargs) -> None:  # type: ignore
            # Forward to the real publisher, then record the output so it can
            # be replayed alongside the saved notebook session.
            ipython.display_pub._orig_publish(data, metadata=metadata, **kwargs)
            assert self.notebook is not None
            self.notebook.save_display(
                ipython.execution_count, {"data": data, "metadata": metadata}
            )

        ipython.display_pub.publish = publish
487
+
488
    def _log_setup(self, settings: Settings) -> None:
        """Set up logging from settings.

        Creates the run's log/sync/files directories, refreshes the
        "latest" symlinks, and attaches the user debug log.
        """
        filesystem.mkdir_exists_ok(os.path.dirname(settings.log_user))
        filesystem.mkdir_exists_ok(os.path.dirname(settings.log_internal))
        filesystem.mkdir_exists_ok(os.path.dirname(settings.sync_file))
        filesystem.mkdir_exists_ok(settings.files_dir)
        filesystem.mkdir_exists_ok(settings._tmp_code_dir)

        if settings.symlink:
            # Point the "latest" symlinks at this run's sync dir and logs.
            self._safe_symlink(
                os.path.dirname(settings.sync_symlink_latest),
                os.path.dirname(settings.sync_file),
                os.path.basename(settings.sync_symlink_latest),
                delete=True,
            )
            self._safe_symlink(
                os.path.dirname(settings.log_symlink_user),
                settings.log_user,
                os.path.basename(settings.log_symlink_user),
                delete=True,
            )
            self._safe_symlink(
                os.path.dirname(settings.log_symlink_internal),
                settings.log_internal,
                os.path.basename(settings.log_symlink_internal),
                delete=True,
            )

        # Swap the early setup logger for the real "wandb" logger and attach
        # the file handler for the user debug log.
        _set_logger(logging.getLogger("wandb"))
        self._enable_logging(settings.log_user)

        assert self._wl
        assert logger

        self._wl._early_logger_flush(logger)
        logger.info(f"Logging user logs to {settings.log_user}")
        logger.info(f"Logging internal logs to {settings.log_internal}")
525
+
526
def _make_run_disabled(self) -> Run:
    """Returns a Run-like object where all methods are no-ops.

    This method is used when wandb.init(mode="disabled") is called or WANDB_MODE=disabled
    is set. It creates a Run object that mimics the behavior of a normal Run but doesn't
    communicate with the W&B servers.

    The returned Run object has all expected attributes and methods, but they are
    no-op versions that don't perform any actual logging or communication.
    """
    run_id = runid.generate_id()
    # A real Run instance, but configured with mode="disabled" and dummy
    # project/entity so nothing is synced anywhere.
    drun = Run(
        settings=Settings(
            mode="disabled",
            x_files_dir=tempfile.gettempdir(),
            run_id=run_id,
            run_tags=tuple(),
            run_notes=None,
            run_group=None,
            run_name=f"dummy-{run_id}",
            project="dummy",
            entity="dummy",
        )
    )
    # config, summary, and metadata objects
    drun._config = wandb.sdk.wandb_config.Config()
    drun._config.update(self.sweep_config)
    drun._config.update(self.config)
    drun.summary = SummaryDisabled()  # type: ignore
    drun._Run__metadata = wandb.sdk.wandb_metadata.Metadata()

    # methods
    # `log` still updates the local summary so user code that reads
    # run.summary after logging keeps working.
    drun.log = lambda data, *_, **__: drun.summary.update(data)  # type: ignore
    drun.finish = lambda *_, **__: module.unset_globals()  # type: ignore
    drun.join = drun.finish  # type: ignore
    drun.define_metric = lambda *_, **__: wandb.sdk.wandb_metric.Metric("dummy")  # type: ignore
    drun.save = lambda *_, **__: False  # type: ignore
    # Everything else becomes a no-op that returns None.
    for symbol in (
        "alert",
        "finish_artifact",
        "get_project_url",
        "get_sweep_url",
        "get_url",
        "link_artifact",
        "link_model",
        "use_artifact",
        "log_code",
        "log_model",
        "use_model",
        "mark_preempting",
        "restore",
        "status",
        "watch",
        "unwatch",
        "upsert_artifact",
        "_finish",
    ):
        setattr(drun, symbol, lambda *_, **__: None)  # type: ignore

    class _ChainableNoOp:
        """An object that allows chaining arbitrary attributes and method calls."""

        def __getattr__(self, _: str) -> Self:
            return self

        def __call__(self, *_: Any, **__: Any) -> Self:
            return self

    class _ChainableNoOpField:
        # This is used to chain arbitrary attributes and method calls.
        # For example, `run.log_artifact().state` will work in disabled mode.
        # NOTE(review): this is assigned as an *instance* attribute below, so
        # the descriptor __set__/__get__ methods are not triggered by Python's
        # attribute protocol (descriptors only fire on class attributes);
        # chaining works through __call__ — confirm this is intentional.
        def __init__(self) -> None:
            self._value = None

        def __set__(self, instance: Any, value: Any) -> None:
            self._value = value

        def __get__(self, instance: Any, owner: type) -> Any:
            return _ChainableNoOp() if (self._value is None) else self._value

        def __call__(self, *args: Any, **kwargs: Any) -> _ChainableNoOp:
            return _ChainableNoOp()

    drun.log_artifact = _ChainableNoOpField()
    # attributes
    drun._start_time = time.time()
    drun._starting_step = 0
    drun._step = 0
    drun._attach_id = None
    drun._backend = None

    # set the disabled run as the global run
    module.set_global(
        run=drun,
        config=drun.config,
        log=drun.log,
        summary=drun.summary,
        save=drun.save,
        use_artifact=drun.use_artifact,
        log_artifact=drun.log_artifact,
        define_metric=drun.define_metric,
        alert=drun.alert,
        watch=drun.watch,
        unwatch=drun.unwatch,
    )
    return drun
632
+
633
+ def _on_progress_init(self, handle: MailboxProgress) -> None:
634
+ line = "Waiting for wandb.init()...\r"
635
+ percent_done = handle.percent_done
636
+ self.printer.progress_update(line, percent_done=percent_done)
637
+
638
def init(self) -> Run:  # noqa: C901
    """Create the Run, launch/connect the backend, and return the live run.

    Orchestrates the full wandb.init() flow: handles disabled/reinit/offline
    modes, starts (or connects to) the wandb service and backend, populates
    telemetry, delivers the run to the backend with a timeout, and finally
    registers the run as the global active run.

    Raises:
        RuntimeError: If the module logger was never initialized.
        CommError: If run initialization times out.
        Error: If the backend reports an error or returns a malformed result.
    """
    if logger is None:
        raise RuntimeError("Logger not initialized")
    logger.info("calling init triggers")
    trigger.call("on_init")

    assert self.settings is not None
    assert self._wl is not None

    logger.info(
        f"wandb.init called with sweep_config: {self.sweep_config}\nconfig: {self.config}"
    )

    # Disabled mode: short-circuit with a no-op Run.
    if self.settings._noop:
        return self._make_run_disabled()
    # Reinit (explicit, or implicit in notebooks unless reinit=False):
    # finish any run(s) already on the global stack before starting anew.
    if (
        self.settings.reinit
        or (self.settings._jupyter and self.settings.reinit is not False)
    ) and len(self._wl._global_run_stack) > 0:
        if len(self._wl._global_run_stack) > 1:
            wandb.termwarn(
                "Launching multiple wandb runs using Python's threading"
                " module is not well-supported."
                " Please use multiprocessing instead."
                " Finishing previous run before initializing another."
            )

        latest_run = self._wl._global_run_stack[-1]
        logger.info(f"found existing run on stack: {latest_run.id}")
        latest_run.finish()
    elif wandb.run is not None and os.getpid() == wandb.run._init_pid:
        # Same process already has an active run: return it instead of
        # creating a new one.
        logger.info("wandb.init() called when a run is still active")
        with telemetry.context() as tel:
            tel.feature.init_return_run = True
        return wandb.run

    logger.info("starting backend")

    if not self.settings.x_disable_service:
        service = self._wl.ensure_service()
        logger.info("sending inform_init request")
        service.inform_init(
            settings=self.settings.to_proto(),
            run_id=self.settings.run_id,  # type: ignore
        )
    else:
        service = None

    mailbox = Mailbox()
    backend = Backend(
        settings=self.settings,
        service=service,
        mailbox=mailbox,
    )
    backend.ensure_launched()
    logger.info("backend started and connected")

    # resuming needs access to the server, check server_status()?
    run = Run(
        config=self.config,
        settings=self.settings,
        sweep_config=self.sweep_config,
        launch_config=self.launch_config,
    )

    # Populate initial telemetry
    with telemetry.context(run=run, obj=self._init_telemetry_obj) as tel:
        tel.cli_version = wandb.__version__
        tel.python_version = platform.python_version()
        tel.platform = f"{platform.system()}-{platform.machine()}".lower()
        hf_version = _huggingface_version()
        if hf_version:
            tel.huggingface_version = hf_version
        if self.settings._jupyter:
            tel.env.jupyter = True
        if self.settings._ipython:
            tel.env.ipython = True
        if self.settings._colab:
            tel.env.colab = True
        if self.settings._kaggle:
            tel.env.kaggle = True
        if self.settings._windows:
            tel.env.windows = True

        if self.settings.launch:
            tel.feature.launch = True

        for module_name in telemetry.list_telemetry_imports(only_imported=True):
            setattr(tel.imports_init, module_name, True)

        # probe the active start method
        active_start_method: str | None = None
        if self.settings.start_method == "thread":
            active_start_method = self.settings.start_method
        else:
            # getattr guard: backend._multiprocessing may not expose
            # get_start_method in every configuration.
            active_start_method = getattr(
                backend._multiprocessing, "get_start_method", lambda: None
            )()

        if active_start_method == "spawn":
            tel.env.start_spawn = True
        elif active_start_method == "fork":
            tel.env.start_fork = True
        elif active_start_method == "forkserver":
            tel.env.start_forkserver = True
        elif active_start_method == "thread":
            tel.env.start_thread = True

        if os.environ.get("PEX"):
            tel.env.pex = True

        if self.settings._aws_lambda:
            tel.env.aws_lambda = True

        if os.environ.get(wandb.env._DISABLE_SERVICE):
            tel.feature.service_disabled = True

        if service:
            tel.feature.service = True
        if self.settings.x_flow_control_disabled:
            tel.feature.flow_control_disabled = True
        if self.settings.x_flow_control_custom:
            tel.feature.flow_control_custom = True
        if not self.settings.x_require_legacy_service:
            tel.feature.core = True
        if self.settings._shared:
            wandb.termwarn(
                "The `_shared` feature is experimental and may change. "
                "Please contact support@wandb.com for guidance and to report any issues."
            )
            tel.feature.shared_mode = True

        tel.env.maybe_mp = _maybe_mp_process(backend)

    if not self.settings.label_disable:
        if self.notebook:
            run._label_probe_notebook(self.notebook)
        else:
            run._label_probe_main()

    for deprecated_feature, msg in self.deprecated_features_used.items():
        warning_message = f"`{deprecated_feature}` is deprecated. {msg}"
        deprecate(
            field_name=getattr(Deprecated, "init__" + deprecated_feature),
            warning_message=warning_message,
            run=run,
        )

    logger.info("updated telemetry")

    run._set_library(self._wl)
    run._set_backend(backend)
    run._set_teardown_hooks(self._teardown_hooks)

    backend._hack_set_run(run)
    assert backend.interface
    mailbox.enable_keepalive()
    backend.interface.publish_header()

    # Using GitRepo() blocks & can be slow, depending on user's current git setup.
    # We don't want to block run initialization/start request, so populate run's git
    # info beforehand.
    if not (self.settings.disable_git or self.settings.x_disable_machine_info):
        run._populate_git_info()

    run_result: pb.RunUpdateResult | None = None

    if self.settings._offline:
        with telemetry.context(run=run) as tel:
            tel.feature.offline = True

        if self.settings.resume:
            wandb.termwarn(
                "`resume` will be ignored since W&B syncing is set to `offline`. "
                f"Starting a new run with run id {run.id}."
            )
    error: wandb.Error | None = None

    timeout = self.settings.init_timeout

    logger.info(f"communicating run to backend with {timeout} second timeout")

    run_init_handle = backend.interface.deliver_run(run)
    result = run_init_handle.wait(
        timeout=timeout,
        on_progress=self._on_progress_init,
        cancel=True,
    )
    if result:
        run_result = result.run_result

    if run_result is None:
        error_message = (
            f"Run initialization has timed out after {timeout} sec. "
            "Please try increasing the timeout with the `init_timeout` setting: "
            "`wandb.init(settings=wandb.Settings(init_timeout=120))`."
        )
        # We're not certain whether the error we encountered is due to an issue
        # with the server (a "CommError") or if it's a problem within the SDK (an "Error").
        # This means that the error could be a result of the server being unresponsive,
        # or it could be because we were unable to communicate with the wandb service.
        error = CommError(error_message)
        run_init_handle._cancel()
    elif run_result.HasField("error"):
        error = ProtobufErrorHandler.to_exception(run_result.error)

    if error is not None:
        logger.error(f"encountered error: {error}")
        if not service:
            # Shutdown the backend and get rid of the logger
            # we don't need to do console cleanup at this point
            backend.cleanup()
            self.teardown()
        raise error

    assert run_result is not None  # for mypy

    if not run_result.HasField("run"):
        raise Error(
            "It appears that something have gone wrong during the program "
            "execution as an unexpected missing field was encountered. "
            "(run_result is missing the 'run' field)"
        )

    if run_result.run.resumed:
        logger.info("run resumed")
        with telemetry.context(run=run) as tel:
            tel.feature.resumed = run_result.run.resumed
    run._set_run_obj(run_result.run)

    logger.info("starting run threads in backend")
    # initiate run (stats and metadata probing)

    if service:
        assert self.settings.run_id
        service.inform_start(
            settings=self.settings.to_proto(),
            run_id=self.settings.run_id,
        )

    assert backend.interface

    run_start_handle = backend.interface.deliver_run_start(run)
    # TODO: add progress to let user know we are doing something
    run_start_result = run_start_handle.wait(timeout=30)
    if run_start_result is None:
        run_start_handle.abandon()

    assert self._wl is not None
    self._wl._global_run_stack.append(run)
    self.run = run

    run._handle_launch_artifact_overrides()
    if (
        self.settings.launch
        and self.settings.launch_config_path
        and os.path.exists(self.settings.launch_config_path)
    ):
        run.save(self.settings.launch_config_path)
    # put artifacts in run config here
    # since doing so earlier will cause an error
    # as the run is not upserted
    for k, v in self.init_artifact_config.items():
        run.config.update({k: v}, allow_val_change=True)
    job_artifact = run._launch_artifact_mapping.get(
        wandb.util.LAUNCH_JOB_ARTIFACT_SLOT_NAME
    )
    if job_artifact:
        run.use_artifact(job_artifact)

    self.backend = backend
    run._on_start()
    logger.info("run started, returning control to user process")
    return run
912
+
913
+
914
def _attach(
    attach_id: str | None = None,
    run_id: str | None = None,
    *,
    run: Run | None = None,
) -> Run | None:
    """Attach to a run currently executing in another process/thread.

    Args:
        attach_id: (str, optional) The id of the run or an attach identifier
            that maps to a run.
        run_id: (str, optional) The id of the run to attach to.
        run: (Run, optional) The run instance to attach

    Raises:
        UsageError: If neither or both of (`attach_id`/`run_id`) and `run`
            are given, if the service cannot resolve the attach id, or if
            attaching times out or the backend reports an error.
    """
    attach_id = attach_id or run_id
    # Exactly one of (attach_id/run_id) or run must be provided (XOR).
    if not ((attach_id is None) ^ (run is None)):
        raise UsageError("Either (`attach_id` or `run_id`) or `run` must be specified")

    # A provided run may carry its own attach id.
    attach_id = attach_id or (run._attach_id if run else None)

    if attach_id is None:
        raise UsageError(
            "Either `attach_id` or `run_id` must be specified or `run` must have `_attach_id`"
        )
    wandb._assert_is_user_process()  # type: ignore

    _wl = wandb.setup()

    _set_logger(_wl._get_logger())
    if logger is None:
        raise UsageError("logger is not initialized")

    service = _wl.ensure_service()

    try:
        attach_settings = service.inform_attach(attach_id=attach_id)
    except Exception as e:
        raise UsageError(f"Unable to attach to run {attach_id}") from e

    # Start from the global settings and overlay what the service reports
    # for the target run.
    settings: Settings = copy.copy(_wl._settings)

    settings.update_from_dict(
        {
            "run_id": attach_id,
            "x_start_time": attach_settings.x_start_time.value,
            "mode": attach_settings.mode.value,
        }
    )

    # TODO: consolidate this codepath with wandb.init()
    mailbox = Mailbox()
    backend = Backend(settings=settings, service=service, mailbox=mailbox)
    backend.ensure_launched()
    logger.info("attach backend started and connected")

    if run is None:
        run = Run(settings=settings)
    else:
        run._init(settings=settings)
    run._set_library(_wl)
    run._set_backend(backend)
    backend._hack_set_run(run)
    assert backend.interface

    mailbox.enable_keepalive()

    attach_handle = backend.interface.deliver_attach(attach_id)
    # TODO: add progress to let user know we are doing something
    attach_result = attach_handle.wait(timeout=30)
    if not attach_result:
        attach_handle.abandon()
        raise UsageError("Timeout attaching to run")
    attach_response = attach_result.response.attach_response
    if attach_response.error and attach_response.error.message:
        raise UsageError(f"Failed to attach to run: {attach_response.error.message}")

    run._set_run_obj(attach_response.run)
    run._on_attach()
    return run
993
+
994
+
995
def init(  # noqa: C901
    entity: str | None = None,
    project: str | None = None,
    dir: StrPath | None = None,
    id: str | None = None,
    name: str | None = None,
    notes: str | None = None,
    tags: Sequence[str] | None = None,
    config: dict[str, Any] | str | None = None,
    config_exclude_keys: list[str] | None = None,
    config_include_keys: list[str] | None = None,
    allow_val_change: bool | None = None,
    group: str | None = None,
    job_type: str | None = None,
    mode: Literal["online", "offline", "disabled"] | None = None,
    force: bool | None = None,
    anonymous: Literal["never", "allow", "must"] | None = None,
    reinit: bool | None = None,
    resume: bool | Literal["allow", "never", "must", "auto"] | None = None,
    resume_from: str | None = None,
    fork_from: str | None = None,
    save_code: bool | None = None,
    tensorboard: bool | None = None,
    sync_tensorboard: bool | None = None,
    monitor_gym: bool | None = None,
    settings: Settings | dict[str, Any] | None = None,
) -> Run:
    r"""Start a new run to track and log to W&B.

    In an ML training pipeline, you could add `wandb.init()` to the beginning of
    your training script as well as your evaluation script, and each piece would
    be tracked as a run in W&B.

    `wandb.init()` spawns a new background process to log data to a run, and it
    also syncs data to https://wandb.ai by default, so you can see your results
    in real-time.

    Call `wandb.init()` to start a run before logging data with `wandb.log()`.
    When you're done logging data, call `wandb.finish()` to end the run. If you
    don't call `wandb.finish()`, the run will end when your script exits.

    For more on using `wandb.init()`, including detailed examples, check out our
    [guide and FAQs](https://docs.wandb.ai/guides/track/launch).

    Examples:
        ### Explicitly set the entity and project and choose a name for the run:

        ```python
        import wandb

        run = wandb.init(
            entity="geoff",
            project="capsules",
            name="experiment-2021-10-31",
        )

        # ... your training code here ...

        run.finish()
        ```

        ### Add metadata about the run using the `config` argument:

        ```python
        import wandb

        config = {"lr": 0.01, "batch_size": 32}
        with wandb.init(config=config) as run:
            run.config.update({"architecture": "resnet", "depth": 34})

            # ... your training code here ...
        ```

        Note that you can use `wandb.init()` as a context manager to automatically
        call `wandb.finish()` at the end of the block.

    Args:
        entity: The username or team name under which the runs will be logged.
            The entity must already exist, so ensure you’ve created your account
            or team in the UI before starting to log runs. If not specified, the
            run will default your default entity. To change the default entity,
            go to [your settings](https://wandb.ai/settings) and update the
            "Default location to create new projects" under "Default team".
        project: The name of the project under which this run will be logged.
            If not specified, we use a heuristic to infer the project name based
            on the system, such as checking the git root or the current program
            file. If we can't infer the project name, the project will default to
            `"uncategorized"`.
        dir: An absolute path to the directory where metadata and downloaded
            files will be stored. When calling `download()` on an artifact, files
            will be saved to this directory. If not specified, this defaults to
            the `./wandb` directory.
        id: A unique identifier for this run, used for resuming. It must be unique
            within the project and cannot be reused once a run is deleted. The
            identifier must not contain any of the following special characters:
            `/ \ # ? % :`. For a short descriptive name, use the `name` field,
            or for saving hyperparameters to compare across runs, use `config`.
        name: A short display name for this run, which appears in the UI to help
            you identify it. By default, we generate a random two-word name
            allowing easy cross-reference runs from table to charts. Keeping these
            run names brief enhances readability in chart legends and tables. For
            saving hyperparameters, we recommend using the `config` field.
        notes: A detailed description of the run, similar to a commit message in
            Git. Use this argument to capture any context or details that may
            help you recall the purpose or setup of this run in the future.
        tags: A list of tags to label this run in the UI. Tags are helpful for
            organizing runs or adding temporary identifiers like "baseline" or
            "production." You can easily add, remove tags, or filter by tags in
            the UI.
            If resuming a run, the tags provided here will replace any existing
            tags. To add tags to a resumed run without overwriting the current
            tags, use `run.tags += ["new_tag"]` after calling `run = wandb.init()`.
        config: Sets `wandb.config`, a dictionary-like object for storing input
            parameters to your run, such as model hyperparameters or data
            preprocessing settings.
            The config appears in the UI in an overview page, allowing you to
            group, filter, and sort runs based on these parameters.
            Keys should not contain periods (`.`), and values should be
            smaller than 10 MB.
            If a dictionary, `argparse.Namespace`, or `absl.flags.FLAGS` is
            provided, the key-value pairs will be loaded directly into
            `wandb.config`.
            If a string is provided, it is interpreted as a path to a YAML file,
            from which configuration values will be loaded into `wandb.config`.
        config_exclude_keys: A list of specific keys to exclude from `wandb.config`.
        config_include_keys: A list of specific keys to include in `wandb.config`.
        allow_val_change: Controls whether config values can be modified after their
            initial set. By default, an exception is raised if a config value is
            overwritten. For tracking variables that change during training, such as
            a learning rate, consider using `wandb.log()` instead. By default, this
            is `False` in scripts and `True` in Notebook environments.
        group: Specify a group name to organize individual runs as part of a larger
            experiment. This is useful for cases like cross-validation or running
            multiple jobs that train and evaluate a model on different test sets.
            Grouping allows you to manage related runs collectively in the UI,
            making it easy to toggle and review results as a unified experiment.
            For more information, refer to our
            [guide to grouping runs](https://docs.wandb.com/guides/runs/grouping).
        job_type: Specify the type of run, especially helpful when organizing runs
            within a group as part of a larger experiment. For example, in a group,
            you might label runs with job types such as "train" and "eval".
            Defining job types enables you to easily filter and group similar runs
            in the UI, facilitating direct comparisons.
        mode: Specifies how run data is managed, with the following options:
            - `"online"` (default): Enables live syncing with W&B when a network
                connection is available, with real-time updates to visualizations.
            - `"offline"`: Suitable for air-gapped or offline environments; data
                is saved locally and can be synced later. Ensure the run folder
                is preserved to enable future syncing.
            - `"disabled"`: Disables all W&B functionality, making the run’s methods
                no-ops. Typically used in testing to bypass W&B operations.
        force: Determines if a W&B login is required to run the script. If `True`,
            the user must be logged in to W&B; otherwise, the script will not
            proceed. If `False` (default), the script can proceed without a login,
            switching to offline mode if the user is not logged in.
        anonymous: Specifies the level of control over anonymous data logging.
            Available options are:
            - `"never"` (default): Requires you to link your W&B account before
                tracking the run. This prevents unintentional creation of anonymous
                runs by ensuring each run is associated with an account.
            - `"allow"`: Enables a logged-in user to track runs with their account,
                but also allows someone running the script without a W&B account
                to view the charts and data in the UI.
            - `"must"`: Forces the run to be logged to an anonymous account, even
                if the user is logged in.
        reinit: Determines if multiple `wandb.init()` calls can start new runs
            within the same process. By default (`False`), if an active run
            exists, calling `wandb.init()` returns the existing run instead of
            creating a new one. When `reinit=True`, the active run is finished
            before a new run is initialized. In notebook environments, runs are
            reinitialized by default unless `reinit` is explicitly set to `False`.
        resume: Controls the behavior when resuming a run with the specified `id`.
            Available options are:
            - `"allow"`: If a run with the specified `id` exists, it will resume
                from the last step; otherwise, a new run will be created.
            - `"never"`: If a run with the specified `id` exists, an error will
                be raised. If no such run is found, a new run will be created.
            - `"must"`: If a run with the specified `id` exists, it will resume
                from the last step. If no run is found, an error will be raised.
            - `"auto"`: Automatically resumes the previous run if it crashed on
                this machine; otherwise, starts a new run.
            - `True`: Deprecated. Use `"auto"` instead.
            - `False`: Deprecated. Use the default behavior (leaving `resume`
                unset) to always start a new run.
            Note: If `resume` is set, `fork_from` and `resume_from` cannot be
            used. When `resume` is unset, the system will always start a new run.
            For more details, see our
            [guide to resuming runs](https://docs.wandb.com/guides/runs/resuming).
        resume_from: Specifies a moment in a previous run to resume a run from,
            using the format `{run_id}?_step={step}`. This allows users to truncate
            the history logged to a run at an intermediate step and resume logging
            from that step. The target run must be in the same project.
            If an `id` argument is also provided, the `resume_from` argument will
            take precedence.
            `resume`, `resume_from` and `fork_from` cannot be used together, only
            one of them can be used at a time.
            Note: This feature is in beta and may change in the future.
        fork_from: Specifies a point in a previous run from which to fork a new
            run, using the format `{id}?_step={step}`. This creates a new run that
            resumes logging from the specified step in the target run’s history.
            The target run must be part of the current project.
            If an `id` argument is also provided, it must be different from the
            `fork_from` argument, an error will be raised if they are the same.
            `resume`, `resume_from` and `fork_from` cannot be used together, only
            one of them can be used at a time.
            Note: This feature is in beta and may change in the future.
        save_code: Enables saving the main script or notebook to W&B, aiding in
            experiment reproducibility and allowing code comparisons across runs in
            the UI. By default, this is disabled, but you can change the default to
            enable on your [settings page](https://wandb.ai/settings).
        tensorboard: Deprecated. Use `sync_tensorboard` instead.
        sync_tensorboard: Enables automatic syncing of W&B logs from TensorBoard
            or TensorBoardX, saving relevant event files for viewing in the
            W&B UI. (Default: `False`)
        monitor_gym: Enables automatic logging of videos of the environment when
            using OpenAI Gym. For additional details, see our
            [guide for gym integration](https://docs.wandb.com/guides/integrations/openai-gym).
        settings: Specifies a dictionary or `wandb.Settings` object with advanced
            settings for the run.

    Returns:
        A `Run` object, which is a handle to the current run. Use this object
        to perform operations like logging data, saving files, and finishing
        the run. See the [Run API](https://docs.wandb.ai/ref/python/run) for
        more details.

    Raises:
        Error: If some unknown or internal error happened during the run
            initialization.
        AuthenticationError: If the user failed to provide valid credentials.
        CommError: If there was a problem communicating with the W&B server.
        UsageError: If the user provided invalid arguments to the function.
        KeyboardInterrupt: If the user interrupts the run initialization process.
    """
    wandb._assert_is_user_process()  # type: ignore

    init_settings = Settings()
    if isinstance(settings, dict):
        init_settings = Settings(**settings)
    elif isinstance(settings, Settings):
        init_settings = settings

    # Explicit function arguments take precedence over settings
    if job_type is not None:
        init_settings.run_job_type = job_type
    if dir is not None:
        init_settings.root_dir = dir  # type: ignore
    if project is not None:
        init_settings.project = project
    if entity is not None:
        init_settings.entity = entity
    if reinit is not None:
        init_settings.reinit = reinit
    if tags is not None:
        init_settings.run_tags = tuple(tags)
    if group is not None:
        init_settings.run_group = group
    if name is not None:
        init_settings.run_name = name
    if notes is not None:
        init_settings.run_notes = notes
    if anonymous is not None:
        init_settings.anonymous = anonymous  # type: ignore
    if mode is not None:
        init_settings.mode = mode  # type: ignore
    if resume is not None:
        init_settings.resume = resume  # type: ignore
    if force is not None:
        init_settings.force = force
    # TODO: deprecate "tensorboard" in favor of "sync_tensorboard"
    if tensorboard is not None:
        init_settings.sync_tensorboard = tensorboard
    if sync_tensorboard is not None:
        init_settings.sync_tensorboard = sync_tensorboard
    if save_code is not None:
        init_settings.save_code = save_code
    if id is not None:
        init_settings.run_id = id
    if fork_from is not None:
        init_settings.fork_from = fork_from  # type: ignore
    if resume_from is not None:
        init_settings.resume_from = resume_from  # type: ignore

    try:
        wi = _WandbInit()
        wi.setup(
            init_settings=init_settings,
            config=config,
            config_exclude_keys=config_exclude_keys,
            config_include_keys=config_include_keys,
            allow_val_change=allow_val_change,
            monitor_gym=monitor_gym,
        )
        return wi.init()

    except KeyboardInterrupt as e:
        if logger is not None:
            logger.warning("interrupted", exc_info=e)

        raise

    except Exception as e:
        if logger is not None:
            logger.exception("error in wandb.init()", exc_info=e)

        # Need to build delay into this sentry capture because our exit hooks
        # mess with sentry's ability to send out errors before the program ends.
        wandb._sentry.reraise(e)
        raise AssertionError()  # should never get here
infer_4_33_0/lib/python3.10/site-packages/wandb/sdk/wandb_settings.py ADDED
@@ -0,0 +1,1278 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from __future__ import annotations
2
+
3
+ import configparser
4
+ import json
5
+ import logging
6
+ import multiprocessing
7
+ import os
8
+ import pathlib
9
+ import platform
10
+ import re
11
+ import shutil
12
+ import socket
13
+ import sys
14
+ import tempfile
15
+ from datetime import datetime
16
+ from typing import Any, Literal, Sequence
17
+ from urllib.parse import quote, unquote, urlencode
18
+
19
+ if sys.version_info >= (3, 11):
20
+ from typing import Self
21
+ else:
22
+ from typing_extensions import Self
23
+
24
+ from google.protobuf.wrappers_pb2 import BoolValue, DoubleValue, Int32Value, StringValue
25
+ from pydantic import (
26
+ BaseModel,
27
+ ConfigDict,
28
+ Field,
29
+ computed_field,
30
+ field_validator,
31
+ model_validator,
32
+ )
33
+ from pydantic_core import SchemaValidator, core_schema
34
+
35
+ import wandb
36
+ from wandb import env, termwarn, util
37
+ from wandb.apis.internal import Api
38
+ from wandb.errors import UsageError
39
+ from wandb.proto import wandb_settings_pb2
40
+
41
+ from .lib import apikey, credentials, filesystem, ipython
42
+ from .lib.gitlib import GitRepo
43
+ from .lib.run_moment import RunMoment
44
+ from .lib.runid import generate_id
45
+
46
+
47
+ def _path_convert(*args: str) -> str:
48
+ """Join path and apply os.path.expanduser to it."""
49
+ return os.path.expanduser(os.path.join(*args))
50
+
51
+
52
+ class Settings(BaseModel, validate_assignment=True):
53
+ """Settings for the W&B SDK."""
54
+
55
+ # Pydantic configuration.
56
+ model_config = ConfigDict(
57
+ extra="forbid", # throw an error if extra fields are provided
58
+ # validate_default=True, # validate default values
59
+ )
60
+
61
+ # Public settings.
62
+
63
+ # Flag to allow table artifacts to be synced in offline mode.
64
+ #
65
+ # To revert to the old behavior, set this to False.
66
+ allow_offline_artifacts: bool = True
67
+ allow_val_change: bool = False
68
+ # Controls anonymous data logging. Possible values are:
69
+ # - "never": requires you to link your W&B account before
70
+ # tracking the run, so you don't accidentally create an anonymous
71
+ # run.
72
+ # - "allow": lets a logged-in user track runs with their account, but
73
+ # lets someone who is running the script without a W&B account see
74
+ # the charts in the UI.
75
+ # - "must": sends the run to an anonymous account instead of to a
76
+ # signed-up user account.
77
+ anonymous: Literal["allow", "must", "never"] | None = None
78
+ # The W&B API key.
79
+ api_key: str | None = None
80
+ azure_account_url_to_access_key: dict[str, str] | None = None
81
+ # The URL of the W&B backend, used for GraphQL and filestream operations.
82
+ base_url: str = "https://api.wandb.ai"
83
+ code_dir: str | None = None
84
+ config_paths: Sequence[str] | None = None
85
+ # The type of console capture to be applied. Possible values are:
86
+ # "auto" - Automatically selects the console capture method based on the
87
+ # system environment and settings.
88
+ #
89
+ # "off" - Disables console capture.
90
+ #
91
+ # "redirect" - Redirects low-level file descriptors for capturing output.
92
+ #
93
+ # "wrap" - Overrides the write methods of sys.stdout/sys.stderr. Will be
94
+ # mapped to either "wrap_raw" or "wrap_emu" based on the state of the system.
95
+ #
96
+ # "wrap_raw" - Same as "wrap" but captures raw output directly instead of
97
+ # through an emulator.
98
+ #
99
+ # "wrap_emu" - Same as "wrap" but captures output through an emulator.
100
+ console: Literal["auto", "off", "wrap", "redirect", "wrap_raw", "wrap_emu"] = Field(
101
+ default="auto",
102
+ validate_default=True,
103
+ )
104
+ # Whether to produce multipart console log files.
105
+ console_multipart: bool = False
106
+ # Path to file for writing temporary access tokens.
107
+ credentials_file: str = Field(
108
+ default_factory=lambda: str(credentials.DEFAULT_WANDB_CREDENTIALS_FILE)
109
+ )
110
+ # Whether to disable code saving.
111
+ disable_code: bool = False
112
+ # Whether to disable capturing the git state.
113
+ disable_git: bool = False
114
+ # Whether to disable the creation of a job artifact for W&B Launch.
115
+ disable_job_creation: bool = True
116
+ # The Docker image used to execute the script.
117
+ docker: str | None = None
118
+ # The email address of the user.
119
+ email: str | None = None
120
+ # The W&B entity, like a user or a team.
121
+ entity: str | None = None
122
+ force: bool = False
123
+ fork_from: RunMoment | None = None
124
+ git_commit: str | None = None
125
+ git_remote: str = "origin"
126
+ git_remote_url: str | None = None
127
+ git_root: str | None = None
128
+ heartbeat_seconds: int = 30
129
+ host: str | None = None
130
+ # The custom proxy servers for http requests to W&B.
131
+ http_proxy: str | None = None
132
+ # The custom proxy servers for https requests to W&B.
133
+ https_proxy: str | None = None
134
+ # Path to file containing an identity token (JWT) for authentication.
135
+ identity_token_file: str | None = None
136
+ # Unix glob patterns relative to `files_dir` to not upload.
137
+ ignore_globs: tuple[str, ...] = ()
138
+ # Time in seconds to wait for the wandb.init call to complete before timing out.
139
+ init_timeout: float = 90.0
140
+ # Whether to insecurely disable SSL verification.
141
+ insecure_disable_ssl: bool = False
142
+ job_name: str | None = None
143
+ job_source: Literal["repo", "artifact", "image"] | None = None
144
+ label_disable: bool = False
145
+ launch: bool = False
146
+ launch_config_path: str | None = None
147
+ login_timeout: float | None = None
148
+ mode: Literal["online", "offline", "dryrun", "disabled", "run", "shared"] = Field(
149
+ default="online",
150
+ validate_default=True,
151
+ )
152
+ notebook_name: str | None = None
153
+ # Path to the script that created the run, if available.
154
+ program: str | None = None
155
+ # The absolute path from the root repository directory to the script that
156
+ # created the run.
157
+ #
158
+ # Root repository directory is defined as the directory containing the
159
+ # .git directory, if it exists. Otherwise, it's the current working directory.
160
+ program_abspath: str | None = None
161
+ program_relpath: str | None = None
162
+ # The W&B project ID.
163
+ project: str | None = None
164
+ quiet: bool = False
165
+ reinit: bool = False
166
+ relogin: bool = False
167
+ # Specifies the resume behavior for the run. The available options are:
168
+ #
169
+ # "must": Resumes from an existing run with the same ID. If no such run exists,
170
+ # it will result in failure.
171
+ #
172
+ # "allow": Attempts to resume from an existing run with the same ID. If none is
173
+ # found, a new run will be created.
174
+ #
175
+ # "never": Always starts a new run. If a run with the same ID already exists,
176
+ # it will result in failure.
177
+ #
178
+ # "auto": Automatically resumes from the most recent failed run on the same
179
+ # machine.
180
+ resume: Literal["allow", "must", "never", "auto"] | None = None
181
+ resume_from: RunMoment | None = None
182
+ # Indication from the server about the state of the run.
183
+ #
184
+ # This is different from resume, a user provided flag.
185
+ resumed: bool = False
186
+ # The root directory that will be used to derive other paths,
187
+ # such as the wandb directory, and the run directory.
188
+ root_dir: str = Field(default_factory=lambda: os.path.abspath(os.getcwd()))
189
+ run_group: str | None = None
190
+ # The ID of the run.
191
+ run_id: str | None = None
192
+ run_job_type: str | None = None
193
+ run_name: str | None = None
194
+ run_notes: str | None = None
195
+ run_tags: tuple[str, ...] | None = None
196
+ sagemaker_disable: bool = False
197
+ save_code: bool | None = None
198
+ settings_system: str = Field(
199
+ default_factory=lambda: _path_convert(
200
+ os.path.join("~", ".config", "wandb", "settings")
201
+ )
202
+ )
203
+ show_colors: bool | None = None
204
+ show_emoji: bool | None = None
205
+ show_errors: bool = True
206
+ show_info: bool = True
207
+ show_warnings: bool = True
208
+ silent: bool = False
209
+ start_method: str | None = None
210
+ strict: bool | None = None
211
+ summary_timeout: int = 60
212
+ summary_warnings: int = 5 # TODO: kill this with fire
213
+ sweep_id: str | None = None
214
+ sweep_param_path: str | None = None
215
+ symlink: bool = Field(
216
+ default_factory=lambda: False if platform.system() == "Windows" else True
217
+ )
218
+ sync_tensorboard: bool | None = None
219
+ table_raise_on_max_row_limit_exceeded: bool = False
220
+ username: str | None = None
221
+
222
+ # Internal settings.
223
+ #
224
+ # These are typically not meant to be set by the user and should not be considered
225
+ # a part of the public API as they may change or be removed in future versions.
226
+
227
+ # CLI mode.
228
+ x_cli_only_mode: bool = False
229
+ # Disable the collection of system metadata.
230
+ x_disable_meta: bool = False
231
+ # Pre-wandb-core, this setting was used to disable the (now legacy) wandb service.
232
+ #
233
+ # TODO: this is deprecated and will be removed in future versions.
234
+ x_disable_service: bool = False
235
+ # Do not use setproctitle for internal process in legacy service.
236
+ x_disable_setproctitle: bool = False
237
+ # Disable system metrics collection.
238
+ x_disable_stats: bool = False
239
+ # Disable check for latest version of wandb, from PyPI.
240
+ x_disable_update_check: bool = False
241
+ # Prevent early viewer query.
242
+ x_disable_viewer: bool = False
243
+ # Disable automatic machine info collection.
244
+ x_disable_machine_info: bool = False
245
+ # Python executable
246
+ x_executable: str | None = None
247
+ # Additional headers to add to all outgoing HTTP requests.
248
+ x_extra_http_headers: dict[str, str] | None = None
249
+ # An approximate maximum request size for the filestream API.
250
+ #
251
+ # This applies when wandb-core is enabled. Its purpose is to prevent
252
+ # HTTP requests from failing due to containing too much data.
253
+ #
254
+ # This number is approximate: requests will be slightly larger.
255
+ x_file_stream_max_bytes: int | None = None
256
+ # Max line length for filestream jsonl files.
257
+ x_file_stream_max_line_bytes: int | None = None
258
+ # Interval in seconds between filestream transmissions.
259
+ x_file_stream_transmit_interval: float | None = None
260
+ # Filestream retry client configuration.
261
+ # max number of retries
262
+ x_file_stream_retry_max: int | None = None
263
+ # min wait time between retries
264
+ x_file_stream_retry_wait_min_seconds: float | None = None
265
+ # max wait time between retries
266
+ x_file_stream_retry_wait_max_seconds: float | None = None
267
+ # timeout for individual HTTP requests
268
+ x_file_stream_timeout_seconds: float | None = None
269
+ # file transfer retry client configuration
270
+ x_file_transfer_retry_max: int | None = None
271
+ x_file_transfer_retry_wait_min_seconds: float | None = None
272
+ x_file_transfer_retry_wait_max_seconds: float | None = None
273
+ x_file_transfer_timeout_seconds: float | None = None
274
+ # override setting for the computed files_dir
275
+ x_files_dir: str | None = None
276
+ # flow control configuration for file stream
277
+ x_flow_control_custom: bool | None = None
278
+ x_flow_control_disabled: bool | None = None
279
+ # graphql retry client configuration
280
+ x_graphql_retry_max: int | None = None
281
+ x_graphql_retry_wait_min_seconds: float | None = None
282
+ x_graphql_retry_wait_max_seconds: float | None = None
283
+ x_graphql_timeout_seconds: float | None = None
284
+ x_internal_check_process: float = 8.0
285
+ x_jupyter_name: str | None = None
286
+ x_jupyter_path: str | None = None
287
+ x_jupyter_root: str | None = None
288
+ # Label to assign to system metrics and console logs collected for the run
289
+ # to group by on the frontend. Can be used to distinguish data from different
290
+ # nodes in a distributed training job.
291
+ x_label: str | None = None
292
+ x_live_policy_rate_limit: int | None = None
293
+ x_live_policy_wait_time: int | None = None
294
+ x_log_level: int = logging.INFO
295
+ x_network_buffer: int | None = None
296
+ # Determines whether to save internal wandb files and metadata.
297
+ # In a distributed setting, this is useful for avoiding file overwrites on secondary nodes
298
+ # when only system metrics and logs are needed, as the primary node handles the main logging.
299
+ x_primary_node: bool = True
300
+ # [deprecated, use http(s)_proxy] custom proxy servers for the requests to W&B
301
+ # [scheme -> url].
302
+ x_proxies: dict[str, str] | None = None
303
+ x_runqueue_item_id: str | None = None
304
+ x_require_legacy_service: bool = False
305
+ x_save_requirements: bool = True
306
+ x_service_transport: str | None = None
307
+ x_service_wait: float = 30.0
308
+ x_show_operation_stats: bool = True
309
+ # The start time of the run in seconds since the Unix epoch.
310
+ x_start_time: float | None = None
311
+ # PID of the process that started the wandb-core process to collect system stats for.
312
+ x_stats_pid: int = os.getpid()
313
+ # Sampling interval for the system monitor in seconds.
314
+ x_stats_sampling_interval: float = Field(default=10.0)
315
+ # Path to store the default config file for the neuron-monitor tool
316
+ # used to monitor AWS Trainium devices.
317
+ x_stats_neuron_monitor_config_path: str | None = None
318
+ # Open metrics endpoint names and urls.
319
+ x_stats_open_metrics_endpoints: dict[str, str] | None = None
320
+ # Filter to apply to metrics collected from OpenMetrics endpoints.
321
+ # Supports two formats:
322
+ # - {"metric regex pattern, including endpoint name as prefix": {"label": "label value regex pattern"}}
323
+ # - ("metric regex pattern 1", "metric regex pattern 2", ...)
324
+ x_stats_open_metrics_filters: dict[str, dict[str, str]] | Sequence[str] | None = (
325
+ None
326
+ )
327
+ # HTTP headers to add to OpenMetrics requests.
328
+ x_stats_open_metrics_http_headers: dict[str, str] | None = None
329
+ # System paths to monitor for disk usage.
330
+ x_stats_disk_paths: Sequence[str] | None = Field(
331
+ default_factory=lambda: ("/", "/System/Volumes/Data")
332
+ if platform.system() == "Darwin"
333
+ else ("/",)
334
+ )
335
+ # GPU device indices to monitor (e.g. [0, 1, 2]).
336
+ # If not set, captures metrics for all GPUs.
337
+ # Assumes 0-based indexing matching CUDA/ROCm device enumeration.
338
+ x_stats_gpu_device_ids: Sequence[int] | None = None
339
+ # Number of system metric samples to buffer in memory in the wandb-core process.
340
+ # Can be accessed via run._system_metrics.
341
+ x_stats_buffer_size: int = 0
342
+ # Flag to indicate whether we are syncing a run from the transaction log.
343
+ x_sync: bool = False
344
+ # Controls whether this process can update the run's final state (finished/failed) on the server.
345
+ # Set to False in distributed training when only the main process should determine the final state.
346
+ x_update_finish_state: bool = True
347
+
348
+ # Model validator to catch legacy settings.
349
+ @model_validator(mode="before")
350
+ @classmethod
351
+ def catch_private_settings(cls, values):
352
+ """Check if a private field is provided and assign to the corresponding public one.
353
+
354
+ This is a compatibility layer to handle previous versions of the settings.
355
+ """
356
+ new_values = {}
357
+ for key in values:
358
+ # Internal settings are prefixed with "x_" instead of "_"
359
+ # as Pydantic does not allow "_" in field names.
360
+ if key.startswith("_"):
361
+ new_values["x" + key] = values[key]
362
+ else:
363
+ new_values[key] = values[key]
364
+ return new_values
365
+
366
+ @model_validator(mode="after")
367
+ def validate_mutual_exclusion_of_branching_args(self) -> Self:
368
+ if (
369
+ sum(
370
+ o is not None
371
+ for o in [
372
+ self.fork_from,
373
+ self.resume,
374
+ self.resume_from,
375
+ ]
376
+ )
377
+ > 1
378
+ ):
379
+ raise ValueError(
380
+ "`fork_from`, `resume`, or `resume_from` are mutually exclusive. "
381
+ "Please specify only one of them."
382
+ )
383
+ return self
384
+
385
+ # Field validators.
386
+
387
+ @field_validator("x_disable_service", mode="after")
388
+ @classmethod
389
+ def validate_disable_service(cls, value):
390
+ if value:
391
+ termwarn(
392
+ "Disabling the wandb service is deprecated as of version 0.18.0 "
393
+ "and will be removed in future versions. ",
394
+ repeat=False,
395
+ )
396
+ return value
397
+
398
+ @field_validator("api_key", mode="after")
399
+ @classmethod
400
+ def validate_api_key(cls, value):
401
+ if value is not None and (len(value) > len(value.strip())):
402
+ raise UsageError("API key cannot start or end with whitespace")
403
+ return value
404
+
405
+ @field_validator("base_url", mode="after")
406
+ @classmethod
407
+ def validate_base_url(cls, value):
408
+ cls.validate_url(value)
409
+ # wandb.ai-specific checks
410
+ if re.match(r".*wandb\.ai[^\.]*$", value) and "api." not in value:
411
+ # user might guess app.wandb.ai or wandb.ai is the default cloud server
412
+ raise ValueError(
413
+ f"{value} is not a valid server address, did you mean https://api.wandb.ai?"
414
+ )
415
+ elif re.match(r".*wandb\.ai[^\.]*$", value) and not value.startswith("https"):
416
+ raise ValueError("http is not secure, please use https://api.wandb.ai")
417
+ return value.rstrip("/")
418
+
419
+ @field_validator("code_dir", mode="before")
420
+ @classmethod
421
+ def validate_code_dir(cls, value):
422
+ # TODO: add native support for pathlib.Path
423
+ if isinstance(value, pathlib.Path):
424
+ return str(value)
425
+ return value
426
+
427
+ @field_validator("console", mode="after")
428
+ @classmethod
429
+ def validate_console(cls, value, info):
430
+ if value != "auto":
431
+ return value
432
+ if (
433
+ ipython.in_jupyter()
434
+ or (info.data.get("start_method") == "thread")
435
+ or not info.data.get("disable_service")
436
+ or platform.system() == "Windows"
437
+ ):
438
+ value = "wrap"
439
+ else:
440
+ value = "redirect"
441
+ return value
442
+
443
+ @field_validator("x_executable", mode="before")
444
+ @classmethod
445
+ def validate_x_executable(cls, value):
446
+ # TODO: add native support for pathlib.Path
447
+ if isinstance(value, pathlib.Path):
448
+ return str(value)
449
+ return value
450
+
451
+ @field_validator("x_file_stream_max_line_bytes", mode="after")
452
+ @classmethod
453
+ def validate_file_stream_max_line_bytes(cls, value):
454
+ if value is not None and value < 1:
455
+ raise ValueError("File stream max line bytes must be greater than 0")
456
+ return value
457
+
458
+ @field_validator("x_files_dir", mode="before")
459
+ @classmethod
460
+ def validate_x_files_dir(cls, value):
461
+ # TODO: add native support for pathlib.Path
462
+ if isinstance(value, pathlib.Path):
463
+ return str(value)
464
+ return value
465
+
466
+ @field_validator("fork_from", mode="before")
467
+ @classmethod
468
+ def validate_fork_from(cls, value, info) -> RunMoment | None:
469
+ run_moment = cls._runmoment_preprocessor(value)
470
+ if run_moment and info.data.get("run_id") == run_moment.run:
471
+ raise ValueError(
472
+ "Provided `run_id` is the same as the run to `fork_from`. "
473
+ "Please provide a different `run_id` or remove the `run_id` argument. "
474
+ "If you want to rewind the current run, please use `resume_from` instead."
475
+ )
476
+ return run_moment
477
+
478
+ @field_validator("http_proxy", mode="after")
479
+ @classmethod
480
+ def validate_http_proxy(cls, value):
481
+ if value is None:
482
+ return None
483
+ cls.validate_url(value)
484
+ return value.rstrip("/")
485
+
486
+ @field_validator("https_proxy", mode="after")
487
+ @classmethod
488
+ def validate_https_proxy(cls, value):
489
+ if value is None:
490
+ return None
491
+ cls.validate_url(value)
492
+ return value.rstrip("/")
493
+
494
+ @field_validator("ignore_globs", mode="after")
495
+ @classmethod
496
+ def validate_ignore_globs(cls, value):
497
+ return tuple(value) if not isinstance(value, tuple) else value
498
+
499
+ @field_validator("program", mode="before")
500
+ @classmethod
501
+ def validate_program(cls, value):
502
+ # TODO: add native support for pathlib.Path
503
+ if isinstance(value, pathlib.Path):
504
+ return str(value)
505
+ return value
506
+
507
+ @field_validator("program_abspath", mode="before")
508
+ @classmethod
509
+ def validate_program_abspath(cls, value):
510
+ # TODO: add native support for pathlib.Path
511
+ if isinstance(value, pathlib.Path):
512
+ return str(value)
513
+ return value
514
+
515
+ @field_validator("program_relpath", mode="before")
516
+ @classmethod
517
+ def validate_program_relpath(cls, value):
518
+ # TODO: add native support for pathlib.Path
519
+ if isinstance(value, pathlib.Path):
520
+ return str(value)
521
+ return value
522
+
523
+ @field_validator("project", mode="after")
524
+ @classmethod
525
+ def validate_project(cls, value, info):
526
+ if value is None:
527
+ return None
528
+ invalid_chars_list = list("/\\#?%:")
529
+ if len(value) > 128:
530
+ raise UsageError(f"Invalid project name {value!r}: exceeded 128 characters")
531
+ invalid_chars = {char for char in invalid_chars_list if char in value}
532
+ if invalid_chars:
533
+ raise UsageError(
534
+ f"Invalid project name {value!r}: "
535
+ f"cannot contain characters {','.join(invalid_chars_list)!r}, "
536
+ f"found {','.join(invalid_chars)!r}"
537
+ )
538
+ return value
539
+
540
+ @field_validator("resume", mode="before")
541
+ @classmethod
542
+ def validate_resume(cls, value):
543
+ if value is False:
544
+ return None
545
+ if value is True:
546
+ return "auto"
547
+ return value
548
+
549
+ @field_validator("resume_from", mode="before")
550
+ @classmethod
551
+ def validate_resume_from(cls, value, info) -> RunMoment | None:
552
+ run_moment = cls._runmoment_preprocessor(value)
553
+ if run_moment and info.data.get("run_id") != run_moment.run:
554
+ raise ValueError(
555
+ "Both `run_id` and `resume_from` have been specified with different ids."
556
+ )
557
+ return run_moment
558
+
559
+ @field_validator("root_dir", mode="before")
560
+ @classmethod
561
+ def validate_root_dir(cls, value):
562
+ # TODO: add native support for pathlib.Path
563
+ if isinstance(value, pathlib.Path):
564
+ return str(value)
565
+ return value
566
+
567
+ @field_validator("run_id", mode="after")
568
+ @classmethod
569
+ def validate_run_id(cls, value, info):
570
+ if value is None:
571
+ return None
572
+
573
+ if len(value) == 0:
574
+ raise UsageError("Run ID cannot be empty")
575
+ if len(value) > len(value.strip()):
576
+ raise UsageError("Run ID cannot start or end with whitespace")
577
+ if not bool(value.strip()):
578
+ raise UsageError("Run ID cannot contain only whitespace")
579
+ return value
580
+
581
+ @field_validator("settings_system", mode="after")
582
+ @classmethod
583
+ def validate_settings_system(cls, value):
584
+ if isinstance(value, pathlib.Path):
585
+ return str(_path_convert(value))
586
+ return _path_convert(value)
587
+
588
+ @field_validator("x_service_wait", mode="after")
589
+ @classmethod
590
+ def validate_service_wait(cls, value):
591
+ if value < 0:
592
+ raise UsageError("Service wait time cannot be negative")
593
+ return value
594
+
595
+ @field_validator("start_method")
596
+ @classmethod
597
+ def validate_start_method(cls, value):
598
+ if value is None:
599
+ return value
600
+ available_methods = ["thread"]
601
+ if hasattr(multiprocessing, "get_all_start_methods"):
602
+ available_methods += multiprocessing.get_all_start_methods()
603
+ if value not in available_methods:
604
+ raise UsageError(
605
+ f"Settings field `start_method`: {value!r} not in {available_methods}"
606
+ )
607
+ return value
608
+
609
+ @field_validator("x_stats_gpu_device_ids", mode="before")
610
+ @classmethod
611
+ def validate_x_stats_gpu_device_ids(cls, value):
612
+ if isinstance(value, str):
613
+ return json.loads(value)
614
+ return value
615
+
616
+ @field_validator("x_stats_neuron_monitor_config_path", mode="before")
617
+ @classmethod
618
+ def validate_x_stats_neuron_monitor_config_path(cls, value):
619
+ # TODO: add native support for pathlib.Path
620
+ if isinstance(value, pathlib.Path):
621
+ return str(value)
622
+ return value
623
+
624
+ @field_validator("x_stats_open_metrics_endpoints", mode="before")
625
+ @classmethod
626
+ def validate_stats_open_metrics_endpoints(cls, value):
627
+ if isinstance(value, str):
628
+ return json.loads(value)
629
+ return value
630
+
631
+ @field_validator("x_stats_open_metrics_filters", mode="before")
632
+ @classmethod
633
+ def validate_stats_open_metrics_filters(cls, value):
634
+ if isinstance(value, str):
635
+ return json.loads(value)
636
+ return value
637
+
638
+ @field_validator("x_stats_open_metrics_http_headers", mode="before")
639
+ @classmethod
640
+ def validate_stats_open_metrics_http_headers(cls, value):
641
+ if isinstance(value, str):
642
+ return json.loads(value)
643
+ return value
644
+
645
+ @field_validator("x_stats_sampling_interval", mode="after")
646
+ @classmethod
647
+ def validate_stats_sampling_interval(cls, value):
648
+ if value < 0.1:
649
+ raise UsageError("Stats sampling interval cannot be less than 0.1 seconds")
650
+ return value
651
+
652
+ @field_validator("sweep_id", mode="after")
653
+ @classmethod
654
+ def validate_sweep_id(cls, value):
655
+ if value is None:
656
+ return None
657
+ if len(value) == 0:
658
+ raise UsageError("Sweep ID cannot be empty")
659
+ if len(value) > len(value.strip()):
660
+ raise UsageError("Sweep ID cannot start or end with whitespace")
661
+ if not bool(value.strip()):
662
+ raise UsageError("Sweep ID cannot contain only whitespace")
663
+ return value
664
+
665
+ @field_validator("sweep_param_path", mode="before")
666
+ @classmethod
667
+ def validate_sweep_param_path(cls, value):
668
+ # TODO: add native support for pathlib.Path
669
+ if isinstance(value, pathlib.Path):
670
+ return str(value)
671
+ return value
672
+
673
+ # Computed fields.
674
+
675
+ @computed_field # type: ignore[prop-decorator]
676
+ @property
677
+ def _args(self) -> list[str]:
678
+ if not self._jupyter:
679
+ return sys.argv[1:]
680
+ return []
681
+
682
+ @computed_field # type: ignore[prop-decorator]
683
+ @property
684
+ def _aws_lambda(self) -> bool:
685
+ """Check if we are running in a lambda environment."""
686
+ from sentry_sdk.integrations.aws_lambda import ( # type: ignore[import-not-found]
687
+ get_lambda_bootstrap,
688
+ )
689
+
690
+ lambda_bootstrap = get_lambda_bootstrap()
691
+ if not lambda_bootstrap or not hasattr(
692
+ lambda_bootstrap, "handle_event_request"
693
+ ):
694
+ return False
695
+ return True
696
+
697
+ @computed_field # type: ignore[prop-decorator]
698
+ @property
699
+ def _code_path_local(self) -> str | None:
700
+ """The relative path from the current working directory to the code path.
701
+
702
+ For example, if the code path is /home/user/project/example.py, and the
703
+ current working directory is /home/user/project, then the code path local
704
+ is example.py.
705
+
706
+ If couldn't find the relative path, this will be an empty string.
707
+ """
708
+ return self._get_program_relpath(self.program) if self.program else None
709
+
710
+ @computed_field # type: ignore[prop-decorator]
711
+ @property
712
+ def _colab(self) -> bool:
713
+ return "google.colab" in sys.modules
714
+
715
+ @computed_field # type: ignore[prop-decorator]
716
+ @property
717
+ def _ipython(self) -> bool:
718
+ return ipython.in_ipython()
719
+
720
+ @computed_field # type: ignore[prop-decorator]
721
+ @property
722
+ def _jupyter(self) -> bool:
723
+ return ipython.in_jupyter()
724
+
725
+ @computed_field # type: ignore[prop-decorator]
726
+ @property
727
+ def _kaggle(self) -> bool:
728
+ return util._is_likely_kaggle()
729
+
730
+ @computed_field # type: ignore[prop-decorator]
731
+ @property
732
+ def _noop(self) -> bool:
733
+ return self.mode == "disabled"
734
+
735
+ @computed_field # type: ignore[prop-decorator]
736
+ @property
737
+ def _notebook(self) -> bool:
738
+ return self._ipython or self._jupyter or self._colab or self._kaggle
739
+
740
+ @computed_field # type: ignore[prop-decorator]
741
+ @property
742
+ def _offline(self) -> bool:
743
+ return self.mode in ("offline", "dryrun")
744
+
745
+ @computed_field # type: ignore[prop-decorator]
746
+ @property
747
+ def _os(self) -> str:
748
+ """The operating system of the machine running the script."""
749
+ return platform.platform(aliased=True)
750
+
751
+ @computed_field # type: ignore[prop-decorator]
752
+ @property
753
+ def _platform(self) -> str:
754
+ return f"{platform.system()}-{platform.machine()}".lower()
755
+
756
+ @computed_field # type: ignore[prop-decorator]
757
+ @property
758
+ def _python(self) -> str:
759
+ return f"{platform.python_implementation()} {platform.python_version()}"
760
+
761
+ @computed_field # type: ignore[prop-decorator]
762
+ @property
763
+ def _shared(self) -> bool:
764
+ """Whether we are in shared mode.
765
+
766
+ In "shared" mode, multiple processes can write to the same run,
767
+ for example from different machines.
768
+ """
769
+ return self.mode == "shared"
770
+
771
    @computed_field  # type: ignore[prop-decorator]
    @property
    def _start_datetime(self) -> str:
        """Run start time formatted for use in directory names ("" if not yet recorded)."""
        if self.x_start_time is None:
            return ""
        # NOTE(review): fromtimestamp() yields naive local time — presumably
        # intentional so run directories reflect the user's clock; confirm.
        datetime_now = datetime.fromtimestamp(self.x_start_time)
        return datetime_now.strftime("%Y%m%d_%H%M%S")
778
+
779
    @computed_field  # type: ignore[prop-decorator]
    @property
    def _tmp_code_dir(self) -> str:
        """Path to the run-scoped `tmp/code` directory under `wandb_dir`."""
        return _path_convert(
            self.wandb_dir,
            f"{self.run_mode}-{self.timespec}-{self.run_id}",
            "tmp",
            "code",
        )
788
+
789
    @computed_field  # type: ignore[prop-decorator]
    @property
    def _windows(self) -> bool:
        """True when running on Windows."""
        return platform.system() == "Windows"
793
+
794
+ @computed_field # type: ignore[prop-decorator]
795
+ @property
796
+ def colab_url(self) -> str | None:
797
+ """The URL to the Colab notebook, if running in Colab."""
798
+ if not self._colab:
799
+ return None
800
+ if self.x_jupyter_path and self.x_jupyter_path.startswith("fileId="):
801
+ unescaped = unquote(self.x_jupyter_path)
802
+ return "https://colab.research.google.com/notebook#" + unescaped
803
+ return None
804
+
805
+ @computed_field # type: ignore[prop-decorator]
806
+ @property
807
+ def deployment(self) -> Literal["local", "cloud"]:
808
+ return "local" if self.is_local else "cloud"
809
+
810
    @computed_field  # type: ignore[prop-decorator]
    @property
    def files_dir(self) -> str:
        """Absolute path to the local directory where the run's files are stored."""
        # An explicit x_files_dir overrides the default run-scoped location.
        return self.x_files_dir or _path_convert(
            self.wandb_dir,
            f"{self.run_mode}-{self.timespec}-{self.run_id}",
            "files",
        )
819
+
820
    @computed_field  # type: ignore[prop-decorator]
    @property
    def is_local(self) -> bool:
        """True when `base_url` is not the public W&B cloud endpoint."""
        return str(self.base_url) != "https://api.wandb.ai"
824
+
825
    @computed_field  # type: ignore[prop-decorator]
    @property
    def log_dir(self) -> str:
        """The directory for storing log files."""
        return _path_convert(
            self.wandb_dir, f"{self.run_mode}-{self.timespec}-{self.run_id}", "logs"
        )
832
+
833
    @computed_field  # type: ignore[prop-decorator]
    @property
    def log_internal(self) -> str:
        """The path to the file to use for internal logs."""
        return _path_convert(self.log_dir, "debug-internal.log")
838
+
839
    @computed_field  # type: ignore[prop-decorator]
    @property
    def log_symlink_internal(self) -> str:
        """The path to the symlink to the internal log file of the most recent run."""
        return _path_convert(self.wandb_dir, "debug-internal.log")
844
+
845
    @computed_field  # type: ignore[prop-decorator]
    @property
    def log_symlink_user(self) -> str:
        """The path to the symlink to the user-process log file of the most recent run."""
        return _path_convert(self.wandb_dir, "debug.log")
850
+
851
    @computed_field  # type: ignore[prop-decorator]
    @property
    def log_user(self) -> str:
        """The path to the file to use for user-process logs."""
        return _path_convert(self.log_dir, "debug.log")
856
+
857
+ @computed_field # type: ignore[prop-decorator]
858
+ @property
859
+ def project_url(self) -> str:
860
+ """The W&B URL where the project can be viewed."""
861
+ project_url = self._project_url_base()
862
+ if not project_url:
863
+ return ""
864
+
865
+ query = self._get_url_query_string()
866
+
867
+ return f"{project_url}{query}"
868
+
869
    @computed_field  # type: ignore[prop-decorator]
    @property
    def resume_fname(self) -> str:
        """The path to the resume file (`wandb-resume.json` under `wandb_dir`)."""
        return _path_convert(self.wandb_dir, "wandb-resume.json")
874
+
875
+ @computed_field # type: ignore[prop-decorator]
876
+ @property
877
+ def run_mode(self) -> Literal["run", "offline-run"]:
878
+ return "run" if not self._offline else "offline-run"
879
+
880
+ @computed_field # type: ignore[prop-decorator]
881
+ @property
882
+ def run_url(self) -> str:
883
+ """The W&B URL where the run can be viewed."""
884
+ project_url = self._project_url_base()
885
+ if not all([project_url, self.run_id]):
886
+ return ""
887
+
888
+ query = self._get_url_query_string()
889
+ return f"{project_url}/runs/{quote(self.run_id or '')}{query}"
890
+
891
    @computed_field  # type: ignore[prop-decorator]
    @property
    def settings_workspace(self) -> str:
        """The path to the workspace settings file ("settings" under `wandb_dir`)."""
        return _path_convert(self.wandb_dir, "settings")
896
+
897
+ @computed_field # type: ignore[prop-decorator]
898
+ @property
899
+ def sweep_url(self) -> str:
900
+ """The W&B URL where the sweep can be viewed."""
901
+ project_url = self._project_url_base()
902
+ if not all([project_url, self.sweep_id]):
903
+ return ""
904
+
905
+ query = self._get_url_query_string()
906
+ return f"{project_url}/sweeps/{quote(self.sweep_id or '')}{query}"
907
+
908
    @computed_field  # type: ignore[prop-decorator]
    @property
    def sync_dir(self) -> str:
        """The run-scoped directory (`<run_mode>-<timespec>-<run_id>`) under `wandb_dir`."""
        return _path_convert(
            self.wandb_dir, f"{self.run_mode}-{self.timespec}-{self.run_id}"
        )
914
+
915
    @computed_field  # type: ignore[prop-decorator]
    @property
    def sync_file(self) -> str:
        """Path to the append-only binary transaction log file."""
        return _path_convert(self.sync_dir, f"run-{self.run_id}.wandb")
920
+
921
    @computed_field  # type: ignore[prop-decorator]
    @property
    def sync_symlink_latest(self) -> str:
        """Path of the "latest-run" symlink under `wandb_dir`."""
        return _path_convert(self.wandb_dir, "latest-run")
925
+
926
    @computed_field  # type: ignore[prop-decorator]
    @property
    def timespec(self) -> str:
        """Timestamp component of run directory names (alias of `_start_datetime`)."""
        return self._start_datetime
930
+
931
+ @computed_field # type: ignore[prop-decorator]
932
+ @property
933
+ def wandb_dir(self) -> str:
934
+ """Full path to the wandb directory.
935
+
936
+ The setting exposed to users as `dir=` or `WANDB_DIR` is the `root_dir`.
937
+ We add the `__stage_dir__` to it to get the full `wandb_dir`
938
+ """
939
+ root_dir = self.root_dir or ""
940
+
941
+ # We use the hidden version if it already exists, otherwise non-hidden.
942
+ if os.path.exists(os.path.join(root_dir, ".wandb")):
943
+ __stage_dir__ = ".wandb" + os.sep
944
+ else:
945
+ __stage_dir__ = "wandb" + os.sep
946
+
947
+ path = os.path.join(root_dir, __stage_dir__)
948
+ if not os.access(root_dir or ".", os.W_OK):
949
+ termwarn(
950
+ f"Path {path} wasn't writable, using system temp directory.",
951
+ repeat=False,
952
+ )
953
+ path = os.path.join(
954
+ tempfile.gettempdir(), __stage_dir__ or ("wandb" + os.sep)
955
+ )
956
+
957
+ return os.path.expanduser(path)
958
+
959
+ # Methods to collect and update settings from different sources.
960
+ #
961
+ # The Settings class does not track the source of the settings,
962
+ # so it is up to the developer to ensure that the settings are applied
963
+ # in the correct order. Most of the updates are done in
964
+ # wandb/sdk/wandb_setup.py::_WandbSetup._settings_setup.
965
+
966
    def update_from_system_config_file(self):
        """Update settings from the system config file.

        Does nothing when the file path is unset or the file does not exist.
        None values are skipped so existing settings are not cleared.
        """
        if not self.settings_system or not os.path.exists(self.settings_system):
            return
        for key, value in self._load_config_file(self.settings_system).items():
            if value is not None:
                setattr(self, key, value)
973
+
974
    def update_from_workspace_config_file(self):
        """Update settings from the workspace config file.

        Does nothing when the file path is unset or the file does not exist.
        None values are skipped so existing settings are not cleared.
        """
        if not self.settings_workspace or not os.path.exists(self.settings_workspace):
            return
        for key, value in self._load_config_file(self.settings_workspace).items():
            if value is not None:
                setattr(self, key, value)
981
+
982
+ def update_from_env_vars(self, environ: dict[str, Any]):
983
+ """Update settings from environment variables."""
984
+ env_prefix: str = "WANDB_"
985
+ private_env_prefix: str = env_prefix + "_"
986
+ special_env_var_names = {
987
+ "WANDB_DISABLE_SERVICE": "x_disable_service",
988
+ "WANDB_SERVICE_TRANSPORT": "x_service_transport",
989
+ "WANDB_DIR": "root_dir",
990
+ "WANDB_NAME": "run_name",
991
+ "WANDB_NOTES": "run_notes",
992
+ "WANDB_TAGS": "run_tags",
993
+ "WANDB_JOB_TYPE": "run_job_type",
994
+ "WANDB_HTTP_TIMEOUT": "x_graphql_timeout_seconds",
995
+ "WANDB_FILE_PUSHER_TIMEOUT": "x_file_transfer_timeout_seconds",
996
+ "WANDB_USER_EMAIL": "email",
997
+ }
998
+ env = dict()
999
+ for setting, value in environ.items():
1000
+ if not setting.startswith(env_prefix):
1001
+ continue
1002
+
1003
+ if setting in special_env_var_names:
1004
+ key = special_env_var_names[setting]
1005
+ elif setting.startswith(private_env_prefix):
1006
+ key = "x_" + setting[len(private_env_prefix) :].lower()
1007
+ else:
1008
+ # otherwise, strip the prefix and convert to lowercase
1009
+ key = setting[len(env_prefix) :].lower()
1010
+
1011
+ if key in self.__dict__:
1012
+ if key in ("ignore_globs", "run_tags"):
1013
+ value = value.split(",")
1014
+ env[key] = value
1015
+
1016
+ for key, value in env.items():
1017
+ if value is not None:
1018
+ setattr(self, key, value)
1019
+
1020
    def update_from_system_environment(self):
        """Update settings from the system environment.

        Fills in code-saving preference, git usage, notebook metadata,
        hostname, Python executable, docker image, and program path
        information, without overwriting values already set explicitly.
        """
        # For code saving, only allow env var override if value from server is true, or
        # if no preference was specified.
        if (self.save_code is True or self.save_code is None) and (
            os.getenv(env.SAVE_CODE) is not None
            or os.getenv(env.DISABLE_CODE) is not None
        ):
            self.save_code = env.should_save_code()

        self.disable_git = env.disable_git()

        # Attempt to get notebook information if not already set by the user
        if self._jupyter and (self.notebook_name is None or self.notebook_name == ""):
            meta = wandb.jupyter.notebook_metadata(self.silent)  # type: ignore
            self.x_jupyter_path = meta.get("path")
            self.x_jupyter_name = meta.get("name")
            self.x_jupyter_root = meta.get("root")
        elif (
            self._jupyter
            and self.notebook_name is not None
            and os.path.exists(self.notebook_name)
        ):
            # The user supplied an existing notebook file path.
            self.x_jupyter_path = self.notebook_name
            self.x_jupyter_name = self.notebook_name
            self.x_jupyter_root = os.getcwd()
        elif self._jupyter:
            # In a notebook but WANDB_NOTEBOOK_NAME points nowhere.
            wandb.termwarn(
                "WANDB_NOTEBOOK_NAME should be a path to a notebook file, "
                f"couldn't find {self.notebook_name}.",
            )

        # host and username are populated by apply_env_vars if corresponding env
        # vars exist -- but if they don't, we'll fill them in here
        if self.host is None:
            self.host = socket.gethostname()  # type: ignore

        # First non-empty candidate wins; "python3" is the last-resort label.
        _executable = (
            self.x_executable
            or os.environ.get(env._EXECUTABLE)
            or sys.executable
            or shutil.which("python3")
            or "python3"
        )
        self.x_executable = _executable

        if self.docker is None:
            self.docker = env.get_docker(util.image_id_from_k8s())

        # proceed if not in CLI mode
        if self.x_cli_only_mode:
            return

        program = self.program or self._get_program()

        if program is not None:
            repo = GitRepo()
            root = repo.root or os.getcwd()

            self.program_relpath = self.program_relpath or self._get_program_relpath(
                program, root
            )
            program_abspath = os.path.abspath(
                os.path.join(root, os.path.relpath(os.getcwd(), root), program)
            )
            if os.path.exists(program_abspath):
                self.program_abspath = program_abspath
            else:
                # Interactive session or otherwise no resolvable main file.
                program = "<python with no main file>"

        self.program = program
1091
+
1092
+ def update_from_dict(self, settings: dict[str, Any]) -> None:
1093
+ """Update settings from a dictionary."""
1094
+ for key, value in dict(settings).items():
1095
+ if value is not None:
1096
+ setattr(self, key, value)
1097
+
1098
    def update_from_settings(self, settings: Settings) -> None:
        """Update settings from another instance of `Settings`.

        Only fields explicitly set on `settings` (its `model_fields_set`)
        are copied over.
        """
        d = {field: getattr(settings, field) for field in settings.model_fields_set}
        if d:
            self.update_from_dict(d)
1103
+
1104
+ # Helper methods.
1105
+
1106
    def to_proto(self) -> wandb_settings_pb2.Settings:
        """Generate a protobuf representation of the settings.

        Raises:
            TypeError: If a setting value has a type with no proto mapping.
        """
        settings_proto = wandb_settings_pb2.Settings()
        for k, v in self.model_dump(exclude_none=True).items():
            # special case for x_stats_open_metrics_filters:
            # it may be either a sequence or a two-level mapping.
            if k == "x_stats_open_metrics_filters":
                if isinstance(v, (list, set, tuple)):
                    setting = getattr(settings_proto, k)
                    setting.sequence.value.extend(v)
                elif isinstance(v, dict):
                    setting = getattr(settings_proto, k)
                    for key, value in v.items():
                        for kk, vv in value.items():
                            setting.mapping.value[key].value[kk] = vv
                else:
                    raise TypeError(f"Unsupported type {type(v)} for setting {k}")
                continue

            # bool must be tested before int: bool is a subclass of int.
            if isinstance(v, bool):
                getattr(settings_proto, k).CopyFrom(BoolValue(value=v))
            elif isinstance(v, int):
                getattr(settings_proto, k).CopyFrom(Int32Value(value=v))
            elif isinstance(v, float):
                getattr(settings_proto, k).CopyFrom(DoubleValue(value=v))
            elif isinstance(v, str):
                getattr(settings_proto, k).CopyFrom(StringValue(value=v))
            elif isinstance(v, (list, set, tuple)):
                # we only support sequences of strings for now
                sequence = getattr(settings_proto, k)
                sequence.value.extend(v)
            elif isinstance(v, dict):
                mapping = getattr(settings_proto, k)
                for key, value in v.items():
                    # we only support dicts with string values for now
                    mapping.value[key] = value
            elif isinstance(v, RunMoment):
                getattr(settings_proto, k).CopyFrom(
                    wandb_settings_pb2.RunMoment(
                        run=v.run,
                        value=v.value,
                        metric=v.metric,
                    )
                )
            elif v is None:
                # None means that the setting value was not set.
                # NOTE(review): unreachable given exclude_none=True above;
                # kept as defensive code.
                pass
            else:
                raise TypeError(f"Unsupported type {type(v)} for setting {k}")

        return settings_proto
1156
+
1157
    def handle_resume_logic(self):
        """Handle logic for resuming runs.

        With resume="auto", reuses the run_id persisted in the resume file
        (warning if it conflicts with an explicitly set run_id), generates a
        run_id if still unset, and persists it for future auto-resumes.
        """
        # handle auto resume logic
        if self.resume == "auto":
            if os.path.exists(self.resume_fname):
                with open(self.resume_fname) as f:
                    resume_run_id = json.load(f)["run_id"]
                if self.run_id is None:
                    self.run_id = resume_run_id
                elif self.run_id != resume_run_id:
                    # Explicit run_id wins over the persisted one.
                    wandb.termwarn(
                        "Tried to auto resume run with "
                        f"id {resume_run_id} but id {self.run_id} is set.",
                    )
        if self.run_id is None:
            self.run_id = generate_id()

        # persist run_id in case of failure
        if self.resume == "auto" and self.resume_fname is not None:
            filesystem.mkdir_exists_ok(self.wandb_dir)
            with open(self.resume_fname, "w") as f:
                f.write(json.dumps({"run_id": self.run_id}))
1179
+
1180
    @staticmethod
    def validate_url(url: str) -> None:
        """Validate a URL string.

        Raises a pydantic validation error when `url` is not a strict
        http/https URL; returns None on success.
        """
        url_validator = SchemaValidator(
            core_schema.url_schema(
                allowed_schemes=["http", "https"],
                strict=True,
            )
        )
        url_validator.validate_python(url)
1190
+
1191
    def _get_program(self) -> str | None:
        """Get the program that started the current process.

        Returns a script path, a "-m module" spec, a notebook path/name,
        or None when nothing can be determined.
        """
        if not self._jupyter:
            # If not in a notebook, try to get the program from the environment
            # or the __main__ module for scripts run as `python -m ...`.
            program = os.getenv(env.PROGRAM)
            if program is not None:
                return program
            try:
                import __main__

                if __main__.__spec__ is None:
                    return __main__.__file__
                return f"-m {__main__.__spec__.name}"
            except (ImportError, AttributeError):
                # No __main__ module or no usable attributes (e.g. REPL).
                return None
        else:
            # If in a notebook, try to get the program from the notebook metadata.
            if self.notebook_name:
                return self.notebook_name

            if not self.x_jupyter_path:
                return self.program

            if self.x_jupyter_path.startswith("fileId="):
                # Colab-style path: the display name is more useful.
                return self.x_jupyter_name
            else:
                return self.x_jupyter_path
1219
+
1220
+ @staticmethod
1221
+ def _get_program_relpath(program: str, root: str | None = None) -> str | None:
1222
+ """Get the relative path to the program from the root directory."""
1223
+ if not program:
1224
+ return None
1225
+
1226
+ root = root or os.getcwd()
1227
+ if not root:
1228
+ return None
1229
+
1230
+ full_path_to_program = os.path.join(
1231
+ root, os.path.relpath(os.getcwd(), root), program
1232
+ )
1233
+ if os.path.exists(full_path_to_program):
1234
+ relative_path = os.path.relpath(full_path_to_program, start=root)
1235
+ if "../" in relative_path:
1236
+ return None
1237
+ return relative_path
1238
+
1239
+ return None
1240
+
1241
+ @staticmethod
1242
+ def _load_config_file(file_name: str, section: str = "default") -> dict:
1243
+ """Load a config file and return the settings for a given section."""
1244
+ parser = configparser.ConfigParser()
1245
+ parser.add_section(section)
1246
+ parser.read(file_name)
1247
+ config: dict[str, Any] = dict()
1248
+ for k in parser[section]:
1249
+ config[k] = parser[section][k]
1250
+ if k == "ignore_globs":
1251
+ config[k] = config[k].split(",")
1252
+ return config
1253
+
1254
    def _project_url_base(self) -> str:
        """Construct the base URL for the project.

        Returns "" unless both entity and project are set.
        """
        if not all([self.entity, self.project]):
            return ""

        app_url = util.app_url(self.base_url)
        return f"{app_url}/{quote(self.entity or '')}/{quote(self.project or '')}"
1261
+
1262
    def _get_url_query_string(self) -> str:
        """Construct the query string for project, run, and sweep URLs.

        Non-empty (an apiKey query) only for anonymous-mode servers.
        """
        # TODO: remove dependency on Api()
        if Api().settings().get("anonymous") not in ["allow", "must"]:
            return ""

        api_key = apikey.api_key(settings=self)

        return f"?{urlencode({'apiKey': api_key})}"
1271
+
1272
+ @staticmethod
1273
+ def _runmoment_preprocessor(val: RunMoment | str | None) -> RunMoment | None:
1274
+ """Preprocess the setting for forking or resuming a run."""
1275
+ if isinstance(val, RunMoment) or val is None:
1276
+ return val
1277
+ elif isinstance(val, str):
1278
+ return RunMoment.from_uri(val)
infer_4_33_0/lib/python3.10/site-packages/wandb/sdk/wandb_summary.py ADDED
@@ -0,0 +1,150 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import abc
2
+ import typing as t
3
+
4
+ from .interface.summary_record import SummaryItem, SummaryRecord
5
+
6
+
7
+ def _get_dict(d):
8
+ if isinstance(d, dict):
9
+ return d
10
+ # assume argparse Namespace
11
+ return vars(d)
12
+
13
+
14
class SummaryDict(metaclass=abc.ABCMeta):
    """dict-like wrapper for the nested dictionaries in a SummarySubDict.

    Item/attribute writes and deletes are converted into SummaryRecord
    objects and forwarded to the concrete subclass's _update().
    """

    @abc.abstractmethod
    def _as_dict(self):
        # Return the plain-dict view of this node's data.
        raise NotImplementedError

    @abc.abstractmethod
    def _update(self, record: SummaryRecord):
        # Apply or forward a SummaryRecord describing mutations.
        raise NotImplementedError

    def keys(self):
        # "_wandb" is internal bookkeeping and hidden from users.
        return [k for k in self._as_dict().keys() if k != "_wandb"]

    def get(self, key, default=None):
        return self._as_dict().get(key, default)

    def __getitem__(self, key):
        item = self._as_dict()[key]

        if isinstance(item, dict):
            # this nested dict needs to be wrapped:
            # object.__setattr__ is required because __setattr__ below is
            # aliased to __setitem__ and would emit update records.
            wrapped_item = SummarySubDict()
            object.__setattr__(wrapped_item, "_items", item)
            object.__setattr__(wrapped_item, "_parent", self)
            object.__setattr__(wrapped_item, "_parent_key", key)

            return wrapped_item

        # this item isn't a nested dict
        return item

    # Attribute access mirrors item access: summary.loss == summary["loss"].
    __getattr__ = __getitem__

    def __setitem__(self, key, val):
        self.update({key: val})

    __setattr__ = __setitem__

    def __delattr__(self, key):
        # Deletion is expressed as a SummaryRecord with one `remove` item.
        record = SummaryRecord()
        item = SummaryItem()
        item.key = (key,)
        record.remove = (item,)
        self._update(record)

    __delitem__ = __delattr__

    def update(self, d: t.Dict):
        # Batch all key/value pairs into a single SummaryRecord.
        record = SummaryRecord()
        for key, value in d.items():
            item = SummaryItem()
            item.key = (key,)
            item.value = value
            record.update.append(item)

        self._update(record)
75
+
76
+
77
class Summary(SummaryDict):
    """Track single values for each metric for each run.

    By default, a metric's summary is the last value of its History.

    For example, `wandb.log({'accuracy': 0.9})` will add a new step to History and
    update Summary to the latest value. In some cases, it's more useful to have
    the maximum or minimum of a metric instead of the final value. You can set
    history manually `(wandb.summary['accuracy'] = best_acc)`.

    In the UI, summary metrics appear in the table to compare across runs.
    Summary metrics are also used in visualizations like the scatter plot and
    parallel coordinates chart.

    After training has completed, you may want to save evaluation metrics to a
    run. Summary can handle numpy arrays and PyTorch/TensorFlow tensors. When
    you save one of these types to Summary, we persist the entire tensor in a
    binary file and store high level metrics in the summary object, such as min,
    mean, variance, and 95th percentile.

    Examples:
        ```python
        wandb.init(config=args)

        best_accuracy = 0
        for epoch in range(1, args.epochs + 1):
            test_loss, test_accuracy = test()
            if test_accuracy > best_accuracy:
                wandb.run.summary["best_accuracy"] = test_accuracy
                best_accuracy = test_accuracy
        ```
    """

    # Called with each SummaryRecord; None until _set_update_callback runs.
    _update_callback: t.Optional[t.Callable]
    # Zero-argument callable returning the current summary as a plain dict.
    _get_current_summary_callback: t.Callable

    def __init__(self, get_current_summary_callback: t.Callable):
        super().__init__()
        # object.__setattr__ bypasses SummaryDict.__setattr__, which is
        # aliased to __setitem__ and would record these as summary updates.
        object.__setattr__(self, "_update_callback", None)
        object.__setattr__(
            self, "_get_current_summary_callback", get_current_summary_callback
        )

    def _set_update_callback(self, update_callback: t.Callable):
        # Attach the sink for outgoing SummaryRecords (same bypass as above).
        object.__setattr__(self, "_update_callback", update_callback)

    def _as_dict(self):
        return self._get_current_summary_callback()

    def _update(self, record: SummaryRecord):
        # Silently a no-op until a callback has been attached.
        if self._update_callback:  # type: ignore
            self._update_callback(record)
129
+
130
+
131
class SummarySubDict(SummaryDict):
    """Non-root node of the summary data structure.

    Contains a path to itself from the root.
    """

    # Backing storage, shared by reference with the parent's dict.
    _items: t.Dict
    # Parent node; None only for a freshly constructed, unattached node.
    _parent: t.Optional[SummaryDict]
    # Key of this node inside the parent; None when unattached.
    _parent_key: t.Optional[str]

    def __init__(self):
        # object.__setattr__ bypasses the __setattr__/__setitem__ alias
        # inherited from SummaryDict.
        object.__setattr__(self, "_items", dict())
        object.__setattr__(self, "_parent", None)
        object.__setattr__(self, "_parent_key", None)

    def _as_dict(self):
        return self._items

    def _update(self, record: SummaryRecord):
        # Bubble the record up toward the root; _add_next_parent presumably
        # rekeys it under this node's key (defined in summary_record —
        # confirm there).
        return self._parent._update(record._add_next_parent(self._parent_key))