id
int64
1
6.07M
name
stringlengths
1
295
code
stringlengths
12
426k
language
stringclasses
1 value
source_file
stringlengths
5
202
start_line
int64
1
158k
end_line
int64
1
158k
repo
dict
2,401
begin
def begin(self): if wandb.run is None: raise wandb.Error("You must call `wandb.init()` before calling `WandbHook`") if self._summary_op is None: self._summary_op = merge_all_summaries() self._step = -1
python
wandb/integration/tensorflow/estimator_hook.py
35
40
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
2,402
before_run
def before_run(self, run_context): return SessionRunArgs( {"summary": self._summary_op, "global_step": get_global_step()} )
python
wandb/integration/tensorflow/estimator_hook.py
42
45
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
2,403
after_run
def after_run(self, run_context, run_values): step = run_values.results["global_step"] if step % self._steps_per_log == 0: wandb.tensorboard._log( run_values.results["summary"], history=self._history, step=step, )
python
wandb/integration/tensorflow/estimator_hook.py
47
54
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
2,404
make_ndarray
def make_ndarray(tensor: Any) -> Optional["np.ndarray"]: if tensor_util: res = tensor_util.make_ndarray(tensor) # Tensorboard can log generic objects, and we don't want to save them if res.dtype == "object": return None else: return res else: wandb.termwarn( "Can't convert tensor summary, upgrade tensorboard with `pip" " install tensorboard --upgrade`" ) return None
python
wandb/integration/tensorboard/log.py
38
51
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
2,405
namespaced_tag
def namespaced_tag(tag: str, namespace: str = "") -> str: if not namespace: return tag elif tag in namespace: # This happens with tensorboardX return namespace else: return namespace + "/" + tag
python
wandb/integration/tensorboard/log.py
54
61
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
2,406
history_image_key
def history_image_key(key: str, namespace: str = "") -> str: """Convert invalid filesystem characters to _ for use in History keys. Unfortunately this means currently certain image keys will collide silently. We implement this mapping up here in the TensorFlow stuff rather than in the History stuff so that we don't have to store a mapping anywhere from the original keys to the safe ones. """ return namespaced_tag(re.sub(r"[/\\]", "_", key), namespace)
python
wandb/integration/tensorboard/log.py
64
72
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
2,407
tf_summary_to_dict
def tf_summary_to_dict( # noqa: C901 tf_summary_str_or_pb: Any, namespace: str = "" ) -> Optional[Dict[str, Any]]: """Convert a Tensorboard Summary to a dictionary. Accepts a tensorflow.summary.Summary, one encoded as a string, or a list of such encoded as strings. """ values = {} if hasattr(tf_summary_str_or_pb, "summary"): summary_pb = tf_summary_str_or_pb.summary values[namespaced_tag("global_step", namespace)] = tf_summary_str_or_pb.step values["_timestamp"] = tf_summary_str_or_pb.wall_time elif isinstance(tf_summary_str_or_pb, (str, bytes, bytearray)): summary_pb = Summary() summary_pb.ParseFromString(tf_summary_str_or_pb) elif hasattr(tf_summary_str_or_pb, "__iter__"): summary_pb = [Summary() for _ in range(len(tf_summary_str_or_pb))] for i, summary in enumerate(tf_summary_str_or_pb): summary_pb[i].ParseFromString(summary) if i > 0: summary_pb[0].MergeFrom(summary_pb[i]) summary_pb = summary_pb[0] else: summary_pb = tf_summary_str_or_pb if not hasattr(summary_pb, "value") or len(summary_pb.value) == 0: # Ignore these, caller is responsible for handling None return None def encode_images(_img_strs: List[bytes], _value: Any) -> None: try: from PIL import Image except ImportError: wandb.termwarn( "Install pillow if you are logging images with Tensorboard. 
" "To install, run `pip install pillow`.", repeat=False, ) return None if len(_img_strs) == 0: return None images: List[Union["wandb.Video", "wandb.Image"]] = [] for _img_str in _img_strs: # Supports gifs from TensorboardX if _img_str.startswith(b"GIF"): images.append(wandb.Video(io.BytesIO(_img_str), format="gif")) else: images.append(wandb.Image(Image.open(io.BytesIO(_img_str)))) tag_idx = _value.tag.rsplit("/", 1) if len(tag_idx) > 1 and tag_idx[1].isdigit(): tag, idx = tag_idx values.setdefault(history_image_key(tag, namespace), []).extend(images) else: values[history_image_key(_value.tag, namespace)] = images return None for value in summary_pb.value: kind = value.WhichOneof("value") if kind in IGNORE_KINDS: continue if kind == "simple_value": values[namespaced_tag(value.tag, namespace)] = value.simple_value elif kind == "tensor": plugin_name = value.metadata.plugin_data.plugin_name if plugin_name == "scalars" or plugin_name == "": values[namespaced_tag(value.tag, namespace)] = make_ndarray( value.tensor ) elif plugin_name == "images": img_strs = value.tensor.string_val[2:] # First two items are dims. encode_images(img_strs, value) elif plugin_name == "histograms": # https://github.com/tensorflow/tensorboard/blob/master/tensorboard/plugins/histogram/summary_v2.py#L15-L26 ndarray = make_ndarray(value.tensor) if ndarray is None: continue shape = ndarray.shape counts = [] bins = [] if shape[0] > 1: bins.append(ndarray[0][0]) # Add the left most edge for v in ndarray: counts.append(v[2]) bins.append(v[1]) # Add the right most edges elif shape[0] == 1: counts = [ndarray[0][2]] bins = ndarray[0][:2] if len(counts) > 0: try: # TODO: we should just re-bin if there are too many buckets values[namespaced_tag(value.tag, namespace)] = wandb.Histogram( np_histogram=(counts, bins) ) except ValueError: wandb.termwarn( 'Not logging key "{}". 
' "Histograms must have fewer than {} bins".format( namespaced_tag(value.tag, namespace), wandb.Histogram.MAX_LENGTH, ), repeat=False, ) elif plugin_name == "pr_curves": pr_curve_data = make_ndarray(value.tensor) if pr_curve_data is None: continue precision = pr_curve_data[-2, :].tolist() recall = pr_curve_data[-1, :].tolist() # TODO: (kdg) implement spec for showing additional info in tool tips # true_pos = pr_curve_data[1,:] # false_pos = pr_curve_data[2,:] # true_neg = pr_curve_data[1,:] # false_neg = pr_curve_data[1,:] # threshold = [1.0 / n for n in range(len(true_pos), 0, -1)] # min of each in case tensorboard ever changes their pr_curve # to allow for different length outputs data = [] for i in range(min(len(precision), len(recall))): # drop additional threshold values if they exist if precision[i] != 0 or recall[i] != 0: data.append((recall[i], precision[i])) # sort data so custom chart looks the same as tb generated pr curve # ascending recall, descending precision for the same recall values data = sorted(data, key=lambda x: (x[0], -x[1])) data_table = wandb.Table(data=data, columns=["recall", "precision"]) name = namespaced_tag(value.tag, namespace) values[name] = custom_chart( "wandb/line/v0", data_table, {"x": "recall", "y": "precision"}, {"title": f"{name} Precision v. Recall"}, ) elif kind == "image": img_str = value.image.encoded_image_string encode_images([img_str], value) # Coming soon... 
# elif kind == "audio": # audio = wandb.Audio( # six.BytesIO(value.audio.encoded_audio_string), # sample_rate=value.audio.sample_rate, # content_type=value.audio.content_type, # ) elif kind == "histo": tag = namespaced_tag(value.tag, namespace) if len(value.histo.bucket_limit) >= 3: first = ( value.histo.bucket_limit[0] + value.histo.bucket_limit[0] - value.histo.bucket_limit[1] ) last = ( value.histo.bucket_limit[-2] + value.histo.bucket_limit[-2] - value.histo.bucket_limit[-3] ) np_histogram = ( list(value.histo.bucket), [first] + value.histo.bucket_limit[:-1] + [last], ) try: # TODO: we should just re-bin if there are too many buckets values[tag] = wandb.Histogram(np_histogram=np_histogram) except ValueError: wandb.termwarn( f"Not logging key {tag!r}. " f"Histograms must have fewer than {wandb.Histogram.MAX_LENGTH} bins", repeat=False, ) else: # TODO: is there a case where we can render this? wandb.termwarn( f"Not logging key {tag!r}. Found a histogram with only 2 bins.", repeat=False, ) # TODO(jhr): figure out how to share this between userspace and internal process or dont # elif value.tag == "_hparams_/session_start_info": # if wandb.util.get_module("tensorboard.plugins.hparams"): # from tensorboard.plugins.hparams import plugin_data_pb2 # # plugin_data = plugin_data_pb2.HParamsPluginData() # # plugin_data.ParseFromString(value.metadata.plugin_data.content) # for key, param in six.iteritems(plugin_data.session_start_info.hparams): # if not wandb.run.config.get(key): # wandb.run.config[key] = ( # param.number_value or param.string_value or param.bool_value # ) # else: # wandb.termerror( # "Received hparams tf.summary, but could not import " # "the hparams plugin from tensorboard" # ) return values
python
wandb/integration/tensorboard/log.py
75
271
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
2,408
encode_images
def encode_images(_img_strs: List[bytes], _value: Any) -> None: try: from PIL import Image except ImportError: wandb.termwarn( "Install pillow if you are logging images with Tensorboard. " "To install, run `pip install pillow`.", repeat=False, ) return None if len(_img_strs) == 0: return None images: List[Union["wandb.Video", "wandb.Image"]] = [] for _img_str in _img_strs: # Supports gifs from TensorboardX if _img_str.startswith(b"GIF"): images.append(wandb.Video(io.BytesIO(_img_str), format="gif")) else: images.append(wandb.Image(Image.open(io.BytesIO(_img_str)))) tag_idx = _value.tag.rsplit("/", 1) if len(tag_idx) > 1 and tag_idx[1].isdigit(): tag, idx = tag_idx values.setdefault(history_image_key(tag, namespace), []).extend(images) else: values[history_image_key(_value.tag, namespace)] = images return None
python
wandb/integration/tensorboard/log.py
105
133
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
2,409
reset_state
def reset_state() -> None: """Internal method for resetting state, called by wandb.finish().""" global STEPS STEPS = {"": {"step": 0}, "global": {"step": 0, "last_log": None}}
python
wandb/integration/tensorboard/log.py
274
277
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
2,410
_log
def _log( tf_summary_str_or_pb: Any, history: Optional["TBHistory"] = None, step: int = 0, namespace: str = "", **kwargs: Any, ) -> None: """Logs a tfsummary to wandb. Can accept a tf summary string or parsed event. Will use wandb.run.history unless a history object is passed. Can optionally namespace events. Results are committed when step increases for this namespace. NOTE: This assumes that events being passed in are in chronological order """ global STEPS global RATE_LIMIT_SECONDS # To handle multiple global_steps, we keep track of them here instead # of the global log last_step = STEPS.get(namespace, {"step": 0}) # Commit our existing data if this namespace increased its step commit = False if last_step["step"] < step: commit = True log_dict = tf_summary_to_dict(tf_summary_str_or_pb, namespace) if log_dict is None: # not an event, just return return # Pass timestamp to history for loading historic data timestamp = log_dict.get("_timestamp", time.time()) # Store our initial timestamp if STEPS["global"]["last_log"] is None: STEPS["global"]["last_log"] = timestamp # Rollup events that share the same step across namespaces if commit and step == STEPS["global"]["step"]: commit = False # Always add the biggest global_step key for non-default namespaces if step > STEPS["global"]["step"]: STEPS["global"]["step"] = step if namespace != "": log_dict["global_step"] = STEPS["global"]["step"] # Keep internal step counter STEPS[namespace] = {"step": step} if commit: # Only commit our data if we're below the rate limit or don't have one if ( RATE_LIMIT_SECONDS is None or timestamp - STEPS["global"]["last_log"] >= RATE_LIMIT_SECONDS ): if history is None: if wandb.run is not None: wandb.run._log({}) else: history.add({}) STEPS["global"]["last_log"] = timestamp if history is None: if wandb.run is not None: wandb.run._log(log_dict, commit=False) else: history._row_update(log_dict)
python
wandb/integration/tensorboard/log.py
280
346
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
2,411
log
def log(tf_summary_str_or_pb: Any, step: int = 0, namespace: str = "") -> None: if wandb.run is None: raise wandb.Error( "You must call `wandb.init()` before calling `wandb.tensorflow.log`" ) with telemetry.context() as tel: tel.feature.tensorboard_log = True _log(tf_summary_str_or_pb, namespace=namespace, step=step)
python
wandb/integration/tensorboard/log.py
349
358
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
2,412
unpatch
def unpatch() -> None: for module, method in wandb.patched["tensorboard"]: writer = wandb.util.get_module(module, lazy=False) setattr(writer, method, getattr(writer, f"orig_{method}")) wandb.patched["tensorboard"] = []
python
wandb/integration/tensorboard/monkeypatch.py
18
22
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
2,413
patch
def patch( save: bool = True, tensorboard_x: Optional[bool] = None, pytorch: Optional[bool] = None, root_logdir: str = "", ) -> None: if len(wandb.patched["tensorboard"]) > 0: raise ValueError( "Tensorboard already patched, remove `sync_tensorboard=True` " "from `wandb.init` or only call `wandb.tensorboard.patch` once." ) # TODO: Some older versions of tensorflow don't require tensorboard to be present. # we may want to lift this requirement, but it's safer to have it for now wandb.util.get_module( "tensorboard", required="Please install tensorboard package", lazy=False ) c_writer = wandb.util.get_module(TENSORBOARD_C_MODULE, lazy=False) py_writer = wandb.util.get_module(TENSORFLOW_PY_MODULE, lazy=False) tb_writer = wandb.util.get_module(TENSORBOARD_WRITER_MODULE, lazy=False) pt_writer = wandb.util.get_module(TENSORBOARD_PYTORCH_MODULE, lazy=False) tbx_writer = wandb.util.get_module(TENSORBOARD_X_MODULE, lazy=False) if not pytorch and not tensorboard_x and c_writer: _patch_tensorflow2( writer=c_writer, module=TENSORBOARD_C_MODULE, save=save, root_logdir=root_logdir, ) # This is for tensorflow <= 1.15 (tf.compat.v1.summary.FileWriter) if py_writer: _patch_file_writer( writer=py_writer, module=TENSORFLOW_PY_MODULE, save=save, root_logdir=root_logdir, ) if tb_writer: _patch_file_writer( writer=tb_writer, module=TENSORBOARD_WRITER_MODULE, save=save, root_logdir=root_logdir, ) if pt_writer: _patch_file_writer( writer=pt_writer, module=TENSORBOARD_PYTORCH_MODULE, save=save, root_logdir=root_logdir, ) if tbx_writer: _patch_file_writer( writer=tbx_writer, module=TENSORBOARD_X_MODULE, save=save, root_logdir=root_logdir, ) if not c_writer and not tb_writer and not tb_writer: wandb.termerror("Unsupported tensorboard configuration")
python
wandb/integration/tensorboard/monkeypatch.py
25
85
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
2,414
_patch_tensorflow2
def _patch_tensorflow2( writer: Any, module: Any, save: bool = True, root_logdir: str = "", ) -> None: # This configures TensorFlow 2 style Tensorboard logging old_csfw_func = writer.create_summary_file_writer logdir_hist = [] def new_csfw_func(*args: Any, **kwargs: Any) -> Any: logdir = ( kwargs["logdir"].numpy().decode("utf8") if hasattr(kwargs["logdir"], "numpy") else kwargs["logdir"] ) logdir_hist.append(logdir) root_logdir_arg = root_logdir if len(set(logdir_hist)) > 1 and root_logdir == "": wandb.termwarn( "When using several event log directories, " 'please call `wandb.tensorboard.patch(root_logdir="...")` before `wandb.init`' ) # if the logdir contains the hostname, the writer was not given a logdir. # In this case, the generated logdir # is generated and ends with the hostname, update the root_logdir to match. hostname = socket.gethostname() search = re.search(rf"-\d+_{hostname}", logdir) if search: root_logdir_arg = logdir[: search.span()[1]] elif root_logdir is not None and not os.path.abspath(logdir).startswith( os.path.abspath(root_logdir) ): wandb.termwarn( "Found log directory outside of given root_logdir, " f"dropping given root_logdir for event file in {logdir}" ) root_logdir_arg = "" _notify_tensorboard_logdir(logdir, save=save, root_logdir=root_logdir_arg) return old_csfw_func(*args, **kwargs) writer.orig_create_summary_file_writer = old_csfw_func writer.create_summary_file_writer = new_csfw_func wandb.patched["tensorboard"].append([module, "create_summary_file_writer"])
python
wandb/integration/tensorboard/monkeypatch.py
88
133
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
2,415
new_csfw_func
def new_csfw_func(*args: Any, **kwargs: Any) -> Any: logdir = ( kwargs["logdir"].numpy().decode("utf8") if hasattr(kwargs["logdir"], "numpy") else kwargs["logdir"] ) logdir_hist.append(logdir) root_logdir_arg = root_logdir if len(set(logdir_hist)) > 1 and root_logdir == "": wandb.termwarn( "When using several event log directories, " 'please call `wandb.tensorboard.patch(root_logdir="...")` before `wandb.init`' ) # if the logdir contains the hostname, the writer was not given a logdir. # In this case, the generated logdir # is generated and ends with the hostname, update the root_logdir to match. hostname = socket.gethostname() search = re.search(rf"-\d+_{hostname}", logdir) if search: root_logdir_arg = logdir[: search.span()[1]] elif root_logdir is not None and not os.path.abspath(logdir).startswith( os.path.abspath(root_logdir) ): wandb.termwarn( "Found log directory outside of given root_logdir, " f"dropping given root_logdir for event file in {logdir}" ) root_logdir_arg = "" _notify_tensorboard_logdir(logdir, save=save, root_logdir=root_logdir_arg) return old_csfw_func(*args, **kwargs)
python
wandb/integration/tensorboard/monkeypatch.py
98
129
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
2,416
_patch_file_writer
def _patch_file_writer( writer: Any, module: Any, save: bool = True, root_logdir: str = "", ) -> None: # This configures non-TensorFlow Tensorboard logging, or tensorflow <= 1.15 logdir_hist = [] class TBXEventFileWriter(writer.EventFileWriter): def __init__(self, logdir: str, *args: Any, **kwargs: Any) -> None: logdir_hist.append(logdir) root_logdir_arg = root_logdir if len(set(logdir_hist)) > 1 and root_logdir == "": wandb.termwarn( "When using several event log directories, " 'please call `wandb.tensorboard.patch(root_logdir="...")` before `wandb.init`' ) # if the logdir contains the hostname, the writer was not given a logdir. # In this case, the logdir is generated and ends with the hostname, # update the root_logdir to match. hostname = socket.gethostname() search = re.search(rf"-\d+_{hostname}", logdir) if search: root_logdir_arg = logdir[: search.span()[1]] elif root_logdir is not None and not os.path.abspath(logdir).startswith( os.path.abspath(root_logdir) ): wandb.termwarn( "Found log directory outside of given root_logdir, " f"dropping given root_logdir for event file in {logdir}" ) root_logdir_arg = "" _notify_tensorboard_logdir(logdir, save=save, root_logdir=root_logdir_arg) super().__init__(logdir, *args, **kwargs) writer.orig_EventFileWriter = writer.EventFileWriter writer.EventFileWriter = TBXEventFileWriter wandb.patched["tensorboard"].append([module, "EventFileWriter"])
python
wandb/integration/tensorboard/monkeypatch.py
136
178
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
2,417
__init__
def __init__(self, logdir: str, *args: Any, **kwargs: Any) -> None: logdir_hist.append(logdir) root_logdir_arg = root_logdir if len(set(logdir_hist)) > 1 and root_logdir == "": wandb.termwarn( "When using several event log directories, " 'please call `wandb.tensorboard.patch(root_logdir="...")` before `wandb.init`' ) # if the logdir contains the hostname, the writer was not given a logdir. # In this case, the logdir is generated and ends with the hostname, # update the root_logdir to match. hostname = socket.gethostname() search = re.search(rf"-\d+_{hostname}", logdir) if search: root_logdir_arg = logdir[: search.span()[1]] elif root_logdir is not None and not os.path.abspath(logdir).startswith( os.path.abspath(root_logdir) ): wandb.termwarn( "Found log directory outside of given root_logdir, " f"dropping given root_logdir for event file in {logdir}" ) root_logdir_arg = "" _notify_tensorboard_logdir(logdir, save=save, root_logdir=root_logdir_arg) super().__init__(logdir, *args, **kwargs)
python
wandb/integration/tensorboard/monkeypatch.py
146
174
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
2,418
_notify_tensorboard_logdir
def _notify_tensorboard_logdir( logdir: str, save: bool = True, root_logdir: str = "" ) -> None: if wandb.run is not None: wandb.run._tensorboard_callback(logdir, save=save, root_logdir=root_logdir)
python
wandb/integration/tensorboard/monkeypatch.py
181
185
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
2,419
monitor
def monitor(): """Monitor a gym environment. Supports both gym and gymnasium. """ gym_lib: Optional[GymLib] = None # gym is not maintained anymore, gymnasium is the drop-in replacement - prefer it if wandb.util.get_module("gymnasium") is not None: gym_lib = "gymnasium" elif wandb.util.get_module("gym") is not None: gym_lib = "gym" if gym_lib is None: raise wandb.Error(_required_error_msg) vcr = wandb.util.get_module( f"{gym_lib}.wrappers.monitoring.video_recorder", required=_required_error_msg, ) global _gym_version_lt_0_26 if _gym_version_lt_0_26 is None: if gym_lib == "gym": import gym else: import gymnasium as gym # type: ignore from pkg_resources import parse_version if parse_version(gym.__version__) < parse_version("0.26.0"): _gym_version_lt_0_26 = True else: _gym_version_lt_0_26 = False # breaking change in gym 0.26.0 vcr_recorder_attribute = "ImageEncoder" if _gym_version_lt_0_26 else "VideoRecorder" recorder = getattr(vcr, vcr_recorder_attribute) path = "output_path" if _gym_version_lt_0_26 else "path" recorder.orig_close = recorder.close def close(self): recorder.orig_close(self) m = re.match(r".+(video\.\d+).+", getattr(self, path)) if m: key = m.group(1) else: key = "videos" wandb.log({key: wandb.Video(getattr(self, path))}) def del_(self): self.orig_close() if not _gym_version_lt_0_26: recorder.__del__ = del_ recorder.close = close wandb.patched["gym"].append( [ f"{gym_lib}.wrappers.monitoring.video_recorder.{vcr_recorder_attribute}", "close", ] )
python
wandb/integration/gym/__init__.py
22
84
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
2,420
close
def close(self): recorder.orig_close(self) m = re.match(r".+(video\.\d+).+", getattr(self, path)) if m: key = m.group(1) else: key = "videos" wandb.log({key: wandb.Video(getattr(self, path))})
python
wandb/integration/gym/__init__.py
64
71
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
2,421
del_
def del_(self): self.orig_close()
python
wandb/integration/gym/__init__.py
73
74
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
2,422
named_entity
def named_entity(docs): """Create a named entity visualization. Taken from https://github.com/wandb/wandb/blob/main/wandb/plots/named_entity.py. """ spacy = util.get_module( "spacy", required="part_of_speech requires the spacy library, install with `pip install spacy`", ) util.get_module( "en_core_web_md", required="part_of_speech requires `en_core_web_md` library, install with `python -m spacy download en_core_web_md`", ) # Test for required packages and missing & non-integer values in docs data if test_missing(docs=docs): html = spacy.displacy.render( docs, style="ent", page=True, minify=True, jupyter=False ) wandb_html = wandb.Html(html) return wandb_html
python
wandb/integration/prodigy/prodigy.py
33
54
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
2,423
merge
def merge(dict1, dict2): """Return a new dictionary by merging two dictionaries recursively.""" result = deepcopy(dict1) for key, value in dict2.items(): if isinstance(value, collections.abc.Mapping): result[key] = merge(result.get(key, {}), value) else: result[key] = deepcopy(dict2[key]) return result
python
wandb/integration/prodigy/prodigy.py
57
67
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
2,424
get_schema
def get_schema(list_data_dict, struct, array_dict_types): """Get a schema of the dataset's structure and data types.""" # Get the structure of the JSON objects in the database # This is similar to getting a JSON schema but with slightly different format for _i, item in enumerate(list_data_dict): # If the list contains dict objects for k, v in item.items(): # Check if key already exists in template if k not in struct.keys(): if isinstance(v, list): if len(v) > 0 and isinstance(v[0], list): # nested list structure struct[k] = type(v) # type list elif len(v) > 0 and not ( isinstance(v[0], list) or isinstance(v[0], dict) ): # list of singular values struct[k] = type(v) # type list else: # list of dicts array_dict_types.append( k ) # keep track of keys that are type list[dict] struct[k] = {} struct[k] = get_schema(v, struct[k], array_dict_types) elif isinstance(v, dict): struct[k] = {} struct[k] = get_schema([v], struct[k], array_dict_types) else: struct[k] = type(v) else: # Get the value of struct[k] which is the current template # Find new keys and then merge the two templates together cur_struct = struct[k] if isinstance(v, list): if len(v) > 0 and isinstance(v[0], list): # nested list coordinate structure # if the value in the item is currently None, then update if v is not None: struct[k] = type(v) # type list elif len(v) > 0 and not ( isinstance(v[0], list) or isinstance(v[0], dict) ): # single list with values # if the value in the item is currently None, then update if v is not None: struct[k] = type(v) # type list else: array_dict_types.append( k ) # keep track of keys that are type list[dict] struct[k] = {} struct[k] = get_schema(v, struct[k], array_dict_types) # merge cur_struct and struct[k], remove duplicates struct[k] = merge(struct[k], cur_struct) elif isinstance(v, dict): struct[k] = {} struct[k] = get_schema([v], struct[k], array_dict_types) # merge cur_struct and struct[k], remove duplicates struct[k] = merge(struct[k], cur_struct) else: # if the value 
in the item is currently None, then update if v is not None: struct[k] = type(v) return struct
python
wandb/integration/prodigy/prodigy.py
70
135
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
2,425
standardize
def standardize(item, structure, array_dict_types): """Standardize all rows/entries in dataset to fit the schema. Will look for missing values and fill it in so all rows have the same items and structure. """ for k, v in structure.items(): if k not in item: # If the structure/field does not exist if isinstance(v, dict) and (k not in array_dict_types): # If key k is of type dict, and not not a type list[dict] item[k] = {} standardize(item[k], v, array_dict_types) elif isinstance(v, dict) and (k in array_dict_types): # If key k is of type dict, and is actually of type list[dict], # just treat as a list and set to None by default item[k] = None else: # Assign a default type item[k] = v() else: # If the structure/field already exists and is a list or dict if isinstance(item[k], list): # ignore if item is a nested list structure or list of non-dicts condition = ( not (len(item[k]) > 0 and isinstance(item[k][0], list)) ) and ( not ( len(item[k]) > 0 and not ( isinstance(item[k][0], list) or isinstance(item[k][0], dict) ) ) ) if condition: for sub_item in item[k]: standardize(sub_item, v, array_dict_types) elif isinstance(item[k], dict): standardize(item[k], v, array_dict_types)
python
wandb/integration/prodigy/prodigy.py
138
176
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
2,426
create_table
def create_table(data): """Create a W&B Table. - Create/decode images from URL/Base64 - Uses spacy to translate NER span data to visualizations. """ # create table object from columns table_df = pd.DataFrame(data) columns = list(table_df.columns) if ("spans" in table_df.columns) and ("text" in table_df.columns): columns.append("spans_visual") if "image" in columns: columns.append("image_visual") main_table = wandb.Table(columns=columns) # Convert to dictionary format to maintain order during processing matrix = table_df.to_dict(orient="records") # Import en_core_web_md if exists en_core_web_md = util.get_module( "en_core_web_md", required="part_of_speech requires `en_core_web_md` library, install with `python -m spacy download en_core_web_md`", ) nlp = en_core_web_md.load(disable=["ner"]) # Go through each individual row for _i, document in enumerate(matrix): # Text NER span visualizations if ("spans_visual" in columns) and ("text" in columns): # Add visuals for spans document["spans_visual"] = None doc = nlp(document["text"]) ents = [] if ("spans" in document) and (document["spans"] is not None): for span in document["spans"]: if ("start" in span) and ("end" in span) and ("label" in span): charspan = doc.char_span( span["start"], span["end"], span["label"] ) ents.append(charspan) doc.ents = ents document["spans_visual"] = named_entity(docs=doc) # Convert image link to wandb Image if "image" in columns: # Turn into wandb image document["image_visual"] = None if ("image" in document) and (document["image"] is not None): isurl = urllib.parse.urlparse(document["image"]).scheme in ( "http", "https", ) isbase64 = ("data:" in document["image"]) and ( ";base64" in document["image"] ) if isurl: # is url try: im = Image.open(urllib.request.urlopen(document["image"])) document["image_visual"] = wandb.Image(im) except urllib.error.URLError: print( "Warning: Image URL " + str(document["image"]) + " is invalid." 
) document["image_visual"] = None elif isbase64: # is base64 uri imgb64 = document["image"].split("base64,")[1] try: msg = base64.b64decode(imgb64) buf = io.BytesIO(msg) im = Image.open(buf) document["image_visual"] = wandb.Image(im) except base64.binascii.Error: print( "Warning: Base64 string " + str(document["image"]) + " is invalid." ) document["image_visual"] = None else: # is data path document["image_visual"] = wandb.Image(document["image"]) # Create row and append to table values_list = list(document.values()) main_table.add_data(*values_list) return main_table
python
wandb/integration/prodigy/prodigy.py
179
268
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
2,427
upload_dataset
def upload_dataset(dataset_name): """Upload dataset from local database to Weights & Biases. Args: dataset_name: The name of the dataset in the Prodigy database. """ # Check if wandb.init has been called if wandb.run is None: raise ValueError("You must call wandb.init() before upload_dataset()") with wb_telemetry.context(run=wandb.run) as tel: tel.feature.prodigy = True prodigy_db = util.get_module( "prodigy.components.db", required="`prodigy` library is required but not installed. Please see https://prodi.gy/docs/install", ) # Retrieve and upload prodigy dataset database = prodigy_db.connect() data = database.get_dataset(dataset_name) array_dict_types = [] schema = get_schema(data, {}, array_dict_types) for i, _d in enumerate(data): standardize(data[i], schema, array_dict_types) table = create_table(data) wandb.log({dataset_name: table}) print("Prodigy dataset `" + dataset_name + "` uploaded.")
python
wandb/integration/prodigy/prodigy.py
271
299
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
2,428
_wandb_use
def _wandb_use(name: str, data: pd.DataFrame, datasets=False, run=None, testing=False, *args, **kwargs): # type: ignore if testing: return "datasets" if datasets else None if datasets: run.use_artifact(f"{name}:latest") wandb.termlog(f"Using artifact: {name} ({type(data)})")
python
wandb/integration/metaflow/metaflow.py
39
45
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
2,429
wandb_track
def wandb_track( name: str, data: pd.DataFrame, datasets=False, run=None, testing=False, *args, **kwargs, ): if testing: return "pd.DataFrame" if datasets else None if datasets: artifact = wandb.Artifact(name, type="dataset") with artifact.new_file(f"{name}.parquet", "wb") as f: data.to_parquet(f, engine="pyarrow") run.log_artifact(artifact) wandb.termlog(f"Logging artifact: {name} ({type(data)})")
python
wandb/integration/metaflow/metaflow.py
48
65
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
2,430
_wandb_use
def _wandb_use(name: str, data: nn.Module, models=False, run=None, testing=False, *args, **kwargs): # type: ignore if testing: return "models" if models else None if models: run.use_artifact(f"{name}:latest") wandb.termlog(f"Using artifact: {name} ({type(data)})")
python
wandb/integration/metaflow/metaflow.py
77
83
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
2,431
wandb_track
def wandb_track( name: str, data: nn.Module, models=False, run=None, testing=False, *args, **kwargs, ): if testing: return "nn.Module" if models else None if models: artifact = wandb.Artifact(name, type="model") with artifact.new_file(f"{name}.pkl", "wb") as f: torch.save(data, f) run.log_artifact(artifact) wandb.termlog(f"Logging artifact: {name} ({type(data)})")
python
wandb/integration/metaflow/metaflow.py
86
103
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
2,432
_wandb_use
def _wandb_use(name: str, data: BaseEstimator, models=False, run=None, testing=False, *args, **kwargs): # type: ignore if testing: return "models" if models else None if models: run.use_artifact(f"{name}:latest") wandb.termlog(f"Using artifact: {name} ({type(data)})")
python
wandb/integration/metaflow/metaflow.py
114
120
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
2,433
wandb_track
def wandb_track( name: str, data: BaseEstimator, models=False, run=None, testing=False, *args, **kwargs, ): if testing: return "BaseEstimator" if models else None if models: artifact = wandb.Artifact(name, type="model") with artifact.new_file(f"{name}.pkl", "wb") as f: pickle.dump(data, f) run.log_artifact(artifact) wandb.termlog(f"Logging artifact: {name} ({type(data)})")
python
wandb/integration/metaflow/metaflow.py
123
140
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
2,434
__init__
def __init__(self, flow): # do this to avoid recursion problem with __setattr__ self.__dict__.update( { "flow": flow, "inputs": {}, "outputs": {}, "base": set(dir(flow)), "params": {p: getattr(flow, p) for p in current.parameter_names}, } )
python
wandb/integration/metaflow/metaflow.py
149
159
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
2,435
__setattr__
def __setattr__(self, key, val): self.outputs[key] = val return setattr(self.flow, key, val)
python
wandb/integration/metaflow/metaflow.py
161
163
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
2,436
__getattr__
def __getattr__(self, key): if key not in self.base and key not in self.outputs: self.inputs[key] = getattr(self.flow, key) return getattr(self.flow, key)
python
wandb/integration/metaflow/metaflow.py
165
168
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
2,437
wandb_track
def wandb_track(name: str, data: (dict, list, set, str, int, float, bool), run=None, testing=False, *args, **kwargs): # type: ignore if testing: return "scalar" run.log({name: data})
python
wandb/integration/metaflow/metaflow.py
172
176
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
2,438
wandb_track
def wandb_track( name: str, data: Path, datasets=False, run=None, testing=False, *args, **kwargs ): if testing: return "Path" if datasets else None if datasets: artifact = wandb.Artifact(name, type="dataset") if data.is_dir(): artifact.add_dir(data) elif data.is_file(): artifact.add_file(data) run.log_artifact(artifact) wandb.termlog(f"Logging artifact: {name} ({type(data)})")
python
wandb/integration/metaflow/metaflow.py
180
193
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
2,439
wandb_track
def wandb_track( name: str, data, others=False, run=None, testing=False, *args, **kwargs ): if testing: return "generic" if others else None if others: artifact = wandb.Artifact(name, type="other") with artifact.new_file(f"{name}.pkl", "wb") as f: pickle.dump(data, f) run.log_artifact(artifact) wandb.termlog(f"Logging artifact: {name} ({type(data)})")
python
wandb/integration/metaflow/metaflow.py
198
209
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
2,440
wandb_use
def wandb_use(name: str, data, *args, **kwargs): try: return _wandb_use(name, data, *args, **kwargs) except wandb.CommError: print( f"This artifact ({name}, {type(data)}) does not exist in the wandb datastore!" f"If you created an instance inline (e.g. sklearn.ensemble.RandomForestClassifier), then you can safely ignore this" f"Otherwise you may want to check your internet connection!" )
python
wandb/integration/metaflow/metaflow.py
213
221
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
2,441
wandb_use
def wandb_use(name: str, data: (dict, list, set, str, int, float, bool), *args, **kwargs): # type: ignore pass # do nothing for these types
python
wandb/integration/metaflow/metaflow.py
225
226
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
2,442
_wandb_use
def _wandb_use(name: str, data: Path, datasets=False, run=None, testing=False, *args, **kwargs): # type: ignore if testing: return "datasets" if datasets else None if datasets: run.use_artifact(f"{name}:latest") wandb.termlog(f"Using artifact: {name} ({type(data)})")
python
wandb/integration/metaflow/metaflow.py
230
236
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
2,443
_wandb_use
def _wandb_use(name: str, data, others=False, run=None, testing=False, *args, **kwargs): # type: ignore if testing: return "others" if others else None if others: run.use_artifact(f"{name}:latest") wandb.termlog(f"Using artifact: {name} ({type(data)})")
python
wandb/integration/metaflow/metaflow.py
240
246
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
2,444
coalesce
def coalesce(*arg): return next((a for a in arg if a is not None), None)
python
wandb/integration/metaflow/metaflow.py
249
250
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
2,445
wandb_log
def wandb_log(
    func=None,
    # /,  # py38 only
    datasets=False,
    models=False,
    others=False,
    settings=None,
):
    """Automatically log parameters and artifacts to W&B by type dispatch.

    This decorator can be applied to a flow, step, or both.
    - Decorating a step will enable or disable logging for certain types within that step
    - Decorating the flow is equivalent to decorating all steps with a default
    - Decorating a step after decorating the flow will overwrite the flow decoration

    Arguments:
        func: (`Callable`). The method or class being decorated (if decorating a
            step or flow respectively).
        datasets: (`bool`). If `True`, log datasets (`pd.DataFrame` or
            `pathlib.Path`). Default `False`.
        models: (`bool`). If `True`, log models (`nn.Module` or
            `sklearn.base.BaseEstimator`). Default `False`.
        others: (`bool`). If `True`, log anything pickle-able. Default `False`.
        settings: (`wandb.sdk.wandb_settings.Settings`). Custom settings passed
            to `wandb.init`. If `settings.run_group` is `None` it becomes
            `{flow_name}/{run_id}`; if `settings.run_job_type` is `None` it
            becomes the current step name.
    """

    @wraps(func)
    def decorator(func):
        # If you decorate a class, apply the decoration to all methods in that class
        if inspect.isclass(func):
            cls = func
            for attr in cls.__dict__:
                if callable(getattr(cls, attr)):
                    # BUGFIX: check the attribute *value* for a previous
                    # decoration. The original tested `hasattr(attr, ...)`
                    # where `attr` is a str, which is always False.
                    if not hasattr(getattr(cls, attr), "_base_func"):
                        setattr(cls, attr, decorator(getattr(cls, attr)))
            return cls

        # prefer the earliest decoration (i.e. method decoration overrides class decoration)
        if hasattr(func, "_base_func"):
            return func

        @wraps(func)
        def wrapper(self, *args, settings=settings, **kwargs):
            if not isinstance(settings, wandb.sdk.wandb_settings.Settings):
                settings = wandb.Settings()

            settings.update(
                run_group=coalesce(
                    settings.run_group, f"{current.flow_name}/{current.run_id}"
                ),
                source=wandb.sdk.wandb_settings.Source.INIT,
            )
            settings.update(
                run_job_type=coalesce(settings.run_job_type, current.step_name),
                source=wandb.sdk.wandb_settings.Source.INIT,
            )
            with wandb.init(settings=settings) as run:
                with wb_telemetry.context(run=run) as tel:
                    tel.feature.metaflow = True
                proxy = ArtifactProxy(self)
                run.config.update(proxy.params)
                func(proxy, *args, **kwargs)

                for name, data in proxy.inputs.items():
                    wandb_use(
                        name,
                        data,
                        datasets=datasets,
                        models=models,
                        others=others,
                        run=run,
                    )

                for name, data in proxy.outputs.items():
                    wandb_track(
                        name,
                        data,
                        datasets=datasets,
                        models=models,
                        others=others,
                        run=run,
                    )

        wrapper._base_func = func

        # Add for testing visibility
        wrapper._kwargs = {
            "datasets": datasets,
            "models": models,
            "others": others,
            "settings": settings,
        }
        return wrapper

    if func is None:
        return decorator
    else:
        return decorator(func)
python
wandb/integration/metaflow/metaflow.py
253
348
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
2,446
decorator
def decorator(func): # If you decorate a class, apply the decoration to all methods in that class if inspect.isclass(func): cls = func for attr in cls.__dict__: if callable(getattr(cls, attr)): if not hasattr(attr, "_base_func"): setattr(cls, attr, decorator(getattr(cls, attr))) return cls # prefer the earliest decoration (i.e. method decoration overrides class decoration) if hasattr(func, "_base_func"): return func @wraps(func) def wrapper(self, *args, settings=settings, **kwargs): if not isinstance(settings, wandb.sdk.wandb_settings.Settings): settings = wandb.Settings() settings.update( run_group=coalesce( settings.run_group, f"{current.flow_name}/{current.run_id}" ), source=wandb.sdk.wandb_settings.Source.INIT, ) settings.update( run_job_type=coalesce(settings.run_job_type, current.step_name), source=wandb.sdk.wandb_settings.Source.INIT, ) with wandb.init(settings=settings) as run: with wb_telemetry.context(run=run) as tel: tel.feature.metaflow = True proxy = ArtifactProxy(self) run.config.update(proxy.params) func(proxy, *args, **kwargs) for name, data in proxy.inputs.items(): wandb_use( name, data, datasets=datasets, models=models, others=others, run=run, ) for name, data in proxy.outputs.items(): wandb_track( name, data, datasets=datasets, models=models, others=others, run=run, ) wrapper._base_func = func # Add for testing visibility wrapper._kwargs = { "datasets": datasets, "models": models, "others": others, "settings": settings, } return wrapper
python
wandb/integration/metaflow/metaflow.py
277
343
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
2,447
wrapper
def wrapper(self, *args, settings=settings, **kwargs): if not isinstance(settings, wandb.sdk.wandb_settings.Settings): settings = wandb.Settings() settings.update( run_group=coalesce( settings.run_group, f"{current.flow_name}/{current.run_id}" ), source=wandb.sdk.wandb_settings.Source.INIT, ) settings.update( run_job_type=coalesce(settings.run_job_type, current.step_name), source=wandb.sdk.wandb_settings.Source.INIT, ) with wandb.init(settings=settings) as run: with wb_telemetry.context(run=run) as tel: tel.feature.metaflow = True proxy = ArtifactProxy(self) run.config.update(proxy.params) func(proxy, *args, **kwargs) for name, data in proxy.inputs.items(): wandb_use( name, data, datasets=datasets, models=models, others=others, run=run, ) for name, data in proxy.outputs.items(): wandb_track( name, data, datasets=datasets, models=models, others=others, run=run, )
python
wandb/integration/metaflow/metaflow.py
292
332
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
2,448
_define_metric
def _define_metric(data: str, metric_name: str) -> None: """Capture model performance at the best step. instead of the last step, of training in your `wandb.summary` """ if "loss" in str.lower(metric_name): wandb.define_metric(f"{data}_{metric_name}", summary="min") elif str.lower(metric_name) in MINIMIZE_METRICS: wandb.define_metric(f"{data}_{metric_name}", summary="min") elif str.lower(metric_name) in MAXIMIZE_METRICS: wandb.define_metric(f"{data}_{metric_name}", summary="max")
python
wandb/integration/lightgbm/__init__.py
53
63
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
2,449
_checkpoint_artifact
def _checkpoint_artifact( model: "Booster", iteration: int, aliases: "List[str]" ) -> None: """Upload model checkpoint as W&B artifact.""" # NOTE: type ignore required because wandb.run is improperly inferred as None type model_name = f"model_{wandb.run.id}" # type: ignore model_path = Path(wandb.run.dir) / f"model_ckpt_{iteration}.txt" # type: ignore model.save_model(model_path, num_iteration=iteration) model_artifact = wandb.Artifact(name=model_name, type="model") model_artifact.add_file(model_path) wandb.log_artifact(model_artifact, aliases=aliases)
python
wandb/integration/lightgbm/__init__.py
66
78
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
2,450
_log_feature_importance
def _log_feature_importance(model: "Booster") -> None: """Log feature importance.""" feat_imps = model.feature_importance() feats = model.feature_name() fi_data = [[feat, feat_imp] for feat, feat_imp in zip(feats, feat_imps)] table = wandb.Table(data=fi_data, columns=["Feature", "Importance"]) wandb.log( { "Feature Importance": wandb.plot.bar( table, "Feature", "Importance", title="Feature Importance" ) }, commit=False, )
python
wandb/integration/lightgbm/__init__.py
81
94
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
2,451
wandb_callback
def wandb_callback(log_params: bool = True, define_metric: bool = True) -> Callable: """Automatically integrates LightGBM with wandb. Arguments: log_params: (boolean) if True (default) logs params passed to lightgbm.train as W&B config define_metric: (boolean) if True (default) capture model performance at the best step, instead of the last step, of training in your `wandb.summary` Passing `wandb_callback` to LightGBM will: - log params passed to lightgbm.train as W&B config (default). - log evaluation metrics collected by LightGBM, such as rmse, accuracy etc to Weights & Biases - Capture the best metric in `wandb.summary` when `define_metric=True` (default). Use `log_summary` as an extension of this callback. Example: ```python params = { "boosting_type": "gbdt", "objective": "regression", } gbm = lgb.train( params, lgb_train, num_boost_round=10, valid_sets=lgb_eval, valid_names=("validation"), callbacks=[wandb_callback()], ) ``` """ log_params_list: "List[bool]" = [log_params] define_metric_list: "List[bool]" = [define_metric] def _init(env: "CallbackEnv") -> None: with wb_telemetry.context() as tel: tel.feature.lightgbm_wandb_callback = True wandb.config.update(env.params) log_params_list[0] = False if define_metric_list[0]: for i in range(len(env.evaluation_result_list)): data_type = env.evaluation_result_list[i][0] metric_name = env.evaluation_result_list[i][1] _define_metric(data_type, metric_name) def _callback(env: "CallbackEnv") -> None: if log_params_list[0]: _init(env) eval_results: "Dict[str, Dict[str, List[Any]]]" = {} recorder = lightgbm.record_evaluation(eval_results) recorder(env) for validation_key in eval_results.keys(): for key in eval_results[validation_key].keys(): wandb.log( {validation_key + "_" + key: eval_results[validation_key][key][0]}, commit=False, ) # Previous log statements use commit=False. This commits them. wandb.log({"iteration": env.iteration}, commit=True) return _callback
python
wandb/integration/lightgbm/__init__.py
97
161
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
2,452
_init
def _init(env: "CallbackEnv") -> None: with wb_telemetry.context() as tel: tel.feature.lightgbm_wandb_callback = True wandb.config.update(env.params) log_params_list[0] = False if define_metric_list[0]: for i in range(len(env.evaluation_result_list)): data_type = env.evaluation_result_list[i][0] metric_name = env.evaluation_result_list[i][1] _define_metric(data_type, metric_name)
python
wandb/integration/lightgbm/__init__.py
130
141
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
2,453
_callback
def _callback(env: "CallbackEnv") -> None: if log_params_list[0]: _init(env) eval_results: "Dict[str, Dict[str, List[Any]]]" = {} recorder = lightgbm.record_evaluation(eval_results) recorder(env) for validation_key in eval_results.keys(): for key in eval_results[validation_key].keys(): wandb.log( {validation_key + "_" + key: eval_results[validation_key][key][0]}, commit=False, ) # Previous log statements use commit=False. This commits them. wandb.log({"iteration": env.iteration}, commit=True)
python
wandb/integration/lightgbm/__init__.py
143
159
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
2,454
log_summary
def log_summary( model: Booster, feature_importance: bool = True, save_model_checkpoint: bool = False ) -> None: """Log useful metrics about lightgbm model after training is done. Arguments: model: (Booster) is an instance of lightgbm.basic.Booster. feature_importance: (boolean) if True (default), logs the feature importance plot. save_model_checkpoint: (boolean) if True saves the best model and upload as W&B artifacts. Using this along with `wandb_callback` will: - log `best_iteration` and `best_score` as `wandb.summary`. - log feature importance plot. - save and upload your best trained model to Weights & Biases Artifacts (when `save_model_checkpoint = True`) Example: ```python params = { "boosting_type": "gbdt", "objective": "regression", } gbm = lgb.train( params, lgb_train, num_boost_round=10, valid_sets=lgb_eval, valid_names=("validation"), callbacks=[wandb_callback()], ) log_summary(gbm) ``` """ if wandb.run is None: raise wandb.Error("You must call wandb.init() before WandbCallback()") if not isinstance(model, Booster): raise wandb.Error("Model should be an instance of lightgbm.basic.Booster") wandb.run.summary["best_iteration"] = model.best_iteration wandb.run.summary["best_score"] = model.best_score # Log feature importance if feature_importance: _log_feature_importance(model) if save_model_checkpoint: _checkpoint_artifact(model, model.best_iteration, aliases=["best"]) with wb_telemetry.context() as tel: tel.feature.lightgbm_log_summary = True
python
wandb/integration/lightgbm/__init__.py
164
215
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
2,455
__init__
def __init__(self, metric_period: int = 1): if wandb.run is None: raise wandb.Error("You must call `wandb.init()` before `WandbCallback()`") with wb_telemetry.context() as tel: tel.feature.catboost_wandb_callback = True self.metric_period: int = metric_period
python
wandb/integration/catboost/catboost.py
42
49
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
2,456
after_iteration
def after_iteration(self, info: SimpleNamespace) -> bool: if info.iteration % self.metric_period == 0: for data, metric in info.metrics.items(): for metric_name, log in metric.items(): # todo: replace with wandb.run._log once available wandb.log({f"{data}-{metric_name}": log[-1]}, commit=False) # todo: replace with wandb.run._log once available wandb.log({f"iteration@metric-period-{self.metric_period}": info.iteration}) return True
python
wandb/integration/catboost/catboost.py
51
60
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
2,457
_checkpoint_artifact
def _checkpoint_artifact( model: Union[CatBoostClassifier, CatBoostRegressor], aliases: List[str] ) -> None: """Upload model checkpoint as W&B artifact.""" if wandb.run is None: raise wandb.Error( "You must call `wandb.init()` before `_checkpoint_artifact()`" ) model_name = f"model_{wandb.run.id}" # save the model in the default `cbm` format model_path = Path(wandb.run.dir) / "model" model.save_model(model_path) model_artifact = wandb.Artifact(name=model_name, type="model") model_artifact.add_file(str(model_path)) wandb.log_artifact(model_artifact, aliases=aliases)
python
wandb/integration/catboost/catboost.py
63
80
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
2,458
_log_feature_importance
def _log_feature_importance( model: Union[CatBoostClassifier, CatBoostRegressor] ) -> None: """Log feature importance with default settings.""" if wandb.run is None: raise wandb.Error( "You must call `wandb.init()` before `_checkpoint_artifact()`" ) feat_df = model.get_feature_importance(prettified=True) fi_data = [ [feat, feat_imp] for feat, feat_imp in zip(feat_df["Feature Id"], feat_df["Importances"]) ] table = wandb.Table(data=fi_data, columns=["Feature", "Importance"]) # todo: replace with wandb.run._log once available wandb.log( { "Feature Importance": wandb.plot.bar( table, "Feature", "Importance", title="Feature Importance" ) }, commit=False, )
python
wandb/integration/catboost/catboost.py
83
107
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
2,459
log_summary
def log_summary(
    model: Union[CatBoostClassifier, CatBoostRegressor],
    log_all_params: bool = True,
    save_model_checkpoint: bool = False,
    log_feature_importance: bool = True,
) -> None:
    """`log_summary` logs useful metrics about catboost model after training is done.

    Arguments:
        model: it can be CatBoostClassifier or CatBoostRegressor.
        log_all_params: (boolean) if True (default) log the model hyperparameters
            as W&B config.
        save_model_checkpoint: (boolean) if True saves the model upload as W&B artifacts.
        log_feature_importance: (boolean) if True (default) logs feature importance
            as W&B bar chart using the default setting of `get_feature_importance`.

    Using this along with `wandb_callback` will:

    - save the hyperparameters as W&B config,
    - log `best_iteration` and `best_score` as `wandb.summary`,
    - save and upload your trained model to Weights & Biases Artifacts
      (when `save_model_checkpoint = True`)
    - log feature importance plot.
    """
    if wandb.run is None:
        raise wandb.Error("You must call `wandb.init()` before `log_summary()`")

    if not isinstance(model, (CatBoostClassifier, CatBoostRegressor)):
        raise wandb.Error(
            "Model should be an instance of CatBoostClassifier or CatBoostRegressor"
        )

    with wb_telemetry.context() as tel:
        tel.feature.catboost_log_summary = True

    # log configs
    params = model.get_all_params()
    if log_all_params:
        wandb.config.update(params)

    # log best score and iteration
    wandb.run.summary["best_iteration"] = model.get_best_iteration()
    wandb.run.summary["best_score"] = model.get_best_score()

    # log model
    if save_model_checkpoint:
        aliases = ["best"] if params["use_best_model"] else ["last"]
        _checkpoint_artifact(model, aliases=aliases)

    # Feature importance
    if log_feature_importance:
        _log_feature_importance(model)
python
wandb/integration/catboost/catboost.py
110
178
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
2,460
sagemaker_auth
def sagemaker_auth(overrides=None, path=".", api_key=None): """Write a secrets.env file with the W&B ApiKey and any additional secrets passed. Arguments: overrides (dict, optional): Additional environment variables to write to secrets.env path (str, optional): The path to write the secrets file. """ settings = wandb.setup().settings current_api_key = wandb.wandb_lib.apikey.api_key(settings=settings) overrides = overrides or dict() api_key = overrides.get(env.API_KEY, api_key or current_api_key) if api_key is None: raise ValueError( "Can't find W&B ApiKey, set the WANDB_API_KEY env variable " "or run `wandb login`" ) overrides[env.API_KEY] = api_key with open(os.path.join(path, "secrets.env"), "w") as file: for k, v in overrides.items(): file.write(f"{k}={v}\n")
python
wandb/integration/sagemaker/auth.py
7
28
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
2,461
parse_sm_config
def parse_sm_config() -> Dict[str, Any]: """Attempt to parse SageMaker configuration. Returns: A dictionary of SageMaker config keys/values or empty dict if not found. """ conf = {} if os.path.exists(sm_files.SM_PARAM_CONFIG) and os.path.exists( sm_files.SM_RESOURCE_CONFIG ): conf["sagemaker_training_job_name"] = os.getenv("TRAINING_JOB_NAME") # Hyperparameter searches quote configs... for k, v in json.load(open(sm_files.SM_PARAM_CONFIG)).items(): cast = v.strip('"') if re.match(r"^[-\d]+$", cast): cast = int(cast) elif re.match(r"^[-.\d]+$", cast): cast = float(cast) conf[k] = cast return conf
python
wandb/integration/sagemaker/config.py
9
28
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
2,462
parse_sm_secrets
def parse_sm_secrets() -> Dict[str, str]: """We read our api_key from secrets.env in SageMaker.""" env_dict = dict() # Set secret variables if os.path.exists(sm_files.SM_SECRETS): for line in open(sm_files.SM_SECRETS): key, val = line.strip().split("=", 1) env_dict[key] = val return env_dict
python
wandb/integration/sagemaker/resources.py
10
18
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
2,463
parse_sm_resources
def parse_sm_resources() -> Tuple[Dict[str, str], Dict[str, str]]: run_dict = dict() run_id = os.getenv("TRAINING_JOB_NAME") if run_id and os.getenv("WANDB_RUN_ID") is None: suffix = "".join( secrets.choice(string.ascii_lowercase + string.digits) for _ in range(6) ) run_dict["run_id"] = "-".join( [run_id, suffix, os.getenv("CURRENT_HOST", socket.gethostname())] ) run_dict["run_group"] = os.getenv("TRAINING_JOB_NAME") env_dict = parse_sm_secrets() return run_dict, env_dict
python
wandb/integration/sagemaker/resources.py
21
34
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
2,464
_terminate_thread
def _terminate_thread(thread): if not thread.is_alive(): return if hasattr(thread, "_terminated"): return thread._terminated = True tid = getattr(thread, "_thread_id", None) if tid is None: for k, v in threading._active.items(): if v is thread: tid = k if tid is None: # This should never happen return logger.debug(f"Terminating thread: {tid}") res = ctypes.pythonapi.PyThreadState_SetAsyncExc( ctypes.c_long(tid), ctypes.py_object(Exception) ) if res == 0: # This should never happen return elif res != 1: # Revert logger.debug(f"Termination failed for thread {tid}") ctypes.pythonapi.PyThreadState_SetAsyncExc(ctypes.c_long(tid), None)
python
wandb/agents/pyagent.py
23
47
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
2,465
__init__
def __init__(self, command): self.command = command job_type = command.get("type") self.type = job_type self.run_id = command.get("run_id") self.config = command.get("args")
python
wandb/agents/pyagent.py
51
56
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
2,466
__repr__
def __repr__(self): if self.type == "run": return f"Job({self.run_id},{self.config})" elif self.type == "stop": return f"stop({self.run_id})" else: return "exit"
python
wandb/agents/pyagent.py
58
64
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
2,467
__init__
def __init__(
    self, sweep_id=None, project=None, entity=None, function=None, count=None
):
    """Create a sweep agent.

    Arguments:
        sweep_id: sweep path or id to attach to
        project: W&B project override
        entity: W&B entity override
        function: callable executed for each sweep run
        count: maximum number of runs to execute
    """
    self._sweep_path = sweep_id
    self._sweep_id = None
    self._project = project
    self._entity = entity
    self._function = function
    self._count = count
    # glob_config = os.path.expanduser('~/.config/wandb/settings')
    # loc_config = 'wandb/settings'
    # files = (glob_config, loc_config)
    self._api = InternalApi()
    self._agent_id = None
    self._max_initial_failures = wandb.env.get_agent_max_initial_failures(
        self.MAX_INITIAL_FAILURES
    )
    # if the directory to log to is not set, set it
    if os.environ.get(wandb.env.DIR) is None:
        os.environ[wandb.env.DIR] = os.path.abspath(os.getcwd())
python
wandb/agents/pyagent.py
81
100
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
2,468
_init
def _init(self):
    """Reset per-run state so the Agent instance can be rerun."""
    # These are not in constructor so that Agent instance can be rerun
    self._run_threads = {}
    self._run_status = {}
    self._queue = queue.Queue()
    self._exit_flag = False
    self._exceptions = {}
    self._start_time = time.time()
python
wandb/agents/pyagent.py
102
109
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
2,469
_register
def _register(self):
    """Register this host as an agent with the backend and store its id."""
    logger.debug("Agent._register()")
    agent = self._api.register_agent(socket.gethostname(), sweep_id=self._sweep_id)
    self._agent_id = agent["id"]
    logger.debug(f"agent_id = {self._agent_id}")
python
wandb/agents/pyagent.py
111
115
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
2,470
_setup
def _setup(self):
    """Parse the sweep path, export entity/project/sweep env vars, register."""
    logger.debug("Agent._setup()")
    self._init()
    parts = dict(entity=self._entity, project=self._project, name=self._sweep_path)
    err = util.parse_sweep_id(parts)
    if err:
        wandb.termerror(err)
        return
    entity = parts.get("entity") or self._entity
    project = parts.get("project") or self._project
    sweep_id = parts.get("name") or self._sweep_id
    if sweep_id:
        os.environ[wandb.env.SWEEP_ID] = sweep_id
    if entity:
        wandb.env.set_entity(entity)
    if project:
        wandb.env.set_project(project)
    if sweep_id:
        self._sweep_id = sweep_id
    self._register()
python
wandb/agents/pyagent.py
117
136
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
2,471
_stop_run
def _stop_run(self, run_id):
    """Mark a run STOPPED and forcibly terminate its worker thread."""
    logger.debug(f"Stopping run {run_id}.")
    self._run_status[run_id] = RunStatus.STOPPED
    thread = self._run_threads.get(run_id)
    if thread:
        _terminate_thread(thread)
python
wandb/agents/pyagent.py
138
143
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
2,472
_stop_all_runs
def _stop_all_runs(self):
    """Stop every tracked run thread."""
    logger.debug("Stopping all runs.")
    # Iterate over a snapshot of the keys; _stop_run may mutate the dict.
    for run in list(self._run_threads.keys()):
        self._stop_run(run)
python
wandb/agents/pyagent.py
145
148
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
2,473
_exit
def _exit(self):
    """Stop all runs and signal the main loop to exit."""
    self._stop_all_runs()
    self._exit_flag = True
    # _terminate_thread(self._main_thread)
python
wandb/agents/pyagent.py
150
153
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
2,474
_heartbeat
def _heartbeat(self):
    """Poll the backend every 5s for commands until told to exit."""
    while True:
        if self._exit_flag:
            return
        # if not self._main_thread.is_alive():
        #     return
        # Report only runs that are still queued or running.
        run_status = {
            run: True
            for run, status in self._run_status.items()
            if status in (RunStatus.QUEUED, RunStatus.RUNNING)
        }
        commands = self._api.agent_heartbeat(self._agent_id, {}, run_status)
        if commands:
            job = Job(commands[0])
            logger.debug(f"Job received: {job}")
            if job.type in ["run", "resume"]:
                self._queue.put(job)
                self._run_status[job.run_id] = RunStatus.QUEUED
            elif job.type == "stop":
                self._stop_run(job.run_id)
            elif job.type == "exit":
                self._exit()
                return
        time.sleep(5)
python
wandb/agents/pyagent.py
155
178
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
2,475
_run_jobs_from_queue
def _run_jobs_from_queue(self):  # noqa:C901
    """Main agent loop: pull jobs off the queue and run them one at a time.

    Tracks the module-level _INSTANCES counter, handles flapping / initial
    failure kill-switches, the configured run count, and Ctrl+C.
    """
    global _INSTANCES
    _INSTANCES += 1
    try:
        waiting = False
        count = 0
        while True:
            if self._exit_flag:
                return
            try:
                try:
                    job = self._queue.get(timeout=5)
                    if self._exit_flag:
                        logger.debug("Exiting main loop due to exit flag.")
                        wandb.termlog("Sweep Agent: Exiting.")
                        return
                except queue.Empty:
                    if not waiting:
                        logger.debug("Paused.")
                        wandb.termlog("Sweep Agent: Waiting for job.")
                        waiting = True
                    time.sleep(5)
                    if self._exit_flag:
                        logger.debug("Exiting main loop due to exit flag.")
                        wandb.termlog("Sweep Agent: Exiting.")
                        return
                    continue
                if waiting:
                    logger.debug("Resumed.")
                    wandb.termlog("Job received.")
                    waiting = False
                count += 1
                run_id = job.run_id
                if self._run_status[run_id] == RunStatus.STOPPED:
                    continue
                logger.debug(f"Spawning new thread for run {run_id}.")
                thread = threading.Thread(target=self._run_job, args=(job,))
                self._run_threads[run_id] = thread
                thread.start()
                self._run_status[run_id] = RunStatus.RUNNING
                # Runs execute sequentially: wait for this one to finish.
                thread.join()
                logger.debug(f"Thread joined for run {run_id}.")
                if self._run_status[run_id] == RunStatus.RUNNING:
                    self._run_status[run_id] = RunStatus.DONE
                elif self._run_status[run_id] == RunStatus.ERRORED:
                    exc = self._exceptions[run_id]
                    logger.error(f"Run {run_id} errored: {repr(exc)}")
                    wandb.termerror(f"Run {run_id} errored: {repr(exc)}")
                    if os.getenv(wandb.env.AGENT_DISABLE_FLAPPING) == "true":
                        self._exit_flag = True
                        return
                    elif (
                        time.time() - self._start_time < self.FLAPPING_MAX_SECONDS
                    ) and (len(self._exceptions) >= self.FLAPPING_MAX_FAILURES):
                        msg = "Detected {} failed runs in the first {} seconds, killing sweep.".format(
                            self.FLAPPING_MAX_FAILURES, self.FLAPPING_MAX_SECONDS
                        )
                        logger.error(msg)
                        wandb.termerror(msg)
                        wandb.termlog(
                            "To disable this check set WANDB_AGENT_DISABLE_FLAPPING=true"
                        )
                        self._exit_flag = True
                        return
                    if (
                        self._max_initial_failures < len(self._exceptions)
                        and len(self._exceptions) >= count
                    ):
                        msg = "Detected {} failed runs in a row at start, killing sweep.".format(
                            self._max_initial_failures
                        )
                        logger.error(msg)
                        wandb.termerror(msg)
                        wandb.termlog(
                            "To change this value set WANDB_AGENT_MAX_INITIAL_FAILURES=val"
                        )
                        self._exit_flag = True
                        return
                if self._count and self._count == count:
                    logger.debug("Exiting main loop because max count reached.")
                    self._exit_flag = True
                    return
            except KeyboardInterrupt:
                logger.debug("Ctrl + C detected. Stopping sweep.")
                wandb.termlog("Ctrl + C detected. Stopping sweep.")
                self._exit()
                return
            except Exception as e:
                if self._exit_flag:
                    logger.debug("Exiting main loop due to exit flag.")
                    wandb.termlog("Sweep Agent: Killed.")
                    return
                else:
                    raise e
    finally:
        _INSTANCES -= 1
python
wandb/agents/pyagent.py
180
275
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
2,476
_run_job
def _run_job(self, job):
    """Execute one sweep run: write its config, set env vars, call the function.

    Environment changes (RUN_ID, SWEEP_ID, SWEEP_PARAM_PATH) are always
    rolled back in the finally block.
    """
    try:
        run_id = job.run_id
        config_file = os.path.join(
            "wandb", "sweep-" + self._sweep_id, "config-" + run_id + ".yaml"
        )
        os.environ[wandb.env.RUN_ID] = run_id
        base_dir = os.environ.get(wandb.env.DIR, "")
        sweep_param_path = os.path.join(base_dir, config_file)
        os.environ[wandb.env.SWEEP_PARAM_PATH] = sweep_param_path
        wandb.wandb_lib.config_util.save_config_file_from_dict(
            sweep_param_path, job.config
        )
        os.environ[wandb.env.SWEEP_ID] = self._sweep_id
        wandb_sdk.wandb_setup._setup(_reset=True)

        wandb.termlog(f"Agent Starting Run: {run_id} with config:")
        for k, v in job.config.items():
            wandb.termlog("\t{}: {}".format(k, v["value"]))

        self._function()
        wandb.finish()
    except KeyboardInterrupt as ki:
        raise ki
    except Exception as e:
        wandb.finish(exit_code=1)
        if self._run_status[run_id] == RunStatus.RUNNING:
            self._run_status[run_id] = RunStatus.ERRORED
            self._exceptions[run_id] = e
    finally:
        # clean up the environment changes made
        os.environ.pop(wandb.env.RUN_ID, None)
        os.environ.pop(wandb.env.SWEEP_ID, None)
        os.environ.pop(wandb.env.SWEEP_PARAM_PATH, None)
python
wandb/agents/pyagent.py
277
311
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
2,477
run
def run(self):
    """Set up the agent, start the heartbeat thread, and process jobs."""
    logger.info(
        "Starting sweep agent: entity={}, project={}, count={}".format(
            self._entity, self._project, self._count
        )
    )
    self._setup()
    # self._main_thread = threading.Thread(target=self._run_jobs_from_queue)
    # Daemon heartbeat so it never blocks interpreter shutdown.
    self._heartbeat_thread = threading.Thread(target=self._heartbeat)
    self._heartbeat_thread.daemon = True
    # self._main_thread.start()
    self._heartbeat_thread.start()
    # self._main_thread.join()
    self._run_jobs_from_queue()
python
wandb/agents/pyagent.py
313
326
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
2,478
pyagent
def pyagent(sweep_id, function, entity=None, project=None, count=None):
    """Generic agent entrypoint, used for CLI or jupyter.

    Arguments:
        sweep_id (dict): Sweep ID generated by CLI or sweep API
        function (func, optional): A function to call instead of the "program"
        entity (str, optional): W&B Entity
        project (str, optional): W&B Project
        count (int, optional): the number of trials to run.

    Raises:
        Exception: if `function` is not callable.
    """
    if not callable(function):
        # Fixed typo in the original message ("paramter").
        raise Exception("function parameter must be callable!")
    agent = Agent(
        sweep_id,
        function=function,
        entity=entity,
        project=project,
        count=count,
    )
    agent.run()
python
wandb/agents/pyagent.py
329
348
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
2,479
is_running
def is_running():
    """Return True while at least one agent main loop is active."""
    return bool(_INSTANCES)
python
wandb/agents/pyagent.py
354
355
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
2,480
wandb_dir
def wandb_dir(root_dir=None):
    """Return the directory wandb should stage files in.

    Falls back to the system temp directory when the cwd is gone or the
    chosen root is not writable.
    """
    if root_dir is None or root_dir == "":
        try:
            cwd = os.getcwd()
        except OSError:
            termwarn("os.getcwd() no longer exists, using system temp directory")
            cwd = tempfile.gettempdir()
        root_dir = env.get_dir(cwd)
    path = os.path.join(root_dir, __stage_dir__ or ("wandb" + os.sep))
    if not os.access(root_dir, os.W_OK):
        termwarn(
            f"Path {path} wasn't writable, using system temp directory", repeat=False
        )
        path = os.path.join(tempfile.gettempdir(), __stage_dir__ or ("wandb" + os.sep))
    return path
python
wandb/old/core.py
31
45
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
2,481
_set_stage_dir
def _set_stage_dir(stage_dir):
    """Override the module-level staging directory.

    Used when initing a new project with "wandb init".
    """
    global __stage_dir__
    __stage_dir__ = stage_dir
python
wandb/old/core.py
48
51
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
2,482
__init__
def __init__(self, message):
    """Store the message on the instance as well as passing it up."""
    super().__init__(message)
    self.message = message
python
wandb/old/core.py
57
59
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
2,483
encode
def encode(self, encoding):
    """Return the raw message; `encoding` is accepted but ignored."""
    return self.message
python
wandb/old/core.py
62
63
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
2,484
termlog
def termlog(string="", newline=True, repeat=True):
    """Log to standard error with formatting.

    Arguments:
        string (str, optional): The string to print
        newline (bool, optional): Print a newline at the end of the string
        repeat (bool, optional): If set to False only prints the string once per process
    """
    if string:
        line = "\n".join([f"{LOG_STRING}: {s}" for s in string.split("\n")])
    else:
        line = ""
    if not repeat and line in PRINTED_MESSAGES:
        return
    # Repeated line tracking limited to 1k messages
    if len(PRINTED_MESSAGES) < 1000:
        PRINTED_MESSAGES.add(line)
    if os.getenv(env.SILENT):
        from wandb import util
        from wandb.sdk.lib import filesystem

        filesystem.mkdir_exists_ok(os.path.dirname(util.get_log_file_path()))
        # NOTE(review): mode "w" truncates the log file on every call in
        # silent mode — presumably intentional, but verify.
        with open(util.get_log_file_path(), "w") as log:
            click.echo(line, file=log, nl=newline)
    else:
        click.echo(line, file=sys.stderr, nl=newline)
python
wandb/old/core.py
80
105
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
2,485
termwarn
def termwarn(string, **kwargs):
    """Log a warning: prefix every line with WARN_STRING and delegate to termlog."""
    string = "\n".join([f"{WARN_STRING} {s}" for s in string.split("\n")])
    termlog(string=string, newline=True, **kwargs)
python
wandb/old/core.py
108
110
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
2,486
termerror
def termerror(string, **kwargs):
    """Log an error: prefix every line with ERROR_STRING and delegate to termlog."""
    string = "\n".join([f"{ERROR_STRING} {s}" for s in string.split("\n")])
    termlog(string=string, newline=True, **kwargs)
python
wandb/old/core.py
113
115
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
2,487
__init__
def __init__(self, root=None, path=()):
    """Create a summary node; with no root, this node becomes its own root."""
    self._path = tuple(path)
    if root is None:
        self._root = self
        self._json_dict = {}
    else:
        self._root = root
        # Walk the root's json dict down to this node's sub-dict.
        json_dict = root._json_dict
        for k in path:
            json_dict = json_dict[k]
        self._json_dict = json_dict
    self._dict = {}
    # We use this to track which keys the user has set explicitly
    # so that we don't automatically overwrite them when we update
    # the summary from the history.
    self._locked_keys = set()
python
wandb/old/summary.py
25
41
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
2,488
__setattr__
def __setattr__(self, k, v):
    """Route attribute writes to item writes, except private `_` names."""
    k = k.strip()
    if k.startswith("_"):
        object.__setattr__(self, k, v)
    else:
        self[k] = v
python
wandb/old/summary.py
43
48
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
2,489
__getattr__
def __getattr__(self, k):
    """Route attribute reads to item reads, except private `_` names."""
    k = k.strip()
    if k.startswith("_"):
        return object.__getattribute__(self, k)
    else:
        return self[k]
python
wandb/old/summary.py
50
55
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
2,490
_root_get
def _root_get(self, path, child_dict):
    """Load a value at a particular path from the root.

    This should only be implemented by the "_root" child class. We pass
    the child_dict so the item can be set on it or not as appropriate.
    Returning None for a nonexistant path wouldn't be distinguishable
    from that path being set to the value None.
    """
    raise NotImplementedError
python
wandb/old/summary.py
57
66
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
2,491
_root_set
def _root_set(self, path, new_keys_values):
    """Set a value at a particular path in the root.

    This should only be implemented by the "_root" child class.
    """
    raise NotImplementedError
python
wandb/old/summary.py
68
73
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
2,492
_root_del
def _root_del(self, path):
    """Delete a value at a particular path in the root.

    This should only be implemented by the "_root" child class.
    """
    raise NotImplementedError
python
wandb/old/summary.py
75
80
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
2,493
_write
def _write(self, commit=False):
    """Persist the summary; only implemented on the root summary."""
    raise NotImplementedError
python
wandb/old/summary.py
82
84
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
2,494
keys
def keys(self):
    """Return all keys, including those whose values are not yet loaded."""
    # _json_dict has the full set of keys, including those for h5 objects
    # that may not have been loaded yet
    return self._json_dict.keys()
python
wandb/old/summary.py
86
89
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
2,495
get
def get(self, k, default=None):
    """Return the value for stripped key `k`, loading lazily from the root.

    Falls back to `default` when the root cannot supply a value.
    """
    if isinstance(k, str):
        k = k.strip()
    if k not in self._dict:
        # Ask the root to populate self._dict for this path if possible.
        self._root._root_get(self._path + (k,), self._dict)
    return self._dict.get(k, default)
python
wandb/old/summary.py
91
96
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
2,496
items
def items(self):
    """Yield (key, value) pairs, forcing each value to load via __getitem__."""
    # not all items may be loaded into self._dict, so we
    # have to build the sequence of items from scratch
    for k in self.keys():
        yield k, self[k]
python
wandb/old/summary.py
98
102
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
2,497
__getitem__
def __getitem__(self, k):
    """Return the value for stripped key `k`; raises KeyError if absent."""
    if isinstance(k, str):
        k = k.strip()
    self.get(k)  # load the value into _dict if it should be there
    res = self._dict[k]
    return res
python
wandb/old/summary.py
104
111
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
2,498
__contains__
def __contains__(self, k):
    """Membership is checked against the full json dict, not loaded values."""
    if isinstance(k, str):
        k = k.strip()
    return k in self._json_dict
python
wandb/old/summary.py
113
117
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
2,499
__setitem__
def __setitem__(self, k, v):
    """Set stripped key `k`, locking it against history auto-updates.

    Dict values become nested SummarySubDict nodes; every write is
    propagated to the root and persisted via _write().
    """
    if isinstance(k, str):
        k = k.strip()
    path = self._path

    if isinstance(v, dict):
        self._dict[k] = SummarySubDict(self._root, path + (k,))
        self._root._root_set(path, [(k, {})])
        self._dict[k].update(v)
    else:
        self._dict[k] = v
        self._root._root_set(path, [(k, v)])

    self._locked_keys.add(k)
    self._root._write()

    return v
python
wandb/old/summary.py
119
137
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
2,500
__delitem__
def __delitem__(self, k):
    """Delete stripped key `k` locally and at the root, then persist."""
    k = k.strip()
    del self._dict[k]
    self._root._root_del(self._path + (k,))
    self._root._write()
python
wandb/old/summary.py
139
144
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }