id
int64
1
6.07M
name
stringlengths
1
295
code
stringlengths
12
426k
language
stringclasses
1 value
source_file
stringlengths
5
202
start_line
int64
1
158k
end_line
int64
1
158k
repo
dict
2,301
on_epoch_end
def on_epoch_end(self, epoch, logs=None):
    """Log metrics, media examples, and best-metric tracking at epoch end.

    Args:
        epoch: Zero-based index of the epoch that just finished.
        logs: Metric-name -> value mapping supplied by Keras; may be None.
    """
    # FIX: the original signature used a mutable default (`logs={}`), which
    # is shared across calls and across instances; normalize to a fresh dict.
    if logs is None:
        logs = {}
    if self.log_weights:
        wandb.log(self._log_weights(), commit=False)
    if self.log_gradients:
        wandb.log(self._log_gradients(), commit=False)
    if self.input_type in (
        "image",
        "images",
        "segmentation_mask",
    ) or self.output_type in ("image", "images", "segmentation_mask"):
        if self.generator:
            self.validation_data = next(self.generator)
        if self.validation_data is None:
            wandb.termwarn(
                "No validation_data set, pass a generator to the callback."
            )
        elif self.validation_data and len(self.validation_data) > 0:
            wandb.log(
                {"examples": self._log_images(num_images=self.predictions)},
                commit=False,
            )
    if (
        self._log_evaluation_frequency > 0
        and epoch % self._log_evaluation_frequency == 0
    ):
        self._attempt_evaluation_log(commit=False)
    wandb.log({"epoch": epoch}, commit=False)
    # Commit everything accumulated for this epoch in a single step.
    wandb.log(logs, commit=True)

    self.current = logs.get(self.monitor)
    if self.current and self.monitor_op(self.current, self.best):
        if self.log_best_prefix:
            wandb.run.summary[
                f"{self.log_best_prefix}{self.monitor}"
            ] = self.current
            # FIX: use an f-string for consistency with the summary key above
            # (was a mixed `str.format` call producing the same key).
            wandb.run.summary[f"{self.log_best_prefix}epoch"] = epoch
            if self.verbose and not self.save_model:
                print(
                    "Epoch %05d: %s improved from %0.5f to %0.5f"
                    % (epoch, self.monitor, self.best, self.current)
                )
        if self.save_model:
            self._save_model(epoch)
        if self.save_model and self.save_model_as_artifact:
            self._save_model_as_artifact(epoch)
        self.best = self.current
python
wandb/integration/keras/keras.py
581
632
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
2,302
on_batch_begin
def on_batch_begin(self, batch, logs=None):
    """No-op; present to satisfy the Keras `Callback` interface."""
    pass
python
wandb/integration/keras/keras.py
635
636
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
2,303
on_batch_end
def on_batch_end(self, batch, logs=None):
    """Render the model graph once it exists and log metrics on a batch cadence."""
    # Couldn't do this in train_begin because keras may still not be built.
    if self.save_graph and not self._graph_rendered:
        wandb.run.summary["graph"] = wandb.Graph.from_keras(self.model)
        self._graph_rendered = True

    frequency = self.log_batch_frequency
    if frequency and batch % frequency == 0:
        wandb.log(logs, commit=True)
python
wandb/integration/keras/keras.py
639
646
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
2,304
on_train_batch_begin
def on_train_batch_begin(self, batch, logs=None):
    # Record that training has progressed since the last evaluation log;
    # on_train_end checks this flag to decide whether to log a final round.
    self._model_trained_since_last_eval = True
python
wandb/integration/keras/keras.py
648
649
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
2,305
on_train_batch_end
def on_train_batch_end(self, batch, logs=None):
    """Render the model graph (first call only) and log batch metrics periodically."""
    if self.save_graph and not self._graph_rendered:
        # Deferred from train_begin: the model may not be built until now.
        wandb.run.summary["graph"] = wandb.Graph.from_keras(self.model)
        self._graph_rendered = True

    freq = self.log_batch_frequency
    if freq and batch % freq == 0:
        wandb.log(logs, commit=True)
python
wandb/integration/keras/keras.py
651
658
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
2,306
on_test_begin
def on_test_begin(self, logs=None):
    """No-op; present to satisfy the Keras `Callback` interface."""
    pass
python
wandb/integration/keras/keras.py
660
661
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
2,307
on_test_end
def on_test_end(self, logs=None):
    """No-op; present to satisfy the Keras `Callback` interface."""
    pass
python
wandb/integration/keras/keras.py
663
664
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
2,308
on_test_batch_begin
def on_test_batch_begin(self, batch, logs=None):
    """No-op; present to satisfy the Keras `Callback` interface."""
    pass
python
wandb/integration/keras/keras.py
666
667
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
2,309
on_test_batch_end
def on_test_batch_end(self, batch, logs=None):
    """No-op; present to satisfy the Keras `Callback` interface."""
    pass
python
wandb/integration/keras/keras.py
669
670
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
2,310
on_train_begin
def on_train_begin(self, logs=None):
    """Collect validation data and set up validation/prediction logging.

    If `log_evaluation` is enabled, resolves validation data either from
    `self.validation_data` directly or by draining `validation_steps` batches
    from `self.generator`, then builds a `ValidationDataLogger`. Failures are
    reported as warnings rather than raised. Also logs model GFLOPs when
    `compute_flops` is set and the environment supports it.
    """
    if self.log_evaluation:
        try:
            validation_data = None
            if self.validation_data:
                validation_data = self.validation_data
            elif self.generator:
                if not self.validation_steps:
                    wandb.termwarn(
                        "WandbCallback is unable to log validation data. "
                        "When using a generator for validation_data, you must pass validation_steps"
                    )
                else:
                    # Accumulate `validation_steps` batches into a single
                    # (x, y_true) pair via np.append along the batch axis.
                    x = None
                    y_true = None
                    for i in range(self.validation_steps):
                        bx, by_true = next(self.generator)
                        if x is None:
                            x, y_true = bx, by_true
                        else:
                            x, y_true = (
                                np.append(x, bx, axis=0),
                                np.append(y_true, by_true, axis=0),
                            )
                    validation_data = (x, y_true)
            else:
                wandb.termwarn(
                    "WandbCallback is unable to read validation_data from trainer "
                    "and therefore cannot log validation data. Ensure Keras is properly "
                    "patched by calling `from wandb.keras import WandbCallback` at the top of your script."
                )
            if validation_data:
                self._validation_data_logger = ValidationDataLogger(
                    inputs=validation_data[0],
                    targets=validation_data[1],
                    indexes=self._validation_indexes,
                    validation_row_processor=self._validation_row_processor,
                    prediction_row_processor=self._prediction_row_processor,
                    class_labels=self.labels,
                    infer_missing_processors=self._infer_missing_processors,
                )
        except Exception as e:
            # Best-effort: validation logging must never break training.
            wandb.termwarn(
                "Error initializing ValidationDataLogger in WandbCallback. "
                f"Skipping logging validation data. Error: {str(e)}"
            )

    if self.compute_flops and _can_compute_flops():
        try:
            wandb.summary["GFLOPs"] = self.get_flops()
        except Exception as e:
            # NOTE(review): `e` is intentionally swallowed here; the warning
            # does not include the error detail.
            wandb.termwarn("Unable to compute FLOPs for this model.")
python
wandb/integration/keras/keras.py
672
723
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
2,311
on_train_end
def on_train_end(self, logs=None):
    """Log a final evaluation round if any training batches ran since the last one."""
    # Flag is set by on_train_batch_begin whenever a training batch starts.
    if self._model_trained_since_last_eval:
        self._attempt_evaluation_log()
python
wandb/integration/keras/keras.py
725
727
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
2,312
on_test_begin
def on_test_begin(self, logs=None):
    """No-op; present to satisfy the Keras `Callback` interface."""
    pass
python
wandb/integration/keras/keras.py
729
730
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
2,313
on_test_end
def on_test_end(self, logs=None):
    """No-op; present to satisfy the Keras `Callback` interface."""
    pass
python
wandb/integration/keras/keras.py
732
733
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
2,314
on_test_batch_begin
def on_test_batch_begin(self, batch, logs=None):
    """No-op; present to satisfy the Keras `Callback` interface."""
    pass
python
wandb/integration/keras/keras.py
735
736
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
2,315
on_test_batch_end
def on_test_batch_end(self, batch, logs=None):
    """No-op; present to satisfy the Keras `Callback` interface."""
    pass
python
wandb/integration/keras/keras.py
738
739
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
2,316
on_predict_begin
def on_predict_begin(self, logs=None):
    """No-op; present to satisfy the Keras `Callback` interface."""
    pass
python
wandb/integration/keras/keras.py
741
742
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
2,317
on_predict_end
def on_predict_end(self, logs=None):
    """No-op; present to satisfy the Keras `Callback` interface."""
    pass
python
wandb/integration/keras/keras.py
744
745
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
2,318
on_predict_batch_begin
def on_predict_batch_begin(self, batch, logs=None):
    """No-op; present to satisfy the Keras `Callback` interface."""
    pass
python
wandb/integration/keras/keras.py
747
748
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
2,319
on_predict_batch_end
def on_predict_batch_end(self, batch, logs=None):
    """No-op; present to satisfy the Keras `Callback` interface."""
    pass
python
wandb/integration/keras/keras.py
750
751
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
2,320
_logits_to_captions
def _logits_to_captions(self, logits): if logits[0].shape[-1] == 1: # Scalar output from the model # TODO: handle validation_y if len(self.labels) == 2: # User has named true and false captions = [ self.labels[1] if logits[0] > 0.5 else self.labels[0] for logit in logits ] else: if len(self.labels) != 0: wandb.termwarn( "keras model is producing a single output, " 'so labels should be a length two array: ["False label", "True label"].' ) captions = [logit[0] for logit in logits] else: # Vector output from the model # TODO: handle validation_y labels = np.argmax(np.stack(logits), axis=1) if len(self.labels) > 0: # User has named the categories in self.labels captions = [] for label in labels: try: captions.append(self.labels[label]) except IndexError: captions.append(label) else: captions = labels return captions
python
wandb/integration/keras/keras.py
753
785
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
2,321
_masks_to_pixels
def _masks_to_pixels(self, masks): # if its a binary mask, just return it as grayscale instead of picking the argmax if len(masks[0].shape) == 2 or masks[0].shape[-1] == 1: return masks class_colors = ( self.class_colors if self.class_colors is not None else np.array(wandb.util.class_colors(masks[0].shape[2])) ) imgs = class_colors[np.argmax(masks, axis=-1)] return imgs
python
wandb/integration/keras/keras.py
787
797
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
2,322
_log_images
def _log_images(self, num_images=36):
    """Build a list of `wandb.Image` examples from validation data.

    Samples up to `num_images` validation examples (randomly when there are
    more than `num_images`), runs the model on them, and pairs inputs,
    predictions, and references into interleaved image lists depending on
    `self.input_type` / `self.output_type`.

    Returns:
        A list of `wandb.Image`, or None when no branch matches the
        configured input/output types.
    """
    # assumes validation_data is an (x, y) pair of indexable arrays — the
    # epoch hook populates it from the generator when one is set.
    validation_X = self.validation_data[0]
    validation_y = self.validation_data[1]
    validation_length = len(validation_X)
    if validation_length > num_images:
        # pick some data at random
        indices = np.random.choice(validation_length, num_images, replace=False)
    else:
        indices = range(validation_length)
    test_data = []
    test_output = []
    for i in indices:
        test_example = validation_X[i]
        test_data.append(test_example)
        test_output.append(validation_y[i])
    if self.model.stateful:
        # Stateful models must be run one sample at a time and reset after.
        predictions = self.model.predict(np.stack(test_data), batch_size=1)
        self.model.reset_states()
    else:
        predictions = self.model.predict(
            np.stack(test_data), batch_size=self._prediction_batch_size
        )
        if len(predictions) != len(test_data):
            # Batched prediction returned the wrong count (e.g. fixed batch
            # dimension); retry one sample at a time.
            self._prediction_batch_size = 1
            predictions = self.model.predict(
                np.stack(test_data), batch_size=self._prediction_batch_size
            )
    if self.input_type == "label":
        if self.output_type in ("image", "images", "segmentation_mask"):
            # Label -> image: caption each output with its input label.
            captions = self._logits_to_captions(test_data)
            output_image_data = (
                self._masks_to_pixels(predictions)
                if self.output_type == "segmentation_mask"
                else predictions
            )
            reference_image_data = (
                self._masks_to_pixels(test_output)
                if self.output_type == "segmentation_mask"
                else test_output
            )
            output_images = [
                wandb.Image(data, caption=captions[i], grouping=2)
                for i, data in enumerate(output_image_data)
            ]
            reference_images = [
                wandb.Image(data, caption=captions[i])
                for i, data in enumerate(reference_image_data)
            ]
            return list(chain.from_iterable(zip(output_images, reference_images)))
    elif self.input_type in ("image", "images", "segmentation_mask"):
        input_image_data = (
            self._masks_to_pixels(test_data)
            if self.input_type == "segmentation_mask"
            else test_data
        )
        if self.output_type == "label":
            # we just use the predicted label as the caption for now
            captions = self._logits_to_captions(predictions)
            return [
                wandb.Image(data, caption=captions[i])
                for i, data in enumerate(test_data)
            ]
        elif self.output_type in ("image", "images", "segmentation_mask"):
            # Image -> image: interleave input / prediction / reference.
            output_image_data = (
                self._masks_to_pixels(predictions)
                if self.output_type == "segmentation_mask"
                else predictions
            )
            reference_image_data = (
                self._masks_to_pixels(test_output)
                if self.output_type == "segmentation_mask"
                else test_output
            )
            input_images = [
                wandb.Image(data, grouping=3)
                for i, data in enumerate(input_image_data)
            ]
            output_images = [
                wandb.Image(data) for i, data in enumerate(output_image_data)
            ]
            reference_images = [
                wandb.Image(data) for i, data in enumerate(reference_image_data)
            ]
            return list(
                chain.from_iterable(
                    zip(input_images, output_images, reference_images)
                )
            )
        else:
            # unknown output, just log the input images
            return [wandb.Image(img) for img in test_data]
    elif self.output_type in ("image", "images", "segmentation_mask"):
        # unknown input, just log the predicted and reference outputs without captions
        output_image_data = (
            self._masks_to_pixels(predictions)
            if self.output_type == "segmentation_mask"
            else predictions
        )
        reference_image_data = (
            self._masks_to_pixels(test_output)
            if self.output_type == "segmentation_mask"
            else test_output
        )
        output_images = [
            wandb.Image(data, grouping=2)
            for i, data in enumerate(output_image_data)
        ]
        reference_images = [
            wandb.Image(data) for i, data in enumerate(reference_image_data)
        ]
        return list(chain.from_iterable(zip(output_images, reference_images)))
python
wandb/integration/keras/keras.py
799
914
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
2,323
_log_weights
def _log_weights(self):
    """Return a dict of per-layer parameter metrics.

    A layer whose `get_weights()` yields one array is treated as weight-only;
    two arrays are treated as (weights, bias). Any other count is skipped.
    """
    metrics = {}
    for layer in self.model.layers:
        params = layer.get_weights()
        count = len(params)
        if count in (1, 2):
            _update_if_numeric(
                metrics, f"parameters/{layer.name}.weights", params[0]
            )
        if count == 2:
            _update_if_numeric(
                metrics, f"parameters/{layer.name}.bias", params[1]
            )
    return metrics
python
wandb/integration/keras/keras.py
916
931
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
2,324
_log_gradients
def _log_gradients(self):
    """Return a dict of gradient histograms for all trainable weights.

    Runs one fit pass of the gradient-accumulator model over the stored
    training data to capture gradients, then builds one `wandb.Histogram`
    per trainable weight.
    """
    # Suppress callback warnings grad accumulator
    og_level = tf_logger.level
    tf_logger.setLevel("ERROR")
    self._grad_accumulator_model.fit(
        self._training_data_x,
        self._training_data_y,
        verbose=0,
        callbacks=[self._grad_accumulator_callback],
    )
    # Restore the caller's logger level.
    tf_logger.setLevel(og_level)
    weights = self.model.trainable_weights
    grads = self._grad_accumulator_callback.grads
    metrics = {}
    # assumes weights and grads are aligned index-for-index — TODO confirm
    for (weight, grad) in zip(weights, grads):
        # Strip the ":0" tensor suffix from the weight name for the key.
        metrics[
            "gradients/" + weight.name.split(":")[0] + ".gradient"
        ] = wandb.Histogram(grad)
    return metrics
python
wandb/integration/keras/keras.py
933
952
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
2,325
_log_dataframe
def _log_dataframe(self):
    """Build a wandb dataframe of inputs, targets, and predictions.

    Validation data comes either from `self.validation_data` directly or by
    draining `validation_steps` batches from `self.generator`. Returns a
    categorizer or segmentation dataframe depending on the configured
    input/output types, or None when the combination is unsupported.
    """
    x, y_true, y_pred = None, None, None

    if self.validation_data:
        x, y_true = self.validation_data[0], self.validation_data[1]
        y_pred = self.model.predict(x)
    elif self.generator:
        if not self.validation_steps:
            wandb.termwarn(
                "when using a generator for validation data with dataframes, "
                "you must pass validation_steps. skipping"
            )
            return None

        # Accumulate predictions batch-by-batch along axis 0.
        for i in range(self.validation_steps):
            bx, by_true = next(self.generator)
            by_pred = self.model.predict(bx)
            if x is None:
                x, y_true, y_pred = bx, by_true, by_pred
            else:
                x, y_true, y_pred = (
                    np.append(x, bx, axis=0),
                    np.append(y_true, by_true, axis=0),
                    np.append(y_pred, by_pred, axis=0),
                )

    if self.input_type in ("image", "images") and self.output_type == "label":
        return wandb.image_categorizer_dataframe(
            x=x, y_true=y_true, y_pred=y_pred, labels=self.labels
        )
    elif (
        self.input_type in ("image", "images")
        and self.output_type == "segmentation_mask"
    ):
        return wandb.image_segmentation_dataframe(
            x=x,
            y_true=y_true,
            y_pred=y_pred,
            labels=self.labels,
            class_colors=self.class_colors,
        )
    else:
        wandb.termwarn(
            "unknown dataframe type for input_type=%s and output_type=%s"
            % (self.input_type, self.output_type)
        )
        return None
python
wandb/integration/keras/keras.py
954
1,000
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
2,326
_save_model
def _save_model(self, epoch):
    """Save the model (or just its weights) to `self.filepath`.

    Args:
        epoch: Epoch index, used only for the verbose progress message.
    """
    if wandb.run.disabled:
        return
    if self.verbose > 0:
        print(
            "Epoch %05d: %s improved from %0.5f to %0.5f,"
            " saving model to %s"
            % (epoch, self.monitor, self.best, self.current, self.filepath)
        )

    try:
        if self.save_weights_only:
            self.model.save_weights(self.filepath, overwrite=True)
        else:
            self.model.save(self.filepath, overwrite=True)
    # Was getting `RuntimeError: Unable to create link` in TF 1.13.1
    # also saw `TypeError: can't pickle _thread.RLock objects`
    except (ImportError, RuntimeError, TypeError, AttributeError):
        # FIX: corrected the doubled "as as an" typo in the user-facing
        # error message; also dropped the unused `as e` binding.
        wandb.termerror(
            "Can't save model in the h5py format. The model will be saved "
            "as a W&B Artifact in the 'tf' format."
        )
python
wandb/integration/keras/keras.py
1,002
1,023
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
2,327
_save_model_as_artifact
def _save_model_as_artifact(self, epoch):
    """Save the model in SavedModel format and log it as a W&B artifact.

    Args:
        epoch: Epoch index, recorded as an `epoch_{n}` artifact alias.
    """
    if wandb.run.disabled:
        return

    # Save the model in the SavedModel format.
    # NOTE(review): `self.filepath[:-3]` strips what is presumably a ".h5"
    # suffix — confirm filepath always ends in ".h5".
    # TODO: Replace this manual artifact creation with the `log_model` method
    # after `log_model` is released from beta.
    self.model.save(self.filepath[:-3], overwrite=True, save_format="tf")

    # Log the model as artifact.
    name = wandb.util.make_artifact_name_safe(f"model-{wandb.run.name}")
    model_artifact = wandb.Artifact(name, type="model")
    model_artifact.add_dir(self.filepath[:-3])
    wandb.run.log_artifact(model_artifact, aliases=["latest", f"epoch_{epoch}"])

    # Remove the SavedModel from wandb dir as we don't want to log it to save memory.
    shutil.rmtree(self.filepath[:-3])
python
wandb/integration/keras/keras.py
1,025
1,041
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
2,328
get_flops
def get_flops(self) -> float:
    """Calculate GFLOPs for a tf.keras model in inference mode.

    Freezes the model to a constant graph and profiles it with
    tf.compat.v1.profiler for a single-sample batch.

    Returns:
        Estimated GFLOPs for one forward pass.

    Raises:
        wandb.Error: If `self.model` has not been set yet.
        ValueError: If the model is not a `tf.keras.Model`/`Sequential`.
    """
    if not hasattr(self, "model"):
        raise wandb.Error("self.model must be set before using this method.")

    if not isinstance(
        self.model, (tf.keras.models.Sequential, tf.keras.models.Model)
    ):
        raise ValueError(
            "Calculating FLOPS is only supported for "
            "`tf.keras.Model` and `tf.keras.Sequential` instances."
        )

    # Private TF API — kept local so import failures surface only here.
    from tensorflow.python.framework.convert_to_constants import (
        convert_variables_to_constants_v2_as_graph,
    )

    # Compute FLOPs for one sample
    batch_size = 1
    inputs = [
        tf.TensorSpec([batch_size] + inp.shape[1:], inp.dtype)
        for inp in self.model.inputs
    ]

    # convert tf.keras model into frozen graph to count FLOPs about operations used at inference
    real_model = tf.function(self.model).get_concrete_function(inputs)
    frozen_func, _ = convert_variables_to_constants_v2_as_graph(real_model)

    # Calculate FLOPs with tf.profiler
    run_meta = tf.compat.v1.RunMetadata()
    opts = (
        tf.compat.v1.profiler.ProfileOptionBuilder(
            tf.compat.v1.profiler.ProfileOptionBuilder().float_operation()
        )
        .with_empty_output()
        .build()
    )

    flops = tf.compat.v1.profiler.profile(
        graph=frozen_func.graph, run_meta=run_meta, cmd="scope", options=opts
    )

    # convert to GFLOPs; the profiler counts multiply and add separately,
    # so halving presumably reports fused multiply-add FLOPs — TODO confirm
    return (flops.total_float_ops / 1e9) / 2
python
wandb/integration/keras/keras.py
1,043
1,089
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
2,329
__init__
def __init__(
    self,
    data_table_columns: List[str],
    pred_table_columns: List[str],
    *args: Any,
    **kwargs: Any,
) -> None:
    """Store table schemas and record callback usage telemetry.

    Args:
        data_table_columns: Column names for the ground-truth data table.
        pred_table_columns: Column names for the per-epoch prediction table.

    Raises:
        wandb.Error: If `wandb.init()` has not been called yet.
    """
    super().__init__(*args, **kwargs)
    if wandb.run is None:
        raise wandb.Error(
            "You must call `wandb.init()` first before using this callback."
        )
    with telemetry.context(run=wandb.run) as tel:
        tel.feature.keras_wandb_eval_callback = True

    self.data_table_columns = data_table_columns
    self.pred_table_columns = pred_table_columns
python
wandb/integration/keras/callbacks/tables_builder.py
91
109
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
2,330
on_train_begin
def on_train_begin(self, logs: Optional[Dict[str, float]] = None) -> None:
    """Build and log the ground-truth `data_table` once, before training starts."""
    # Initialize the data_table
    self.init_data_table(column_names=self.data_table_columns)
    # Log the ground truth data
    self.add_ground_truth(logs)
    # Log the data_table as W&B Artifacts
    self.log_data_table()
python
wandb/integration/keras/callbacks/tables_builder.py
111
117
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
2,331
on_epoch_end
def on_epoch_end(self, epoch: int, logs: Optional[Dict[str, float]] = None) -> None:
    """Build and log a fresh `pred_table` of model predictions at every epoch end."""
    # Initialize the pred_table
    self.init_pred_table(column_names=self.pred_table_columns)
    # Log the model prediction
    self.add_model_predictions(epoch, logs)
    # Log the pred_table as W&B Artifacts
    self.log_pred_table()
python
wandb/integration/keras/callbacks/tables_builder.py
119
125
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
2,332
add_ground_truth
def add_ground_truth(self, logs: Optional[Dict[str, float]] = None) -> None:
    """Add ground truth data to `data_table`.

    Use this method to write the logic for adding validation/training data to
    `data_table` initialized using `init_data_table` method.

    Example:
        ```
        for idx, data in enumerate(dataloader):
            self.data_table.add_data(
                idx,
                data
            )
        ```

    This method is called once `on_train_begin` or equivalent hook.

    Raises:
        NotImplementedError: Always; subclasses must override this method.
    """
    raise NotImplementedError(f"{self.__class__.__name__}.add_ground_truth")
python
wandb/integration/keras/callbacks/tables_builder.py
128
144
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
2,333
add_model_predictions
def add_model_predictions(
    self, epoch: int, logs: Optional[Dict[str, float]] = None
) -> None:
    """Add a prediction from a model to `pred_table`.

    Use this method to write the logic for adding model prediction for
    validation/ training data to `pred_table` initialized using
    `init_pred_table` method.

    Example:
        ```
        # Assuming the dataloader is not shuffling the samples.
        for idx, data in enumerate(dataloader):
            preds = model.predict(data)
            self.pred_table.add_data(
                self.data_table_ref.data[idx][0],
                self.data_table_ref.data[idx][1],
                preds
            )
        ```

    This method is called `on_epoch_end` or equivalent hook.

    Raises:
        NotImplementedError: Always; subclasses must override this method.
    """
    raise NotImplementedError(f"{self.__class__.__name__}.add_model_predictions")
python
wandb/integration/keras/callbacks/tables_builder.py
147
168
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
2,334
init_data_table
def init_data_table(self, column_names: List[str]) -> None:
    """Initialize the W&B Tables for validation data.

    Call this method `on_train_begin` or equivalent hook. This is followed by
    adding data to the table row or column wise.

    Args:
        column_names (list): Column names for W&B Tables.
    """
    # Mixed types are allowed because ground-truth rows may combine
    # media, scalars, and strings.
    self.data_table = wandb.Table(columns=column_names, allow_mixed_types=True)
python
wandb/integration/keras/callbacks/tables_builder.py
170
179
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
2,335
init_pred_table
def init_pred_table(self, column_names: List[str]) -> None:
    """Initialize the W&B Tables for model evaluation.

    Call this method `on_epoch_end` or equivalent hook. This is followed by
    adding data to the table row or column wise.

    Args:
        column_names (list): Column names for W&B Tables.
    """
    self.pred_table = wandb.Table(columns=column_names)
python
wandb/integration/keras/callbacks/tables_builder.py
181
190
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
2,336
log_data_table
def log_data_table(
    self, name: str = "val", type: str = "dataset", table_name: str = "val_data"
) -> None:
    """Log the `data_table` as W&B artifact and call `use_artifact` on it.

    This lets the evaluation table use the reference of already uploaded data
    (images, text, scalar, etc.) without re-uploading.

    Args:
        name (str): A human-readable name for this artifact, which is how you
            can identify this artifact in the UI or reference it in
            use_artifact calls. (default is 'val')
        type (str): The type of the artifact, which is used to organize and
            differentiate artifacts. (default is 'dataset'). NOTE: shadows
            the `type` builtin; kept for interface compatibility.
        table_name (str): The name of the table as will be displayed in the
            UI. (default is 'val_data').
    """
    data_artifact = wandb.Artifact(name, type=type)
    data_artifact.add(self.data_table, table_name)

    # Calling `use_artifact` uploads the data to W&B.
    assert wandb.run is not None
    wandb.run.use_artifact(data_artifact)
    data_artifact.wait()

    # We get the reference table.
    self.data_table_ref = data_artifact.get(table_name)
python
wandb/integration/keras/callbacks/tables_builder.py
192
218
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
2,337
log_pred_table
def log_pred_table(
    self,
    type: str = "evaluation",
    table_name: str = "eval_data",
    aliases: Optional[List[str]] = None,
) -> None:
    """Log the W&B Tables for model evaluation.

    The table will be logged multiple times creating new version. Use this
    to compare models at different intervals interactively.

    Args:
        type (str): The type of the artifact, which is used to organize and
            differentiate artifacts. (default is 'evaluation'). NOTE:
            shadows the `type` builtin; kept for interface compatibility.
        table_name (str): The name of the table as will be displayed in the
            UI. (default is 'eval_data')
        aliases (List[str]): List of aliases for the prediction table.
    """
    assert wandb.run is not None
    pred_artifact = wandb.Artifact(f"run_{wandb.run.id}_pred", type=type)
    pred_artifact.add(self.pred_table, table_name)
    wandb.run.log_artifact(pred_artifact, aliases=aliases or ["latest"])
python
wandb/integration/keras/callbacks/tables_builder.py
220
241
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
2,338
__init__
def __init__(
    self,
    filepath: Union[str, os.PathLike],
    monitor: str = "val_loss",
    verbose: int = 0,
    save_best_only: bool = False,
    save_weights_only: bool = False,
    mode: Mode = "auto",
    save_freq: Union[SaveStrategy, int] = "epoch",
    options: Optional[str] = None,
    initial_value_threshold: Optional[float] = None,
    **kwargs: Any,
) -> None:
    """Configure checkpointing; arguments mirror Keras `ModelCheckpoint`.

    Raises:
        wandb.Error: If `wandb.init()` has not been called yet.
    """
    super().__init__(
        filepath=filepath,
        monitor=monitor,
        verbose=verbose,
        save_best_only=save_best_only,
        save_weights_only=save_weights_only,
        mode=mode,
        save_freq=save_freq,
        options=options,
        initial_value_threshold=initial_value_threshold,
        **kwargs,
    )
    if wandb.run is None:
        raise wandb.Error(
            "You must call `wandb.init()` before `WandbModelCheckpoint()`"
        )
    with telemetry.context(run=wandb.run) as tel:
        tel.feature.keras_model_checkpoint = True

    self.save_weights_only = save_weights_only

    # User-friendly warning when trying to save the best model.
    if self.save_best_only:
        self._check_filepath()

    # Lazily resolved by `is_old_tf_keras_version` on first use.
    self._is_old_tf_keras_version: Optional[bool] = None
python
wandb/integration/keras/callbacks/model_checkpoint.py
73
111
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
2,339
on_train_batch_end
def on_train_batch_end(
    self, batch: int, logs: Optional[Dict[str, float]] = None
) -> None:
    """Save a checkpoint on qualifying batches and log it as an artifact."""
    if not self._should_save_on_batch(batch):
        return
    if self.is_old_tf_keras_version:
        # Older tf.keras does not accept a `batch` keyword here.
        # Save the model and get filepath
        self._save_model(epoch=self._current_epoch, logs=logs)
        filepath = self._get_file_path(epoch=self._current_epoch, logs=logs)
    else:
        # Save the model and get filepath
        self._save_model(epoch=self._current_epoch, batch=batch, logs=logs)
        filepath = self._get_file_path(
            epoch=self._current_epoch, batch=batch, logs=logs
        )
    # Log the model as artifact
    self._log_ckpt_as_artifact(
        filepath,
        aliases=["latest", f"epoch_{self._current_epoch}_batch_{batch}"],
    )
python
wandb/integration/keras/callbacks/model_checkpoint.py
113
129
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
2,340
on_epoch_end
def on_epoch_end(self, epoch: int, logs: Optional[Dict[str, float]] = None) -> None:
    """Run the base checkpointing logic, then log an epoch artifact if applicable."""
    super().on_epoch_end(epoch, logs)
    # Artifacts are only produced here when checkpoints are written per epoch.
    if self.save_freq != "epoch":
        return
    # Get filepath where the model checkpoint is saved.
    if self.is_old_tf_keras_version:
        filepath = self._get_file_path(epoch=epoch, logs=logs)
    else:
        filepath = self._get_file_path(epoch=epoch, batch=None, logs=logs)
    # Log the model as artifact
    self._log_ckpt_as_artifact(filepath, aliases=["latest", f"epoch_{epoch}"])
python
wandb/integration/keras/callbacks/model_checkpoint.py
131
142
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
2,341
_log_ckpt_as_artifact
def _log_ckpt_as_artifact(
    self, filepath: str, aliases: Optional[List[str]] = None
) -> None:
    """Log model checkpoint as W&B Artifact.

    Handles the three on-disk layouts a Keras checkpoint can have:
    weights-only (checkpoint + index + data shards), a single `.h5` file,
    or a SavedModel directory.
    """
    try:
        assert wandb.run is not None
        model_artifact = wandb.Artifact(f"run_{wandb.run.id}_model", type="model")
        if self.save_weights_only:
            # We get three files when this is True
            model_artifact.add_file(
                os.path.join(os.path.dirname(filepath), "checkpoint")
            )
            model_artifact.add_file(filepath + ".index")
            # In a distributed setting we get multiple shards.
            for file in glob.glob(f"{filepath}.data-*"):
                model_artifact.add_file(file)
        elif filepath.endswith(".h5"):
            # Model saved in .h5 format thus we get one file.
            model_artifact.add_file(filepath)
        else:
            # Model saved in the SavedModel format thus we have dir.
            model_artifact.add_dir(filepath)
        wandb.log_artifact(model_artifact, aliases=aliases or [])
    except ValueError:
        # This error occurs when `save_best_only=True` and the model
        # checkpoint is not saved for that epoch/batch. Since TF/Keras
        # is giving friendly log, we can avoid clustering the stdout.
        pass
python
wandb/integration/keras/callbacks/model_checkpoint.py
144
171
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
2,342
_check_filepath
def _check_filepath(self) -> None: placeholders = [] for tup in string.Formatter().parse(self.filepath): if tup[1] is not None: placeholders.append(tup[1]) if len(placeholders) == 0: wandb.termwarn( "When using `save_best_only`, ensure that the `filepath` argument " "contains formatting placeholders like `{epoch:02d}` or `{batch:02d}`. " "This ensures correct interpretation of the logged artifacts.", repeat=False, )
python
wandb/integration/keras/callbacks/model_checkpoint.py
173
184
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
2,343
is_old_tf_keras_version
def is_old_tf_keras_version(self) -> Optional[bool]:
    """Return (computing and caching on first use) whether tf.keras < 2.6.0."""
    if self._is_old_tf_keras_version is None:
        from pkg_resources import parse_version

        # Cache the comparison result directly instead of branching.
        self._is_old_tf_keras_version = parse_version(
            tf.keras.__version__
        ) < parse_version("2.6.0")
    return self._is_old_tf_keras_version
python
wandb/integration/keras/callbacks/model_checkpoint.py
187
196
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
2,344
__init__
def __init__(
    self,
    log_freq: Union[LogStrategy, int] = "epoch",
    initial_global_step: int = 0,
    *args: Any,
    **kwargs: Any,
) -> None:
    """Initialize the Keras metrics logger.

    Args:
        log_freq: "epoch", "batch", or an integer N to log every N batches.
        initial_global_step: starting step, used when resuming training so
            learning-rate schedules resolve against the right step.

    Raises:
        wandb.Error: if `wandb.init()` has not been called yet.
    """
    super().__init__(*args, **kwargs)

    if wandb.run is None:
        raise wandb.Error(
            "You must call `wandb.init()` before WandbMetricsLogger()"
        )

    with telemetry.context(run=wandb.run) as tel:
        tel.feature.keras_metrics_logger = True

    # "batch" is shorthand for logging on every batch.
    if log_freq == "batch":
        log_freq = 1

    self.logging_batch_wise = isinstance(log_freq, int)
    self.log_freq: Any = log_freq if self.logging_batch_wise else None
    self.global_batch = 0
    self.global_step = initial_global_step

    if self.logging_batch_wise:
        # define custom x-axis for batch logging.
        wandb.define_metric("batch/batch_step")
        # set all batch metrics to be logged against batch_step.
        wandb.define_metric("batch/*", step_metric="batch/batch_step")
    else:
        # define custom x-axis for epoch-wise logging.
        wandb.define_metric("epoch/epoch")
        # set all epoch-wise metrics to be logged against epoch.
        wandb.define_metric("epoch/*", step_metric="epoch/epoch")
python
wandb/integration/keras/callbacks/metrics_logger.py
52
86
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
2,345
_get_lr
def _get_lr(self) -> Union[float, None]:
    """Return the optimizer's current learning rate, or None on failure.

    Handles both a plain `tf.Variable` learning rate and a learning-rate
    schedule, which is a callable of the current global step.
    """
    if isinstance(self.model.optimizer.learning_rate, tf.Variable):
        return float(self.model.optimizer.learning_rate.numpy().item())
    try:
        # Schedules are callables of the step.
        return float(
            self.model.optimizer.learning_rate(step=self.global_step).numpy().item()
        )
    except Exception:
        wandb.termerror("Unable to log learning rate.", repeat=False)
        return None
python
wandb/integration/keras/callbacks/metrics_logger.py
88
97
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
2,346
on_epoch_end
def on_epoch_end(self, epoch: int, logs: Optional[Dict[str, Any]] = None) -> None:
    """Called at the end of an epoch; logs metrics under the `epoch/` prefix."""
    if logs is None:
        metrics: Dict[str, Any] = {}
    else:
        metrics = {f"epoch/{key}": value for key, value in logs.items()}

    metrics["epoch/epoch"] = epoch

    current_lr = self._get_lr()
    if current_lr is not None:
        metrics["epoch/learning_rate"] = current_lr

    wandb.log(metrics)
python
wandb/integration/keras/callbacks/metrics_logger.py
99
109
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
2,347
on_batch_end
def on_batch_end(self, batch: int, logs: Optional[Dict[str, Any]] = None) -> None:
    """An alias for `on_train_batch_end` for backwards compatibility."""
    # BUGFIX: the docstring above was previously placed *after* the first
    # statement, where it was a useless string expression, not a docstring.
    self.global_step += 1
    if self.logging_batch_wise and batch % self.log_freq == 0:
        # Prefix all metrics with `batch/` and attach the custom x-axis.
        logs = {f"batch/{k}": v for k, v in logs.items()} if logs else {}
        logs["batch/batch_step"] = self.global_batch

        lr = self._get_lr()
        if lr is not None:
            logs["batch/learning_rate"] = lr

        wandb.log(logs)

        self.global_batch += self.log_freq
python
wandb/integration/keras/callbacks/metrics_logger.py
111
124
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
2,348
on_train_batch_end
def on_train_batch_end(
    self, batch: int, logs: Optional[Dict[str, Any]] = None
) -> None:
    """Called at the end of a training batch in `fit` methods."""
    payload = logs if logs else {}
    self.on_batch_end(batch, payload)
python
wandb/integration/keras/callbacks/metrics_logger.py
126
130
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
2,349
__init__
def __init__(
    self,
    verbose: int = 0,
    model_save_path: Optional[str] = None,
    model_save_freq: int = 0,
    gradient_save_freq: int = 0,
    log: Optional[Literal["gradients", "parameters", "all"]] = "all",
) -> None:
    """Initialize the Stable-Baselines3 W&B callback.

    Args:
        verbose: SB3 verbosity level.
        model_save_path: directory to save checkpoints into; required when
            `model_save_freq` > 0.
        model_save_freq: save the model every N callback calls (0 disables).
        gradient_save_freq: `wandb.watch` frequency for the policy
            (0 disables watching).
        log: what `wandb.watch` records; invalid values fall back to "all".

    Raises:
        wandb.Error: if `wandb.init()` has not been called yet.
    """
    super().__init__(verbose)
    if wandb.run is None:
        raise wandb.Error("You must call wandb.init() before WandbCallback()")
    with wb_telemetry.context() as tel:
        tel.feature.sb3 = True
    self.model_save_freq = model_save_freq
    self.model_save_path = model_save_path
    self.gradient_save_freq = gradient_save_freq
    if log not in ["gradients", "parameters", "all", None]:
        wandb.termwarn(
            "`log` must be one of `None`, 'gradients', 'parameters', or 'all', "
            "falling back to 'all'"
        )
        log = "all"
    self.log = log
    # Create folder if needed
    if self.model_save_path is not None:
        os.makedirs(self.model_save_path, exist_ok=True)
        self.path = os.path.join(self.model_save_path, "model.zip")
    else:
        assert (
            self.model_save_freq == 0
        ), "to use the `model_save_freq` you have to set the `model_save_path` parameter"
python
wandb/integration/sb3/sb3.py
87
117
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
2,350
_init_callback
def _init_callback(self) -> None:
    """Seed wandb.config with the model's hyperparameters and set up watching.

    Keys already present in `wandb.config` are left untouched; non-scalar
    attribute values are stored as their string representation.
    """
    # BUGFIX: previously `d = {}` was followed by `if "algo" not in d:`,
    # a check that was always true on a freshly created empty dict.
    # `algo` records the SB3 algorithm class name (e.g. "PPO").
    d = {"algo": type(self.model).__name__}
    for key, value in self.model.__dict__.items():
        if key in wandb.config:
            continue
        # Exact type check kept on purpose (e.g. it excludes bool, which
        # `isinstance(value, int)` would accept).
        if type(value) in [float, int, str]:
            d[key] = value
        else:
            d[key] = str(value)
    if self.gradient_save_freq > 0:
        wandb.watch(
            self.model.policy,
            log_freq=self.gradient_save_freq,
            log=self.log,
        )
    wandb.config.setdefaults(d)
python
wandb/integration/sb3/sb3.py
119
136
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
2,351
_on_step
def _on_step(self) -> bool:
    """Save a checkpoint every `model_save_freq` calls; always continue training."""
    should_save = (
        self.model_save_freq > 0
        and self.model_save_path is not None
        and self.n_calls % self.model_save_freq == 0
    )
    if should_save:
        self.save_model()
    return True
python
wandb/integration/sb3/sb3.py
138
143
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
2,352
_on_training_end
def _on_training_end(self) -> None:
    """Persist a final checkpoint when a save path was configured."""
    if self.model_save_path is None:
        return
    self.save_model()
python
wandb/integration/sb3/sb3.py
145
147
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
2,353
save_model
def save_model(self) -> None:
    """Save the SB3 model to `self.path` and sync the file to W&B."""
    self.model.save(self.path)
    wandb.save(self.path, base_path=self.model_save_path)
    if self.verbose > 1:
        logger.info(f"Saving model checkpoint to {self.path}")
python
wandb/integration/sb3/sb3.py
149
153
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
2,354
wandb_callback
def wandb_callback() -> "Callable":
    """Old style callback that will be deprecated in favor of WandbCallback.

    Please try the new logger for more features.
    """
    warnings.warn(
        "wandb_callback will be deprecated in favor of WandbCallback. Please use WandbCallback for more features.",
        UserWarning,
        stacklevel=2,
    )
    with wb_telemetry.context() as tel:
        tel.feature.xgboost_old_wandb_callback = True

    def callback(env: "CallbackEnv") -> None:
        # Log each (metric, value) pair without committing, then commit
        # everything as a single step.
        for k, v in env.evaluation_result_list:
            wandb.log({k: v}, commit=False)
        wandb.log({})

    return callback
python
wandb/integration/xgboost/xgboost.py
36
52
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
2,355
callback
def callback(env: "CallbackEnv") -> None:
    """Log every evaluation metric without committing, then commit once."""
    for metric_name, metric_value in env.evaluation_result_list:
        wandb.log({metric_name: metric_value}, commit=False)
    # Empty log commits all metrics accumulated above as one step.
    wandb.log({})
python
wandb/integration/xgboost/xgboost.py
47
50
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
2,356
__init__
def __init__(
    self,
    log_model: bool = False,
    log_feature_importance: bool = True,
    importance_type: str = "gain",
    define_metric: bool = True,
):
    """Initialize the XGBoost W&B callback.

    Args:
        log_model: log the trained booster as a W&B model artifact.
        log_feature_importance: log a feature-importance bar chart.
        importance_type: importance metric passed to `Booster.get_score`
            (e.g. "gain", "weight").
        define_metric: declare min/max summaries for recognized metrics.

    Raises:
        wandb.Error: if `wandb.init()` has not been called yet.
    """
    self.log_model: bool = log_model
    self.log_feature_importance: bool = log_feature_importance
    self.importance_type: str = importance_type
    self.define_metric: bool = define_metric

    if wandb.run is None:
        raise wandb.Error("You must call wandb.init() before WandbCallback()")

    with wb_telemetry.context() as tel:
        tel.feature.xgboost_wandb_callback = True
python
wandb/integration/xgboost/xgboost.py
95
112
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
2,357
before_training
def before_training(self, model: Booster) -> Booster:
    """Mirror the booster's configuration into wandb.config before training."""
    booster_config = json.loads(model.save_config())
    wandb.config.update(booster_config)
    return model
python
wandb/integration/xgboost/xgboost.py
114
120
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
2,358
after_training
def after_training(self, model: Booster) -> Booster: """Run after training is finished.""" # Log the booster model as artifacts if self.log_model: self._log_model_as_artifact(model) # Plot feature importance if self.log_feature_importance: self._log_feature_importance(model) # Log the best score and best iteration if model.attr("best_score") is not None: wandb.log( { "best_score": float(cast(str, model.attr("best_score"))), "best_iteration": int(cast(str, model.attr("best_iteration"))), } ) return model
python
wandb/integration/xgboost/xgboost.py
122
141
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
2,359
after_iteration
def after_iteration(self, model: Booster, epoch: int, evals_log: dict) -> bool:
    """Log evaluation metrics for this boosting round; never stop training."""
    for data_name, metric_histories in evals_log.items():
        for metric_name, history in metric_histories.items():
            if self.define_metric:
                self._define_metric(data_name, metric_name)
            # Only the latest value in the metric history is logged.
            wandb.log({f"{data_name}-{metric_name}": history[-1]}, commit=False)

    wandb.log({"epoch": epoch})
    # Summary behaviour only needs to be declared once.
    self.define_metric = False
    return False
python
wandb/integration/xgboost/xgboost.py
143
158
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
2,360
_log_model_as_artifact
def _log_model_as_artifact(self, model: Booster) -> None: model_name = f"{wandb.run.id}_model.json" # type: ignore model_path = Path(wandb.run.dir) / model_name # type: ignore model.save_model(str(model_path)) model_artifact = wandb.Artifact(name=model_name, type="model") model_artifact.add_file(model_path) wandb.log_artifact(model_artifact)
python
wandb/integration/xgboost/xgboost.py
160
167
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
2,361
_log_feature_importance
def _log_feature_importance(self, model: Booster) -> None:
    """Log a bar chart of the booster's feature importances to W&B."""
    importances = model.get_score(importance_type=self.importance_type)
    rows = [[feature, score] for feature, score in importances.items()]
    table = wandb.Table(data=rows, columns=["Feature", "Importance"])
    chart = wandb.plot.bar(
        table, "Feature", "Importance", title="Feature Importance"
    )
    wandb.log({"Feature Importance": chart})
python
wandb/integration/xgboost/xgboost.py
169
179
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
2,362
_define_metric
def _define_metric(self, data: str, metric_name: str) -> None:
    """Declare min/max summary behaviour for a recognized metric name.

    Anything containing "loss" or listed in MINIMIZE_METRICS is summarized
    by its minimum; MAXIMIZE_METRICS by its maximum; unknown names are
    left alone.
    """
    lowered = str.lower(metric_name)
    if "loss" in lowered or lowered in MINIMIZE_METRICS:
        wandb.define_metric(f"{data}-{metric_name}", summary="min")
    elif lowered in MAXIMIZE_METRICS:
        wandb.define_metric(f"{data}-{metric_name}", summary="max")
python
wandb/integration/xgboost/xgboost.py
181
189
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
2,363
__init__
def __init__(self, **kwargs):
    """Start a W&B run; `kwargs` are forwarded to `wandb.init`."""
    self.run = wandb.init(**kwargs)
    # Maps resource filenames to their md5 digests (see resource_event).
    self.resources = {}
python
wandb/integration/sacred/__init__.py
58
60
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
2,364
started_event
def started_event(
    self, ex_info, command, host_info, start_time, config, meta_info, _id
):
    """Called by sacred when the experiment starts; mirrors its config to W&B."""
    # TODO: add the source code file
    # TODO: add dependencies and metadata.
    self.__update_config(config)
python
wandb/integration/sacred/__init__.py
62
67
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
2,365
completed_event
def completed_event(self, stop_time, result):
    """Log experiment results when the sacred run completes.

    Scalars are logged as metrics, dicts are logged as-is, numpy arrays are
    logged as images, and anything else is logged as an artifact.
    """
    if result:
        if not isinstance(result, tuple):
            result = (
                result,
            )  # transform single result to tuple so that both single & multiple results use same code
        for i, r in enumerate(result):
            if isinstance(r, float) or isinstance(r, int):
                wandb.log({f"result_{i}": float(r)})
            elif isinstance(r, dict):
                wandb.log(r)
            elif isinstance(r, numpy.ndarray):
                # BUGFIX: this branch previously came *after* the
                # `isinstance(r, object)` branch, which matches everything,
                # so arrays could never be logged as images.
                wandb.log({f"result_{i}": wandb.Image(r)})
            elif isinstance(r, object):
                artifact = wandb.Artifact(f"result_{i}.pkl", type="result")
                artifact.add_file(r)
                self.run.log_artifact(artifact)
            else:
                # Unreachable in practice (everything is an `object`);
                # retained as the original defensive fallback.
                warnings.warn(
                    "logging results does not support type '{}' results. Ignoring this result".format(
                        type(r),
                    ),
                    stacklevel=2,
                )
python
wandb/integration/sacred/__init__.py
69
93
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
2,366
artifact_event
def artifact_event(self, name, filename, metadata=None, content_type=None):
    """Upload a sacred artifact file to W&B as an Artifact."""
    artifact_type = "file" if content_type is None else content_type
    artifact = wandb.Artifact(name, type=artifact_type)
    artifact.add_file(filename)
    self.run.log_artifact(artifact)
python
wandb/integration/sacred/__init__.py
95
100
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
2,367
resource_event
def resource_event(self, filename):
    """Record the md5 digest of a resource file the first time it is seen.

    TODO: Maintain resources list.
    """
    if filename not in self.resources:
        md5 = get_digest(filename)
        self.resources[filename] = md5
python
wandb/integration/sacred/__init__.py
102
106
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
2,368
log_metrics
def log_metrics(self, metrics_by_name, info):
    """Log sacred metric histories to W&B; array values become images."""
    for metric_name, metric_ptr in metrics_by_name.items():
        for _step, value in zip(metric_ptr["steps"], metric_ptr["values"]):
            if isinstance(value, numpy.ndarray):
                payload = {metric_name: wandb.Image(value)}
            else:
                payload = {metric_name: value}
            wandb.log(payload)
python
wandb/integration/sacred/__init__.py
108
114
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
2,369
__update_config
def __update_config(self, config):
    """Copy the sacred config into the run config and reset the resource list."""
    for k, v in config.items():
        self.run.config[k] = v
    self.run.config["resources"] = []
python
wandb/integration/sacred/__init__.py
116
119
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
2,370
wandb_log
def wandb_log(  # noqa: C901
    func=None,  # /, # py38 only
    log_component_file=True,
):
    """Wrap a standard python function and log to W&B.

    Usable both as `@wandb_log` and as `@wandb_log(log_component_file=...)`.
    The wrapped component gets an injected `mlpipeline_ui_metadata_path`
    output so the W&B run page can be embedded in Kubeflow's
    "ML Visualizations" tab.
    """
    # Imports are local so importing this module does not require kfp.
    import json
    import os
    from functools import wraps
    from inspect import Parameter, signature

    from kfp import components
    from kfp.components import (
        InputArtifact,
        InputBinaryFile,
        InputPath,
        InputTextFile,
        OutputArtifact,
        OutputBinaryFile,
        OutputPath,
        OutputTextFile,
    )

    import wandb
    from wandb.sdk.lib import telemetry as wb_telemetry

    output_types = (OutputArtifact, OutputBinaryFile, OutputPath, OutputTextFile)
    input_types = (InputArtifact, InputBinaryFile, InputPath, InputTextFile)

    def isinstance_namedtuple(x):
        # Heuristic: a direct tuple subclass whose `_fields` is a tuple of str.
        t = type(x)
        b = t.__bases__
        if len(b) != 1 or b[0] != tuple:
            return False
        f = getattr(t, "_fields", None)
        if not isinstance(f, tuple):
            return False
        return all(type(n) == str for n in f)

    def get_iframe_html(run):
        return f'<iframe src="{run.url}?kfp=true" style="border:none;width:100%;height:100%;min-width:900px;min-height:600px;"></iframe>'

    def get_link_back_to_kubeflow():
        # `{workflow.uid}` is a literal Argo placeholder substituted by Kubeflow.
        wandb_kubeflow_url = os.getenv("WANDB_KUBEFLOW_URL")
        return f"{wandb_kubeflow_url}/#/runs/details/{{workflow.uid}}"

    def log_input_scalar(name, data, run=None):
        run.config[name] = data
        wandb.termlog(f"Setting config: {name} to {data}")

    def log_input_artifact(name, data, type, run=None):
        artifact = wandb.Artifact(name, type=type)
        artifact.add_file(data)
        run.use_artifact(artifact)
        wandb.termlog(f"Using artifact: {name}")

    def log_output_scalar(name, data, run=None):
        # NamedTuple results fan out to one metric per field.
        if isinstance_namedtuple(data):
            for k, v in zip(data._fields, data):
                run.log({f"{func.__name__}.{k}": v})
        else:
            run.log({name: data})

    def log_output_artifact(name, data, type, run=None):
        artifact = wandb.Artifact(name, type=type)
        artifact.add_file(data)
        run.log_artifact(artifact)
        wandb.termlog(f"Logging artifact: {name}")

    def _log_component_file(func, run=None):
        # Serialize the function to a KFP component YAML and log it.
        name = func.__name__
        output_component_file = f"{name}.yml"
        components._python_op.func_to_component_file(func, output_component_file)

        artifact = wandb.Artifact(name, type="kubeflow_component_file")
        artifact.add_file(output_component_file)
        run.log_artifact(artifact)
        wandb.termlog(f"Logging component file: {output_component_file}")

    # Add `mlpipeline_ui_metadata_path` to signature to show W&B run in "ML Visualizations tab"
    sig = signature(func)
    no_default = []
    has_default = []

    for param in sig.parameters.values():
        if param.default is param.empty:
            no_default.append(param)
        else:
            has_default.append(param)

    # The injected parameter must come before any defaulted parameters.
    new_params = tuple(
        (
            *no_default,
            Parameter(
                "mlpipeline_ui_metadata_path",
                annotation=OutputPath(),
                kind=Parameter.POSITIONAL_OR_KEYWORD,
            ),
            *has_default,
        )
    )
    new_sig = sig.replace(parameters=new_params)
    new_anns = {param.name: param.annotation for param in new_params}
    if "return" in func.__annotations__:
        new_anns["return"] = func.__annotations__["return"]

    def decorator(func):
        # Partition annotations into scalar/artifact inputs and outputs.
        input_scalars = {}
        input_artifacts = {}
        output_scalars = {}
        output_artifacts = {}

        for name, ann in func.__annotations__.items():
            if name == "return":
                output_scalars[name] = ann
            elif isinstance(ann, output_types):
                output_artifacts[name] = ann
            elif isinstance(ann, input_types):
                input_artifacts[name] = ann
            else:
                input_scalars[name] = ann

        @wraps(func)
        def wrapper(*args, **kwargs):
            bound = new_sig.bind(*args, **kwargs)
            bound.apply_defaults()
            # Remove the injected parameter before calling the user function.
            mlpipeline_ui_metadata_path = bound.arguments[
                "mlpipeline_ui_metadata_path"
            ]
            del bound.arguments["mlpipeline_ui_metadata_path"]

            with wandb.init(
                job_type=func.__name__,
                group="{{workflow.annotations.pipelines.kubeflow.org/run_name}}",
            ) as run:
                # Link back to the kfp UI
                kubeflow_url = get_link_back_to_kubeflow()
                run.notes = kubeflow_url
                run.config["LINK_TO_KUBEFLOW_RUN"] = kubeflow_url

                iframe_html = get_iframe_html(run)
                metadata = {
                    "outputs": [
                        {
                            "type": "markdown",
                            "storage": "inline",
                            "source": iframe_html,
                        }
                    ]
                }

                # Kubeflow renders this file in the "ML Visualizations" tab.
                with open(mlpipeline_ui_metadata_path, "w") as metadata_file:
                    json.dump(metadata, metadata_file)

                if log_component_file:
                    _log_component_file(func, run=run)

                for name, _ in input_scalars.items():
                    log_input_scalar(name, kwargs[name], run)

                for name, ann in input_artifacts.items():
                    log_input_artifact(name, kwargs[name], ann.type, run)

                with wb_telemetry.context(run=run) as tel:
                    tel.feature.kfp_wandb_log = True

                result = func(*bound.args, **bound.kwargs)

                for name, _ in output_scalars.items():
                    log_output_scalar(name, result, run)

                for name, ann in output_artifacts.items():
                    log_output_artifact(name, kwargs[name], ann.type, run)

                return result

        # Expose the modified signature so KFP sees the injected parameter.
        wrapper.__signature__ = new_sig
        wrapper.__annotations__ = new_anns

        return wrapper

    if func is None:
        return decorator
    else:
        return decorator(func)
python
wandb/integration/kfp/wandb_logging.py
1
182
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
2,371
isinstance_namedtuple
def isinstance_namedtuple(x):
    """Heuristically detect a namedtuple instance.

    True when `x` is a direct subclass of `tuple` whose `_fields`
    attribute is a tuple of strings.
    """
    cls = type(x)
    bases = cls.__bases__
    if len(bases) != 1 or bases[0] != tuple:
        return False
    fields = getattr(cls, "_fields", None)
    if not isinstance(fields, tuple):
        return False
    return all(type(field) == str for field in fields)
python
wandb/integration/kfp/wandb_logging.py
30
38
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
2,372
get_iframe_html
def get_iframe_html(run):
    """Return inline-iframe HTML embedding the W&B run page in kfp mode."""
    iframe_style = "border:none;width:100%;height:100%;min-width:900px;min-height:600px;"
    return f'<iframe src="{run.url}?kfp=true" style="{iframe_style}"></iframe>'
python
wandb/integration/kfp/wandb_logging.py
40
41
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
2,373
get_link_back_to_kubeflow
def get_link_back_to_kubeflow():
    """Build a link to the originating Kubeflow run from $WANDB_KUBEFLOW_URL.

    `{workflow.uid}` is a literal Argo placeholder substituted by Kubeflow,
    not a Python format field.
    """
    base_url = os.getenv("WANDB_KUBEFLOW_URL")
    return f"{base_url}" + "/#/runs/details/{workflow.uid}"
python
wandb/integration/kfp/wandb_logging.py
43
45
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
2,374
log_input_scalar
def log_input_scalar(name, data, run=None):
    """Record a scalar pipeline input in the run's config."""
    message = f"Setting config: {name} to {data}"
    run.config[name] = data
    wandb.termlog(message)
python
wandb/integration/kfp/wandb_logging.py
47
49
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
2,375
log_input_artifact
def log_input_artifact(name, data, type, run=None):
    """Log a pipeline input file as a W&B artifact and mark it as used.

    Note: `type` shadows the builtin; the name is kept for interface
    compatibility with callers.
    """
    artifact = wandb.Artifact(name, type=type)
    artifact.add_file(data)
    run.use_artifact(artifact)
    wandb.termlog(f"Using artifact: {name}")
python
wandb/integration/kfp/wandb_logging.py
51
55
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
2,376
log_output_scalar
def log_output_scalar(name, data, run=None):
    """Log a component's scalar output(s) to the run.

    A NamedTuple result is fanned out to one metric per field, prefixed
    with the component function's name (`func` from the enclosing scope).
    """
    if isinstance_namedtuple(data):
        for k, v in zip(data._fields, data):
            run.log({f"{func.__name__}.{k}": v})
    else:
        run.log({name: data})
python
wandb/integration/kfp/wandb_logging.py
57
62
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
2,377
log_output_artifact
def log_output_artifact(name, data, type, run=None):
    """Log a component output file as a W&B artifact."""
    output_artifact = wandb.Artifact(name, type=type)
    output_artifact.add_file(data)
    run.log_artifact(output_artifact)
    wandb.termlog(f"Logging artifact: {name}")
python
wandb/integration/kfp/wandb_logging.py
64
68
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
2,378
_log_component_file
def _log_component_file(func, run=None):
    """Serialize `func` to a KFP component YAML and log it as an artifact."""
    name = func.__name__
    output_component_file = f"{name}.yml"
    components._python_op.func_to_component_file(func, output_component_file)

    artifact = wandb.Artifact(name, type="kubeflow_component_file")
    artifact.add_file(output_component_file)
    run.log_artifact(artifact)
    wandb.termlog(f"Logging component file: {output_component_file}")
python
wandb/integration/kfp/wandb_logging.py
70
77
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
2,379
decorator
def decorator(func):
    """Classify `func`'s annotations and wrap it so inputs/outputs are logged.

    Relies on `new_sig`, `new_anns`, `output_types`, `input_types`, the
    `log_*` helpers and `log_component_file` from the enclosing
    `wandb_log` scope.
    """
    # Partition annotations into scalar/artifact inputs and outputs.
    input_scalars = {}
    input_artifacts = {}
    output_scalars = {}
    output_artifacts = {}

    for name, ann in func.__annotations__.items():
        if name == "return":
            output_scalars[name] = ann
        elif isinstance(ann, output_types):
            output_artifacts[name] = ann
        elif isinstance(ann, input_types):
            input_artifacts[name] = ann
        else:
            input_scalars[name] = ann

    @wraps(func)
    def wrapper(*args, **kwargs):
        bound = new_sig.bind(*args, **kwargs)
        bound.apply_defaults()
        # Remove the injected metadata-path parameter before calling the
        # user's function.
        mlpipeline_ui_metadata_path = bound.arguments["mlpipeline_ui_metadata_path"]
        del bound.arguments["mlpipeline_ui_metadata_path"]

        with wandb.init(
            job_type=func.__name__,
            group="{{workflow.annotations.pipelines.kubeflow.org/run_name}}",
        ) as run:
            # Link back to the kfp UI
            kubeflow_url = get_link_back_to_kubeflow()
            run.notes = kubeflow_url
            run.config["LINK_TO_KUBEFLOW_RUN"] = kubeflow_url

            iframe_html = get_iframe_html(run)
            metadata = {
                "outputs": [
                    {
                        "type": "markdown",
                        "storage": "inline",
                        "source": iframe_html,
                    }
                ]
            }

            # Kubeflow renders this file in the "ML Visualizations" tab.
            with open(mlpipeline_ui_metadata_path, "w") as metadata_file:
                json.dump(metadata, metadata_file)

            if log_component_file:
                _log_component_file(func, run=run)

            for name, _ in input_scalars.items():
                log_input_scalar(name, kwargs[name], run)

            for name, ann in input_artifacts.items():
                log_input_artifact(name, kwargs[name], ann.type, run)

            with wb_telemetry.context(run=run) as tel:
                tel.feature.kfp_wandb_log = True

            result = func(*bound.args, **bound.kwargs)

            for name, _ in output_scalars.items():
                log_output_scalar(name, result, run)

            for name, ann in output_artifacts.items():
                log_output_artifact(name, kwargs[name], ann.type, run)

            return result

    # Expose the modified signature so KFP sees the injected parameter.
    wrapper.__signature__ = new_sig
    wrapper.__annotations__ = new_anns

    return wrapper
python
wandb/integration/kfp/wandb_logging.py
106
177
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
2,380
wrapper
def wrapper(*args, **kwargs):
    """Run the wrapped component inside a W&B run, logging inputs and outputs.

    Relies on `new_sig`, `func`, the `log_*` helpers, `input_scalars`,
    `input_artifacts`, `output_scalars`, `output_artifacts` and
    `log_component_file` from the enclosing scopes.
    """
    bound = new_sig.bind(*args, **kwargs)
    bound.apply_defaults()
    # Remove the injected metadata-path parameter before calling the
    # user's function.
    mlpipeline_ui_metadata_path = bound.arguments["mlpipeline_ui_metadata_path"]
    del bound.arguments["mlpipeline_ui_metadata_path"]

    with wandb.init(
        job_type=func.__name__,
        group="{{workflow.annotations.pipelines.kubeflow.org/run_name}}",
    ) as run:
        # Link back to the kfp UI
        kubeflow_url = get_link_back_to_kubeflow()
        run.notes = kubeflow_url
        run.config["LINK_TO_KUBEFLOW_RUN"] = kubeflow_url

        iframe_html = get_iframe_html(run)
        metadata = {
            "outputs": [
                {
                    "type": "markdown",
                    "storage": "inline",
                    "source": iframe_html,
                }
            ]
        }

        # Kubeflow renders this file in the "ML Visualizations" tab.
        with open(mlpipeline_ui_metadata_path, "w") as metadata_file:
            json.dump(metadata, metadata_file)

        if log_component_file:
            _log_component_file(func, run=run)

        for name, _ in input_scalars.items():
            log_input_scalar(name, kwargs[name], run)

        for name, ann in input_artifacts.items():
            log_input_artifact(name, kwargs[name], ann.type, run)

        with wb_telemetry.context(run=run) as tel:
            tel.feature.kfp_wandb_log = True

        result = func(*bound.args, **bound.kwargs)

        for name, _ in output_scalars.items():
            log_output_scalar(name, result, run)

        for name, ann in output_artifacts.items():
            log_output_artifact(name, kwargs[name], ann.type, run)

        return result
python
wandb/integration/kfp/wandb_logging.py
123
173
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
2,381
full_path_exists
def full_path_exists(full_func):
    """Return True when every attribute along a dotted path resolves.

    For "a.b.c" this checks that module "a" exposes a non-None "b" and
    that "a.b" exposes a non-None "c".
    """

    def get_parent_child_pairs(full_func):
        parts = full_func.split(".")
        for depth in range(1, len(parts)):
            yield ".".join(parts[:depth]), parts[depth]

    for parent, child in get_parent_child_pairs(full_func):
        module = wandb.util.get_module(parent)
        if not module or getattr(module, child, None) is None:
            return False
    return True
python
wandb/integration/kfp/kfp_patch.py
46
61
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
2,382
get_parent_child_pairs
def get_parent_child_pairs(full_func):
    """Return (parent_path, child_name) pairs for a dotted name.

    "a.b.c" -> ("a", "b"), ("a.b", "c"); a single segment yields nothing.
    """
    parts = full_func.split(".")
    parent_paths = []
    child_names = []
    for depth in range(1, len(parts)):
        parent_paths.append(".".join(parts[:depth]))
        child_names.append(parts[depth])
    return zip(parent_paths, child_names)
python
wandb/integration/kfp/kfp_patch.py
47
55
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
2,383
patch
def patch(module_name, func):
    """Replace `module_name.<func.__name__>` with `func`, keeping the original.

    The original callable is preserved as `orig_<name>` on the module and
    the patch is recorded in `wandb.patched` so `unpatch` can restore it.
    Returns True on success, False when the target path does not resolve.
    """
    module = wandb.util.get_module(module_name)
    success = False
    full_func = f"{module_name}.{func.__name__}"

    if not full_path_exists(full_func):
        wandb.termerror(
            f"Failed to patch {module_name}.{func.__name__}! Please check if this package/module is installed!"
        )
    else:
        wandb.patched.setdefault(module.__name__, [])
        # if already patched, do not patch again
        if [module, func.__name__] not in wandb.patched[module.__name__]:
            setattr(module, f"orig_{func.__name__}", getattr(module, func.__name__))
            setattr(module, func.__name__, func)
            wandb.patched[module.__name__].append([module, func.__name__])
        success = True

    return success
python
wandb/integration/kfp/kfp_patch.py
64
82
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
2,384
unpatch
def unpatch(module_name):
    """Restore every function previously replaced by `patch` for a module."""
    if module_name not in wandb.patched:
        return
    for module, func_name in wandb.patched[module_name]:
        setattr(module, func_name, getattr(module, f"orig_{func_name}"))
    wandb.patched[module_name] = []
python
wandb/integration/kfp/kfp_patch.py
85
89
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
2,385
unpatch_kfp
def unpatch_kfp():
    """Undo all kfp-related monkeypatches applied by `patch_kfp`."""
    for module_name in (
        "kfp.components",
        "kfp.components._python_op",
        "wandb.integration.kfp",
    ):
        unpatch(module_name)
python
wandb/integration/kfp/kfp_patch.py
92
95
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
2,386
patch_kfp
def patch_kfp():
    """Monkeypatch kfp so components built from @wandb_log functions work.

    If any individual patch fails, @wandb_log itself is replaced with a
    no-op decorator so user pipelines still run unchanged.
    """
    to_patch = [
        (
            "kfp.components",
            create_component_from_func,
        ),
        (
            "kfp.components._python_op",
            create_component_from_func,
        ),
        (
            "kfp.components._python_op",
            _get_function_source_definition,
        ),
        ("kfp.components._python_op", strip_type_hints),
    ]

    successes = []
    for module_name, func in to_patch:
        success = patch(module_name, func)
        successes.append(success)

    if not all(successes):
        wandb.termerror(
            "Failed to patch one or more kfp functions. Patching @wandb_log decorator to no-op."
        )
        patch("wandb.integration.kfp", wandb_log)
python
wandb/integration/kfp/kfp_patch.py
98
123
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
2,387
wandb_log
def wandb_log(
    func=None,  # /, # py38 only
    log_component_file=True,
):
    """Wrap a standard python function and log to W&B.

    NOTE: Because patching failed, this decorator is a no-op.
    """
    from functools import wraps

    def passthrough_decorator(inner_func):
        @wraps(inner_func)
        def passthrough(*args, **kwargs):
            return inner_func(*args, **kwargs)

        return passthrough

    # Support both `@wandb_log` and `@wandb_log(...)` usage.
    return passthrough_decorator if func is None else passthrough_decorator(func)
python
wandb/integration/kfp/kfp_patch.py
126
147
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
2,388
decorator
def decorator(func):
    """Return a transparent wrapper that forwards all arguments to `func`."""

    @wraps(func)
    def passthrough(*args, **kwargs):
        return func(*args, **kwargs)

    return passthrough
python
wandb/integration/kfp/kfp_patch.py
137
142
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
2,389
wrapper
def wrapper(*args, **kwargs):
    # No-op pass-through: forwards everything to the closed-over `func`
    # unchanged (used when kfp patching failed).
    return func(*args, **kwargs)
python
wandb/integration/kfp/kfp_patch.py
139
140
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
2,390
_get_function_source_definition
def _get_function_source_definition(func: Callable) -> str:
    """Get the source code of a function.

    This function is modified from KFP. The original source is below:
    https://github.com/kubeflow/pipelines/blob/b6406b02f45cdb195c7b99e2f6d22bf85b12268b/sdk/python/kfp/components/_python_op.py#L300-L319.

    Returns the function's source starting at its `def` line (or a
    preceding `@wandb_log` decorator line).

    Raises:
        ValueError: if no `def`/`@wandb_log` line could be found.
    """
    func_code = inspect.getsource(func)

    # Function might be defined in some indented scope (e.g. in another
    # function). We need to handle this and properly dedent the function source
    # code
    func_code = textwrap.dedent(func_code)
    func_code_lines = func_code.split("\n")

    # For wandb, allow decorators (so we can use the @wandb_log decorator)
    # BUGFIX: `dropwhile` returns an iterator, which is always truthy, so the
    # emptiness check below could never fire. Materialize to a list first.
    func_code_lines = list(
        itertools.dropwhile(
            lambda x: not (x.startswith("def") or x.startswith("@wandb_log")),
            func_code_lines,
        )
    )

    if not func_code_lines:
        raise ValueError(
            'Failed to dedent and clean up the source of function "{}". '
            "It is probably not properly indented.".format(func.__name__)
        )

    return "\n".join(func_code_lines)
python
wandb/integration/kfp/kfp_patch.py
150
176
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
2,391
create_component_from_func
def create_component_from_func(
    func: Callable,
    output_component_file: Optional[str] = None,
    base_image: Optional[str] = None,
    packages_to_install: Optional[List[str]] = None,
    annotations: Optional[Mapping[str, str]] = None,
):
    '''Convert a Python function to a component and returns a task factory.

    The returned task factory accepts arguments and returns a task object.

    This function is modified from KFP. The original source is below:
    https://github.com/kubeflow/pipelines/blob/b6406b02f45cdb195c7b99e2f6d22bf85b12268b/sdk/python/kfp/components/_python_op.py#L998-L1110.

    ``wandb`` and ``kfp`` are always added to the packages installed in the
    component so the W&B logging extras can run.

    Args:
        func: The python function to convert. The function name and docstring
            are used as the component name and description; argument and
            return annotations become component input/output types. Use
            :code:`kfp.components.InputPath` / :code:`OutputPath` parameter
            annotations for file-based inputs/outputs, and a
            :code:`typing.NamedTuple` return annotation for multiple outputs.
        output_component_file: Optional. Write a component definition to a
            local file. The produced component file can be loaded back by
            calling :code:`load_component_from_file` or
            :code:`load_component_from_uri`.
        base_image: Optional. Specify a custom Docker container image to use
            in the component. For lightweight components, the image needs to
            have python 3.5+. Default is the python image corresponding to
            the current python environment.
        packages_to_install: Optional. List of [versioned] python packages to
            pip install before executing the user function. The caller's list
            is not modified; a copy is extended with the core packages.
        annotations: Optional. Allows adding arbitrary key-value data to the
            component specification.

    Returns:
        A factory function with a strongly-typed signature taken from the
        python function. Once called with the required arguments, the factory
        constructs a task instance that can run the original function in a
        container. :code:`create_component_from_func` can also be used as a
        decorator.
    '''
    core_packages = ["wandb", "kfp"]
    # BUGFIX: build a fresh list instead of `+=` so the caller's list is not
    # mutated in place (repeated calls with the same list would otherwise
    # keep appending "wandb"/"kfp" to it). Resulting contents are unchanged.
    packages_to_install = list(packages_to_install or []) + core_packages

    component_spec = _func_to_component_spec(
        func=func,
        extra_code=wandb_logging_extras,
        base_image=base_image,
        packages_to_install=packages_to_install,
    )
    if annotations:
        component_spec.metadata = structures.MetadataSpec(
            annotations=annotations,
        )

    if output_component_file:
        component_spec.save(output_component_file)

    return _create_task_factory_from_component_spec(component_spec)
python
wandb/integration/kfp/kfp_patch.py
179
303
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
2,392
strip_type_hints
def strip_type_hints(source_code: str) -> str:
    """Return *source_code* unchanged.

    Upstream KFP strips type hints at this point (see
    https://github.com/kubeflow/pipelines/blob/b6406b02f45cdb195c7b99e2f6d22bf85b12268b/sdk/python/kfp/components/_python_op.py#L237-L248);
    the wandb integration deliberately keeps annotations intact, so this
    override is an intentional no-op.
    """
    # For wandb, do not strip type hints.
    return source_code
python
wandb/integration/kfp/kfp_patch.py
306
324
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
2,393
add_wandb_visualization
def add_wandb_visualization(run, mlpipeline_ui_metadata_path):
    """Write KFP UI metadata that embeds the W&B run page as an iframe.

    NOTE: To use this, you must modify your component to have an output called
    `mlpipeline_ui_metadata_path` AND call `wandb.init` yourself inside that
    component.

    Example usage:

    def my_component(..., mlpipeline_ui_metadata_path: OutputPath()):
        import wandb
        from wandb.integration.kfp.helpers import add_wandb_visualization

        with wandb.init() as run:
            add_wandb_visualization(run, mlpipeline_ui_metadata_path)

            ...  # the rest of your code here
    """
    # Build the inline iframe markup pointing at the run page.
    iframe_html = f'<iframe src="{run.url}?kfp=true" style="border:none;width:100%;height:100%;min-width:900px;min-height:600px;"></iframe>'

    # KFP's UI picks up this JSON schema from the metadata output path.
    metadata = {
        "outputs": [
            {"type": "markdown", "storage": "inline", "source": iframe_html}
        ]
    }

    with open(mlpipeline_ui_metadata_path, "w") as metadata_file:
        json.dump(metadata, metadata_file)
python
wandb/integration/kfp/helpers.py
4
28
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
2,394
get_iframe_html
def get_iframe_html(run):
    """Return inline iframe HTML embedding the W&B run page for the KFP UI."""
    src = run.url + "?kfp=true"
    style = "border:none;width:100%;height:100%;min-width:900px;min-height:600px;"
    return '<iframe src="' + src + '" style="' + style + '"></iframe>'
python
wandb/integration/kfp/helpers.py
19
20
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
2,395
__init__
def __init__(
    self,
    learn: "fastai.basic_train.Learner",
    log: Optional[Literal["gradients", "parameters", "all"]] = "gradients",
    save_model: bool = True,
    monitor: Optional[str] = None,
    mode: Literal["auto", "min", "max"] = "auto",
    input_type: Optional[Literal["images"]] = None,
    validation_data: Optional[list] = None,
    predictions: int = 36,
    seed: int = 12345,
) -> None:
    """Configure the fastai W&B callback; requires an active wandb run."""
    # A W&B run must already exist before the callback is constructed.
    if wandb.run is None:
        raise ValueError("You must call wandb.init() before WandbCallback()")

    # Adapted from fast.ai "SaveModelCallback": fall back to TrackerCallback's
    # default monitor value when none is supplied.
    tracker_kwargs = {"mode": mode}
    if monitor is not None:
        tracker_kwargs["monitor"] = monitor
    super().__init__(learn, **tracker_kwargs)

    self.save_model = save_model
    self.model_path = Path(wandb.run.dir) / "bestmodel.pth"
    self.log = log
    self.input_type = input_type
    self.best = None

    # Select items for sample predictions to see evolution along training:
    # when no explicit validation_data is given, draw a reproducible random
    # subset of the validation dataset.
    self.validation_data = validation_data
    if input_type and not self.validation_data:
        rng = random.Random(seed)  # For repeatability
        sample_count = min(predictions, len(learn.data.valid_ds))
        picks = rng.sample(range(len(learn.data.valid_ds)), sample_count)
        self.validation_data = [learn.data.valid_ds[i] for i in picks]
python
wandb/integration/fastai/__init__.py
83
118
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
2,396
on_train_begin
def on_train_begin(self, **kwargs: Any) -> None:
    """Call watch method to log model topology, gradients & weights."""
    # Sets self.best; method inherited from "TrackerCallback" via
    # "SaveModelCallback".
    super().on_train_begin()

    # Guard: wandb.watch must only ever be invoked once per process.
    if WandbCallback._watch_called:
        return
    WandbCallback._watch_called = True
    # Logs model topology and optionally gradients and weights.
    wandb.watch(self.learn.model, log=self.log)
python
wandb/integration/fastai/__init__.py
120
130
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
2,397
on_epoch_end
def on_epoch_end(
    self, epoch: int, smooth_loss: float, last_metrics: list, **kwargs: Any
) -> None:
    """Log training loss, validation loss and custom metrics & log prediction samples & save model.

    Args:
        epoch: Index of the epoch that just finished.
        smooth_loss: Smoothed training loss reported by fastai's recorder.
        last_metrics: Validation loss followed by any custom metric values.
    """
    if self.save_model:
        # Adapted from fast.ai "SaveModelCallback": checkpoint whenever the
        # monitored metric improves.
        current = self.get_monitor_value()
        if current is not None and self.operator(current, self.best):
            print(
                "Better model found at epoch {} with {} value: {}.".format(
                    epoch, self.monitor, current
                )
            )
            self.best = current

            # Save within wandb folder so the checkpoint is synced with the run.
            with self.model_path.open("wb") as model_file:
                self.learn.save(model_file)

    # Log sample predictions if learn.predict is available.
    if self.validation_data:
        try:
            self._wandb_log_predictions()
        except FastaiError as e:
            wandb.termwarn(e.message)
            self.validation_data = None  # prevent from trying again on next loop
        except Exception as e:
            wandb.termwarn(f"Unable to log prediction samples.\n{e}")
            self.validation_data = None  # prevent from trying again on next loop

    # Log losses & metrics. Adapted from fast.ai "CSVLogger".
    # IDIOM FIX: the original built this dict via a comprehension over
    # list(zip(...)); dict(zip(...)) is the direct equivalent.
    logs = dict(zip(self.learn.recorder.names, [epoch, smooth_loss] + last_metrics))
    wandb.log(logs)
python
wandb/integration/fastai/__init__.py
132
170
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
2,398
on_train_end
def on_train_end(self, **kwargs: Any) -> None:
    """Load the best model."""
    if not self.save_model:
        return
    # Adapted from fast.ai "SaveModelCallback": restore the best checkpoint
    # written during training, if one was saved.
    if self.model_path.is_file():
        with self.model_path.open("rb") as model_file:
            self.learn.load(model_file, purge=False)
        print(f"Loaded best saved model from {self.model_path}")
python
wandb/integration/fastai/__init__.py
172
179
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
2,399
_wandb_log_predictions
def _wandb_log_predictions(self) -> None:
    """Log prediction samples.

    Runs ``learn.predict`` on each item of ``self.validation_data`` and logs
    wandb.Image entries under the "Prediction Samples" key (commit deferred
    to the caller's wandb.log). The format chosen per sample depends on the
    shape of the prediction/label, as detailed in the branches below.

    Raises:
        FastaiError: If ``learn.predict`` fails on a sample.
    """
    pred_log = []
    # Nothing to do when no validation samples were selected.
    if self.validation_data is None:
        return

    for x, y in self.validation_data:
        try:
            pred = self.learn.predict(x)
        except Exception:
            # Surface a typed error so the caller can warn once and disable
            # further prediction logging.
            raise FastaiError(
                'Unable to run "predict" method from Learner to log prediction samples.'
            )

        # scalar -> likely to be a category
        # tensor of dim 1 -> likely to be multicategory
        if not pred[1].shape or pred[1].dim() == 1:
            pred_log.append(
                wandb.Image(
                    x.data,
                    caption=f"Ground Truth: {y}\nPrediction: {pred[0]}",
                )
            )

        # most vision datasets have a "show" function we can use
        elif hasattr(x, "show"):
            # log input data
            pred_log.append(wandb.Image(x.data, caption="Input data", grouping=3))
            # log label and prediction
            for im, capt in ((pred[0], "Prediction"), (y, "Ground Truth")):
                # Resize plot to image resolution
                # from https://stackoverflow.com/a/13714915
                my_dpi = 100
                fig = plt.figure(frameon=False, dpi=my_dpi)
                h, w = x.size
                fig.set_size_inches(w / my_dpi, h / my_dpi)
                ax = plt.Axes(fig, [0.0, 0.0, 1.0, 1.0])
                ax.set_axis_off()
                fig.add_axes(ax)
                # Superpose label or prediction to input image
                x.show(ax=ax, y=im)
                pred_log.append(wandb.Image(fig, caption=capt))
                # Close the figure so matplotlib does not accumulate open
                # figures across samples.
                plt.close(fig)

        # likely to be an image (2-D mask, or 3-D with 1/3/4 channels first)
        elif hasattr(y, "shape") and (
            (len(y.shape) == 2) or (len(y.shape) == 3 and y.shape[0] in [1, 3, 4])
        ):
            pred_log.extend(
                [
                    wandb.Image(x.data, caption="Input data", grouping=3),
                    wandb.Image(pred[0].data, caption="Prediction"),
                    wandb.Image(y.data, caption="Ground Truth"),
                ]
            )

        # we just log input data
        else:
            pred_log.append(wandb.Image(x.data, caption="Input data"))

    # commit=False: let the epoch-end logger flush everything in one step.
    wandb.log({"Prediction Samples": pred_log}, commit=False)
python
wandb/integration/fastai/__init__.py
181
244
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
2,400
__init__
def __init__(self, summary_op=None, steps_per_log=1000, history=None):
    """Store the hook configuration and record feature telemetry.

    Args:
        summary_op: Optional summary op evaluated by the hook (default None).
        steps_per_log: Number of steps between logging calls (default 1000).
        history: Optional history object the hook writes to (default None).
    """
    # Record in client telemetry that the estimator hook feature is in use.
    with telemetry.context() as tel:
        tel.feature.estimator_hook = True

    self._summary_op = summary_op
    self._history = history
    self._steps_per_log = steps_per_log
python
wandb/integration/tensorflow/estimator_hook.py
27
33
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }