id
int64
0
190k
prompt
stringlengths
21
13.4M
docstring
stringlengths
1
12k
166,112
import functools
from typing import Callable, Dict, List

import absl
from flax import linen as nn
from flax.metrics import tensorboard
import jax
from jax import numpy as jnp
from jax.experimental import jax2tf
import numpy as np
import optax
import tensorflow as tf
import tensorflow_transform as tft
from tfx import v1 as tfx
from tfx.examples.penguin import penguin_utils_base as base

# NOTE(review): this module references names defined elsewhere in the file
# (not visible here): _FEATURE_KEYS_XF, _FlaxPenguinModel, _train_epoch,
# _eval_epoch, _Params, _InputBatch, _SavedModelWrapper.


def _make_trained_model(train_data: tf.data.Dataset,
                        eval_data: tf.data.Dataset,
                        num_epochs: int,
                        steps_per_epoch: int,
                        eval_steps_per_epoch: int,
                        tensorboard_log_dir: str):
  """Execute model training and evaluation loop.

  Args:
    train_data: a dataset with training pairs (_InputBatch, _LabelBatch).
    eval_data: a dataset with evaluation pairs (_InputBatch, _LabelBatch).
    num_epochs: number of training epochs.
    steps_per_epoch: number of steps for a training epoch. Should be the number
      of samples in your train_data divided by the batch size.
    eval_steps_per_epoch: number of steps for evaluation at the end of each
      training epoch. Should be the number of samples in your eval_data divided
      by the batch size.
    tensorboard_log_dir: Directory where the tensorboard summaries are written.

  Returns:
    An instance of tf.Model.
  """
  learning_rate = 1e-2
  rng = jax.random.PRNGKey(0)

  # Record the hyperparameters alongside the scalar summaries below.
  summary_writer = tensorboard.SummaryWriter(tensorboard_log_dir)
  summary_writer.hparams(
      dict(
          learning_rate=learning_rate,
          num_epochs=num_epochs,
          steps_per_epoch=steps_per_epoch,
          eval_steps_per_epoch=eval_steps_per_epoch))

  rng, init_rng = jax.random.split(rng)
  # Initialize with some fake data of the proper shape.
  init_val = dict((feature, jnp.array([[1.]], dtype=jnp.float32))
                  for feature in _FEATURE_KEYS_XF)
  model = _FlaxPenguinModel()
  params = model.init(init_rng, init_val)['params']

  # Adam optimizer; the optax optimizer state is threaded through the
  # epoch loop together with the parameters.
  tx = optax.adam(learning_rate=learning_rate)
  opt_state = tx.init(params)

  for epoch in range(1, num_epochs + 1):
    params, opt_state, train_metrics = _train_epoch(model, tx, params,
                                                    opt_state, train_data,
                                                    steps_per_epoch)
    absl.logging.info('Flax train epoch: %d, loss: %.4f, accuracy: %.2f',
                      epoch, train_metrics['loss'],
                      train_metrics['accuracy'] * 100)
    eval_metrics = _eval_epoch(model, params, eval_data, eval_steps_per_epoch)
    absl.logging.info('Flax eval epoch: %d, loss: %.4f, accuracy: %.2f',
                      epoch, eval_metrics['loss'],
                      eval_metrics['accuracy'] * 100)
    summary_writer.scalar('epoch_train_loss', train_metrics['loss'], epoch)
    summary_writer.scalar('epoch_train_accuracy', train_metrics['accuracy'],
                          epoch)
    summary_writer.scalar('epoch_eval_loss', eval_metrics['loss'], epoch)
    summary_writer.scalar('epoch_eval_accuracy', eval_metrics['accuracy'],
                          epoch)

  summary_writer.flush()

  # The prediction function for the trained model
  def predict(params: _Params, inputs: _InputBatch):
    return model.apply({'params': params}, inputs)

  trained_params = params

  # Convert the prediction function to TF, with a variable batch dimension
  # for all inputs.
  tf_fn = jax2tf.convert(
      predict,
      with_gradient=False,
      enable_xla=True,
      polymorphic_shapes=(None, '(b, 1)'))

  # Create tf.Variables for the parameters. If you want more useful variable
  # names, you can use `tree.map_structure_with_path` from the `dm-tree`
  # package.
  param_vars = tf.nest.map_structure(
      # Due to a bug in SavedModel it is not possible to use tf.GradientTape
      # on a function converted with jax2tf and loaded from SavedModel.
      # Thus, we mark the variables as non-trainable to ensure that users of
      # the SavedModel will not try to fine tune them.
      lambda param: tf.Variable(param, trainable=False),
      trained_params)
  tf_graph = tf.function(
      lambda inputs: tf_fn(param_vars, inputs),
      autograph=False,
      experimental_compile=True)

  return _SavedModelWrapper(tf_graph, param_vars)


def run_fn(fn_args: tfx.components.FnArgs):
  """Train the model based on given args.

  Args:
    fn_args: Holds args used to train the model as name/value pairs.
  """
  tf_transform_output = tft.TFTransformOutput(fn_args.transform_output)

  # Build the (transformed) training and evaluation datasets via the shared
  # penguin_utils_base helper.
  train_dataset = base.input_fn(
      fn_args.train_files,
      fn_args.data_accessor,
      tf_transform_output,
      base.TRAIN_BATCH_SIZE)
  eval_dataset = base.input_fn(
      fn_args.eval_files,
      fn_args.data_accessor,
      tf_transform_output,
      base.EVAL_BATCH_SIZE)

  model = _make_trained_model(
      train_dataset,
      eval_dataset,
      num_epochs=1,
      steps_per_epoch=fn_args.train_steps,
      eval_steps_per_epoch=fn_args.eval_steps,
      tensorboard_log_dir=fn_args.model_run_dir)

  # Attach the serving signatures (including transform_features) and export.
  signatures = base.make_serving_signatures(model, tf_transform_output)
  tf.saved_model.save(model, fn_args.serving_model_dir, signatures=signatures)
Train the model based on given args. Args: fn_args: Holds args used to train the model as name/value pairs.
166,113
import os
from typing import List

import absl
import tensorflow_model_analysis as tfma
from tfx import v1 as tfx


def _create_pipeline(
    pipeline_name: str,
    pipeline_root: str,
    data_root: str,
    module_file: str,
    accuracy_threshold: float,
    serving_model_dir: str,
    metadata_path: str,
    user_provided_schema_path: str,
    beam_pipeline_args: List[str],
    infra_validator_host_ip_address: str,
    make_warmup: bool,
) -> tfx.dsl.Pipeline:
  """Implements the penguin pipeline with TFX.

  Args:
    pipeline_name: name of the TFX pipeline being created.
    pipeline_root: root directory of the pipeline.
    data_root: directory containing the penguin data (expects a 'labelled'
      subdirectory).
    module_file: path to the module file used by Trainer and Transform.
    accuracy_threshold: minimum accuracy required to bless the model.
    serving_model_dir: filepath to push the blessed model to.
    metadata_path: path to the local ML Metadata sqlite store.
    user_provided_schema_path: path to a user-provided schema file.
    beam_pipeline_args: list of beam pipeline options.
    infra_validator_host_ip_address: host IP address used by InfraValidator's
      local docker model server.
    make_warmup: if True, InfraValidator emits a model with warmup requests
      and Pusher pushes from 'infra_blessing' instead of 'model'.

  Returns:
    A TFX pipeline object.
  """
  # Brings data into the pipeline or otherwise joins/converts training data.
  example_gen = tfx.components.CsvExampleGen(
      input_base=os.path.join(data_root, 'labelled'))

  # Computes statistics over data for visualization and example validation.
  statistics_gen = tfx.components.StatisticsGen(
      examples=example_gen.outputs['examples'])

  # Import user-provided schema.
  schema_gen = tfx.components.ImportSchemaGen(
      schema_file=user_provided_schema_path)

  # Performs anomaly detection based on statistics and data schema.
  example_validator = tfx.components.ExampleValidator(
      statistics=statistics_gen.outputs['statistics'],
      schema=schema_gen.outputs['schema'])

  # Performs transformations and feature engineering in training and serving.
  transform = tfx.components.Transform(
      examples=example_gen.outputs['examples'],
      schema=schema_gen.outputs['schema'],
      module_file=module_file)

  # Uses user-provided Python function that trains a model.
  trainer = tfx.components.Trainer(
      module_file=module_file,
      examples=transform.outputs['transformed_examples'],
      transform_graph=transform.outputs['transform_graph'],
      schema=schema_gen.outputs['schema'],
      train_args=tfx.proto.TrainArgs(num_steps=2000),
      eval_args=tfx.proto.EvalArgs(num_steps=5))

  # Get the latest blessed model for model validation.
  model_resolver = tfx.dsl.Resolver(
      strategy_class=tfx.dsl.experimental.LatestBlessedModelStrategy,
      model=tfx.dsl.Channel(type=tfx.types.standard_artifacts.Model),
      model_blessing=tfx.dsl.Channel(
          type=tfx.types.standard_artifacts.ModelBlessing)).with_id(
              'latest_blessed_model_resolver')

  # Uses TFMA to compute evaluation statistics over features of a model and
  # perform quality validation of a candidate model (compared to a baseline).
  eval_config = tfma.EvalConfig(
      model_specs=[
          tfma.ModelSpec(
              signature_name='serving_default',
              label_key='species_xf',
              preprocessing_function_names=['transform_features'])
      ],
      slicing_specs=[tfma.SlicingSpec()],
      metrics_specs=[
          tfma.MetricsSpec(metrics=[
              tfma.MetricConfig(
                  class_name='SparseCategoricalAccuracy',
                  threshold=tfma.MetricThreshold(
                      value_threshold=tfma.GenericValueThreshold(
                          lower_bound={'value': accuracy_threshold}),
                      # Change threshold will be ignored if there is no
                      # baseline model resolved from MLMD (first run).
                      change_threshold=tfma.GenericChangeThreshold(
                          direction=tfma.MetricDirection.HIGHER_IS_BETTER,
                          absolute={'value': -1e-10})))
          ])
      ])
  evaluator = tfx.components.Evaluator(
      examples=example_gen.outputs['examples'],
      model=trainer.outputs['model'],
      baseline_model=model_resolver.outputs['model'],
      eval_config=eval_config)

  # Performs infra validation of a candidate model to prevent unservable model
  # from being pushed. This config will launch a model server of the latest
  # TensorFlow Serving image in a local docker engine.
  infra_validator = tfx.components.InfraValidator(
      model=trainer.outputs['model'],
      examples=example_gen.outputs['examples'],
      serving_spec=tfx.proto.ServingSpec(
          # TODO(b/244254788): Roll back to the 'latest' tag.
          tensorflow_serving=tfx.proto.TensorFlowServing(tags=['2.8.2']),
          local_docker=tfx.proto.LocalDockerConfig(
              host_ip_address=infra_validator_host_ip_address
          ),
      ),
      request_spec=tfx.proto.RequestSpec(
          tensorflow_serving=tfx.proto.TensorFlowServingRequestSpec(),
          # If this flag is set, InfraValidator will produce a model with
          # warmup requests (in its outputs['blessing']).
          make_warmup=make_warmup,
      ),
  )

  # Checks whether the model passed the validation steps and pushes the model
  # to a file destination if check passed.
  if make_warmup:
    # If InfraValidator.request_spec.make_warmup = True, its output contains
    # a model so that Pusher can push 'infra_blessing' input instead of
    # 'model' input.
    pusher = tfx.components.Pusher(
        model_blessing=evaluator.outputs['blessing'],
        infra_blessing=infra_validator.outputs['blessing'],
        push_destination=tfx.proto.PushDestination(
            filesystem=tfx.proto.PushDestination.Filesystem(
                base_directory=serving_model_dir)))
  else:
    # Otherwise, 'infra_blessing' does not contain a model and is used as a
    # conditional checker just like 'model_blessing' does. This is the typical
    # use case.
    pusher = tfx.components.Pusher(
        model=trainer.outputs['model'],
        model_blessing=evaluator.outputs['blessing'],
        infra_blessing=infra_validator.outputs['blessing'],
        push_destination=tfx.proto.PushDestination(
            filesystem=tfx.proto.PushDestination.Filesystem(
                base_directory=serving_model_dir)))

  return tfx.dsl.Pipeline(
      pipeline_name=pipeline_name,
      pipeline_root=pipeline_root,
      components=[
          example_gen,
          statistics_gen,
          schema_gen,
          example_validator,
          transform,
          trainer,
          model_resolver,
          evaluator,
          infra_validator,
          pusher,
      ],
      enable_cache=True,
      metadata_connection_config=tfx.orchestration.metadata
      .sqlite_metadata_connection_config(metadata_path),
      beam_pipeline_args=beam_pipeline_args)
Implements the penguin pipeline with TFX.
166,114
from typing import List

import keras_tuner as kt
import tensorflow as tf
import tensorflow_decision_forests as tfdf
import tensorflow_transform as tft
from tfx import v1 as tfx
from tfx.examples.penguin import penguin_utils_base as base
from tfx_bsl.public import tfxio

# NOTE(review): _KEY_MODEL_TYPE, _KEY_RANDOM_FOREST and
# _KEY_GRADIENT_BOOSTED_TREES are module-level constants defined elsewhere
# in this file (not visible here).


def _get_hyperparameters() -> kt.HyperParameters:
  """Creates a small hyperparameter search space for TF-DF.

  TF-DF offers multiple learning algorithms, and each one has its own
  hyperparameters. In this example, we consider the Random Forest and Gradient
  Boosted Trees models. Their hyperparameters are described at:
  https://www.tensorflow.org/decision_forests/api_docs/python/tfdf/keras/GradientBoostedTreesModel
  https://www.tensorflow.org/decision_forests/api_docs/python/tfdf/keras/RandomForestModel
  https://github.com/google/yggdrasil-decision-forests/blob/main/documentation/learners.md

  See the user manual to get an idea of which hyperparameters are best suited
  for tuning:
  https://github.com/google/yggdrasil-decision-forests/blob/main/documentation/user_manual.md#manual-tuning-of-hyper-parameters

  Returns:
    Valid range and default value of few of the GBT hyperparameters.
  """
  hp = kt.HyperParameters()
  # Select the decision forest learning algorithm.
  hp.Choice(
      _KEY_MODEL_TYPE, [
          _KEY_RANDOM_FOREST,
          _KEY_GRADIENT_BOOSTED_TREES,
      ],
      default=_KEY_GRADIENT_BOOSTED_TREES)

  # Hyperparameter for the Random Forest. `parent_name`/`parent_values`
  # make these conditional on the model-type choice above.
  hp.Int(
      'max_depth',
      10,
      24,
      default=16,
      parent_name=_KEY_MODEL_TYPE,
      parent_values=[_KEY_RANDOM_FOREST])
  hp.Int(
      'min_examples',
      2,
      20,
      default=6,
      parent_name=_KEY_MODEL_TYPE,
      parent_values=[_KEY_RANDOM_FOREST])

  # Hyperparameters for the Gradient Boosted Trees
  hp.Float(
      'num_candidate_attributes_ratio',
      0.5,
      1.0,
      default=1.0,
      parent_name=_KEY_MODEL_TYPE,
      parent_values=[_KEY_GRADIENT_BOOSTED_TREES])
  hp.Boolean(
      'use_hessian_gain',
      default=False,
      parent_name=_KEY_MODEL_TYPE,
      parent_values=[_KEY_GRADIENT_BOOSTED_TREES])
  hp.Choice(
      'growing_strategy', ['LOCAL', 'BEST_FIRST_GLOBAL'],
      default='LOCAL',
      parent_name=_KEY_MODEL_TYPE,
      parent_values=[_KEY_GRADIENT_BOOSTED_TREES])
  return hp


def _make_keras_model(hparams: kt.HyperParameters) -> tf.keras.Model:
  """Creates a TF-DF Keras model.

  Args:
    hparams: Hyperparameters of the model.

  Returns:
    A Keras Model.

  Raises:
    ValueError: If the model-type hyperparameter is not one of the known
      model types.
  """
  # Note: The input features are not specified. Therefore, all the columns
  # specified in the Transform are used as input features, and their semantic
  # (e.g. numerical, categorical) is inferred automatically.
  common_args = {
      'verbose': 2,
      'task': tfdf.keras.Task.CLASSIFICATION,
  }
  if hparams.get(_KEY_MODEL_TYPE) == _KEY_RANDOM_FOREST:
    return tfdf.keras.RandomForestModel(
        max_depth=hparams.get('max_depth'),
        min_examples=hparams.get('min_examples'),
        **common_args)
  elif hparams.get(_KEY_MODEL_TYPE) == _KEY_GRADIENT_BOOSTED_TREES:
    return tfdf.keras.GradientBoostedTreesModel(
        num_candidate_attributes_ratio=hparams.get(
            'num_candidate_attributes_ratio'),
        use_hessian_gain=hparams.get('use_hessian_gain'),
        growing_strategy=hparams.get('growing_strategy'),
        **common_args)
  else:
    raise ValueError('Unknown model type')


def input_fn(file_pattern: List[str],
             data_accessor: tfx.components.DataAccessor,
             tf_transform_output: tft.TFTransformOutput,
             batch_size: int) -> tf.data.Dataset:
  """Creates a tf.Dataset for training or evaluation.

  Args:
    file_pattern: List of paths or patterns of input tfrecord files.
    data_accessor: DataAccessor for converting input to RecordBatch.
    tf_transform_output: A TFTransformOutput.
    batch_size: representing the number of consecutive elements of returned
      dataset to combine in a single batch

  Returns:
    A dataset that contains (features, indices) tuple where features is a
      dictionary of Tensors, and indices is a single Tensor of label indices.
  """
  return data_accessor.tf_dataset_factory(
      file_pattern,
      tfxio.TensorFlowDatasetOptions(
          # The batch size has not impact on the model quality. However, small
          # batch size might be slower.
          batch_size=batch_size,
          # TF-DF models should be trained on exactly one epoch.
          num_epochs=1,
          # Datasets should not be shuffled.
          shuffle=False,
          label_key=base.transformed_name(base._LABEL_KEY)),  # pylint: disable=protected-access
      tf_transform_output.transformed_metadata.schema)


def tuner_fn(fn_args: tfx.components.FnArgs) -> tfx.components.TunerFnResult:
  """Builds a Keras Tuner for the model.

  Args:
    fn_args: Holds args as name/value pairs.
      - working_dir: working dir for tuning.
      - train_files: List of file paths containing training tf.Example data.
      - eval_files: List of file paths containing eval tf.Example data.
      - train_steps: number of train steps.
      - eval_steps: number of eval steps.
      - schema_path: optional schema of the input data.
      - transform_graph_path: optional transform graph produced by TFT.

  Returns:
    A namedtuple contains the following:
      - tuner: A BaseTuner that will be used for tuning.
      - fit_kwargs: Args to pass to tuner's run_trial function for fitting the
        model , e.g., the training and validation dataset. Required args
        depend on the above tuner's implementation.
  """
  # RandomSearch is a subclass of kt.Tuner which inherits from
  # BaseTuner.
  tuner = kt.RandomSearch(
      _make_keras_model,
      max_trials=6,
      hyperparameters=_get_hyperparameters(),
      allow_new_entries=False,
      # The model is tuned on the loss computed on the TFX validation dataset.
      objective=kt.Objective('val_loss', 'min'),
      directory=fn_args.working_dir,
      project_name='penguin_tuning')

  transform_graph = tft.TFTransformOutput(fn_args.transform_graph_path)

  train_dataset = input_fn(fn_args.train_files, fn_args.data_accessor,
                           transform_graph, base.TRAIN_BATCH_SIZE)
  eval_dataset = input_fn(fn_args.eval_files, fn_args.data_accessor,
                          transform_graph, base.EVAL_BATCH_SIZE)

  return tfx.components.TunerFnResult(
      tuner=tuner,
      fit_kwargs={
          'x': train_dataset,
          'validation_data': eval_dataset,
      })
Builds a Keras Tuner for the model. Args: fn_args: Holds args as name/value pairs. - working_dir: working dir for tuning. - train_files: List of file paths containing training tf.Example data. - eval_files: List of file paths containing eval tf.Example data. - train_steps: number of train steps. - eval_steps: number of eval steps. - schema_path: optional schema of the input data. - transform_graph_path: optional transform graph produced by TFT. Returns: A namedtuple contains the following: - tuner: A BaseTuner that will be used for tuning. - fit_kwargs: Args to pass to tuner's run_trial function for fitting the model , e.g., the training and validation dataset. Required args depend on the above tuner's implementation.
166,115
from typing import List

import keras_tuner as kt
import tensorflow as tf
import tensorflow_decision_forests as tfdf
import tensorflow_transform as tft
from tfx import v1 as tfx
from tfx.examples.penguin import penguin_utils_base as base
from tfx_bsl.public import tfxio

# NOTE(review): _KEY_MODEL_TYPE, _KEY_RANDOM_FOREST and
# _KEY_GRADIENT_BOOSTED_TREES are module-level constants defined elsewhere
# in this file (not visible here).


def _get_hyperparameters() -> kt.HyperParameters:
  """Creates a small hyperparameter search space for TF-DF.

  TF-DF offers multiple learning algorithms, and each one has its own
  hyperparameters. In this example, we consider the Random Forest and Gradient
  Boosted Trees models. Their hyperparameters are described at:
  https://www.tensorflow.org/decision_forests/api_docs/python/tfdf/keras/GradientBoostedTreesModel
  https://www.tensorflow.org/decision_forests/api_docs/python/tfdf/keras/RandomForestModel
  https://github.com/google/yggdrasil-decision-forests/blob/main/documentation/learners.md

  See the user manual to get an idea of which hyperparameters are best suited
  for tuning:
  https://github.com/google/yggdrasil-decision-forests/blob/main/documentation/user_manual.md#manual-tuning-of-hyper-parameters

  Returns:
    Valid range and default value of few of the GBT hyperparameters.
  """
  hp = kt.HyperParameters()
  # Select the decision forest learning algorithm.
  hp.Choice(
      _KEY_MODEL_TYPE, [
          _KEY_RANDOM_FOREST,
          _KEY_GRADIENT_BOOSTED_TREES,
      ],
      default=_KEY_GRADIENT_BOOSTED_TREES)

  # Hyperparameter for the Random Forest. `parent_name`/`parent_values`
  # make these conditional on the model-type choice above.
  hp.Int(
      'max_depth',
      10,
      24,
      default=16,
      parent_name=_KEY_MODEL_TYPE,
      parent_values=[_KEY_RANDOM_FOREST])
  hp.Int(
      'min_examples',
      2,
      20,
      default=6,
      parent_name=_KEY_MODEL_TYPE,
      parent_values=[_KEY_RANDOM_FOREST])

  # Hyperparameters for the Gradient Boosted Trees
  hp.Float(
      'num_candidate_attributes_ratio',
      0.5,
      1.0,
      default=1.0,
      parent_name=_KEY_MODEL_TYPE,
      parent_values=[_KEY_GRADIENT_BOOSTED_TREES])
  hp.Boolean(
      'use_hessian_gain',
      default=False,
      parent_name=_KEY_MODEL_TYPE,
      parent_values=[_KEY_GRADIENT_BOOSTED_TREES])
  hp.Choice(
      'growing_strategy', ['LOCAL', 'BEST_FIRST_GLOBAL'],
      default='LOCAL',
      parent_name=_KEY_MODEL_TYPE,
      parent_values=[_KEY_GRADIENT_BOOSTED_TREES])
  return hp


def _make_keras_model(hparams: kt.HyperParameters) -> tf.keras.Model:
  """Creates a TF-DF Keras model.

  Args:
    hparams: Hyperparameters of the model.

  Returns:
    A Keras Model.

  Raises:
    ValueError: If the model-type hyperparameter is not one of the known
      model types.
  """
  # Note: The input features are not specified. Therefore, all the columns
  # specified in the Transform are used as input features, and their semantic
  # (e.g. numerical, categorical) is inferred automatically.
  common_args = {
      'verbose': 2,
      'task': tfdf.keras.Task.CLASSIFICATION,
  }
  if hparams.get(_KEY_MODEL_TYPE) == _KEY_RANDOM_FOREST:
    return tfdf.keras.RandomForestModel(
        max_depth=hparams.get('max_depth'),
        min_examples=hparams.get('min_examples'),
        **common_args)
  elif hparams.get(_KEY_MODEL_TYPE) == _KEY_GRADIENT_BOOSTED_TREES:
    return tfdf.keras.GradientBoostedTreesModel(
        num_candidate_attributes_ratio=hparams.get(
            'num_candidate_attributes_ratio'),
        use_hessian_gain=hparams.get('use_hessian_gain'),
        growing_strategy=hparams.get('growing_strategy'),
        **common_args)
  else:
    raise ValueError('Unknown model type')


def input_fn(file_pattern: List[str],
             data_accessor: tfx.components.DataAccessor,
             tf_transform_output: tft.TFTransformOutput,
             batch_size: int) -> tf.data.Dataset:
  """Creates a tf.Dataset for training or evaluation.

  Args:
    file_pattern: List of paths or patterns of input tfrecord files.
    data_accessor: DataAccessor for converting input to RecordBatch.
    tf_transform_output: A TFTransformOutput.
    batch_size: representing the number of consecutive elements of returned
      dataset to combine in a single batch

  Returns:
    A dataset that contains (features, indices) tuple where features is a
      dictionary of Tensors, and indices is a single Tensor of label indices.
  """
  return data_accessor.tf_dataset_factory(
      file_pattern,
      tfxio.TensorFlowDatasetOptions(
          # The batch size has not impact on the model quality. However, small
          # batch size might be slower.
          batch_size=batch_size,
          # TF-DF models should be trained on exactly one epoch.
          num_epochs=1,
          # Datasets should not be shuffled.
          shuffle=False,
          label_key=base.transformed_name(base._LABEL_KEY)),  # pylint: disable=protected-access
      tf_transform_output.transformed_metadata.schema)


def run_fn(fn_args: tfx.components.FnArgs):
  """Train the model based on given args.

  Args:
    fn_args: Holds args used to train the model as name/value pairs.
  """
  tf_transform_output = tft.TFTransformOutput(fn_args.transform_output)

  train_dataset = input_fn(fn_args.train_files, fn_args.data_accessor,
                           tf_transform_output, base.TRAIN_BATCH_SIZE)
  eval_dataset = input_fn(fn_args.eval_files, fn_args.data_accessor,
                          tf_transform_output, base.EVAL_BATCH_SIZE)

  if fn_args.hyperparameters:
    hparams = kt.HyperParameters.from_config(fn_args.hyperparameters)
  else:
    # This is a shown case when hyperparameters is decided and Tuner is removed
    # from the pipeline. User can also inline the hyperparameters directly in
    # _make_keras_model.
    hparams = _get_hyperparameters()

  model = _make_keras_model(hparams)
  # TF-DF trains in a single pass over the dataset (num_epochs=1 above).
  model.fit(train_dataset, validation_data=eval_dataset)

  print('Trained model:')
  model.summary()

  # Export the tensorboard logs.
  model.make_inspector().export_to_tensorboard(fn_args.model_run_dir)

  signatures = base.make_serving_signatures(model, tf_transform_output)
  model.save(
      fn_args.serving_model_dir, save_format='tf', signatures=signatures)
Train the model based on given args. Args: fn_args: Holds args used to train the model as name/value pairs.
166,116
import datetime
import multiprocessing
import os
import socket
import sys
from typing import List, Optional

import absl
from absl import flags
import tensorflow_model_analysis as tfma
from tfx import v1 as tfx
from tfx.utils import proto_utils

# NOTE(review): in the original example this function is presumably decorated
# as a TFX custom Python-function component — confirm against the full file.
def RangeConfigGenerator(input_date: tfx.dsl.components.Parameter[str],
                         range_config: tfx.dsl.components.OutputArtifact[
                             tfx.types.standard_artifacts.String]):
  """Implements the custom component to convert date into span number.

  Args:
    input_date: input date to generate range_config.
    range_config: range_config to ExampleGen.
  """
  start_time = datetime.datetime(2022, 1, 1)
  # start time calculate span number from.
  datem = datetime.datetime.strptime(input_date, '%Y%m%d')
  # Span number is the day offset from start_time.
  span_number = (datetime.datetime(datem.year, datem.month, datem.day) -
                 start_time).days
  range_config_str = proto_utils.proto_to_json(
      tfx.proto.RangeConfig(
          static_range=tfx.proto.StaticRange(
              start_span_number=span_number, end_span_number=span_number)))
  range_config.value = range_config_str


def create_pipeline(  # pylint: disable=invalid-name
    pipeline_name: str,
    pipeline_root: str,
    data_root: str,
    module_file: str,
    accuracy_threshold: float,
    serving_model_dir: str,
    metadata_path: str,
    user_provided_schema_path: Optional[str],
    enable_tuning: bool,
    enable_bulk_inferrer: bool,
    enable_example_diff: bool,
    examplegen_input_config: Optional[tfx.proto.Input],
    examplegen_range_config_date: Optional[str],
    resolver_range_config: Optional[tfx.proto.RangeConfig],
    beam_pipeline_args: List[str],
    # TODO(b/191634100): Always enable transform cache.
    enable_transform_input_cache: bool) -> tfx.dsl.Pipeline:
  """Implements the penguin pipeline with TFX.

  Args:
    pipeline_name: name of the TFX pipeline being created.
    pipeline_root: root directory of the pipeline.
    data_root: directory containing the penguin data.
    module_file: path to files used in Trainer and Transform components.
    accuracy_threshold: minimum accuracy to push the model.
    serving_model_dir: filepath to write pipeline SavedModel to.
    metadata_path: path to local pipeline ML Metadata store.
    user_provided_schema_path: path to user provided schema file.
    enable_tuning: If True, the hyperparameter tuning through KerasTuner is
      enabled.
    enable_bulk_inferrer: If True, the generated model will be used for a
      batch inference.
    enable_example_diff: If True, perform the feature skew detection.
    examplegen_input_config: ExampleGen's input_config.
    examplegen_range_config_date: date to generate the range_config to
      ExampleGen.
    resolver_range_config: SpansResolver's range_config. Specify this will
      enable SpansResolver to get a window of ExampleGen's output Spans for
      transform and training.
    beam_pipeline_args: list of beam pipeline options for LocalDAGRunner.
      Please refer to https://beam.apache.org/documentation/runners/direct/.
    enable_transform_input_cache: Indicates whether input cache should be used
      in Transform if available.

  Returns:
    A TFX pipeline object.
  """
  range_config = None
  if examplegen_range_config_date:
    input_config_generator = RangeConfigGenerator(  # pylint: disable=no-value-for-parameter
        input_date=examplegen_range_config_date)
    # Feed the generated RangeConfig JSON to ExampleGen via a runtime
    # placeholder on the custom component's output artifact.
    range_config = input_config_generator.outputs['range_config'].future(
    )[0].value

  example_gen = tfx.components.CsvExampleGen(
      input_base=os.path.join(data_root, 'labelled'),
      input_config=examplegen_input_config,
      range_config=range_config)

  # Computes statistics over data for visualization and example validation.
  statistics_gen = tfx.components.StatisticsGen(
      examples=example_gen.outputs['examples'])

  if user_provided_schema_path:
    # Import user-provided schema.
    schema_gen = tfx.components.ImportSchemaGen(
        schema_file=user_provided_schema_path)
    # Performs anomaly detection based on statistics and data schema.
    example_validator = tfx.components.ExampleValidator(
        statistics=statistics_gen.outputs['statistics'],
        schema=schema_gen.outputs['schema'])
  else:
    # Generates schema based on statistics files.
    schema_gen = tfx.components.SchemaGen(
        statistics=statistics_gen.outputs['statistics'],
        infer_feature_shape=True)

  # Gets multiple Spans for transform and training.
  if resolver_range_config:
    examples_resolver = tfx.dsl.Resolver(
        strategy_class=tfx.dsl.experimental.SpanRangeStrategy,
        config={
            'range_config': resolver_range_config
        },
        examples=tfx.dsl.Channel(
            type=tfx.types.standard_artifacts.Examples,
            producer_component_id=example_gen.id)).with_id('span_resolver')
    examples_resolver.add_upstream_node(example_gen)

  # Performs transformations and feature engineering in training and serving.
  if enable_transform_input_cache:
    transform_cache_resolver = tfx.dsl.Resolver(
        strategy_class=tfx.dsl.experimental.LatestArtifactStrategy,
        cache=tfx.dsl.Channel(type=tfx.types.standard_artifacts.TransformCache)
    ).with_id('transform_cache_resolver')
    tft_resolved_cache = transform_cache_resolver.outputs['cache']
  else:
    tft_resolved_cache = None

  transform = tfx.components.Transform(
      examples=(examples_resolver.outputs['examples']
                if resolver_range_config else example_gen.outputs['examples']),
      schema=schema_gen.outputs['schema'],
      module_file=module_file,
      analyzer_cache=tft_resolved_cache)

  # Tunes the hyperparameters for model training based on user-provided Python
  # function. Note that once the hyperparameters are tuned, you can drop the
  # Tuner component from pipeline and feed Trainer with tuned hyperparameters.
  if enable_tuning:
    tuner = tfx.components.Tuner(
        module_file=module_file,
        examples=transform.outputs['transformed_examples'],
        transform_graph=transform.outputs['transform_graph'],
        train_args=tfx.proto.TrainArgs(num_steps=20),
        eval_args=tfx.proto.EvalArgs(num_steps=5))

  # Uses user-provided Python function that trains a model.
  trainer = tfx.components.Trainer(
      module_file=module_file,
      examples=transform.outputs['transformed_examples'],
      transform_graph=transform.outputs['transform_graph'],
      schema=schema_gen.outputs['schema'],
      # If Tuner is in the pipeline, Trainer can take Tuner's output
      # best_hyperparameters artifact as input and utilize it in the user
      # module code.
      #
      # If there isn't Tuner in the pipeline, either use Importer to import
      # a previous Tuner's output to feed to Trainer, or directly use the tuned
      # hyperparameters in user module code and set hyperparameters to None
      # here.
      #
      # Example of Importer,
      #   hparams_importer = Importer(
      #     source_uri='path/to/best_hyperparameters.txt',
      #     artifact_type=HyperParameters).with_id('import_hparams')
      #   ...
      #   hyperparameters = hparams_importer.outputs['result'],
      hyperparameters=(tuner.outputs['best_hyperparameters']
                       if enable_tuning else None),
      train_args=tfx.proto.TrainArgs(num_steps=100),
      eval_args=tfx.proto.EvalArgs(num_steps=5))

  # Get the latest blessed model for model validation.
  model_resolver = tfx.dsl.Resolver(
      strategy_class=tfx.dsl.experimental.LatestBlessedModelStrategy,
      model=tfx.dsl.Channel(type=tfx.types.standard_artifacts.Model),
      model_blessing=tfx.dsl.Channel(
          type=tfx.types.standard_artifacts.ModelBlessing)).with_id(
              'latest_blessed_model_resolver')

  # Uses TFMA to compute evaluation statistics over features of a model and
  # perform quality validation of a candidate model (compared to a baseline).
  eval_config = tfma.EvalConfig(
      model_specs=[
          tfma.ModelSpec(
              signature_name='serving_default',
              label_key='species_xf',
              preprocessing_function_names=['transform_features'])
      ],
      slicing_specs=[tfma.SlicingSpec()],
      metrics_specs=[
          tfma.MetricsSpec(metrics=[
              tfma.MetricConfig(
                  class_name='SparseCategoricalAccuracy',
                  threshold=tfma.MetricThreshold(
                      value_threshold=tfma.GenericValueThreshold(
                          lower_bound={'value': accuracy_threshold}),
                      # Change threshold will be ignored if there is no
                      # baseline model resolved from MLMD (first run).
                      change_threshold=tfma.GenericChangeThreshold(
                          direction=tfma.MetricDirection.HIGHER_IS_BETTER,
                          absolute={'value': -1e-10})))
          ])
      ])
  evaluator = tfx.components.Evaluator(
      examples=example_gen.outputs['examples'],
      model=trainer.outputs['model'],
      baseline_model=model_resolver.outputs['model'],
      eval_config=eval_config)

  # Components declared within the conditional block will only be triggered
  # if the Predicate evaluates to True.
  #
  # In the example below,
  # evaluator.outputs['blessing'].future()[0].custom_property('blessed') == 1
  # is a Predicate, which will be evaluated during runtime.
  #
  # - evaluator.outputs['blessing'] is the output Channel 'blessing'.
  # - .future() turns the Channel into a Placeholder.
  # - [0] gets the first artifact from the 'blessing' Channel.
  # - .custom_property('blessed') gets a custom property called 'blessed' from
  #   that artifact.
  # - == 1 compares that property with 1. (An explicit comparison is needed.
  #   There's no automatic boolean conversion based on truthiness.)
  #
  # Note these operations are just placeholder, something like Mocks. They are
  # not evaluated until runtime. For more details, see tfx/dsl/placeholder/.
  with tfx.dsl.Cond(
      evaluator.outputs['blessing'].future()[0].custom_property('blessed') == 1
  ):
    # Checks whether the model passed the validation steps and pushes the model
    # to a file destination if check passed.
    pusher = tfx.components.Pusher(
        model=trainer.outputs['model'],
        # No need to pass model_blessing any more, since Pusher is already
        # guarded by a Conditional.
        # model_blessing=evaluator.outputs['blessing'],
        push_destination=tfx.proto.PushDestination(
            filesystem=tfx.proto.PushDestination.Filesystem(
                base_directory=serving_model_dir)))

  # Showcase for BulkInferrer component.
  if enable_bulk_inferrer:
    # Generates unlabelled examples.
    example_gen_unlabelled = tfx.components.CsvExampleGen(
        input_base=os.path.join(data_root, 'unlabelled')).with_id(
            'CsvExampleGen_Unlabelled')

    # Performs offline batch inference.
    bulk_inferrer = tfx.components.BulkInferrer(
        examples=example_gen_unlabelled.outputs['examples'],
        model=trainer.outputs['model'],
        # Empty data_spec.example_splits will result in using all splits.
        data_spec=tfx.proto.DataSpec(),
        model_spec=tfx.proto.ModelSpec())

  if enable_example_diff:
    skewed_data_example_gen = tfx.components.CsvExampleGen(
        input_base=os.path.join(data_root, 'skewed')).with_id(
            'CsvExampleGen_Skewed')
    example_diff_config = tfx.proto.ExampleDiffConfig(
        paired_example_skew=tfx.proto.PairedExampleSkew(
            skew_sample_size=2, identifier_features=['culmen_length_mm']))
    include_split_pairs = [('train', 'train'), ('train', 'eval')]
    example_diff = tfx.components.ExampleDiff(
        examples_test=example_gen.outputs['examples'],
        examples_base=skewed_data_example_gen.outputs['examples'],
        config=example_diff_config,
        include_split_pairs=include_split_pairs)

  # Assemble the component list; optional components are appended only when
  # the corresponding feature flag / config was supplied above.
  components_list = [
      example_gen,
      statistics_gen,
      schema_gen,
      transform,
      trainer,
      model_resolver,
      evaluator,
      pusher,
  ]
  if examplegen_range_config_date:
    components_list.append(input_config_generator)
  if resolver_range_config:
    components_list.append(examples_resolver)
  if enable_transform_input_cache:
    components_list.append(transform_cache_resolver)
  if enable_tuning:
    components_list.append(tuner)
  if enable_bulk_inferrer:
    components_list.extend((example_gen_unlabelled, bulk_inferrer))
  if user_provided_schema_path:
    components_list.append(example_validator)
  if enable_example_diff:
    components_list.extend((skewed_data_example_gen, example_diff))

  return tfx.dsl.Pipeline(
      pipeline_name=pipeline_name,
      pipeline_root=pipeline_root,
      components=components_list,
      enable_cache=True,
      metadata_connection_config=tfx.orchestration.metadata
      .sqlite_metadata_connection_config(metadata_path),
      beam_pipeline_args=beam_pipeline_args)
Implements the penguin pipeline with TFX.

Args:
  pipeline_name: name of the TFX pipeline being created.
  pipeline_root: root directory of the pipeline.
  data_root: directory containing the penguin data.
  module_file: path to files used in Trainer and Transform components.
  accuracy_threshold: minimum accuracy to push the model.
  serving_model_dir: filepath to write pipeline SavedModel to.
  metadata_path: path to local pipeline ML Metadata store.
  user_provided_schema_path: path to the user-provided schema file.
  enable_tuning: If True, the hyperparameter tuning through KerasTuner is
    enabled.
  enable_bulk_inferrer: If True, the generated model will be used for a batch
    inference.
  enable_example_diff: If True, perform the feature skew detection.
  examplegen_input_config: ExampleGen's input_config.
  examplegen_range_config_date: date to generate the range_config to
    ExampleGen.
  resolver_range_config: SpansResolver's range_config. Specifying this will
    enable SpansResolver to get a window of ExampleGen's output Spans for
    transform and training.
  beam_pipeline_args: list of beam pipeline options for LocalDAGRunner. Please
    refer to https://beam.apache.org/documentation/runners/direct/.
  enable_transform_input_cache: Indicates whether input cache should be used
    in Transform if available.

Returns:
  A TFX pipeline object.
166,117
import os
import sys
from typing import Dict, List, Optional, Union

from absl import flags
from absl import logging
import tensorflow_model_analysis as tfma
from tfx import v1 as tfx

# Region used for both Vertex training jobs and Vertex endpoint deployment.
_gcp_region = 'us-central1'

# Vertex AI CustomJob spec consumed by the Cloud Trainer/Tuner components.
# NOTE(review): _project_id, _machine_type and _replica_count are defined
# elsewhere in the original file and are not visible in this excerpt —
# confirm their values there.
_vertex_job_spec = {
    'project': _project_id,
    'worker_pool_specs': [{
        'machine_spec': {
            'machine_type': _machine_type,
        },
        'replica_count': _replica_count,
        'container_spec': {
            # Pin the container image to the installed TFX release.
            'image_uri': 'gcr.io/tfx-oss-public/tfx:{}'.format(tfx.__version__),
        },
    }],
}

# Vertex AI model-serving spec consumed by the Cloud Pusher component.
# NOTE(review): _endpoint_name is also defined outside this excerpt.
_vertex_serving_spec = {
    'project_id': _project_id,
    'endpoint_name': _endpoint_name,
    'machine_type': _machine_type,
}

The provided code snippet includes necessary dependencies for implementing the `create_pipeline` function. Write a Python function `def create_pipeline( pipeline_name: str, pipeline_root: str, data_root: str, module_file: str, ai_platform_training_args: Dict[str, str], ai_platform_serving_args: Dict[str, Union[List[str], str]], enable_tuning: bool, enable_cache: bool, user_provided_schema_path: str, beam_pipeline_args: List[str], use_cloud_component: bool, use_aip: bool, use_vertex: bool, serving_model_dir: Optional[str] = None) -> tfx.dsl.Pipeline` to solve the following problem: Implements the penguin pipeline with TFX and Kubeflow Pipeline.

Args:
  pipeline_name: name of the TFX pipeline being created.
  pipeline_root: root directory of the pipeline. Should be a valid GCS path.
  data_root: uri of the penguin data.
  module_file: uri of the module file used in Trainer, Transform and Tuner.
  ai_platform_training_args: Args of CAIP training job. Please refer to
    https://cloud.google.com/ml-engine/reference/rest/v1/projects.jobs#Job
    for detailed description.
  ai_platform_serving_args: Args of CAIP model deployment. Please refer to
    https://cloud.google.com/ml-engine/reference/rest/v1/projects.models
    for detailed description.
  enable_tuning: If True, the hyperparameter tuning through CloudTuner is
    enabled.
  enable_cache: If True, enable caching of pipeline jobs for sequential runs.
user_provided_schema_path: Path to the schema of the input data.
  beam_pipeline_args: List of Beam pipeline options. Please refer to
    https://cloud.google.com/dataflow/docs/guides/specifying-exec-params#setting-other-cloud-dataflow-pipeline-options.
  use_cloud_component: whether to use tfx.extensions components, namely
    Tuner, Trainer, and Pusher.
  use_aip: whether to use AI platform config with Cloud components;
    implicitly refers to KFP1 orchestration.
  use_vertex: whether to use Vertex config with Cloud components; implicitly
    refers to KFP2 orchestration.
  serving_model_dir: file path to write pipeline saved model to.

Returns:
  A TFX pipeline object.

Here is the function:
def create_pipeline(
    pipeline_name: str,
    pipeline_root: str,
    data_root: str,
    module_file: str,
    ai_platform_training_args: Dict[str, str],
    ai_platform_serving_args: Dict[str, Union[List[str], str]],
    enable_tuning: bool,
    enable_cache: bool,
    user_provided_schema_path: str,
    beam_pipeline_args: List[str],
    use_cloud_component: bool,
    use_aip: bool,
    use_vertex: bool,
    serving_model_dir: Optional[str] = None) -> tfx.dsl.Pipeline:
  """Implements the penguin pipeline with TFX and Kubeflow Pipeline.

  Args:
    pipeline_name: name of the TFX pipeline being created.
    pipeline_root: root directory of the pipeline. Should be a valid GCS path.
    data_root: uri of the penguin data.
    module_file: uri of the module file used in Trainer, Transform and Tuner.
    ai_platform_training_args: Args of CAIP training job. Please refer to
      https://cloud.google.com/ml-engine/reference/rest/v1/projects.jobs#Job
      for detailed description.
    ai_platform_serving_args: Args of CAIP model deployment. Please refer to
      https://cloud.google.com/ml-engine/reference/rest/v1/projects.models
      for detailed description.
    enable_tuning: If True, the hyperparameter tuning through CloudTuner is
      enabled.
    enable_cache: If True, enable caching of pipeline jobs for sequential runs.
    user_provided_schema_path: Path to the schema of the input data.
    beam_pipeline_args: List of Beam pipeline options. Please refer to
      https://cloud.google.com/dataflow/docs/guides/specifying-exec-params#setting-other-cloud-dataflow-pipeline-options.
    use_cloud_component: whether to use tfx.extensions components, namely
      Tuner, Trainer, and Pusher.
    use_aip: whether to use AI platform config with Cloud components;
      implicitly refers to KFP1 orchestration.
    use_vertex: whether to use Vertex config with Cloud components; implicitly
      refers to KFP2 orchestration.
    serving_model_dir: file path to write pipeline saved model to.

  Returns:
    A TFX pipeline object.
  """
  # Assert Cloud components exist with either AIP/Vertex configuration.
  if use_cloud_component:
    assert use_aip ^ use_vertex, (
        'Cloud component needs either AIP or Vertex configuration.')

  # TODO(b/248108131): Add an end-to-end test to make sure the runtime parameter
  # is really overridden.
  # Number of epochs in training.
  train_args = tfx.dsl.experimental.RuntimeParameter(
      name='train-args',
      default='{"num_steps" : 100}',
      ptype=str,
  )
  # Number of epochs in evaluation.
  eval_args = tfx.dsl.experimental.RuntimeParameter(
      name='eval-args',
      default='{"num_steps": 50}',
      ptype=str,
  )
  # On Vertex the string RuntimeParameters above are replaced with concrete
  # proto args, so the runtime override is not available on that path.
  if use_vertex:
    train_args = tfx.proto.TrainArgs(num_steps=100)
    eval_args = tfx.proto.EvalArgs(num_steps=50)

  # Brings data into the pipeline or otherwise joins/converts training data.
  example_gen = tfx.components.CsvExampleGen(
      input_base=os.path.join(data_root, 'labelled'))

  # Computes statistics over data for visualization and example validation.
  statistics_gen = tfx.components.StatisticsGen(
      examples=example_gen.outputs['examples'])

  # Import user-provided schema.
  schema_gen = tfx.components.ImportSchemaGen(
      schema_file=user_provided_schema_path)

  # Performs anomaly detection based on statistics and data schema.
  example_validator = tfx.components.ExampleValidator(
      statistics=statistics_gen.outputs['statistics'],
      schema=schema_gen.outputs['schema'])

  # Performs transformations and feature engineering in training and serving.
  transform = tfx.components.Transform(
      examples=example_gen.outputs['examples'],
      schema=schema_gen.outputs['schema'],
      module_file=module_file)

  # Tunes the hyperparameters for model training based on user-provided Python
  # function. Note that once the hyperparameters are tuned, you can drop the
  # Tuner component from pipeline and feed Trainer with tuned hyperparameters.
  if enable_tuning:
    if use_cloud_component and use_aip:
      # The Tuner component launches 1 AIP Training job for flock management of
      # parallel tuning. For example, 2 workers (defined by num_parallel_trials)
      # in the flock management AIP Training job, each runs a search loop for
      # trials as shown below.
      #   Tuner component -> CAIP job X -> CloudTunerA -> tuning trials
      #                                 -> CloudTunerB -> tuning trials
      #
      # Distributed training for each trial depends on the Tuner
      # (kerastuner.BaseTuner) setup in tuner_fn. Currently CloudTuner is single
      # worker training per trial. DistributingCloudTuner (a subclass of
      # CloudTuner) launches remote distributed training job per trial.
      #
      # E.g., single worker training per trial
      #   ... -> CloudTunerA -> single worker training
      #       -> CloudTunerB -> single worker training
      # vs distributed training per trial
      #   ... -> DistributingCloudTunerA -> CAIP job Y -> master,worker1,2,3
      #       -> DistributingCloudTunerB -> CAIP job Z -> master,worker1,2,3
      tuner = tfx.extensions.google_cloud_ai_platform.Tuner(
          module_file=module_file,
          examples=transform.outputs['transformed_examples'],
          transform_graph=transform.outputs['transform_graph'],
          train_args=tfx.proto.TrainArgs(num_steps=100),
          eval_args=tfx.proto.EvalArgs(num_steps=50),
          tune_args=tfx.proto.TuneArgs(
              # num_parallel_trials=3 means that 3 search loops are
              # running in parallel.
              num_parallel_trials=3),
          custom_config={
              # Note that this TUNING_ARGS_KEY will be used to start the CAIP
              # job for parallel tuning (CAIP job X above).
              #
              # num_parallel_trials will be used to fill/overwrite the
              # workerCount specified by TUNING_ARGS_KEY:
              #   num_parallel_trials = workerCount + 1 (for master)
              tfx.extensions.google_cloud_ai_platform.experimental
              .TUNING_ARGS_KEY:
                  ai_platform_training_args,
              # This working directory has to be a valid GCS path and will be
              # used to launch remote training job per trial.
              tfx.extensions.google_cloud_ai_platform.experimental
              .REMOTE_TRIALS_WORKING_DIR_KEY:
                  os.path.join(pipeline_root, 'trials'),
          })
    elif use_cloud_component and use_vertex:
      # Same Cloud Tuner, configured for Vertex via ENABLE_VERTEX_KEY and the
      # module-level _vertex_job_spec.
      tuner = tfx.extensions.google_cloud_ai_platform.Tuner(
          module_file=module_file,
          examples=transform.outputs['transformed_examples'],
          transform_graph=transform.outputs['transform_graph'],
          train_args=tfx.proto.TrainArgs(num_steps=100),
          eval_args=tfx.proto.EvalArgs(num_steps=50),
          tune_args=tfx.proto.TuneArgs(num_parallel_trials=3),
          custom_config={
              tfx.extensions.google_cloud_ai_platform.ENABLE_VERTEX_KEY:
                  True,
              tfx.extensions.google_cloud_ai_platform.VERTEX_REGION_KEY:
                  _gcp_region,
              tfx.extensions.google_cloud_ai_platform.experimental
              .TUNING_ARGS_KEY:
                  _vertex_job_spec,
              tfx.extensions.google_cloud_ai_platform.experimental
              .REMOTE_TRIALS_WORKING_DIR_KEY:
                  os.path.join(pipeline_root, 'trials'),
          })
    else:
      # Local (non-Cloud) tuner.
      tuner = tfx.components.Tuner(
          examples=transform.outputs['transformed_examples'],
          transform_graph=transform.outputs['transform_graph'],
          module_file=module_file,
          train_args=tfx.proto.TrainArgs(num_steps=100),
          eval_args=tfx.proto.EvalArgs(num_steps=50),
          tune_args=tfx.proto.TuneArgs(num_parallel_trials=3))

  if use_cloud_component and use_aip:
    # Uses user-provided Python function that trains a model.
    trainer = tfx.extensions.google_cloud_ai_platform.Trainer(
        module_file=module_file,
        examples=transform.outputs['transformed_examples'],
        transform_graph=transform.outputs['transform_graph'],
        schema=schema_gen.outputs['schema'],
        # If Tuner is in the pipeline, Trainer can take Tuner's output
        # best_hyperparameters artifact as input and utilize it in the user
        # module code.
        #
        # If there isn't Tuner in the pipeline, either use Importer to
        # import a previous Tuner's output to feed to Trainer, or directly use
        # the tuned hyperparameters in user module code and set hyperparameters
        # to None here.
        #
        # Example of Importer,
        #   hparams_importer = Importer(
        #     source_uri='path/to/best_hyperparameters.txt',
        #     artifact_type=HyperParameters).with_id('import_hparams')
        #   ...
        #   hyperparameters = hparams_importer.outputs['result'],
        hyperparameters=(tuner.outputs['best_hyperparameters']
                         if enable_tuning else None),
        train_args=tfx.proto.TrainArgs(num_steps=100),
        eval_args=tfx.proto.EvalArgs(num_steps=50),
        custom_config={
            tfx.extensions.google_cloud_ai_platform.TRAINING_ARGS_KEY:
                ai_platform_training_args
        })
  elif use_cloud_component and use_vertex:
    trainer = tfx.extensions.google_cloud_ai_platform.Trainer(
        module_file=module_file,
        examples=transform.outputs['transformed_examples'],
        transform_graph=transform.outputs['transform_graph'],
        schema=schema_gen.outputs['schema'],
        hyperparameters=(tuner.outputs['best_hyperparameters']
                         if enable_tuning else None),
        train_args=tfx.proto.TrainArgs(num_steps=100),
        eval_args=tfx.proto.EvalArgs(num_steps=50),
        custom_config={
            tfx.extensions.google_cloud_ai_platform.ENABLE_VERTEX_KEY:
                True,
            tfx.extensions.google_cloud_ai_platform.VERTEX_REGION_KEY:
                _gcp_region,
            tfx.extensions.google_cloud_ai_platform.TRAINING_ARGS_KEY:
                _vertex_job_spec
        })
  else:
    # Local trainer; only this branch honors the RuntimeParameter
    # train_args/eval_args built above.
    trainer = tfx.components.Trainer(
        module_file=module_file,
        examples=transform.outputs['transformed_examples'],
        transform_graph=transform.outputs['transform_graph'],
        schema=schema_gen.outputs['schema'],
        hyperparameters=(tuner.outputs['best_hyperparameters']
                         if enable_tuning else None),
        train_args=train_args,
        eval_args=eval_args
    )

  # Get the latest blessed model for model validation.
  model_resolver = tfx.dsl.Resolver(
      strategy_class=tfx.dsl.experimental.LatestBlessedModelStrategy,
      model=tfx.dsl.Channel(type=tfx.types.standard_artifacts.Model),
      model_blessing=tfx.dsl.Channel(
          type=tfx.types.standard_artifacts.ModelBlessing)).with_id(
              'latest_blessed_model_resolver')

  # Uses TFMA to compute evaluation statistics over features of a model and
  # perform quality validation of a candidate model (compared to a baseline).
  eval_config = tfma.EvalConfig(
      model_specs=[
          tfma.ModelSpec(
              signature_name='serving_default',
              label_key='species_xf',
              preprocessing_function_names=['transform_features'])
      ],
      slicing_specs=[tfma.SlicingSpec()],
      metrics_specs=[
          tfma.MetricsSpec(metrics=[
              tfma.MetricConfig(
                  class_name='SparseCategoricalAccuracy',
                  threshold=tfma.MetricThreshold(
                      value_threshold=tfma.GenericValueThreshold(
                          lower_bound={'value': 0.3}),
                      # Change threshold will be ignored if there is no
                      # baseline model resolved from MLMD (first run).
                      change_threshold=tfma.GenericChangeThreshold(
                          direction=tfma.MetricDirection.HIGHER_IS_BETTER,
                          absolute={'value': -1e-10})))
          ])
      ])
  evaluator = tfx.components.Evaluator(
      examples=example_gen.outputs['examples'],
      model=trainer.outputs['model'],
      baseline_model=model_resolver.outputs['model'],
      eval_config=eval_config)

  # Pushes the blessed model: CAIP, Vertex, or a local filesystem destination.
  if use_cloud_component and use_aip:
    pusher = tfx.extensions.google_cloud_ai_platform.Pusher(
        model=trainer.outputs['model'],
        model_blessing=evaluator.outputs['blessing'],
        custom_config={
            tfx.extensions.google_cloud_ai_platform.experimental
            .PUSHER_SERVING_ARGS_KEY:
                ai_platform_serving_args
        })
  elif use_cloud_component and use_vertex:
    pusher = tfx.extensions.google_cloud_ai_platform.Pusher(
        model=trainer.outputs['model'],
        model_blessing=evaluator.outputs['blessing'],
        custom_config={
            tfx.extensions.google_cloud_ai_platform.ENABLE_VERTEX_KEY:
                True,
            tfx.extensions.google_cloud_ai_platform.VERTEX_REGION_KEY:
                _gcp_region,
            tfx.extensions.google_cloud_ai_platform.SERVING_ARGS_KEY:
                _vertex_serving_spec,
        })
  else:
    pusher = tfx.components.Pusher(
        model=trainer.outputs['model'],
        model_blessing=evaluator.outputs['blessing'],
        push_destination=tfx.proto.PushDestination(
            filesystem=tfx.proto.PushDestination.Filesystem(
                base_directory=serving_model_dir)))

  components = [
      example_gen,
      statistics_gen,
      schema_gen,
      example_validator,
      transform,
      trainer,
      model_resolver,
      evaluator,
      pusher,
  ]
  if enable_tuning:
    components.append(tuner)

  return tfx.dsl.Pipeline(
      pipeline_name=pipeline_name,
      pipeline_root=pipeline_root,
      components=components,
      enable_cache=enable_cache,
      beam_pipeline_args=beam_pipeline_args)
Implements the penguin pipeline with TFX and Kubeflow Pipeline. Args: pipeline_name: name of the TFX pipeline being created. pipeline_root: root directory of the pipeline. Should be a valid GCS path. data_root: uri of the penguin data. module_file: uri of the module file used in Trainer, Transform and Tuner. ai_platform_training_args: Args of CAIP training job. Please refer to https://cloud.google.com/ml-engine/reference/rest/v1/projects.jobs#Job for detailed description. ai_platform_serving_args: Args of CAIP model deployment. Please refer to https://cloud.google.com/ml-engine/reference/rest/v1/projects.models for detailed description. enable_tuning: If True, the hyperparameter tuning through CloudTuner is enabled. enable_cache: If True, enable caching of pipeline jobs for sequential runs. user_provided_schema_path: Path to the schema of the input data. beam_pipeline_args: List of Beam pipeline options. Please refer to https://cloud.google.com/dataflow/docs/guides/specifying-exec-params#setting-other-cloud-dataflow-pipeline-options. use_cloud_component: whether to use tfx.extensions components, namely Tuner, Trainer, and Pusher. use_aip: whether to use AI platform config with Cloud components; implicitly refers to KFP1 orchestration. use_vertex: whether to use Vertex config with Cloud components; implicitly refers to KFP2 orchestration. serving_model_dir: file path to write pipeline saved model to. Returns: A TFX pipeline object.
166,118
import absl
import keras_tuner
import tensorflow as tf
from tensorflow import keras
import tensorflow_transform as tft
from tfx import v1 as tfx

from tfx.examples.penguin import penguin_utils_base as base


def _get_hyperparameters() -> keras_tuner.HyperParameters:
  """Returns hyperparameters for building Keras model."""
  hp = keras_tuner.HyperParameters()
  # Defines search space.
  hp.Choice('learning_rate', [1e-2, 1e-3], default=1e-2)
  hp.Int('num_layers', 1, 3, default=2)
  return hp


def _make_keras_model(hparams: keras_tuner.HyperParameters) -> tf.keras.Model:
  """Creates a DNN Keras model for classifying penguin data.

  Args:
    hparams: Holds HyperParameters for tuning.

  Returns:
    A Keras Model.
  """
  # The model below is built with Functional API, please refer to
  # https://www.tensorflow.org/guide/keras/overview for all API options.
  inputs = [
      keras.layers.Input(shape=(1,), name=base.transformed_name(f))
      for f in base.FEATURE_KEYS
  ]
  d = keras.layers.concatenate(inputs)
  # Depth of the DNN is itself a tuned hyperparameter.
  for _ in range(int(hparams.get('num_layers'))):
    d = keras.layers.Dense(8, activation='relu')(d)
  outputs = keras.layers.Dense(3)(d)
  model = keras.Model(inputs=inputs, outputs=outputs)
  model.compile(
      optimizer=keras.optimizers.Adam(hparams.get('learning_rate')),
      loss=keras.losses.SparseCategoricalCrossentropy(from_logits=True),
      metrics=[keras.metrics.SparseCategoricalAccuracy()])
  model.summary(print_fn=absl.logging.info)
  return model

The provided code snippet includes necessary dependencies for implementing the `tuner_fn` function. Write a Python function `def tuner_fn(fn_args: tfx.components.FnArgs) -> tfx.components.TunerFnResult` to solve the following problem: Build the tuner using the KerasTuner API.

Args:
  fn_args: Holds args as name/value pairs.
    - working_dir: working dir for tuning.
    - train_files: List of file paths containing training tf.Example data.
    - eval_files: List of file paths containing eval tf.Example data.
    - train_steps: number of train steps.
    - eval_steps: number of eval steps.
    - schema_path: optional schema of the input data.
    - transform_graph_path: optional transform graph produced by TFT.

Returns:
  A namedtuple contains the following:
    - tuner: A BaseTuner that will be used for tuning.
    - fit_kwargs: Args to pass to tuner's run_trial function for fitting the
      model , e.g., the training and validation dataset. Required args depend
      on the above tuner's implementation.

Here is the function:
def tuner_fn(fn_args: tfx.components.FnArgs) -> tfx.components.TunerFnResult:
  """Build the tuner using the KerasTuner API.

  Args:
    fn_args: Holds args as name/value pairs.
      - working_dir: working dir for tuning.
      - train_files: List of file paths containing training tf.Example data.
      - eval_files: List of file paths containing eval tf.Example data.
      - train_steps: number of train steps.
      - eval_steps: number of eval steps.
      - schema_path: optional schema of the input data.
      - transform_graph_path: optional transform graph produced by TFT.

  Returns:
    A namedtuple that contains the following:
      - tuner: A BaseTuner that will be used for tuning.
      - fit_kwargs: Args to pass to tuner's run_trial function for fitting the
        model, e.g., the training and validation dataset. Required args depend
        on the above tuner's implementation.
  """
  # RandomSearch is a subclass of keras_tuner.Tuner which inherits from
  # BaseTuner.
  tuner = keras_tuner.RandomSearch(
      _make_keras_model,
      max_trials=6,
      hyperparameters=_get_hyperparameters(),
      allow_new_entries=False,
      objective=keras_tuner.Objective('val_sparse_categorical_accuracy', 'max'),
      directory=fn_args.working_dir,
      project_name='penguin_tuning')

  transform_graph = tft.TFTransformOutput(fn_args.transform_graph_path)

  train_dataset = base.input_fn(
      fn_args.train_files,
      fn_args.data_accessor,
      transform_graph,
      base.TRAIN_BATCH_SIZE)

  eval_dataset = base.input_fn(
      fn_args.eval_files,
      fn_args.data_accessor,
      transform_graph,
      base.EVAL_BATCH_SIZE)

  return tfx.components.TunerFnResult(
      tuner=tuner,
      fit_kwargs={
          'x': train_dataset,
          'validation_data': eval_dataset,
          'steps_per_epoch': fn_args.train_steps,
          'validation_steps': fn_args.eval_steps
      })
Build the tuner using the KerasTuner API.

Args:
  fn_args: Holds args as name/value pairs.
    - working_dir: working dir for tuning.
    - train_files: List of file paths containing training tf.Example data.
    - eval_files: List of file paths containing eval tf.Example data.
    - train_steps: number of train steps.
    - eval_steps: number of eval steps.
    - schema_path: optional schema of the input data.
    - transform_graph_path: optional transform graph produced by TFT.

Returns:
  A namedtuple that contains the following:
    - tuner: A BaseTuner that will be used for tuning.
    - fit_kwargs: Args to pass to tuner's run_trial function for fitting the
      model, e.g., the training and validation dataset. Required args depend
      on the above tuner's implementation.
166,119
import absl
import keras_tuner
import tensorflow as tf
from tensorflow import keras
import tensorflow_transform as tft
from tfx import v1 as tfx

from tfx.examples.penguin import penguin_utils_base as base


def _get_hyperparameters() -> keras_tuner.HyperParameters:
  """Returns hyperparameters for building Keras model."""
  hp = keras_tuner.HyperParameters()
  # Defines search space.
  hp.Choice('learning_rate', [1e-2, 1e-3], default=1e-2)
  hp.Int('num_layers', 1, 3, default=2)
  return hp


def _make_keras_model(hparams: keras_tuner.HyperParameters) -> tf.keras.Model:
  """Creates a DNN Keras model for classifying penguin data.

  Args:
    hparams: Holds HyperParameters for tuning.

  Returns:
    A Keras Model.
  """
  # The model below is built with Functional API, please refer to
  # https://www.tensorflow.org/guide/keras/overview for all API options.
  inputs = [
      keras.layers.Input(shape=(1,), name=base.transformed_name(f))
      for f in base.FEATURE_KEYS
  ]
  d = keras.layers.concatenate(inputs)
  for _ in range(int(hparams.get('num_layers'))):
    d = keras.layers.Dense(8, activation='relu')(d)
  outputs = keras.layers.Dense(3)(d)
  model = keras.Model(inputs=inputs, outputs=outputs)
  model.compile(
      optimizer=keras.optimizers.Adam(hparams.get('learning_rate')),
      loss=keras.losses.SparseCategoricalCrossentropy(from_logits=True),
      metrics=[keras.metrics.SparseCategoricalAccuracy()])
  model.summary(print_fn=absl.logging.info)
  return model

The provided code snippet includes necessary dependencies for implementing the `run_fn` function. Write a Python function `def run_fn(fn_args: tfx.components.FnArgs)` to solve the following problem: Train the model based on given args.

Args:
  fn_args: Holds args used to train the model as name/value pairs.

Here is the function:
def run_fn(fn_args: tfx.components.FnArgs):
  """Train the model based on given args.

  Args:
    fn_args: Holds args used to train the model as name/value pairs.
  """
  tf_transform_output = tft.TFTransformOutput(fn_args.transform_output)

  train_dataset = base.input_fn(
      fn_args.train_files,
      fn_args.data_accessor,
      tf_transform_output,
      base.TRAIN_BATCH_SIZE)

  eval_dataset = base.input_fn(
      fn_args.eval_files,
      fn_args.data_accessor,
      tf_transform_output,
      base.EVAL_BATCH_SIZE)

  # Prefer hyperparameters produced by an upstream Tuner; otherwise fall back
  # to the hand-picked defaults from _get_hyperparameters().
  if fn_args.hyperparameters:
    hparams = keras_tuner.HyperParameters.from_config(fn_args.hyperparameters)
  else:
    # This is a shown case when hyperparameters is decided and Tuner is removed
    # from the pipeline. User can also inline the hyperparameters directly in
    # _build_keras_model.
    hparams = _get_hyperparameters()
  absl.logging.info('HyperParameters for training: %s' % hparams.get_config())

  # Build the model under a distribution-strategy scope so its variables are
  # created strategy-aware.
  mirrored_strategy = tf.distribute.MirroredStrategy()
  with mirrored_strategy.scope():
    model = _make_keras_model(hparams)

  # Write logs to path
  tensorboard_callback = tf.keras.callbacks.TensorBoard(
      log_dir=fn_args.model_run_dir, update_freq='epoch')

  model.fit(
      train_dataset,
      steps_per_epoch=fn_args.train_steps,
      validation_data=eval_dataset,
      validation_steps=fn_args.eval_steps,
      callbacks=[tensorboard_callback])

  # Export with serving signatures so the Transform graph is applied at
  # inference time.
  signatures = base.make_serving_signatures(model, tf_transform_output)
  model.save(fn_args.serving_model_dir, save_format='tf', signatures=signatures)
Train the model based on given args. Args: fn_args: Holds args used to train the model as name/value pairs.
166,120
import os from typing import Dict, List, Optional import absl import tensorflow_model_analysis as tfma from tfx import v1 as tfx The provided code snippet includes necessary dependencies for implementing the `_create_pipeline` function. Write a Python function `def _create_pipeline( pipeline_name: str, pipeline_root: str, data_root: str, trainer_module_file: str, evaluator_module_file: str, ai_platform_training_args: Optional[Dict[str, str]], ai_platform_serving_args: Optional[Dict[str, str]], beam_pipeline_args: List[str], ) -> tfx.dsl.Pipeline` to solve the following problem: Implements the Penguin pipeline with TFX. Here is the function: def _create_pipeline( pipeline_name: str, pipeline_root: str, data_root: str, trainer_module_file: str, evaluator_module_file: str, ai_platform_training_args: Optional[Dict[str, str]], ai_platform_serving_args: Optional[Dict[str, str]], beam_pipeline_args: List[str], ) -> tfx.dsl.Pipeline: """Implements the Penguin pipeline with TFX.""" # Brings data into the pipeline or otherwise joins/converts training data. example_gen = tfx.components.CsvExampleGen( input_base=os.path.join(data_root, 'labelled')) # Computes statistics over data for visualization and example validation. statistics_gen = tfx.components.StatisticsGen( examples=example_gen.outputs['examples']) # Generates schema based on statistics files. schema_gen = tfx.components.SchemaGen( statistics=statistics_gen.outputs['statistics'], infer_feature_shape=True) # Performs anomaly detection based on statistics and data schema. example_validator = tfx.components.ExampleValidator( statistics=statistics_gen.outputs['statistics'], schema=schema_gen.outputs['schema']) # TODO(humichael): Handle applying transformation component in Milestone 3. # Uses user-provided Python function that trains a model. # Num_steps is not provided during evaluation because the scikit-learn model # loads and evaluates the entire test set at once. 
trainer = tfx.extensions.google_cloud_ai_platform.Trainer( module_file=trainer_module_file, examples=example_gen.outputs['examples'], schema=schema_gen.outputs['schema'], train_args=tfx.proto.TrainArgs(num_steps=2000), eval_args=tfx.proto.EvalArgs(), custom_config={ tfx.extensions.google_cloud_ai_platform.TRAINING_ARGS_KEY: ai_platform_training_args, }) # Get the latest blessed model for model validation. model_resolver = tfx.dsl.Resolver( strategy_class=tfx.dsl.experimental.LatestBlessedModelStrategy, model=tfx.dsl.Channel(type=tfx.types.standard_artifacts.Model), model_blessing=tfx.dsl.Channel( type=tfx.types.standard_artifacts.ModelBlessing)).with_id( 'latest_blessed_model_resolver') # Uses TFMA to compute evaluation statistics over features of a model and # perform quality validation of a candidate model (compared to a baseline). eval_config = tfma.EvalConfig( model_specs=[tfma.ModelSpec(label_key='species')], slicing_specs=[tfma.SlicingSpec()], metrics_specs=[ tfma.MetricsSpec(metrics=[ tfma.MetricConfig( class_name='Accuracy', threshold=tfma.MetricThreshold( value_threshold=tfma.GenericValueThreshold( lower_bound={'value': 0.6}), change_threshold=tfma.GenericChangeThreshold( direction=tfma.MetricDirection.HIGHER_IS_BETTER, absolute={'value': -1e-10}))) ]) ]) evaluator = tfx.components.Evaluator( module_file=evaluator_module_file, examples=example_gen.outputs['examples'], model=trainer.outputs['model'], baseline_model=model_resolver.outputs['model'], eval_config=eval_config) pusher = tfx.extensions.google_cloud_ai_platform.Pusher( model=trainer.outputs['model'], model_blessing=evaluator.outputs['blessing'], custom_config={ tfx.extensions.google_cloud_ai_platform.experimental .PUSHER_SERVING_ARGS_KEY: ai_platform_serving_args, }) return tfx.dsl.Pipeline( pipeline_name=pipeline_name, pipeline_root=pipeline_root, components=[ example_gen, statistics_gen, schema_gen, example_validator, trainer, model_resolver, evaluator, pusher, ], enable_cache=True, 
beam_pipeline_args=beam_pipeline_args, )
Implements the Penguin pipeline with TFX.
166,121
import os import pickle from typing import Tuple import absl import numpy as np from sklearn.neural_network import MLPClassifier from sklearn.pipeline import Pipeline from sklearn.preprocessing import StandardScaler from tfx.components.trainer.fn_args_utils import DataAccessor from tfx.components.trainer.fn_args_utils import FnArgs from tfx.dsl.io import fileio from tfx.utils import io_utils from tfx_bsl.tfxio import dataset_options from tensorflow_metadata.proto.v0 import schema_pb2 _FEATURE_KEYS = [ 'culmen_length_mm', 'culmen_depth_mm', 'flipper_length_mm', 'body_mass_g' ] _LABEL_KEY = 'species' _TRAIN_DATA_SIZE = 228 _TRAIN_BATCH_SIZE = 20 def _input_fn( file_pattern: str, data_accessor: DataAccessor, schema: schema_pb2.Schema, batch_size: int = 20, ) -> Tuple[np.ndarray, np.ndarray]: """Generates features and label for tuning/training. Args: file_pattern: input tfrecord file pattern. data_accessor: DataAccessor for converting input to RecordBatch. schema: schema of the input data. batch_size: An int representing the number of records to combine in a single batch. Returns: A (features, indices) tuple where features is a matrix of features, and indices is a single vector of label indices. """ record_batch_iterator = data_accessor.record_batch_factory( file_pattern, dataset_options.RecordBatchesOptions(batch_size=batch_size, num_epochs=1), schema) feature_list = [] label_list = [] for record_batch in record_batch_iterator: record_dict = {} for column, field in zip(record_batch, record_batch.schema): record_dict[field.name] = column.flatten() label_list.append(record_dict[_LABEL_KEY]) features = [record_dict[key] for key in _FEATURE_KEYS] feature_list.append(np.stack(features, axis=-1)) return np.concatenate(feature_list), np.concatenate(label_list) class FnArgs: """Args to pass to user defined training/tuning function(s). Attributes: working_dir: Working dir. train_files: A list of patterns for train files. eval_files: A list of patterns for eval files. 
train_steps: Number of train steps. eval_steps: Number of eval steps. schema_path: A single uri for schema file. Will be None if not specified. schema_file: Deprecated, use `schema_path` instead. transform_graph_path: An optional single uri for transform graph produced by TFT. Will be None if not specified. transform_output: Deprecated, use `transform_graph_path` instead. data_accessor: Contains factories that can create tf.data.Datasets or other means to access the train/eval data. They provide a uniform way of accessing data, regardless of how the data is stored on disk. serving_model_dir: A single uri for the output directory of the serving model. eval_model_dir: A single uri for the output directory of the eval model. Note that this is estimator only, Keras doesn't require it for TFMA. model_run_dir: A single uri for the output directory of model training related files. base_model: An optional base model path that will be used for this training. hyperparameters: An optional keras_tuner.HyperParameters config. custom_config: An optional dictionary passed to the component. 
""" working_dir = attr.ib(type=str, default=None) train_files = attr.ib(type=List[str], default=None) eval_files = attr.ib(type=List[str], default=None) train_steps = attr.ib(type=int, default=None) eval_steps = attr.ib(type=int, default=None) schema_path = attr.ib(type=str, default=None) schema_file = attr.ib(type=str, default=None) transform_graph_path = attr.ib(type=str, default=None) transform_output = attr.ib(type=str, default=None) data_accessor = attr.ib(type=DataAccessor, default=None) serving_model_dir = attr.ib(type=str, default=None) eval_model_dir = attr.ib(type=str, default=None) model_run_dir = attr.ib(type=str, default=None) base_model = attr.ib(type=str, default=None) hyperparameters = attr.ib(type=Dict[str, Any], default=None) custom_config = attr.ib(type=Dict[str, Any], default=None) The provided code snippet includes necessary dependencies for implementing the `run_fn` function. Write a Python function `def run_fn(fn_args: FnArgs)` to solve the following problem: Train the model based on given args. Args: fn_args: Holds args used to train the model as name/value pairs. Here is the function: def run_fn(fn_args: FnArgs): """Train the model based on given args. Args: fn_args: Holds args used to train the model as name/value pairs. """ schema = io_utils.parse_pbtxt_file(fn_args.schema_file, schema_pb2.Schema()) x_train, y_train = _input_fn(fn_args.train_files, fn_args.data_accessor, schema) x_eval, y_eval = _input_fn(fn_args.eval_files, fn_args.data_accessor, schema) steps_per_epoch = _TRAIN_DATA_SIZE / _TRAIN_BATCH_SIZE estimator = MLPClassifier( hidden_layer_sizes=[8, 8, 8], activation='relu', solver='adam', batch_size=_TRAIN_BATCH_SIZE, learning_rate_init=0.0005, max_iter=int(fn_args.train_steps / steps_per_epoch), verbose=True) # Create a pipeline that standardizes the input data before passing it to an # estimator. Once the scaler is fit, it will use the same mean and stdev to # transform inputs at both training and serving time. 
model = Pipeline([ ('scaler', StandardScaler()), ('estimator', estimator), ]) model.feature_keys = _FEATURE_KEYS model.label_key = _LABEL_KEY model.fit(x_train, y_train) absl.logging.info(model) score = model.score(x_eval, y_eval) absl.logging.info('Accuracy: %f', score) # Export the model as a pickle named model.pkl. AI Platform Prediction expects # sklearn model artifacts to follow this naming convention. os.makedirs(fn_args.serving_model_dir) model_path = os.path.join(fn_args.serving_model_dir, 'model.pkl') with fileio.open(model_path, 'wb+') as f: pickle.dump(model, f)
Train the model based on given args. Args: fn_args: Holds args used to train the model as name/value pairs.
166,122
import os from typing import List import absl import tensorflow_model_analysis as tfma from tfx import v1 as tfx The provided code snippet includes necessary dependencies for implementing the `_create_pipeline` function. Write a Python function `def _create_pipeline( pipeline_name: str, pipeline_root: str, data_root: str, trainer_module_file: str, evaluator_module_file: str, serving_model_dir: str, metadata_path: str, beam_pipeline_args: List[str], ) -> tfx.dsl.Pipeline` to solve the following problem: Implements the Penguin pipeline with TFX. Here is the function: def _create_pipeline( pipeline_name: str, pipeline_root: str, data_root: str, trainer_module_file: str, evaluator_module_file: str, serving_model_dir: str, metadata_path: str, beam_pipeline_args: List[str], ) -> tfx.dsl.Pipeline: """Implements the Penguin pipeline with TFX.""" # Brings data into the pipeline or otherwise joins/converts training data. example_gen = tfx.components.CsvExampleGen( input_base=os.path.join(data_root, 'labelled')) # Computes statistics over data for visualization and example validation. statistics_gen = tfx.components.StatisticsGen( examples=example_gen.outputs['examples']) # Generates schema based on statistics files. schema_gen = tfx.components.SchemaGen( statistics=statistics_gen.outputs['statistics'], infer_feature_shape=True) # Performs anomaly detection based on statistics and data schema. example_validator = tfx.components.ExampleValidator( statistics=statistics_gen.outputs['statistics'], schema=schema_gen.outputs['schema']) # TODO(humichael): Handle applying transformation component in Milestone 3. # Uses user-provided Python function that trains a model. # Num_steps is not provided during evaluation because the scikit-learn model # loads and evaluates the entire test set at once. 
trainer = tfx.components.Trainer( module_file=trainer_module_file, examples=example_gen.outputs['examples'], schema=schema_gen.outputs['schema'], train_args=tfx.proto.TrainArgs(num_steps=2000), eval_args=tfx.proto.EvalArgs()) # Get the latest blessed model for model validation. model_resolver = tfx.dsl.Resolver( strategy_class=tfx.dsl.experimental.LatestBlessedModelStrategy, model=tfx.dsl.Channel(type=tfx.types.standard_artifacts.Model), model_blessing=tfx.dsl.Channel( type=tfx.types.standard_artifacts.ModelBlessing)).with_id( 'latest_blessed_model_resolver') # Uses TFMA to compute evaluation statistics over features of a model and # perform quality validation of a candidate model (compared to a baseline). eval_config = tfma.EvalConfig( model_specs=[tfma.ModelSpec(label_key='species')], slicing_specs=[tfma.SlicingSpec()], metrics_specs=[ tfma.MetricsSpec(metrics=[ tfma.MetricConfig( class_name='Accuracy', threshold=tfma.MetricThreshold( value_threshold=tfma.GenericValueThreshold( lower_bound={'value': 0.6}), change_threshold=tfma.GenericChangeThreshold( direction=tfma.MetricDirection.HIGHER_IS_BETTER, absolute={'value': -1e-10}))) ]) ]) evaluator = tfx.components.Evaluator( module_file=evaluator_module_file, examples=example_gen.outputs['examples'], model=trainer.outputs['model'], baseline_model=model_resolver.outputs['model'], eval_config=eval_config) pusher = tfx.components.Pusher( model=trainer.outputs['model'], model_blessing=evaluator.outputs['blessing'], push_destination=tfx.proto.PushDestination( filesystem=tfx.proto.PushDestination.Filesystem( base_directory=serving_model_dir))) return tfx.dsl.Pipeline( pipeline_name=pipeline_name, pipeline_root=pipeline_root, components=[ example_gen, statistics_gen, schema_gen, example_validator, trainer, model_resolver, evaluator, pusher, ], enable_cache=True, metadata_connection_config=tfx.orchestration.metadata .sqlite_metadata_connection_config(metadata_path), beam_pipeline_args=beam_pipeline_args, )
Implements the Penguin pipeline with TFX.
166,123
import copy import os import pickle from typing import Dict, Iterable, List import apache_beam as beam import tensorflow as tf import tensorflow_model_analysis as tfma from tfx_bsl.tfxio import tensor_adapter def _custom_model_loader_fn(model_path: str): """Returns a function that loads a scikit-learn model.""" return lambda: pickle.load(tf.io.gfile.GFile(model_path, 'rb')) The provided code snippet includes necessary dependencies for implementing the `custom_eval_shared_model` function. Write a Python function `def custom_eval_shared_model( eval_saved_model_path, model_name, eval_config, **kwargs) -> tfma.EvalSharedModel` to solve the following problem: Returns a single custom EvalSharedModel. Here is the function: def custom_eval_shared_model( eval_saved_model_path, model_name, eval_config, **kwargs) -> tfma.EvalSharedModel: """Returns a single custom EvalSharedModel.""" model_path = os.path.join(eval_saved_model_path, 'model.pkl') return tfma.default_eval_shared_model( eval_saved_model_path=model_path, model_name=model_name, eval_config=eval_config, custom_model_loader=tfma.ModelLoader( construct_fn=_custom_model_loader_fn(model_path)), add_metrics_callbacks=kwargs.get('add_metrics_callbacks'))
Returns a single custom EvalSharedModel.
166,124
import copy import os import pickle from typing import Dict, Iterable, List import apache_beam as beam import tensorflow as tf import tensorflow_model_analysis as tfma from tfx_bsl.tfxio import tensor_adapter def _make_sklearn_predict_extractor( eval_shared_model: tfma.EvalSharedModel,) -> tfma.extractors.Extractor: """Creates an extractor for performing predictions using a scikit-learn model. The extractor's PTransform loads and runs the serving pickle against every extract yielding a copy of the incoming extracts with an additional extract added for the predictions keyed by tfma.PREDICTIONS_KEY. The model inputs are searched for under tfma.FEATURES_KEY. Args: eval_shared_model: Shared model (single-model evaluation). Returns: Extractor for extracting predictions. """ eval_shared_models = tfma.utils.verify_and_update_eval_shared_models( eval_shared_model) return tfma.extractors.Extractor( stage_name=_PREDICT_EXTRACTOR_STAGE_NAME, ptransform=_ExtractPredictions( # pylint: disable=no-value-for-parameter eval_shared_models={m.model_name: m for m in eval_shared_models})) The provided code snippet includes necessary dependencies for implementing the `custom_extractors` function. Write a Python function `def custom_extractors( eval_shared_model: tfma.MaybeMultipleEvalSharedModels, eval_config: tfma.EvalConfig, tensor_adapter_config: tensor_adapter.TensorAdapterConfig, ) -> List[tfma.extractors.Extractor]` to solve the following problem: Returns default extractors plus a custom prediction extractor. 
Here is the function: def custom_extractors( eval_shared_model: tfma.MaybeMultipleEvalSharedModels, eval_config: tfma.EvalConfig, tensor_adapter_config: tensor_adapter.TensorAdapterConfig, ) -> List[tfma.extractors.Extractor]: """Returns default extractors plus a custom prediction extractor.""" predict_extractor = _make_sklearn_predict_extractor(eval_shared_model) return tfma.default_extractors( eval_shared_model=eval_shared_model, eval_config=eval_config, tensor_adapter_config=tensor_adapter_config, custom_predict_extractor=predict_extractor)
Returns default extractors plus a custom prediction extractor.
166,125
import datetime import os from typing import List import tensorflow_model_analysis as tfma from tfx.components import CsvExampleGen from tfx.components import Evaluator from tfx.components import ExampleValidator from tfx.components import Pusher from tfx.components import SchemaGen from tfx.components import StatisticsGen from tfx.components import Trainer from tfx.components import Transform from tfx.components.trainer.executor import GenericExecutor from tfx.dsl.components.base import executor_spec from tfx.dsl.components.common import resolver from tfx.dsl.experimental import latest_blessed_model_resolver from tfx.orchestration import metadata from tfx.orchestration import pipeline from tfx.orchestration.airflow.airflow_dag_runner import AirflowDagRunner from tfx.orchestration.airflow.airflow_dag_runner import AirflowPipelineConfig from tfx.proto import pusher_pb2 from tfx.proto import trainer_pb2 from tfx.types import Channel from tfx.types.standard_artifacts import Model from tfx.types.standard_artifacts import ModelBlessing class GenericExecutor(base_executor.BaseExecutor): """Local generic trainer executor for the TFX Trainer component. The Trainer executor supplements TensorFlow training with a component to enable warm-start training of any user-specified TF model. The Trainer is a library built on top of TensorFlow that is expected to be integrated into a custom user-specified binary. To include Trainer in a TFX pipeline, configure your pipeline similar to https://github.com/tensorflow/tfx/blob/master/tfx/examples/chicago_taxi_pipeline/taxi_pipeline_simple.py#L104. For more details on the Trainer component itself, please refer to https://tensorflow.org/tfx/guide/trainer. For a tutorial on Tensorflow, please refer to https://www.tensorflow.org/tutorials. 
How to create a trainer callback function to be used by this Trainer executor: A model training can be executed by TFX by first creating a run_fn callback method that defines, trains an TF Model and saves it to the provided location, This becomes the basis of the Executor for GenericTrainer. This Executor will then execute the run_fn with correct parameters by resolving the input artifacts, output artifacts and execution properties. """ # Name of subdirectory which contains checkpoints from prior runs _CHECKPOINT_FILE_NAME = 'checkpoint' def _GetFnArgs(self, input_dict: Dict[str, List[types.Artifact]], output_dict: Dict[str, List[types.Artifact]], exec_properties: Dict[str, Any]) -> fn_args_utils.FnArgs: if input_dict.get(standard_component_specs.HYPERPARAMETERS_KEY): hyperparameters_file = io_utils.get_only_uri_in_dir( artifact_utils.get_single_uri( input_dict[standard_component_specs.HYPERPARAMETERS_KEY])) hyperparameters_config = json.loads( file_io.read_file_to_string(hyperparameters_file)) else: hyperparameters_config = None output_path = artifact_utils.get_single_uri( output_dict[standard_component_specs.MODEL_KEY]) serving_model_dir = path_utils.serving_model_dir(output_path) eval_model_dir = path_utils.eval_model_dir(output_path) model_run_dir = artifact_utils.get_single_uri( output_dict[standard_component_specs.MODEL_RUN_KEY]) # TODO(b/126242806) Use PipelineInputs when it is available in third_party. result = fn_args_utils.get_common_fn_args(input_dict, exec_properties) if result.custom_config and not isinstance(result.custom_config, dict): raise ValueError('custom_config in execution properties needs to be a ' 'dict. Got %s instead.' 
% type(result.custom_config)) result.transform_output = result.transform_graph_path result.serving_model_dir = serving_model_dir result.eval_model_dir = eval_model_dir result.model_run_dir = model_run_dir result.schema_file = result.schema_path result.hyperparameters = hyperparameters_config return result def Do(self, input_dict: Dict[str, List[types.Artifact]], output_dict: Dict[str, List[types.Artifact]], exec_properties: Dict[str, Any]) -> None: """Uses a user-supplied run_fn to train a TensorFlow model locally. The Trainer Executor invokes a run_fn callback function provided by the user via the module_file parameter. In this function, user defines the model and trains it, then saves the model and training related files (e.g, Tensorboard logs) to the provided locations. Args: input_dict: Input dict from input key to a list of ML-Metadata Artifacts. - examples: Examples used for training, must include 'train' and 'eval' if custom splits is not specified in train_args and eval_args. - transform_graph: Optional input transform graph. - transform_output: Optional input transform graph, deprecated. - schema: Schema of the data. output_dict: Output dict from output key to a list of Artifacts. - model: Exported model. - model_run: Model training related outputs (e.g., Tensorboard logs) exec_properties: A dict of execution properties. - train_args: JSON string of trainer_pb2.TrainArgs instance, providing args for training. - eval_args: JSON string of trainer_pb2.EvalArgs instance, providing args for eval. - module_file: Python module file containing UDF model definition. Exactly one of `module_file`, `module_path` and `run_fn` should be passed. - module_path: Python module path containing UDF model definition. Exactly one of `module_file`, `module_path` and `run_fn` should be passed. - run_fn: Python module path to the run function. Exactly one of `module_file`, `module_path` and `run_fn` should be passed. - warm_starting: Whether or not we need to do warm starting. 
- warm_start_from: Optional. If warm_starting is True, this is the directory to find previous model to warm start on. - custom_config: Optional. JSON-serialized dict of additional parameters to pass to trainer function. Returns: None Raises: ValueError: When not exactly one of `module_file`, `module_path` and `run_fn` are present in 'exec_properties'. RuntimeError: If run_fn failed to generate model in desired location. """ self._log_startup(input_dict, output_dict, exec_properties) fn_args = self._GetFnArgs(input_dict, output_dict, exec_properties) run_fn = udf_utils.get_fn(exec_properties, 'run_fn') # Train the model absl.logging.info('Training model.') run_fn(fn_args) # Note: If trained with multi-node distribution workers, it is the user # module's responsibility to export the model only once. if not fileio.exists(fn_args.serving_model_dir): raise RuntimeError('run_fn failed to generate model.') absl.logging.info( 'Training complete. Model written to %s. ModelRun written to %s', fn_args.serving_model_dir, fn_args.model_run_dir) class Model(_TfxArtifact): """Artifact that contains the actual persisted model. Training components stores the trained model like a saved model in this artifact. A `Model` artifact contains serialization of the trained model in one or more formats, each suitable for different usage (e.g. serving, evaluation), and serving environments. * File structure: - `{uri}/` - `Format-Serving/`: Model exported for serving. - `saved_model.pb` - Other actual model files. - `Format-TFMA/`: Model exported for evaluation. - `saved_model.pb` - Other actual model files. * Commonly used custom properties of the Model artifact: """ TYPE_NAME = 'Model' TYPE_ANNOTATION = SystemModel class ModelBlessing(_TfxArtifact): """Artifact that contains the evaluation of a trained model. This artifact is usually used with Conditional when determining whether to push this model on service or not. ```python # Run pusher if evaluator has blessed the model. 
with tfx.dsl.Cond(evaluator.outputs['blessing'].future() [0].custom_property('blessed') == 1): pusher = Pusher(...) ``` * File structure: - `{uri}/` - `BLESSED`: if the evaluator has blessed the model. - `NOT_BLESSED`: if the evaluator has not blessed the model. - See tfx/components/evaluator/executor.py for how to write ModelBlessing. * Commonly used custom properties of the ModelBlessing artifact: - `blessed`: int value that represents whether the evaluator has blessed its model or not. """ TYPE_NAME = 'ModelBlessing' The provided code snippet includes necessary dependencies for implementing the `_create_pipeline` function. Write a Python function `def _create_pipeline(pipeline_name: str, pipeline_root: str, data_root: str, module_file: str, serving_model_dir: str, metadata_path: str, beam_pipeline_args: List[str]) -> pipeline.Pipeline` to solve the following problem: Implements the chicago taxi pipeline with TFX. Here is the function: def _create_pipeline(pipeline_name: str, pipeline_root: str, data_root: str, module_file: str, serving_model_dir: str, metadata_path: str, beam_pipeline_args: List[str]) -> pipeline.Pipeline: """Implements the chicago taxi pipeline with TFX.""" # Brings data into the pipeline or otherwise joins/converts training data. example_gen = CsvExampleGen(input_base=data_root) # Computes statistics over data for visualization and example validation. statistics_gen = StatisticsGen( examples=example_gen.outputs['examples']) # Step 3 # Generates schema based on statistics files. infer_schema = SchemaGen( # Step 3 statistics=statistics_gen.outputs['statistics'], # Step 3 infer_feature_shape=False) # Step 3 # Performs anomaly detection based on statistics and data schema. validate_stats = ExampleValidator( # Step 3 statistics=statistics_gen.outputs['statistics'], # Step 3 schema=infer_schema.outputs['schema']) # Step 3 # Performs transformations and feature engineering in training and serving. 
transform = Transform( # Step 4 examples=example_gen.outputs['examples'], # Step 4 schema=infer_schema.outputs['schema'], # Step 4 module_file=module_file) # Step 4 # Uses user-provided Python function that implements a model. trainer = Trainer( # Step 5 module_file=module_file, # Step 5 custom_executor_spec=executor_spec.ExecutorClassSpec( GenericExecutor), # Step 5 examples=transform.outputs['transformed_examples'], # Step 5 transform_graph=transform.outputs['transform_graph'], # Step 5 schema=infer_schema.outputs['schema'], # Step 5 train_args=trainer_pb2.TrainArgs(num_steps=10000), # Step 5 eval_args=trainer_pb2.EvalArgs(num_steps=5000)) # Step 5 # Get the latest blessed model for model validation. model_resolver = resolver.Resolver( # Step 6 strategy_class=latest_blessed_model_resolver .LatestBlessedModelResolver, # Step 6 model=Channel(type=Model), # Step 6 model_blessing=Channel(type=ModelBlessing)).with_id( # Step 6 'latest_blessed_model_resolver') # Step 6 # Uses TFMA to compute a evaluation statistics over features of a model and # perform quality validation of a candidate model (compared to a baseline). eval_config = tfma.EvalConfig( # Step 6 model_specs=[ # Step 6 # This assumes a serving model with signature 'serving_default'. If # using estimator based EvalSavedModel, add signature_name: 'eval' and # remove the label_key. tfma.ModelSpec( # Step 6 signature_name='serving_default', # Step 6 label_key='tips', # Step 6 preprocessing_function_names=['transform_features'], # Step 6 ) # Step 6 ], # Step 6 metrics_specs=[ # Step 6 tfma.MetricsSpec( # Step 6 # The metrics added here are in addition to those saved with the # model (assuming either a keras model or EvalSavedModel is used). # Any metrics added into the saved model (for example using # model.compile(..., metrics=[...]), etc) will be computed # automatically. # To add validation thresholds for metrics saved with the model, # add them keyed by metric name to the thresholds map. 
metrics=[ # Step 6 tfma.MetricConfig(class_name='ExampleCount'), # Step 6 tfma.MetricConfig( class_name='BinaryAccuracy', # Step 6 threshold=tfma.MetricThreshold( # Step 6 value_threshold=tfma.GenericValueThreshold( # Step 6 lower_bound={'value': 0.5}), # Step 6 # Change threshold will be ignored if there is no # baseline model resolved from MLMD (first run). change_threshold=tfma .GenericChangeThreshold( # Step 6 direction=tfma.MetricDirection .HIGHER_IS_BETTER, # Step 6 absolute={'value': -1e-10}))) # Step 6 ] # Step 6 ) # Step 6 ], # Step 6 slicing_specs=[ # Step 6 # An empty slice spec means the overall slice, i.e. the whole dataset. tfma.SlicingSpec(), # Step 6 # Data can be sliced along a feature column. In this case, data is # sliced along feature column trip_start_hour. tfma.SlicingSpec( # Step 6 feature_keys=['trip_start_hour']) # Step 6 ]) # Step 6 model_analyzer = Evaluator( # Step 6 examples=example_gen.outputs['examples'], # Step 6 model=trainer.outputs['model'], # Step 6 baseline_model=model_resolver.outputs['model'], # Step 6 eval_config=eval_config) # Step 6 # Checks whether the model passed the validation steps and pushes the model # to a file destination if check passed. pusher = Pusher( # Step 7 model=trainer.outputs['model'], # Step 7 model_blessing=model_analyzer.outputs['blessing'], # Step 7 push_destination=pusher_pb2.PushDestination( # Step 7 filesystem=pusher_pb2.PushDestination.Filesystem( # Step 7 base_directory=serving_model_dir))) # Step 7 return pipeline.Pipeline( pipeline_name=pipeline_name, pipeline_root=pipeline_root, components=[ example_gen, statistics_gen, # Step 3 infer_schema, # Step 3 validate_stats, # Step 3 transform, # Step 4 trainer, # Step 5 model_resolver, # Step 6 model_analyzer, # Step 6 pusher, # Step 7 ], enable_cache=True, metadata_connection_config=metadata.sqlite_metadata_connection_config( metadata_path), beam_pipeline_args=beam_pipeline_args)
Implements the chicago taxi pipeline with TFX.
166,126
from typing import List import absl from keras.callbacks import LambdaCallback import tensorflow as tf import tensorflow_transform as tft from tfx.components.trainer.executor import TrainerFnArgs from tfx.components.trainer.fn_args_utils import DataAccessor from tfx_bsl.tfxio import dataset_options _NUMERICAL_FEATURES = ['trip_miles', 'fare', 'trip_seconds'] _BUCKET_FEATURES = [ 'pickup_latitude', 'pickup_longitude', 'dropoff_latitude', 'dropoff_longitude' ] _FEATURE_BUCKET_COUNT = 10 _CATEGORICAL_NUMERICAL_FEATURES = [ 'trip_start_hour', 'trip_start_day', 'trip_start_month', 'pickup_census_tract', 'dropoff_census_tract', 'pickup_community_area', 'dropoff_community_area' ] _CATEGORICAL_STRING_FEATURES = [ 'payment_type', 'company', ] _LABEL_KEY = 'tips' _FARE_KEY = 'fare' _FEATURE_BUCKET_COUNT = 10 _LABEL_KEY = 'tips' def t_name(key): """Rename the feature keys so that they don't clash with the raw keys when. running the Evaluator component. Args: key: The original feature key Returns: key with '_xf' appended """ return key + '_xf' def _make_one_hot(x, key): """Make a one-hot tensor to encode categorical features. Args: x: A dense tensor key: A string key for the feature in the input Returns: A dense one-hot tensor as a float list """ integerized = tft.compute_and_apply_vocabulary( x, top_k=_VOCAB_SIZE, num_oov_buckets=_OOV_SIZE, vocab_filename=key, name=key) depth = (tft.experimental.get_vocabulary_size_by_name(key) + _OOV_SIZE) one_hot_encoded = tf.one_hot( integerized, depth=tf.cast(depth, tf.int32), on_value=1.0, off_value=0.0) return tf.reshape(one_hot_encoded, [-1, depth]) def _fill_in_missing(x): """Replace missing values in a SparseTensor. Fills in missing values of `x` with '' or 0, and converts to a dense tensor. Args: x: A `SparseTensor` of rank 2. Its dense shape should have size at most 1 in the second dimension. Returns: A rank 1 tensor where missing values of `x` have been filled in. 
""" if not isinstance(x, tf.sparse.SparseTensor): return x default_value = '' if x.dtype == tf.string else 0 return tf.squeeze( tf.sparse.to_dense( tf.SparseTensor(x.indices, x.values, [x.dense_shape[0], 1]), default_value), axis=1) The provided code snippet includes necessary dependencies for implementing the `preprocessing_fn` function. Write a Python function `def preprocessing_fn(inputs)` to solve the following problem: tf.transform's callback function for preprocessing inputs. Args: inputs: map from feature keys to raw not-yet-transformed features. Returns: Map from string feature key to transformed feature operations. Here is the function: def preprocessing_fn(inputs): """tf.transform's callback function for preprocessing inputs. Args: inputs: map from feature keys to raw not-yet-transformed features. Returns: Map from string feature key to transformed feature operations. """ outputs = {} for key in _NUMERICAL_FEATURES: # If sparse make it dense, setting nan's to 0 or '', and apply zscore. outputs[t_name(key)] = tft.scale_to_z_score( _fill_in_missing(inputs[key]), name=key) for key in _BUCKET_FEATURES: outputs[t_name(key)] = tf.cast( tft.bucketize( _fill_in_missing(inputs[key]), _FEATURE_BUCKET_COUNT, name=key), dtype=tf.float32) for key in _CATEGORICAL_STRING_FEATURES: outputs[t_name(key)] = _make_one_hot(_fill_in_missing(inputs[key]), key) for key in _CATEGORICAL_NUMERICAL_FEATURES: outputs[t_name(key)] = _make_one_hot(_fill_in_missing(inputs[key]), key) # Was this passenger a big tipper? taxi_fare = _fill_in_missing(inputs[_FARE_KEY]) tips = _fill_in_missing(inputs[_LABEL_KEY]) outputs[_LABEL_KEY] = tf.where( tf.math.is_nan(taxi_fare), tf.cast(tf.zeros_like(taxi_fare), tf.int64), # Test if the tip was > 20% of the fare. tf.cast( tf.greater(tips, tf.multiply(taxi_fare, tf.constant(0.2))), tf.int64)) return outputs
tf.transform's callback function for preprocessing inputs. Args: inputs: map from feature keys to raw not-yet-transformed features. Returns: Map from string feature key to transformed feature operations.
166,127
from typing import List import absl from keras.callbacks import LambdaCallback import tensorflow as tf import tensorflow_transform as tft from tfx.components.trainer.executor import TrainerFnArgs from tfx.components.trainer.fn_args_utils import DataAccessor from tfx_bsl.tfxio import dataset_options def export_serving_model(tf_transform_output, model, output_dir): """Exports a keras model for serving. Args: tf_transform_output: Wrapper around output of tf.Transform. model: A keras model to export for serving. output_dir: A directory where the model will be exported to. """ # The layer has to be saved to the model for keras tracking purpases. model.tft_layer = tf_transform_output.transform_features_layer() signatures = { 'serving_default': _get_tf_examples_serving_signature(model, tf_transform_output), 'transform_features': _get_transform_features_signature(model, tf_transform_output), } model.save(output_dir, save_format='tf', signatures=signatures) def _input_fn(file_pattern: List[str], data_accessor: DataAccessor, tf_transform_output: tft.TFTransformOutput, batch_size: int) -> tf.data.Dataset: """Generates features and label for tuning/training. Args: file_pattern: List of paths or patterns of input tfrecord files. data_accessor: DataAccessor for converting input to RecordBatch. tf_transform_output: A TFTransformOutput. batch_size: representing the number of consecutive elements of returned dataset to combine in a single batch Returns: A dataset that contains (features, indices) tuple where features is a dictionary of Tensors, and indices is a single Tensor of label indices. """ return data_accessor.tf_dataset_factory( file_pattern, dataset_options.TensorFlowDatasetOptions( batch_size=batch_size, label_key=_LABEL_KEY), tf_transform_output.transformed_metadata.schema).repeat() def _build_keras_model(hidden_units: List[int] = None) -> tf.keras.Model: """Creates a DNN Keras model for classifying taxi data. 
Args: hidden_units: [int], the layer sizes of the DNN (input layer first). Returns: A keras Model. """ real_valued_columns = [ tf.feature_column.numeric_column(key, shape=()) for key in _transformed_names(_DENSE_FLOAT_FEATURE_KEYS) ] categorical_columns = [ tf.feature_column.categorical_column_with_identity( key, num_buckets=_VOCAB_SIZE + _OOV_SIZE, default_value=0) for key in _transformed_names(_VOCAB_FEATURE_KEYS) ] categorical_columns += [ tf.feature_column.categorical_column_with_identity( key, num_buckets=_FEATURE_BUCKET_COUNT, default_value=0) for key in _transformed_names(_BUCKET_FEATURE_KEYS) ] categorical_columns += [ tf.feature_column.categorical_column_with_identity( # pylint: disable=g-complex-comprehension key, num_buckets=num_buckets, default_value=0) for key, num_buckets in zip( _transformed_names(_CATEGORICAL_FEATURE_KEYS), _MAX_CATEGORICAL_FEATURE_VALUES) ] indicator_column = [ tf.feature_column.indicator_column(categorical_column) for categorical_column in categorical_columns ] model = _wide_and_deep_classifier( # TODO(b/139668410) replace with premade wide_and_deep keras model wide_columns=indicator_column, deep_columns=real_valued_columns, dnn_hidden_units=hidden_units or [100, 70, 50, 25]) return model TrainerFnArgs = deprecation_utils.deprecated_alias( # pylint: disable=invalid-name deprecated_name='tfx.components.trainer.executor.TrainerFnArgs', name='tfx.components.trainer.fn_args_utils.FnArgs', func_or_class=fn_args_utils.FnArgs) The provided code snippet includes necessary dependencies for implementing the `run_fn` function. Write a Python function `def run_fn(fn_args: TrainerFnArgs)` to solve the following problem: Train the model based on given args. Args: fn_args: Holds args used to train the model as name/value pairs. Here is the function: def run_fn(fn_args: TrainerFnArgs): """Train the model based on given args. Args: fn_args: Holds args used to train the model as name/value pairs. 
""" # Number of nodes in the first layer of the DNN first_dnn_layer_size = 100 num_dnn_layers = 4 dnn_decay_factor = 0.7 tf_transform_output = tft.TFTransformOutput(fn_args.transform_output) train_dataset = _input_fn(fn_args.train_files, fn_args.data_accessor, tf_transform_output, 40) eval_dataset = _input_fn(fn_args.eval_files, fn_args.data_accessor, tf_transform_output, 40) # If no GPUs are found, CPU is used. mirrored_strategy = tf.distribute.MirroredStrategy() with mirrored_strategy.scope(): model = _build_keras_model( # Construct layers sizes with exponetial decay hidden_units=[ max(2, int(first_dnn_layer_size * dnn_decay_factor**i)) for i in range(num_dnn_layers) ]) # Creating a custom callback using LambdaCallback to add batch data def batch_output(batch, logs): tf.summary.scalar('batch_loss', data=logs['loss'], step=batch) tf.summary.scalar( 'batch_accuracy', data=logs['binary_accuracy'], step=batch) return batch batch_log_callback = LambdaCallback(on_batch_end=batch_output) # Write logs to path tensorboard_callback = tf.keras.callbacks.TensorBoard( log_dir=fn_args.model_run_dir, update_freq='batch') model.fit( train_dataset, steps_per_epoch=fn_args.train_steps, validation_data=eval_dataset, validation_steps=fn_args.eval_steps, callbacks=[tensorboard_callback, batch_log_callback]) # Export the model. export_serving_model(tf_transform_output, model, fn_args.serving_model_dir)
Train the model based on given args. Args: fn_args: Holds args used to train the model as name/value pairs.
166,128
import re from IPython.display import display_html import matplotlib.pyplot as plt import networkx as nx import pandas as pd from ml_metadata.proto import metadata_store_pb2 The provided code snippet includes necessary dependencies for implementing the `_is_output_event` function. Write a Python function `def _is_output_event(event)` to solve the following problem: Checks if event is an Output event. Here is the function: def _is_output_event(event): """Checks if event is an Output event.""" return event.type == metadata_store_pb2.Event.OUTPUT
Checks if event is an Output event.
166,129
import re from IPython.display import display_html import matplotlib.pyplot as plt import networkx as nx import pandas as pd from ml_metadata.proto import metadata_store_pb2 The provided code snippet includes necessary dependencies for implementing the `_is_input_event` function. Write a Python function `def _is_input_event(event)` to solve the following problem: Checks if event is an Input event. Here is the function: def _is_input_event(event): """Checks if event is an Input event.""" return event.type in [ metadata_store_pb2.Event.DECLARED_INPUT, metadata_store_pb2.Event.INPUT ]
Checks if event is an Input event.
166,130
import re from IPython.display import display_html import matplotlib.pyplot as plt import networkx as nx import pandas as pd from ml_metadata.proto import metadata_store_pb2 The provided code snippet includes necessary dependencies for implementing the `_get_value_str` function. Write a Python function `def _get_value_str(p)` to solve the following problem: Returns a string representation of a `metadata_store_pb2.Value` object. Here is the function: def _get_value_str(p): """Returns a string representation of a `metadata_store_pb2.Value` object.""" if p.int_value: return str(p.int_value) if p.string_value: return p.string_value if p.double_value: return str(p.double_value) return ''
Returns a string representation of a `metadata_store_pb2.Value` object.
166,131
import os from typing import Dict, List from tfx.components import Evaluator from tfx.components import ExampleValidator from tfx.components import ModelValidator from tfx.components import Pusher from tfx.components import SchemaGen from tfx.components import StatisticsGen from tfx.components import Trainer from tfx.components import Transform from tfx.dsl.components.base import executor_spec from tfx.extensions.google_cloud_ai_platform.trainer import executor as ai_platform_trainer_executor from tfx.extensions.google_cloud_big_query.example_gen import component as big_query_example_gen_component from tfx.extensions.google_cloud_big_query.pusher import executor as bigquery_pusher_executor from tfx.orchestration import pipeline from tfx.orchestration.kubeflow import kubeflow_dag_runner from tfx.proto import evaluator_pb2 from tfx.proto import trainer_pb2 The provided code snippet includes necessary dependencies for implementing the `_create_pipeline` function. Write a Python function `def _create_pipeline( pipeline_name: str, pipeline_root: str, query: str, module_file: str, beam_pipeline_args: List[str], ai_platform_training_args: Dict[str, str], bigquery_serving_args: Dict[str, str]) -> pipeline.Pipeline` to solve the following problem: Implements the chicago taxi pipeline with TFX and Kubeflow Pipelines. Here is the function: def _create_pipeline( pipeline_name: str, pipeline_root: str, query: str, module_file: str, beam_pipeline_args: List[str], ai_platform_training_args: Dict[str, str], bigquery_serving_args: Dict[str, str]) -> pipeline.Pipeline: """Implements the chicago taxi pipeline with TFX and Kubeflow Pipelines.""" # Brings data into the pipeline or otherwise joins/converts training data. example_gen = big_query_example_gen_component.BigQueryExampleGen(query=query) # Computes statistics over data for visualization and example validation. statistics_gen = StatisticsGen(examples=example_gen.outputs['examples']) # Generates schema based on statistics files. 
schema_gen = SchemaGen( statistics=statistics_gen.outputs['statistics'], infer_feature_shape=True) # Performs anomaly detection based on statistics and data schema. example_validator = ExampleValidator( statistics=statistics_gen.outputs['statistics'], schema=schema_gen.outputs['schema']) # Performs transformations and feature engineering in training and serving. transform = Transform( examples=example_gen.outputs['examples'], schema=schema_gen.outputs['schema'], module_file=module_file) # Uses user-provided Python function that implements a model. # to train a model on Google Cloud AI Platform. trainer = Trainer( custom_executor_spec=executor_spec.ExecutorClassSpec( ai_platform_trainer_executor.Executor), module_file=module_file, transformed_examples=transform.outputs['transformed_examples'], schema=schema_gen.outputs['schema'], transform_graph=transform.outputs['transform_graph'], train_args=trainer_pb2.TrainArgs(num_steps=10000), eval_args=trainer_pb2.EvalArgs(num_steps=5000), custom_config={'ai_platform_training_args': ai_platform_training_args}) # Uses TFMA to compute a evaluation statistics over features of a model. evaluator = Evaluator( examples=example_gen.outputs['examples'], model=trainer.outputs['model'], feature_slicing_spec=evaluator_pb2.FeatureSlicingSpec(specs=[ evaluator_pb2.SingleSlicingSpec( column_for_slicing=['trip_start_hour']) ])) # Performs quality validation of a candidate model (compared to a baseline). model_validator = ModelValidator( examples=example_gen.outputs['examples'], model=trainer.outputs['model']) # Checks whether the model passed the validation steps and pushes the model # to Google Cloud BigQuery ML if check passed. 
pusher = Pusher( custom_executor_spec=executor_spec.ExecutorClassSpec( bigquery_pusher_executor.Executor), model=trainer.outputs['model'], model_blessing=model_validator.outputs['blessing'], custom_config={'bigquery_serving_args': bigquery_serving_args}) return pipeline.Pipeline( pipeline_name=pipeline_name, pipeline_root=pipeline_root, components=[ example_gen, statistics_gen, schema_gen, example_validator, transform, trainer, evaluator, model_validator, pusher ], beam_pipeline_args=beam_pipeline_args, )
Implements the chicago taxi pipeline with TFX and Kubeflow Pipelines.
166,132
from typing import List import tensorflow as tf from tensorflow import estimator as tf_estimator import tensorflow_model_analysis as tfma import tensorflow_transform as tft from tensorflow_transform.tf_metadata import schema_utils from tfx.components.trainer.fn_args_utils import DataAccessor from tfx_bsl.tfxio import dataset_options _CATEGORICAL_FEATURE_KEYS = [ 'trip_start_hour', 'trip_start_day', 'trip_start_month', 'pickup_census_tract', 'dropoff_census_tract', 'pickup_community_area', 'dropoff_community_area' ] _DENSE_FLOAT_FEATURE_KEYS = ['trip_miles', 'fare', 'trip_seconds'] _FEATURE_BUCKET_COUNT = 10 _BUCKET_FEATURE_KEYS = [ 'pickup_latitude', 'pickup_longitude', 'dropoff_latitude', 'dropoff_longitude' ] _VOCAB_SIZE = 1000 _OOV_SIZE = 10 _VOCAB_FEATURE_KEYS = [ 'payment_type', 'company', ] _LABEL_KEY = 'tips' _FARE_KEY = 'fare' def _transformed_name(key): return key + '_xf' def _fill_in_missing(x): """Replace missing values in a SparseTensors. If x is a SparseTensors, fills in missing values of `x` with '' or 0, and converts to a dense tensor. Otherwise it returns x as is. Args: x: A `SparseTensor` of rank 2 or a tensor that is not an instance of `SparseTensor`. If input is a `SparseTensor` its dense shape should have size at most 1 in the second dimension. Returns: A rank 1 tensor where missing values of `x` have been filled in, or x as is if x is not an instance of `SparseTensor` """ if not isinstance(x, tf.SparseTensor): return x default_value = '' if x.dtype == tf.string else 0 return tf.squeeze( tf.sparse.to_dense( tf.SparseTensor(x.indices, x.values, [x.dense_shape[0], 1]), default_value), axis=1) The provided code snippet includes necessary dependencies for implementing the `preprocessing_fn` function. Write a Python function `def preprocessing_fn(inputs)` to solve the following problem: tf.transform's callback function for preprocessing inputs. Args: inputs: map from feature keys to raw not-yet-transformed features. 
Returns: Map from string feature key to transformed feature operations. Here is the function: def preprocessing_fn(inputs): """tf.transform's callback function for preprocessing inputs. Args: inputs: map from feature keys to raw not-yet-transformed features. Returns: Map from string feature key to transformed feature operations. """ outputs = {} for key in _DENSE_FLOAT_FEATURE_KEYS: # If sparse make it dense, setting nan's to 0 or '', and apply zscore. outputs[_transformed_name(key)] = tft.scale_to_z_score( _fill_in_missing(inputs[key])) for key in _VOCAB_FEATURE_KEYS: # Build a vocabulary for this feature. outputs[_transformed_name(key)] = tft.compute_and_apply_vocabulary( _fill_in_missing(inputs[key]), top_k=_VOCAB_SIZE, num_oov_buckets=_OOV_SIZE) for key in _BUCKET_FEATURE_KEYS: outputs[_transformed_name(key)] = tft.bucketize( _fill_in_missing(inputs[key]), _FEATURE_BUCKET_COUNT) for key in _CATEGORICAL_FEATURE_KEYS: outputs[_transformed_name(key)] = _fill_in_missing(inputs[key]) # Was this passenger a big tipper? taxi_fare = _fill_in_missing(inputs[_FARE_KEY]) tips = _fill_in_missing(inputs[_LABEL_KEY]) outputs[_transformed_name(_LABEL_KEY)] = tf.compat.v1.where( tf.math.is_nan(taxi_fare), tf.cast(tf.zeros_like(taxi_fare), tf.int64), # Test if the tip was > 20% of the fare. tf.cast( tf.greater(tips, tf.multiply(taxi_fare, tf.constant(0.2))), tf.int64)) return outputs
tf.transform's callback function for preprocessing inputs. Args: inputs: map from feature keys to raw not-yet-transformed features. Returns: Map from string feature key to transformed feature operations.
166,133
from typing import List import tensorflow as tf from tensorflow import estimator as tf_estimator import tensorflow_model_analysis as tfma import tensorflow_transform as tft from tensorflow_transform.tf_metadata import schema_utils from tfx.components.trainer.fn_args_utils import DataAccessor from tfx_bsl.tfxio import dataset_options def _build_estimator(config, hidden_units=None, warm_start_from=None): """Build an estimator for predicting the tipping behavior of taxi riders. Args: config: tf.estimator.RunConfig defining the runtime environment for the estimator (including model_dir). hidden_units: [int], the layer sizes of the DNN (input layer first) warm_start_from: Optional directory to warm start from. Returns: A dict of the following: - estimator: The estimator that will be used for training and eval. - train_spec: Spec for training. - eval_spec: Spec for eval. - eval_input_receiver_fn: Input function for eval. """ real_valued_columns = [ tf.feature_column.numeric_column(key, shape=()) for key in _transformed_names(_DENSE_FLOAT_FEATURE_KEYS) ] categorical_columns = [ tf.feature_column.categorical_column_with_identity( key, num_buckets=_VOCAB_SIZE + _OOV_SIZE, default_value=0) for key in _transformed_names(_VOCAB_FEATURE_KEYS) ] categorical_columns += [ tf.feature_column.categorical_column_with_identity( key, num_buckets=_FEATURE_BUCKET_COUNT, default_value=0) for key in _transformed_names(_BUCKET_FEATURE_KEYS) ] categorical_columns += [ tf.feature_column.categorical_column_with_identity( # pylint: disable=g-complex-comprehension key, num_buckets=num_buckets, default_value=0) for key, num_buckets in zip( _transformed_names(_CATEGORICAL_FEATURE_KEYS), _MAX_CATEGORICAL_FEATURE_VALUES) ] return tf_estimator.DNNLinearCombinedClassifier( config=config, linear_feature_columns=categorical_columns, dnn_feature_columns=real_valued_columns, dnn_hidden_units=hidden_units or [100, 70, 50, 25], warm_start_from=warm_start_from) def 
_flat_input_serving_receiver_fn(tf_transform_output, schema): """Build the serving function for flat list of Dense tensors as input. Args: tf_transform_output: A TFTransformOutput. schema: the schema of the input data. Returns: Tensorflow graph which parses examples, applying tf-transform to them. """ raw_feature_spec = _get_raw_feature_spec(schema) raw_feature_spec.pop(_LABEL_KEY) raw_input_fn = tf_estimator.export.build_parsing_serving_input_receiver_fn( raw_feature_spec, default_batch_size=None) serving_input_receiver = raw_input_fn() transformed_features = tf_transform_output.transform_raw_features( serving_input_receiver.features) # We construct a receiver function that receives flat list of Dense tensors as # features. This is as per BigQuery ML serving requirements. return tf_estimator.export.ServingInputReceiver( transformed_features, serving_input_receiver.features) def _eval_input_receiver_fn(tf_transform_output, schema): """Build everything needed for the tf-model-analysis to run the model. Args: tf_transform_output: A TFTransformOutput. schema: the schema of the input data. Returns: EvalInputReceiver function, which contains: - Tensorflow graph which parses raw untransformed features, applies the tf-transform preprocessing operators. - Set of raw, untransformed features. - Label against which predictions will be compared. """ # Notice that the inputs are raw features, not transformed features here. raw_feature_spec = _get_raw_feature_spec(schema) serialized_tf_example = tf.compat.v1.placeholder( dtype=tf.string, shape=[None], name='input_example_tensor') # Add a parse_example operator to the tensorflow graph, which will parse # raw, untransformed, tf examples. features = tf.io.parse_example( serialized=serialized_tf_example, features=raw_feature_spec) # Now that we have our raw examples, process them through the tf-transform # function computed during the preprocessing step. 
transformed_features = tf_transform_output.transform_raw_features(features) # The key name MUST be 'examples'. receiver_tensors = {'examples': serialized_tf_example} # NOTE: Model is driven by transformed features (since training works on the # materialized output of TFT, but slicing will happen on raw features. features.update(transformed_features) return tfma.export.EvalInputReceiver( features=features, receiver_tensors=receiver_tensors, labels=transformed_features[_transformed_name(_LABEL_KEY)]) def _input_fn(file_pattern: List[str], data_accessor: DataAccessor, tf_transform_output: tft.TFTransformOutput, batch_size: int = 200) -> tf.data.Dataset: """Generates features and label for tuning/training. Args: file_pattern: List of paths or patterns of input tfrecord files. data_accessor: DataAccessor for converting input to RecordBatch. tf_transform_output: A TFTransformOutput. batch_size: representing the number of consecutive elements of returned dataset to combine in a single batch Returns: A dataset that contains (features, indices) tuple where features is a dictionary of Tensors, and indices is a single Tensor of label indices. """ return data_accessor.tf_dataset_factory( file_pattern, dataset_options.TensorFlowDatasetOptions( batch_size=batch_size, label_key=_transformed_name(_LABEL_KEY)), tf_transform_output.transformed_metadata.schema) The provided code snippet includes necessary dependencies for implementing the `trainer_fn` function. Write a Python function `def trainer_fn(trainer_fn_args, schema)` to solve the following problem: Build the estimator using the high level API. Args: trainer_fn_args: Holds args used to train the model as name/value pairs. schema: Holds the schema of the training examples. Returns: A dict of the following: - estimator: The estimator that will be used for training and eval. - train_spec: Spec for training. - eval_spec: Spec for eval. - eval_input_receiver_fn: Input function for eval. 
Here is the function: def trainer_fn(trainer_fn_args, schema): """Build the estimator using the high level API. Args: trainer_fn_args: Holds args used to train the model as name/value pairs. schema: Holds the schema of the training examples. Returns: A dict of the following: - estimator: The estimator that will be used for training and eval. - train_spec: Spec for training. - eval_spec: Spec for eval. - eval_input_receiver_fn: Input function for eval. """ # Number of nodes in the first layer of the DNN first_dnn_layer_size = 100 num_dnn_layers = 4 dnn_decay_factor = 0.7 train_batch_size = 40 eval_batch_size = 40 tf_transform_output = tft.TFTransformOutput(trainer_fn_args.transform_output) train_input_fn = lambda: _input_fn( # pylint: disable=g-long-lambda trainer_fn_args.train_files, trainer_fn_args.data_accessor, tf_transform_output, batch_size=train_batch_size) eval_input_fn = lambda: _input_fn( # pylint: disable=g-long-lambda trainer_fn_args.eval_files, trainer_fn_args.data_accessor, tf_transform_output, batch_size=eval_batch_size) train_spec = tf_estimator.TrainSpec( # pylint: disable=g-long-lambda train_input_fn, max_steps=trainer_fn_args.train_steps) serving_receiver_fn = lambda: _flat_input_serving_receiver_fn( # pylint: disable=g-long-lambda tf_transform_output, schema) exporter = tf_estimator.FinalExporter('chicago-taxi', serving_receiver_fn) eval_spec = tf_estimator.EvalSpec( eval_input_fn, steps=trainer_fn_args.eval_steps, exporters=[exporter], name='chicago-taxi-eval') run_config = tf_estimator.RunConfig( save_checkpoints_steps=999, keep_checkpoint_max=1) run_config = run_config.replace(model_dir=trainer_fn_args.serving_model_dir) estimator = _build_estimator( # Construct layers sizes with exponential decay hidden_units=[ max(2, int(first_dnn_layer_size * dnn_decay_factor**i)) for i in range(num_dnn_layers) ], config=run_config, warm_start_from=trainer_fn_args.base_model) # Create an input receiver for TFMA processing receiver_fn = lambda: 
_eval_input_receiver_fn( # pylint: disable=g-long-lambda tf_transform_output, schema) return { 'estimator': estimator, 'train_spec': train_spec, 'eval_spec': eval_spec, 'eval_input_receiver_fn': receiver_fn }
Build the estimator using the high level API. Args: trainer_fn_args: Holds args used to train the model as name/value pairs. schema: Holds the schema of the training examples. Returns: A dict of the following: - estimator: The estimator that will be used for training and eval. - train_spec: Spec for training. - eval_spec: Spec for eval. - eval_input_receiver_fn: Input function for eval.
166,134
import tensorflow as tf import tensorflow_transform as tft from tfx.components.trainer.fn_args_utils import FnArgs from tfx.examples.mnist import mnist_utils_native_keras_base as base The provided code snippet includes necessary dependencies for implementing the `preprocessing_fn` function. Write a Python function `def preprocessing_fn(inputs)` to solve the following problem: tf.transform's callback function for preprocessing inputs. Args: inputs: map from feature keys to raw not-yet-transformed features. Returns: Map from string feature key to transformed feature operations. Here is the function: def preprocessing_fn(inputs): """tf.transform's callback function for preprocessing inputs. Args: inputs: map from feature keys to raw not-yet-transformed features. Returns: Map from string feature key to transformed feature operations. """ return base.preprocessing_fn(inputs)
tf.transform's callback function for preprocessing inputs. Args: inputs: map from feature keys to raw not-yet-transformed features. Returns: Map from string feature key to transformed feature operations.
166,135
import tensorflow as tf import tensorflow_transform as tft from tfx.components.trainer.fn_args_utils import FnArgs from tfx.examples.mnist import mnist_utils_native_keras_base as base def _get_serve_tf_examples_fn(model, tf_transform_output): """Returns a function that parses a serialized tf.Example.""" model.tft_layer = tf_transform_output.transform_features_layer() def serve_tf_examples_fn(serialized_tf_examples): """Returns the output to be used in the serving signature.""" feature_spec = tf_transform_output.raw_feature_spec() feature_spec.pop(base.LABEL_KEY) parsed_features = tf.io.parse_example(serialized_tf_examples, feature_spec) transformed_features = model.tft_layer(parsed_features) return model(transformed_features) return serve_tf_examples_fn class FnArgs: """Args to pass to user defined training/tuning function(s). Attributes: working_dir: Working dir. train_files: A list of patterns for train files. eval_files: A list of patterns for eval files. train_steps: Number of train steps. eval_steps: Number of eval steps. schema_path: A single uri for schema file. Will be None if not specified. schema_file: Deprecated, use `schema_path` instead. transform_graph_path: An optional single uri for transform graph produced by TFT. Will be None if not specified. transform_output: Deprecated, use `transform_graph_path` instead. data_accessor: Contains factories that can create tf.data.Datasets or other means to access the train/eval data. They provide a uniform way of accessing data, regardless of how the data is stored on disk. serving_model_dir: A single uri for the output directory of the serving model. eval_model_dir: A single uri for the output directory of the eval model. Note that this is estimator only, Keras doesn't require it for TFMA. model_run_dir: A single uri for the output directory of model training related files. base_model: An optional base model path that will be used for this training. 
hyperparameters: An optional keras_tuner.HyperParameters config. custom_config: An optional dictionary passed to the component. """ working_dir = attr.ib(type=str, default=None) train_files = attr.ib(type=List[str], default=None) eval_files = attr.ib(type=List[str], default=None) train_steps = attr.ib(type=int, default=None) eval_steps = attr.ib(type=int, default=None) schema_path = attr.ib(type=str, default=None) schema_file = attr.ib(type=str, default=None) transform_graph_path = attr.ib(type=str, default=None) transform_output = attr.ib(type=str, default=None) data_accessor = attr.ib(type=DataAccessor, default=None) serving_model_dir = attr.ib(type=str, default=None) eval_model_dir = attr.ib(type=str, default=None) model_run_dir = attr.ib(type=str, default=None) base_model = attr.ib(type=str, default=None) hyperparameters = attr.ib(type=Dict[str, Any], default=None) custom_config = attr.ib(type=Dict[str, Any], default=None) The provided code snippet includes necessary dependencies for implementing the `run_fn` function. Write a Python function `def run_fn(fn_args: FnArgs)` to solve the following problem: Train the model based on given args. Args: fn_args: Holds args used to train the model as name/value pairs. Here is the function: def run_fn(fn_args: FnArgs): """Train the model based on given args. Args: fn_args: Holds args used to train the model as name/value pairs. 
""" tf_transform_output = tft.TFTransformOutput(fn_args.transform_output) train_dataset = base.input_fn(fn_args.train_files, fn_args.data_accessor, tf_transform_output, 40) eval_dataset = base.input_fn(fn_args.eval_files, fn_args.data_accessor, tf_transform_output, 40) mirrored_strategy = tf.distribute.MirroredStrategy() with mirrored_strategy.scope(): model = base.build_keras_model() # Write logs to path tensorboard_callback = tf.keras.callbacks.TensorBoard( log_dir=fn_args.model_run_dir, update_freq='epoch') model.fit( train_dataset, steps_per_epoch=fn_args.train_steps, validation_data=eval_dataset, validation_steps=fn_args.eval_steps, callbacks=[tensorboard_callback]) signatures = { 'serving_default': _get_serve_tf_examples_fn( model, tf_transform_output).get_concrete_function( tf.TensorSpec(shape=[None], dtype=tf.string, name='examples')) } model.save(fn_args.serving_model_dir, save_format='tf', signatures=signatures)
Train the model based on given args. Args: fn_args: Holds args used to train the model as name/value pairs.
166,136
import os import tensorflow as tf import tensorflow_transform as tft from tfx import v1 as tfx from tfx.components.trainer.rewriting import converters from tfx.components.trainer.rewriting import rewriter from tfx.components.trainer.rewriting import rewriter_factory from tfx.examples.mnist import mnist_utils_native_keras_base as base The provided code snippet includes necessary dependencies for implementing the `preprocessing_fn` function. Write a Python function `def preprocessing_fn(inputs)` to solve the following problem: tf.transform's callback function for preprocessing inputs. Args: inputs: map from feature keys to raw not-yet-transformed features. Returns: Map from string feature key to transformed feature operations. Here is the function: def preprocessing_fn(inputs): """tf.transform's callback function for preprocessing inputs. Args: inputs: map from feature keys to raw not-yet-transformed features. Returns: Map from string feature key to transformed feature operations. """ return base.preprocessing_fn(inputs)
tf.transform's callback function for preprocessing inputs. Args: inputs: map from feature keys to raw not-yet-transformed features. Returns: Map from string feature key to transformed feature operations.
166,137
import os import tensorflow as tf import tensorflow_transform as tft from tfx import v1 as tfx from tfx.components.trainer.rewriting import converters from tfx.components.trainer.rewriting import rewriter from tfx.components.trainer.rewriting import rewriter_factory from tfx.examples.mnist import mnist_utils_native_keras_base as base def _get_serve_tf_examples_fn(model, tf_transform_output): """Returns a function that feeds the input tensor into the model.""" model.tft_layer = tf_transform_output.transform_features_layer() def serve_tf_examples_fn(image_tensor): """Returns the output to be used in the serving signature.""" transformed_features = model.tft_layer({base.IMAGE_KEY: image_tensor}) return model(transformed_features) return serve_tf_examples_fn The provided code snippet includes necessary dependencies for implementing the `run_fn` function. Write a Python function `def run_fn(fn_args: tfx.components.FnArgs)` to solve the following problem: Train the model based on given args. Args: fn_args: Holds args used to train the model as name/value pairs. Here is the function: def run_fn(fn_args: tfx.components.FnArgs): """Train the model based on given args. Args: fn_args: Holds args used to train the model as name/value pairs. 
""" tf_transform_output = tft.TFTransformOutput(fn_args.transform_output) train_dataset = base.input_fn(fn_args.train_files, fn_args.data_accessor, tf_transform_output, 40) eval_dataset = base.input_fn(fn_args.eval_files, fn_args.data_accessor, tf_transform_output, 40) mirrored_strategy = tf.distribute.MirroredStrategy() with mirrored_strategy.scope(): model = base.build_keras_model() # Write logs to path tensorboard_callback = tf.keras.callbacks.TensorBoard( log_dir=fn_args.model_run_dir, update_freq='epoch') model.fit( train_dataset, steps_per_epoch=fn_args.train_steps, validation_data=eval_dataset, validation_steps=fn_args.eval_steps, callbacks=[tensorboard_callback]) signatures = { 'serving_default': _get_serve_tf_examples_fn( model, tf_transform_output).get_concrete_function( tf.TensorSpec( shape=[None, 784], dtype=tf.float32, name='image_floats')) } temp_saving_model_dir = os.path.join(fn_args.serving_model_dir, 'temp') model.save(temp_saving_model_dir, save_format='tf', signatures=signatures) tfrw = rewriter_factory.create_rewriter( rewriter_factory.TFLITE_REWRITER, name='tflite_rewriter') converters.rewrite_saved_model(temp_saving_model_dir, fn_args.serving_model_dir, tfrw, rewriter.ModelType.TFLITE_MODEL) tfx.dsl.io.fileio.rmtree(temp_saving_model_dir)
Train the model based on given args. Args: fn_args: Holds args used to train the model as name/value pairs.
166,138
from typing import List import absl import tensorflow as tf import tensorflow_transform as tft from tfx.components.trainer.fn_args_utils import DataAccessor from tfx_bsl.tfxio import dataset_options IMAGE_KEY = 'image_floats' LABEL_KEY = 'image_class' def transformed_name(key): return key + '_xf' The provided code snippet includes necessary dependencies for implementing the `preprocessing_fn` function. Write a Python function `def preprocessing_fn(inputs)` to solve the following problem: tf.transform's callback function for preprocessing inputs. Args: inputs: map from feature keys to raw not-yet-transformed features. Returns: Map from string feature key to transformed feature operations. Here is the function: def preprocessing_fn(inputs): """tf.transform's callback function for preprocessing inputs. Args: inputs: map from feature keys to raw not-yet-transformed features. Returns: Map from string feature key to transformed feature operations. """ outputs = {} # The input float values for the image encoding are in the range [-0.5, 0.5]. # So scale_by_min_max is a identity operation, since the range is preserved. outputs[transformed_name(IMAGE_KEY)] = ( tft.scale_by_min_max(inputs[IMAGE_KEY], -0.5, 0.5)) # TODO(b/157064428): Support label transformation for Keras. # Do not apply label transformation as it will result in wrong evaluation. outputs[transformed_name(LABEL_KEY)] = inputs[LABEL_KEY] return outputs
tf.transform's callback function for preprocessing inputs. Args: inputs: map from feature keys to raw not-yet-transformed features. Returns: Map from string feature key to transformed feature operations.
166,139
import os from typing import List import absl import tensorflow_model_analysis as tfma from tfx.components import Evaluator from tfx.components import ExampleValidator from tfx.components import ImportExampleGen from tfx.components import Pusher from tfx.components import SchemaGen from tfx.components import StatisticsGen from tfx.components import Trainer from tfx.components import Transform from tfx.orchestration import metadata from tfx.orchestration import pipeline from tfx.orchestration.beam.beam_dag_runner import BeamDagRunner from tfx.proto import pusher_pb2 from tfx.proto import trainer_pb2 The provided code snippet includes necessary dependencies for implementing the `_create_pipeline` function. Write a Python function `def _create_pipeline(pipeline_name: str, pipeline_root: str, data_root: str, module_file: str, module_file_lite: str, serving_model_dir: str, serving_model_dir_lite: str, metadata_path: str, beam_pipeline_args: List[str], accuracy_threshold: float = 0.8) -> pipeline.Pipeline` to solve the following problem: Implements the handwritten digit classification example using TFX. Here is the function: def _create_pipeline(pipeline_name: str, pipeline_root: str, data_root: str, module_file: str, module_file_lite: str, serving_model_dir: str, serving_model_dir_lite: str, metadata_path: str, beam_pipeline_args: List[str], accuracy_threshold: float = 0.8) -> pipeline.Pipeline: """Implements the handwritten digit classification example using TFX.""" # Brings data into the pipeline. example_gen = ImportExampleGen(input_base=data_root) # Computes statistics over data for visualization and example validation. statistics_gen = StatisticsGen(examples=example_gen.outputs['examples']) # Generates schema based on statistics files. schema_gen = SchemaGen( statistics=statistics_gen.outputs['statistics'], infer_feature_shape=True) # Performs anomaly detection based on statistics and data schema. 
example_validator = ExampleValidator( statistics=statistics_gen.outputs['statistics'], schema=schema_gen.outputs['schema']) # Performs transformations and feature engineering in training and serving. transform = Transform( examples=example_gen.outputs['examples'], schema=schema_gen.outputs['schema'], module_file=module_file) def _create_trainer(module_file, component_id): return Trainer( module_file=module_file, examples=transform.outputs['transformed_examples'], transform_graph=transform.outputs['transform_graph'], schema=schema_gen.outputs['schema'], train_args=trainer_pb2.TrainArgs(num_steps=5000), eval_args=trainer_pb2.EvalArgs(num_steps=100)).with_id(component_id) # Uses user-provided Python function that trains a Keras model. trainer = _create_trainer(module_file, 'Trainer.mnist') # Trains the same model as the one above, but converts it into a TFLite one. trainer_lite = _create_trainer(module_file_lite, 'Trainer.mnist_lite') # TODO(b/150949276): Add resolver back once it supports two trainers. # Uses TFMA to compute evaluation statistics over features of a model and # performs quality validation of a candidate model. eval_config = tfma.EvalConfig( model_specs=[tfma.ModelSpec(label_key='image_class')], slicing_specs=[tfma.SlicingSpec()], metrics_specs=[ tfma.MetricsSpec(metrics=[ tfma.MetricConfig( class_name='SparseCategoricalAccuracy', threshold=tfma.MetricThreshold( value_threshold=tfma.GenericValueThreshold( lower_bound={'value': accuracy_threshold}))) ]) ]) eval_config_lite = tfma.EvalConfig() eval_config_lite.CopyFrom(eval_config) # Informs the evaluator that the model is a TFLite model. eval_config_lite.model_specs[0].model_type = 'tf_lite' # Uses TFMA to compute the evaluation statistics over features of a model. evaluator = Evaluator( examples=example_gen.outputs['examples'], model=trainer.outputs['model'], eval_config=eval_config).with_id('Evaluator.mnist') # Uses TFMA to compute the evaluation statistics over features of a TFLite # model. 
evaluator_lite = Evaluator( examples=example_gen.outputs['examples'], model=trainer_lite.outputs['model'], eval_config=eval_config_lite).with_id('Evaluator.mnist_lite') # Checks whether the model passed the validation steps and pushes the model # to a file destination if check passed. pusher = Pusher( model=trainer.outputs['model'], model_blessing=evaluator.outputs['blessing'], push_destination=pusher_pb2.PushDestination( filesystem=pusher_pb2.PushDestination.Filesystem( base_directory=serving_model_dir))).with_id('Pusher.mnist') # Checks whether the TFLite model passed the validation steps and pushes the # model to a file destination if check passed. pusher_lite = Pusher( model=trainer_lite.outputs['model'], model_blessing=evaluator_lite.outputs['blessing'], push_destination=pusher_pb2.PushDestination( filesystem=pusher_pb2.PushDestination.Filesystem( base_directory=serving_model_dir_lite))).with_id( 'Pusher.mnist_lite') return pipeline.Pipeline( pipeline_name=pipeline_name, pipeline_root=pipeline_root, components=[ example_gen, statistics_gen, schema_gen, example_validator, transform, trainer, trainer_lite, evaluator, evaluator_lite, pusher, pusher_lite, ], enable_cache=True, metadata_connection_config=metadata.sqlite_metadata_connection_config( metadata_path), beam_pipeline_args=beam_pipeline_args)
Implements the handwritten digit classification example using TFX.
166,140
import itertools from typing import Dict, List, Optional, Union from struct2tensor import calculate from struct2tensor import calculate_options from struct2tensor import path from struct2tensor import prensor_util from struct2tensor.expression_impl import proto as proto_expr import tensorflow as tf from tfx_bsl.public import tfxio from tensorflow_serving.apis import input_pb2 _DEFAULT_VALUE_SUFFIX = '_dv' _TYPE_LIST_MAP = { tf.float32: 'float_list', tf.int64: 'int64_list', tf.string: 'bytes_list', } class Feature: """Parsing config for a feature in ELWCs.""" def __init__(self, name: str, dtype: tf.DType, default_value: Optional[Union[int, float, str]] = None, length: Optional[int] = None): """Initializer. Args: name: Name of the feature. dtype: Dtype of the feature. One of tf.string, tf.int64 or tf.float32. Note that if the actual ELWC does not contain the corresponding oneof (for example, if dtype is tf.string, but ELWC contains float_values), the feature will be parsed as an empty list (also see `default_value` below). default_value: Default value of the feature. If specified, must also specify `length`. For instances that do not have the specified length, it will be padded with the default value. length: The length of the feature. If specified, must also specify `default_value`. 
""" self.name = name self.dtype = dtype self.default_value = default_value self.length = length assert dtype in _TYPE_LIST_MAP, ( 'Feature %s must have a dtype of tf.string, tf.int64 or tf.float32' % name) assert ((default_value is None and length is None) or (default_value is not None and length is not None)), ( 'Feature %s: default_value and length must both be specified ' 'or not specified' % name) if default_value is not None: assert ((dtype == tf.string and isinstance(default_value, bytes)) or (dtype == tf.int64 and isinstance(default_value, int)) or (dtype == tf.float32 and isinstance(default_value, float))), ( 'Feature %s: type of default_value (%s) must match ' 'dtype' % (name, type(default_value))) The provided code snippet includes necessary dependencies for implementing the `parse_elwc_with_struct2tensor` function. Write a Python function `def parse_elwc_with_struct2tensor( records: tf.Tensor, context_features: List[Feature], example_features: List[Feature], size_feature_name: Optional[str] = None) -> Dict[str, tf.RaggedTensor]` to solve the following problem: Parses a batch of ELWC records into RaggedTensors using struct2tensor. Args: records: A dictionary with a single item. The value of this single item is the serialized ELWC input. context_features: List of context-level features. example_features: List of example-level features. size_feature_name: A string, the name of a feature for example list sizes. If None, which is default, this feature is not generated. Otherwise the feature is added to the feature dict. Returns: A dict that maps feature name to RaggedTensors. Here is the function: def parse_elwc_with_struct2tensor( records: tf.Tensor, context_features: List[Feature], example_features: List[Feature], size_feature_name: Optional[str] = None) -> Dict[str, tf.RaggedTensor]: """Parses a batch of ELWC records into RaggedTensors using struct2tensor. Args: records: A dictionary with a single item. 
The value of this single item is the serialized ELWC input. context_features: List of context-level features. example_features: List of example-level features. size_feature_name: A string, the name of a feature for example list sizes. If None, which is default, this feature is not generated. Otherwise the feature is added to the feature dict. Returns: A dict that maps feature name to RaggedTensors. """ def get_step_name(feature_name: str): """Gets the name of the step (a component in a prensor Path) for a feature. A prensor step cannot contain dots ("."), but a feature name can. Args: feature_name: name of the feature Returns: a valid step name. """ return feature_name.replace('.', '_dot_') def get_default_filled_step_name(feature_name: str): return get_step_name(feature_name) + _DEFAULT_VALUE_SUFFIX def get_context_feature_path(feature: Feature): list_name = _TYPE_LIST_MAP.get(feature.dtype) return path.Path(['context', 'features', 'feature[{}]'.format(feature.name), list_name, 'value']) def get_example_feature_path(feature: Feature): list_name = _TYPE_LIST_MAP.get(feature.dtype) return path.Path(['examples', 'features', 'feature[{}]'.format(feature.name), list_name, 'value']) def get_promote_and_project_maps(features: List[Feature], is_context: bool): promote_map = {} project_map = {} if is_context: get_feature_path = get_context_feature_path get_promote_destination = lambda leaf_name: path.Path([leaf_name]) else: get_feature_path = get_example_feature_path get_promote_destination = lambda leaf_name: path.Path( # pylint: disable=g-long-lambda ['examples', leaf_name]) for feature in features: promote_map[get_step_name(feature.name)] = get_feature_path(feature) leaf_name = (get_step_name(feature.name) if feature.default_value is None else get_default_filled_step_name(feature.name)) project_map[feature.name] = get_promote_destination(leaf_name) return promote_map, project_map def get_pad_2d_ragged_fn(feature: Feature): def pad_2d_ragged(rt): dense = 
rt.to_tensor(shape=[None, feature.length], default_value=feature.default_value) flattened = tf.reshape(dense, [-1]) return tf.RaggedTensor.from_uniform_row_length( flattened, feature.length, validate=False) return pad_2d_ragged context_promote_map, context_keys_to_promoted_paths = ( get_promote_and_project_maps(context_features, is_context=True)) examples_promote_map, examples_keys_to_promoted_paths = ( get_promote_and_project_maps(example_features, is_context=False)) # Build the struct2tensor query. s2t_expr = ( proto_expr.create_expression_from_proto( records, input_pb2.ExampleListWithContext.DESCRIPTOR) .promote_and_broadcast(context_promote_map, path.Path([])) .promote_and_broadcast(examples_promote_map, path.Path(['examples']))) # Pad features that have default_values specified. for features, parent_path in [(context_features, path.Path([])), (example_features, path.Path(['examples']))]: for feature in features: if feature.default_value is not None: s2t_expr = s2t_expr.map_ragged_tensors( parent_path=parent_path, source_fields=[get_step_name(feature.name)], operator=get_pad_2d_ragged_fn(feature), is_repeated=True, dtype=feature.dtype, new_field_name=get_default_filled_step_name(feature.name)) to_project = list(itertools.chain( context_keys_to_promoted_paths.values(), examples_keys_to_promoted_paths.values())) if size_feature_name is not None: s2t_expr = s2t_expr.create_size_field( path.Path(['examples']), get_step_name(size_feature_name)) to_project.append(path.Path([get_step_name(size_feature_name)])) projection = s2t_expr.project(to_project) options = calculate_options.get_options_with_minimal_checks() prensor_result = calculate.calculate_prensors( [projection], options)[0] # a map from path.Path to RaggedTensors. 
projected_with_paths = prensor_util.get_ragged_tensors( prensor_result, options) context_dict = { f: projected_with_paths[context_keys_to_promoted_paths[f]] for f in context_keys_to_promoted_paths } examples_dict = { f: projected_with_paths[examples_keys_to_promoted_paths[f]] for f in examples_keys_to_promoted_paths } result = {} result.update(context_dict) result.update(examples_dict) if size_feature_name is not None: result[size_feature_name] = projected_with_paths[ path.Path([get_step_name(size_feature_name)])] return result
Parses a batch of ELWC records into RaggedTensors using struct2tensor. Args: records: A dictionary with a single item. The value of this single item is the serialized ELWC input. context_features: List of context-level features. example_features: List of example-level features. size_feature_name: A string, the name of a feature for example list sizes. If None, which is default, this feature is not generated. Otherwise the feature is added to the feature dict. Returns: A dict that maps feature name to RaggedTensors.
166,141
import tensorflow as tf import tensorflow_ranking as tfr import tensorflow_transform as tft from tfx.examples.ranking import features from tfx.examples.ranking import struct2tensor_parsing_utils from tfx_bsl.public import tfxio The provided code snippet includes necessary dependencies for implementing the `make_decoder` function. Write a Python function `def make_decoder()` to solve the following problem: Creates a data decoder that decodes ELWC records to tensors. A DataView (see "TfGraphDataViewProvider" component in the pipeline) will refer to this decoder. And any components that consume the data with the DataView applied will use this decoder. Returns: An ELWC decoder. Here is the function: def make_decoder(): """Creates a data decoder that decodes ELWC records to tensors. A DataView (see "TfGraphDataViewProvider" component in the pipeline) will refer to this decoder. And any components that consume the data with the DataView applied will use this decoder. Returns: An ELWC decoder. """ context_features, example_features, label_feature = features.get_features() return struct2tensor_parsing_utils.ELWCDecoder( name='ELWCDecoder', context_features=context_features, example_features=example_features, size_feature_name=features.LIST_SIZE_FEATURE_NAME, label_feature=label_feature)
Creates a data decoder that decodes ELWC records to tensors. A DataView (see "TfGraphDataViewProvider" component in the pipeline) will refer to this decoder. And any components that consume the data with the DataView applied will use this decoder. Returns: An ELWC decoder.
166,142
import tensorflow as tf import tensorflow_ranking as tfr import tensorflow_transform as tft from tfx.examples.ranking import features from tfx.examples.ranking import struct2tensor_parsing_utils from tfx_bsl.public import tfxio The provided code snippet includes necessary dependencies for implementing the `preprocessing_fn` function. Write a Python function `def preprocessing_fn(inputs)` to solve the following problem: Transform preprocessing_fn. Here is the function: def preprocessing_fn(inputs): """Transform preprocessing_fn.""" # generate a shared vocabulary. _ = tft.vocabulary( tf.concat([ inputs[features.QUERY_TOKENS].flat_values, inputs[features.DOCUMENT_TOKENS].flat_values ], axis=0), vocab_filename='shared_vocab') return inputs
Transform preprocessing_fn.
166,143
import tensorflow as tf import tensorflow_ranking as tfr import tensorflow_transform as tft from tfx.examples.ranking import features from tfx.examples.ranking import struct2tensor_parsing_utils from tfx_bsl.public import tfxio def _input_fn(file_patterns, data_accessor, batch_size) -> tf.data.Dataset: """Returns a dataset of decoded tensors.""" def prepare_label(parsed_ragged_tensors): label = parsed_ragged_tensors.pop(features.LABEL) # Convert labels to a dense tensor. label = label.to_tensor(default_value=features.LABEL_PADDING_VALUE) return parsed_ragged_tensors, label # NOTE: this dataset already contains RaggedTensors from the Decoder. dataset = data_accessor.tf_dataset_factory( file_patterns, tfxio.TensorFlowDatasetOptions(batch_size=batch_size), schema=None) return dataset.map(prepare_label).repeat() def _create_ranking_model(tf_transform_output, hparams) -> tf.keras.Model: """Creates a Keras ranking model.""" context_feature_specs, example_feature_specs, _ = features.get_features() context_keras_inputs, example_keras_inputs = ( struct2tensor_parsing_utils.create_keras_inputs( context_feature_specs, example_feature_specs, features.LIST_SIZE_FEATURE_NAME)) context_features, example_features, mask = _preprocess_keras_inputs( context_keras_inputs, example_keras_inputs, tf_transform_output, hparams) # Since argspec inspection is expensive, for keras layer, # layer_obj._call_spec.arg_names is a property that uses cached argspec for # call. We use this to determine whether the layer expects `inputs` as first # argument. # TODO(b/185176464): update tfr dependency to remove this branch. flatten_list = tfr.keras.layers.FlattenList() # TODO(kathywu): remove the except branch once changes to the call function # args in the Keras Layer have been released. 
try: first_arg_name = flatten_list._call_spec.arg_names[0] # pylint: disable=protected-access except AttributeError: first_arg_name = flatten_list._call_fn_args[0] # pylint: disable=protected-access if first_arg_name == 'inputs': (flattened_context_features, flattened_example_features) = flatten_list( inputs=(context_features, example_features, mask)) else: (flattened_context_features, flattened_example_features) = flatten_list(context_features, example_features, mask) # Concatenate flattened context and example features along `list_size` dim. context_input = [ tf.keras.layers.Flatten()(flattened_context_features[name]) for name in sorted(flattened_context_features) ] example_input = [ tf.keras.layers.Flatten()(flattened_example_features[name]) for name in sorted(flattened_example_features) ] input_layer = tf.concat(context_input + example_input, 1) dnn = tf.keras.Sequential() if hparams['use_batch_norm']: dnn.add( tf.keras.layers.BatchNormalization( momentum=hparams['batch_norm_moment'])) for layer_size in hparams['hidden_layer_dims']: dnn.add(tf.keras.layers.Dense(units=layer_size)) if hparams['use_batch_norm']: dnn.add(tf.keras.layers.BatchNormalization( momentum=hparams['batch_norm_moment'])) dnn.add(tf.keras.layers.Activation(activation=tf.nn.relu)) dnn.add(tf.keras.layers.Dropout(rate=hparams['dropout_rate'])) dnn.add(tf.keras.layers.Dense(units=1)) # Since argspec inspection is expensive, for keras layer, # layer_obj._call_spec.arg_names is a property that uses cached argspec for # call. We use this to determine whether the layer expects `inputs` as first # argument. restore_list = tfr.keras.layers.RestoreList() # TODO(kathywu): remove the except branch once changes to the call function # args in the Keras Layer have been released. 
try: first_arg_name = flatten_list._call_spec.arg_names[0] # pylint: disable=protected-access except AttributeError: first_arg_name = flatten_list._call_fn_args[0] # pylint: disable=protected-access if first_arg_name == 'inputs': logits = restore_list(inputs=(dnn(input_layer), mask)) else: logits = restore_list(dnn(input_layer), mask) model = tf.keras.Model( inputs={ **context_keras_inputs, **example_keras_inputs }, outputs=logits, name='dnn_ranking_model') model.compile( optimizer=tf.keras.optimizers.Adagrad( learning_rate=hparams['learning_rate']), loss=tfr.keras.losses.get(hparams['loss']), metrics=tfr.keras.metrics.default_keras_metrics()) return model The provided code snippet includes necessary dependencies for implementing the `run_fn` function. Write a Python function `def run_fn(trainer_fn_args)` to solve the following problem: TFX trainer entry point. Here is the function: def run_fn(trainer_fn_args): """TFX trainer entry point.""" tf_transform_output = tft.TFTransformOutput(trainer_fn_args.transform_output) hparams = dict( batch_size=32, embedding_dimension=20, learning_rate=0.05, dropout_rate=0.8, hidden_layer_dims=[64, 32, 16], loss='approx_ndcg_loss', use_batch_norm=True, batch_norm_moment=0.99 ) train_dataset = _input_fn(trainer_fn_args.train_files, trainer_fn_args.data_accessor, hparams['batch_size']) eval_dataset = _input_fn(trainer_fn_args.eval_files, trainer_fn_args.data_accessor, hparams['batch_size']) model = _create_ranking_model(tf_transform_output, hparams) model.summary() log_dir = trainer_fn_args.model_run_dir # Write logs to path tensorboard_callback = tf.keras.callbacks.TensorBoard( log_dir=log_dir, update_freq='epoch') model.fit( train_dataset, steps_per_epoch=trainer_fn_args.train_steps, validation_data=eval_dataset, validation_steps=trainer_fn_args.eval_steps, callbacks=[tensorboard_callback]) # TODO(zhuo): Add support for Regress signature. 
@tf.function(input_signature=[tf.TensorSpec([None], tf.string)], autograph=False) def predict_serving_fn(serialized_elwc_records): decode_fn = trainer_fn_args.data_accessor.data_view_decode_fn decoded = decode_fn(serialized_elwc_records) decoded.pop(features.LABEL) return {tf.saved_model.PREDICT_OUTPUTS: model(decoded)} model.save( trainer_fn_args.serving_model_dir, save_format='tf', signatures={ 'serving_default': predict_serving_fn.get_concrete_function(), })
TFX trainer entry point.
166,144
import os from typing import List import absl import tensorflow_model_analysis as tfma from tfx.components import Evaluator from tfx.components import ImportExampleGen from tfx.components import Pusher from tfx.components import SchemaGen from tfx.components import StatisticsGen from tfx.components import Trainer from tfx.components import Transform from tfx.components.experimental.data_view import binder_component from tfx.components.experimental.data_view import provider_component from tfx.orchestration import metadata from tfx.orchestration import pipeline from tfx.orchestration.beam.beam_dag_runner import BeamDagRunner from tfx.proto import example_gen_pb2 from tfx.proto import pusher_pb2 from tfx.proto import trainer_pb2 from tfx.proto import transform_pb2 The provided code snippet includes necessary dependencies for implementing the `_create_pipeline` function. Write a Python function `def _create_pipeline(pipeline_name: str, pipeline_root: str, data_root: str, module_file: str, serving_model_dir: str, metadata_path: str, beam_pipeline_args: List[str])` to solve the following problem: Creates pipeline. 
Here is the function: def _create_pipeline(pipeline_name: str, pipeline_root: str, data_root: str, module_file: str, serving_model_dir: str, metadata_path: str, beam_pipeline_args: List[str]): """Creates pipeline.""" pipeline_root = os.path.join(pipeline_root, 'pipelines', pipeline_name) example_gen = ImportExampleGen( input_base=data_root, # IMPORTANT: must set FORMAT_PROTO payload_format=example_gen_pb2.FORMAT_PROTO) data_view_provider = provider_component.TfGraphDataViewProvider( module_file=module_file, create_decoder_func='make_decoder') data_view_binder = binder_component.DataViewBinder( example_gen.outputs['examples'], data_view_provider.outputs['data_view']) statistics_gen = StatisticsGen( examples=data_view_binder.outputs['output_examples']) schema_gen = SchemaGen(statistics=statistics_gen.outputs['statistics']) transform = Transform( examples=data_view_binder.outputs['output_examples'], schema=schema_gen.outputs['schema'], module_file=module_file, # important: must disable Transform materialization and ensure the # transform field of the splits config is empty. splits_config=transform_pb2.SplitsConfig(analyze=['train']), materialize=False) trainer = Trainer( examples=data_view_binder.outputs['output_examples'], transform_graph=transform.outputs['transform_graph'], module_file=module_file, train_args=trainer_pb2.TrainArgs(num_steps=1000), schema=schema_gen.outputs['schema'], eval_args=trainer_pb2.EvalArgs(num_steps=10)) eval_config = tfma.EvalConfig( model_specs=[ tfma.ModelSpec( signature_name='', label_key='relevance', padding_options=tfma.PaddingOptions( label_float_padding=-1.0, prediction_float_padding=-1.0)) ], slicing_specs=[ tfma.SlicingSpec(), tfma.SlicingSpec(feature_keys=['query_tokens']), ], metrics_specs=[ tfma.MetricsSpec( per_slice_thresholds={ 'metric/ndcg_10': tfma.PerSliceMetricThresholds(thresholds=[ tfma.PerSliceMetricThreshold( # The overall slice. 
slicing_specs=[tfma.SlicingSpec()], threshold=tfma.MetricThreshold( value_threshold=tfma.GenericValueThreshold( lower_bound={'value': 0.6}))) ]) }) ]) evaluator = Evaluator( examples=data_view_binder.outputs['output_examples'], model=trainer.outputs['model'], eval_config=eval_config, schema=schema_gen.outputs['schema']) # Checks whether the model passed the validation steps and pushes the model # to a file destination if check passed. pusher = Pusher( model=trainer.outputs['model'], model_blessing=evaluator.outputs['blessing'], push_destination=pusher_pb2.PushDestination( filesystem=pusher_pb2.PushDestination.Filesystem( base_directory=serving_model_dir))) return pipeline.Pipeline( pipeline_name=pipeline_name, pipeline_root=pipeline_root, components=[ example_gen, data_view_provider, data_view_binder, statistics_gen, schema_gen, transform, trainer, evaluator, pusher, ], enable_cache=True, metadata_connection_config=metadata.sqlite_metadata_connection_config( metadata_path), beam_pipeline_args=beam_pipeline_args)
Creates pipeline.
166,145
import os from typing import List import absl import tensorflow_model_analysis as tfma from tfx.components import CsvExampleGen from tfx.components import Evaluator from tfx.components import ExampleValidator from tfx.components import Pusher from tfx.components import SchemaGen from tfx.components import StatisticsGen from tfx.components import Trainer from tfx.components import Transform from tfx.dsl.components.common import resolver from tfx.dsl.experimental import latest_blessed_model_resolver from tfx.orchestration import metadata from tfx.orchestration import pipeline from tfx.orchestration.beam.beam_dag_runner import BeamDagRunner from tfx.proto import example_gen_pb2 from tfx.proto import pusher_pb2 from tfx.proto import trainer_pb2 from tfx.types import Channel from tfx.types.standard_artifacts import Model from tfx.types.standard_artifacts import ModelBlessing class Model(_TfxArtifact): """Artifact that contains the actual persisted model. Training components stores the trained model like a saved model in this artifact. A `Model` artifact contains serialization of the trained model in one or more formats, each suitable for different usage (e.g. serving, evaluation), and serving environments. * File structure: - `{uri}/` - `Format-Serving/`: Model exported for serving. - `saved_model.pb` - Other actual model files. - `Format-TFMA/`: Model exported for evaluation. - `saved_model.pb` - Other actual model files. * Commonly used custom properties of the Model artifact: """ TYPE_NAME = 'Model' TYPE_ANNOTATION = SystemModel class ModelBlessing(_TfxArtifact): """Artifact that contains the evaluation of a trained model. This artifact is usually used with Conditional when determining whether to push this model on service or not. ```python # Run pusher if evaluator has blessed the model. with tfx.dsl.Cond(evaluator.outputs['blessing'].future() [0].custom_property('blessed') == 1): pusher = Pusher(...) 
``` * File structure: - `{uri}/` - `BLESSED`: if the evaluator has blessed the model. - `NOT_BLESSED`: if the evaluator has not blessed the model. - See tfx/components/evaluator/executor.py for how to write ModelBlessing. * Commonly used custom properties of the ModelBlessing artifact: - `blessed`: int value that represents whether the evaluator has blessed its model or not. """ TYPE_NAME = 'ModelBlessing' The provided code snippet includes necessary dependencies for implementing the `_create_pipeline` function. Write a Python function `def _create_pipeline(pipeline_name: str, pipeline_root: str, data_root: str, module_file: str, serving_model_dir: str, metadata_path: str, beam_pipeline_args: List[str]) -> pipeline.Pipeline` to solve the following problem: Implements the Bert classication on Cola dataset pipline with TFX. Here is the function: def _create_pipeline(pipeline_name: str, pipeline_root: str, data_root: str, module_file: str, serving_model_dir: str, metadata_path: str, beam_pipeline_args: List[str]) -> pipeline.Pipeline: """Implements the Bert classication on Cola dataset pipline with TFX.""" input_config = example_gen_pb2.Input(splits=[ example_gen_pb2.Input.Split(name='train', pattern='train/*'), example_gen_pb2.Input.Split(name='eval', pattern='validation/*') ]) # Brings data into the pipline example_gen = CsvExampleGen(input_base=data_root, input_config=input_config) # Computes statistics over data for visualization and example validation. statistics_gen = StatisticsGen(examples=example_gen.outputs['examples']) # Generates schema based on statistics files. schema_gen = SchemaGen( statistics=statistics_gen.outputs['statistics'], infer_feature_shape=True) # Performs anomaly detection based on statistics and data schema. example_validator = ExampleValidator( statistics=statistics_gen.outputs['statistics'], schema=schema_gen.outputs['schema']) # Performs transformations and feature engineering in training and serving. 
transform = Transform( examples=example_gen.outputs['examples'], schema=schema_gen.outputs['schema'], module_file=module_file) # Uses user-provided Python function that trains a model. trainer = Trainer( module_file=module_file, examples=transform.outputs['transformed_examples'], transform_graph=transform.outputs['transform_graph'], schema=schema_gen.outputs['schema'], # Adjust these steps when training on the full dataset. train_args=trainer_pb2.TrainArgs(num_steps=2), eval_args=trainer_pb2.EvalArgs(num_steps=1)) # Get the latest blessed model for model validation. model_resolver = resolver.Resolver( strategy_class=latest_blessed_model_resolver.LatestBlessedModelResolver, model=Channel(type=Model), model_blessing=Channel( type=ModelBlessing)).with_id('latest_blessed_model_resolver') # Uses TFMA to compute evaluation statistics over features of a model and # perform quality validation of a candidate model (compared to a baseline). eval_config = tfma.EvalConfig( model_specs=[tfma.ModelSpec(label_key='label')], slicing_specs=[tfma.SlicingSpec()], metrics_specs=[ tfma.MetricsSpec(metrics=[ tfma.MetricConfig( class_name='SparseCategoricalAccuracy', threshold=tfma.MetricThreshold( value_threshold=tfma.GenericValueThreshold( # Adjust the threshold when training on the # full dataset. # TODO(b/236089934): Change back to 0.5. lower_bound={'value': 0.1}), # Change threshold will be ignored if there is no # baseline model resolved from MLMD (first run). change_threshold=tfma.GenericChangeThreshold( direction=tfma.MetricDirection.HIGHER_IS_BETTER, absolute={'value': -1e-2}))) ]) ]) evaluator = Evaluator( examples=example_gen.outputs['examples'], model=trainer.outputs['model'], baseline_model=model_resolver.outputs['model'], eval_config=eval_config) # Checks whether the model passed the validation steps and pushes the model # to a file destination if check passed. 
pusher = Pusher( model=trainer.outputs['model'], model_blessing=evaluator.outputs['blessing'], push_destination=pusher_pb2.PushDestination( filesystem=pusher_pb2.PushDestination.Filesystem( base_directory=serving_model_dir))) components = [ example_gen, statistics_gen, schema_gen, example_validator, transform, trainer, model_resolver, evaluator, pusher, ] return pipeline.Pipeline( pipeline_name=pipeline_name, pipeline_root=pipeline_root, components=components, metadata_connection_config=metadata.sqlite_metadata_connection_config( metadata_path), enable_cache=True, beam_pipeline_args=beam_pipeline_args, )
Implements the Bert classication on Cola dataset pipline with TFX.
166,146
from typing import List import tensorflow as tf import tensorflow_data_validation as tfdv import tensorflow_hub as hub import tensorflow_transform as tft from tfx import v1 as tfx from tfx.components.transform import stats_options_util from tfx.examples.bert.utils.bert_models import build_and_compile_bert_classifier from tfx.examples.bert.utils.bert_tokenizer_utils import BertPreprocessor from tfx_bsl.public import tfxio from google.protobuf import text_format _INPUT_WORD_IDS = 'input_word_ids' _INPUT_MASK = 'input_mask' _SEGMENT_IDS = 'segment_ids' _FEATURE_KEY = 'sentence' _LABEL_KEY = 'label' def _tokenize(feature): """Tokenize the two sentences and insert appropriate tokens.""" processor = BertPreprocessor(_BERT_LINK) vocab = processor.get_vocab_name() # Annotate asset provides the mapping between the name (_BERT_VOCAB) and the # path within the StatsOptions object passed to TFDV ( # https://github.com/tensorflow/data-validation/blob/master/tensorflow_data_validation/statistics/stats_options.py). # This vocab can then be used to compute NLP statistics (see the description # of the stats_options_updater_fn below_. tft.annotate_asset(_BERT_VOCAB, vocab.decode()) return processor.tokenize_single_sentence_pad( tf.reshape(feature, [-1]), max_len=_MAX_LEN) The provided code snippet includes necessary dependencies for implementing the `preprocessing_fn` function. Write a Python function `def preprocessing_fn(inputs)` to solve the following problem: tf.transform's callback function for preprocessing inputs. Args: inputs: map from feature keys to raw not-yet-transformed features. Returns: Map from string feature key to transformed feature Tensors. Here is the function: def preprocessing_fn(inputs): """tf.transform's callback function for preprocessing inputs. Args: inputs: map from feature keys to raw not-yet-transformed features. Returns: Map from string feature key to transformed feature Tensors. 
""" input_word_ids, input_mask, segment_ids = _tokenize(inputs[_FEATURE_KEY]) return { _LABEL_KEY: inputs[_LABEL_KEY], _INPUT_WORD_IDS: input_word_ids, _INPUT_MASK: input_mask, _SEGMENT_IDS: segment_ids }
tf.transform's callback function for preprocessing inputs. Args: inputs: map from feature keys to raw not-yet-transformed features. Returns: Map from string feature key to transformed feature Tensors.
166,147
from typing import List import tensorflow as tf import tensorflow_data_validation as tfdv import tensorflow_hub as hub import tensorflow_transform as tft from tfx import v1 as tfx from tfx.components.transform import stats_options_util from tfx.examples.bert.utils.bert_models import build_and_compile_bert_classifier from tfx.examples.bert.utils.bert_tokenizer_utils import BertPreprocessor from tfx_bsl.public import tfxio from google.protobuf import text_format _BERT_VOCAB = 'bert_vocab' _INPUT_WORD_IDS = 'input_word_ids' _MAX_LEN = 256 The provided code snippet includes necessary dependencies for implementing the `stats_options_updater_fn` function. Write a Python function `def stats_options_updater_fn( stats_type: stats_options_util.StatsType, stats_options: tfdv.StatsOptions) -> tfdv.StatsOptions` to solve the following problem: Update transform stats. This function is called by the Transform component before it computes pre-transform or post-transform statistics. It takes as input a stats_type, which indicates whether this call is intended for pre-transform or post-transform statistics. It also takes as argument the StatsOptions that are to be (optionally) modified before being passed onto TDFV. Args: stats_type: The type of statistics that are to be computed (pre-transform or post-transform). stats_options: The configuration to pass to TFDV for computing the desired statistics. Returns: An updated StatsOptions object. Here is the function: def stats_options_updater_fn( stats_type: stats_options_util.StatsType, stats_options: tfdv.StatsOptions) -> tfdv.StatsOptions: """Update transform stats. This function is called by the Transform component before it computes pre-transform or post-transform statistics. It takes as input a stats_type, which indicates whether this call is intended for pre-transform or post-transform statistics. It also takes as argument the StatsOptions that are to be (optionally) modified before being passed onto TDFV. 
Args: stats_type: The type of statistics that are to be computed (pre-transform or post-transform). stats_options: The configuration to pass to TFDV for computing the desired statistics. Returns: An updated StatsOptions object. """ if stats_type == stats_options_util.StatsType.POST_TRANSFORM: for f in stats_options.schema.feature: if f.name == _INPUT_WORD_IDS: # Here we extend the schema for the input_word_ids feature to enable # NLP statistics to be computed. We pass the vocabulary (_BERT_VOCAB) # that was used in tokenizing this feature, key tokens of interest # (e.g. "[CLS]", "[PAD]", "[SEP]", "[UNK]") and key thresholds to # validate. For more information on the field descriptions, see here: # https://github.com/tensorflow/metadata/blob/master/tensorflow_metadata/proto/v0/schema.proto text_format.Parse( """ vocabulary: "{vocab}" coverage: {{ min_coverage: 1.0 min_avg_token_length: 3.0 excluded_string_tokens: ["[CLS]", "[PAD]", "[SEP]"] oov_string_tokens: ["[UNK]"] }} token_constraints {{ string_value: "[CLS]" min_per_sequence: 1 max_per_sequence: 1 min_fraction_of_sequences: 1 max_fraction_of_sequences: 1 }} token_constraints {{ string_value: "[PAD]" min_per_sequence: 0 max_per_sequence: {max_pad_per_seq} min_fraction_of_sequences: 0 max_fraction_of_sequences: 1 }} token_constraints {{ string_value: "[SEP]" min_per_sequence: 1 max_per_sequence: 1 min_fraction_of_sequences: 1 max_fraction_of_sequences: 1 }} token_constraints {{ string_value: "[UNK]" min_per_sequence: 0 max_per_sequence: {max_unk_per_seq} min_fraction_of_sequences: 0 max_fraction_of_sequences: 1 }} sequence_length_constraints {{ excluded_string_value: ["[PAD]"] min_sequence_length: 3 max_sequence_length: {max_seq_len} }} """.format( vocab=_BERT_VOCAB, max_pad_per_seq=_MAX_LEN - 3, # [CLS], [SEP], Token max_unk_per_seq=_MAX_LEN - 2, # [CLS], [SEP] max_seq_len=_MAX_LEN), f.natural_language_domain) return stats_options
Update transform stats. This function is called by the Transform component before it computes pre-transform or post-transform statistics. It takes as input a stats_type, which indicates whether this call is intended for pre-transform or post-transform statistics. It also takes as argument the StatsOptions that are to be (optionally) modified before being passed onto TDFV. Args: stats_type: The type of statistics that are to be computed (pre-transform or post-transform). stats_options: The configuration to pass to TFDV for computing the desired statistics. Returns: An updated StatsOptions object.
166,148
from typing import List import tensorflow as tf import tensorflow_data_validation as tfdv import tensorflow_hub as hub import tensorflow_transform as tft from tfx import v1 as tfx from tfx.components.transform import stats_options_util from tfx.examples.bert.utils.bert_models import build_and_compile_bert_classifier from tfx.examples.bert.utils.bert_tokenizer_utils import BertPreprocessor from tfx_bsl.public import tfxio from google.protobuf import text_format _TRAIN_BATCH_SIZE = 16 _EVAL_BATCH_SIZE = 16 _BERT_LINK = 'https://tfhub.dev/tensorflow/bert_en_cased_L-12_H-768_A-12/2' _MAX_LEN = 256 _EPOCHS = 1 def _input_fn(file_pattern: List[str], data_accessor: tfx.components.DataAccessor, tf_transform_output: tft.TFTransformOutput, batch_size: int = 200) -> tf.data.Dataset: """Generates features and label for tuning/training. Args: file_pattern: List of paths or patterns of materialized transformed input tfrecord files. data_accessor: DataAccessor for converting input to RecordBatch. tf_transform_output: A TFTransformOutput. batch_size: representing the number of consecutive elements of returned dataset to combine in a single batch Returns: A dataset that contains (features, indices) tuple where features is a dictionary of Tensors, and indices is a single Tensor of label indices. 
""" dataset = data_accessor.tf_dataset_factory( file_pattern, tfxio.TensorFlowDatasetOptions( batch_size=batch_size, label_key=_LABEL_KEY), tf_transform_output.transformed_metadata.schema) dataset = dataset.repeat() return dataset.prefetch(tf.data.AUTOTUNE) def _get_serve_tf_examples_fn(model, tf_transform_output): """Returns a function that parses a serialized tf.Example.""" model.tft_layer = tf_transform_output.transform_features_layer() def serve_tf_examples_fn(serialized_tf_examples): """Returns the output to be used in the serving signature.""" feature_spec = tf_transform_output.raw_feature_spec() feature_spec.pop(_LABEL_KEY) parsed_features = tf.io.parse_example(serialized_tf_examples, feature_spec) transformed_features = model.tft_layer(parsed_features) return model(transformed_features) return serve_tf_examples_fn def build_and_compile_bert_classifier( bert_layer: tf.keras.layers.Layer, max_len: int, num_classes: int, learning_rate: float = 5e-5, metrics: Optional[List[Union[str, tf.keras.metrics.Metric]]] = None): """Build and compile keras BERT classification model. Apart from the necessary inputs, use default/suggested parameters in build and compile BERT classifier functions. Args: bert_layer: A tensorflow_hub.KerasLayer intence of BERT layer. max_len: The maximum length of preprocessed tokens. num_classes: Number of unique classes in the labels. Determines the output shape of the classification layer. learning_rate: Suggested learning rate to be used in tf.keras.optimizer.Adam. The three suggested learning_rates for fine-tuning are [2e-5, 3e-5,5e-5] metrics: Default None will use ['sparse_categorical_accuracy']. An array of strings or tf.keras.metrics. Returns: A compiled keras BERT Classification model. 
""" if metrics is None: metrics = ["sparse_categorical_accuracy"] model = build_bert_classifier(bert_layer, max_len, num_classes) compile_bert_classifier(model, learning_rate=learning_rate, metrics=metrics) return model The provided code snippet includes necessary dependencies for implementing the `run_fn` function. Write a Python function `def run_fn(fn_args: tfx.components.FnArgs)` to solve the following problem: Train the model based on given args. Args: fn_args: Holds args used to train the model as name/value pairs. Here is the function: def run_fn(fn_args: tfx.components.FnArgs): """Train the model based on given args. Args: fn_args: Holds args used to train the model as name/value pairs. """ tf_transform_output = tft.TFTransformOutput(fn_args.transform_output) train_dataset = _input_fn( fn_args.train_files, fn_args.data_accessor, tf_transform_output, batch_size=_TRAIN_BATCH_SIZE) eval_dataset = _input_fn( fn_args.eval_files, fn_args.data_accessor, tf_transform_output, batch_size=_EVAL_BATCH_SIZE) mirrored_strategy = tf.distribute.MirroredStrategy() with mirrored_strategy.scope(): bert_layer = hub.KerasLayer(_BERT_LINK, trainable=True) model = build_and_compile_bert_classifier(bert_layer, _MAX_LEN, 2) model.fit( train_dataset, epochs=_EPOCHS, steps_per_epoch=fn_args.train_steps, validation_data=eval_dataset, validation_steps=fn_args.eval_steps) signatures = { 'serving_default': _get_serve_tf_examples_fn(model, tf_transform_output).get_concrete_function( tf.TensorSpec( shape=[None], dtype=tf.string, name='examples')), } model.save(fn_args.serving_model_dir, save_format='tf', signatures=signatures)
Train the model based on given args. Args: fn_args: Holds args used to train the model as name/value pairs.
166,149
import os from typing import List import absl import tensorflow_model_analysis as tfma from tfx.components import CsvExampleGen from tfx.components import Evaluator from tfx.components import ExampleValidator from tfx.components import Pusher from tfx.components import SchemaGen from tfx.components import StatisticsGen from tfx.components import Trainer from tfx.components import Transform from tfx.dsl.components.common import resolver from tfx.dsl.experimental import latest_blessed_model_resolver from tfx.orchestration import metadata from tfx.orchestration import pipeline from tfx.orchestration.beam.beam_dag_runner import BeamDagRunner from tfx.proto import example_gen_pb2 from tfx.proto import pusher_pb2 from tfx.proto import trainer_pb2 from tfx.types import Channel from tfx.types.standard_artifacts import Model from tfx.types.standard_artifacts import ModelBlessing class Model(_TfxArtifact): """Artifact that contains the actual persisted model. Training components stores the trained model like a saved model in this artifact. A `Model` artifact contains serialization of the trained model in one or more formats, each suitable for different usage (e.g. serving, evaluation), and serving environments. * File structure: - `{uri}/` - `Format-Serving/`: Model exported for serving. - `saved_model.pb` - Other actual model files. - `Format-TFMA/`: Model exported for evaluation. - `saved_model.pb` - Other actual model files. * Commonly used custom properties of the Model artifact: """ TYPE_NAME = 'Model' TYPE_ANNOTATION = SystemModel class ModelBlessing(_TfxArtifact): """Artifact that contains the evaluation of a trained model. This artifact is usually used with Conditional when determining whether to push this model on service or not. ```python # Run pusher if evaluator has blessed the model. with tfx.dsl.Cond(evaluator.outputs['blessing'].future() [0].custom_property('blessed') == 1): pusher = Pusher(...) 
``` * File structure: - `{uri}/` - `BLESSED`: if the evaluator has blessed the model. - `NOT_BLESSED`: if the evaluator has not blessed the model. - See tfx/components/evaluator/executor.py for how to write ModelBlessing. * Commonly used custom properties of the ModelBlessing artifact: - `blessed`: int value that represents whether the evaluator has blessed its model or not. """ TYPE_NAME = 'ModelBlessing' The provided code snippet includes necessary dependencies for implementing the `_create_pipeline` function. Write a Python function `def _create_pipeline(pipeline_name: str, pipeline_root: str, data_root: str, module_file: str, serving_model_dir: str, metadata_path: str, beam_pipeline_args: List[str]) -> pipeline.Pipeline` to solve the following problem: Implements the Bert classication on mrpc dataset pipline with TFX. Here is the function: def _create_pipeline(pipeline_name: str, pipeline_root: str, data_root: str, module_file: str, serving_model_dir: str, metadata_path: str, beam_pipeline_args: List[str]) -> pipeline.Pipeline: """Implements the Bert classication on mrpc dataset pipline with TFX.""" input_config = example_gen_pb2.Input(splits=[ example_gen_pb2.Input.Split(name='train', pattern='train/*'), example_gen_pb2.Input.Split(name='eval', pattern='validation/*') ]) # Brings data into the pipline example_gen = CsvExampleGen(input_base=data_root, input_config=input_config) # Computes statistics over data for visualization and example validation. statistics_gen = StatisticsGen(examples=example_gen.outputs['examples']) # Generates schema based on statistics files. schema_gen = SchemaGen( statistics=statistics_gen.outputs['statistics'], infer_feature_shape=True) # Performs anomaly detection based on statistics and data schema. example_validator = ExampleValidator( statistics=statistics_gen.outputs['statistics'], schema=schema_gen.outputs['schema']) # Performs transformations and feature engineering in training and serving. 
transform = Transform( examples=example_gen.outputs['examples'], schema=schema_gen.outputs['schema'], module_file=module_file) # Uses user-provided Python function that trains a model. trainer = Trainer( module_file=module_file, examples=transform.outputs['transformed_examples'], transform_graph=transform.outputs['transform_graph'], schema=schema_gen.outputs['schema'], # Adjust these steps when training on the full dataset. train_args=trainer_pb2.TrainArgs(num_steps=1), eval_args=trainer_pb2.EvalArgs(num_steps=1)) # Get the latest blessed model for model validation. model_resolver = resolver.Resolver( strategy_class=latest_blessed_model_resolver.LatestBlessedModelResolver, model=Channel(type=Model), model_blessing=Channel( type=ModelBlessing)).with_id('latest_blessed_model_resolver') # Uses TFMA to compute evaluation statistics over features of a model and # perform quality validation of a candidate model (compared to a baseline). eval_config = tfma.EvalConfig( model_specs=[tfma.ModelSpec(label_key='label')], slicing_specs=[tfma.SlicingSpec()], metrics_specs=[ tfma.MetricsSpec(metrics=[ tfma.MetricConfig( class_name='SparseCategoricalAccuracy', threshold=tfma.MetricThreshold( value_threshold=tfma.GenericValueThreshold( # Adjust the threshold when training on the # full dataset. # TODO(b/236089934): Change back to 0.5. lower_bound={'value': 0.1}), # Change threshold will be ignored if there is no # baseline model resolved from MLMD (first run). change_threshold=tfma.GenericChangeThreshold( direction=tfma.MetricDirection.HIGHER_IS_BETTER, absolute={'value': -1e-2}))) ]) ]) evaluator = Evaluator( examples=example_gen.outputs['examples'], model=trainer.outputs['model'], baseline_model=model_resolver.outputs['model'], eval_config=eval_config) # Checks whether the model passed the validation steps and pushes the model # to a file destination if check passed. 
pusher = Pusher( model=trainer.outputs['model'], model_blessing=evaluator.outputs['blessing'], push_destination=pusher_pb2.PushDestination( filesystem=pusher_pb2.PushDestination.Filesystem( base_directory=serving_model_dir))) components = [ example_gen, statistics_gen, schema_gen, example_validator, transform, trainer, model_resolver, evaluator, pusher, ] return pipeline.Pipeline( pipeline_name=pipeline_name, pipeline_root=pipeline_root, components=components, metadata_connection_config=metadata.sqlite_metadata_connection_config( metadata_path), enable_cache=True, beam_pipeline_args=beam_pipeline_args, )
Implements the Bert classication on mrpc dataset pipline with TFX.
166,150
from typing import List import tensorflow as tf import tensorflow_data_validation as tfdv import tensorflow_hub as hub import tensorflow_transform as tft from tfx import v1 as tfx from tfx.components.transform import stats_options_util from tfx.examples.bert.utils.bert_models import build_and_compile_bert_classifier from tfx.examples.bert.utils.bert_tokenizer_utils import BertPreprocessor from tfx_bsl.public import tfxio from google.protobuf import text_format _INPUT_WORD_IDS = 'input_word_ids' _INPUT_MASK = 'input_mask' _SEGMENT_IDS = 'segment_ids' _FEATURE_KEY_A = 'sentence1' _FEATURE_KEY_B = 'sentence2' _LABEL_KEY = 'label' def _tokenize(sequence_a, sequence_b): """Tokenize the two sentences and insert appropriate tokens.""" processor = BertPreprocessor(_BERT_LINK) vocab = processor.get_vocab_name() # Annotate asset provides the mapping between the name (_BERT_VOCAB) and the # path within the StatsOptions object passed to TFDV ( # https://github.com/tensorflow/data-validation/blob/master/ # tensorflow_data_validation/statistics/stats_options.py). # This vocab can then be used to compute NLP statistics (see the description # of the stats_options_updater_fn below. tft.annotate_asset(_BERT_VOCAB, vocab.decode()) return processor.tokenize_sentence_pair( tf.reshape(sequence_a, [-1]), tf.reshape(sequence_b, [-1]), _MAX_LEN) The provided code snippet includes necessary dependencies for implementing the `preprocessing_fn` function. Write a Python function `def preprocessing_fn(inputs)` to solve the following problem: tf.transform's callback function for preprocessing inputs. Args: inputs: map from feature keys to raw not-yet-transformed features. Returns: Map from string feature key to transformed feature Tensors. Here is the function: def preprocessing_fn(inputs): """tf.transform's callback function for preprocessing inputs. Args: inputs: map from feature keys to raw not-yet-transformed features. Returns: Map from string feature key to transformed feature Tensors. 
""" input_word_ids, input_mask, segment_ids = _tokenize(inputs[_FEATURE_KEY_A], inputs[_FEATURE_KEY_B]) return { _LABEL_KEY: inputs[_LABEL_KEY], _INPUT_WORD_IDS: input_word_ids, _INPUT_MASK: input_mask, _SEGMENT_IDS: segment_ids }
tf.transform's callback function for preprocessing inputs. Args: inputs: map from feature keys to raw not-yet-transformed features. Returns: Map from string feature key to transformed feature Tensors.
166,151
from typing import List import tensorflow as tf import tensorflow_data_validation as tfdv import tensorflow_hub as hub import tensorflow_transform as tft from tfx import v1 as tfx from tfx.components.transform import stats_options_util from tfx.examples.bert.utils.bert_models import build_and_compile_bert_classifier from tfx.examples.bert.utils.bert_tokenizer_utils import BertPreprocessor from tfx_bsl.public import tfxio from google.protobuf import text_format _BERT_VOCAB = 'bert_vocab' _INPUT_WORD_IDS = 'input_word_ids' _MAX_LEN = 128 The provided code snippet includes necessary dependencies for implementing the `stats_options_updater_fn` function. Write a Python function `def stats_options_updater_fn( stats_type: stats_options_util.StatsType, stats_options: tfdv.StatsOptions) -> tfdv.StatsOptions` to solve the following problem: Update transform stats. This function is called by the Transform component before it computes pre-transform or post-transform statistics. It takes as input a stats_type, which indicates whether this call is intended for pre-transform or post-transform statistics. It also takes as argument the StatsOptions that are to be (optionally) modified before being passed onto TDFV. Args: stats_type: The type of statistics that are to be computed (pre-transform or post-transform). stats_options: The configuration to pass to TFDV for computing the desired statistics. Returns: An updated StatsOptions object. Here is the function: def stats_options_updater_fn( stats_type: stats_options_util.StatsType, stats_options: tfdv.StatsOptions) -> tfdv.StatsOptions: """Update transform stats. This function is called by the Transform component before it computes pre-transform or post-transform statistics. It takes as input a stats_type, which indicates whether this call is intended for pre-transform or post-transform statistics. It also takes as argument the StatsOptions that are to be (optionally) modified before being passed onto TDFV. 
Args: stats_type: The type of statistics that are to be computed (pre-transform or post-transform). stats_options: The configuration to pass to TFDV for computing the desired statistics. Returns: An updated StatsOptions object. """ if stats_type == stats_options_util.StatsType.POST_TRANSFORM: for f in stats_options.schema.feature: if f.name == _INPUT_WORD_IDS: # Here we extend the schema for the input_word_ids feature to enable # NLP statistics to be computed. We pass the vocabulary (_BERT_VOCAB) # that was used in tokenizing this feature, key tokens of interest # (e.g. "[CLS]", "[PAD]", "[SEP]", "[UNK]") and key thresholds to # validate. For more information on the field descriptions, see here: # https://github.com/tensorflow/metadata/blob/master/ # tensorflow_metadata/proto/v0/schema.proto text_format.Parse( """ vocabulary: "{vocab}" coverage: {{ min_coverage: 1.0 min_avg_token_length: 3.0 excluded_string_tokens: ["[CLS]", "[PAD]", "[SEP]"] oov_string_tokens: ["[UNK]"] }} token_constraints {{ string_value: "[CLS]" min_per_sequence: 1 max_per_sequence: 1 min_fraction_of_sequences: 1 max_fraction_of_sequences: 1 }} token_constraints {{ string_value: "[PAD]" min_per_sequence: 0 max_per_sequence: {max_pad_per_seq} min_fraction_of_sequences: 0 max_fraction_of_sequences: 1 }} token_constraints {{ string_value: "[SEP]" min_per_sequence: 2 max_per_sequence: 2 min_fraction_of_sequences: 1 max_fraction_of_sequences: 1 }} token_constraints {{ string_value: "[UNK]" min_per_sequence: 0 max_per_sequence: {max_unk_per_seq} min_fraction_of_sequences: 0 max_fraction_of_sequences: 1 }} """.format( vocab=_BERT_VOCAB, max_pad_per_seq=_MAX_LEN - 3, # [CLS], 2x[SEP], Token max_unk_per_seq=_MAX_LEN - 4 # [CLS], 2x[SEP] ), f.natural_language_domain) return stats_options
Update transform stats. This function is called by the Transform component before it computes pre-transform or post-transform statistics. It takes as input a stats_type, which indicates whether this call is intended for pre-transform or post-transform statistics. It also takes as argument the StatsOptions that are to be (optionally) modified before being passed onto TDFV. Args: stats_type: The type of statistics that are to be computed (pre-transform or post-transform). stats_options: The configuration to pass to TFDV for computing the desired statistics. Returns: An updated StatsOptions object.
166,152
from typing import List import tensorflow as tf import tensorflow_data_validation as tfdv import tensorflow_hub as hub import tensorflow_transform as tft from tfx import v1 as tfx from tfx.components.transform import stats_options_util from tfx.examples.bert.utils.bert_models import build_and_compile_bert_classifier from tfx.examples.bert.utils.bert_tokenizer_utils import BertPreprocessor from tfx_bsl.public import tfxio from google.protobuf import text_format _BERT_LINK = 'https://tfhub.dev/tensorflow/bert_en_cased_L-12_H-768_A-12/2' _EPOCHS = 1 _EVAL_BATCH_SIZE = 32 _MAX_LEN = 128 _TRAIN_BATCH_SIZE = 32 def _input_fn(file_pattern: List[str], data_accessor: tfx.components.DataAccessor, tf_transform_output: tft.TFTransformOutput, batch_size: int = 200) -> tf.data.Dataset: """Generates features and label for tuning/training. Args: file_pattern: List of paths or patterns of input tfrecord files. data_accessor: DataAccessor for converting input to RecordBatch. tf_transform_output: A TFTransformOutput. batch_size: representing the number of consecutive elements of returned dataset to combine in a single batch Returns: A dataset that contains (features, indices) tuple where features is a dictionary of Tensors, and indices is a single Tensor of label indices. 
""" dataset = data_accessor.tf_dataset_factory( file_pattern, tfxio.TensorFlowDatasetOptions( batch_size=batch_size, label_key=_LABEL_KEY), tf_transform_output.transformed_metadata.schema) dataset = dataset.repeat() return dataset.prefetch(tf.data.AUTOTUNE) def _get_serve_tf_examples_fn(model, tf_transform_output): """Returns a function that parses a serialized tf.Example.""" model.tft_layer = tf_transform_output.transform_features_layer() def serve_tf_examples_fn(serialized_tf_examples): """Returns the output to be used in the serving signature.""" feature_spec = tf_transform_output.raw_feature_spec() feature_spec.pop(_LABEL_KEY) parsed_features = tf.io.parse_example(serialized_tf_examples, feature_spec) transformed_features = model.tft_layer(parsed_features) return model(transformed_features) return serve_tf_examples_fn def build_and_compile_bert_classifier( bert_layer: tf.keras.layers.Layer, max_len: int, num_classes: int, learning_rate: float = 5e-5, metrics: Optional[List[Union[str, tf.keras.metrics.Metric]]] = None): """Build and compile keras BERT classification model. Apart from the necessary inputs, use default/suggested parameters in build and compile BERT classifier functions. Args: bert_layer: A tensorflow_hub.KerasLayer intence of BERT layer. max_len: The maximum length of preprocessed tokens. num_classes: Number of unique classes in the labels. Determines the output shape of the classification layer. learning_rate: Suggested learning rate to be used in tf.keras.optimizer.Adam. The three suggested learning_rates for fine-tuning are [2e-5, 3e-5,5e-5] metrics: Default None will use ['sparse_categorical_accuracy']. An array of strings or tf.keras.metrics. Returns: A compiled keras BERT Classification model. 
""" if metrics is None: metrics = ["sparse_categorical_accuracy"] model = build_bert_classifier(bert_layer, max_len, num_classes) compile_bert_classifier(model, learning_rate=learning_rate, metrics=metrics) return model The provided code snippet includes necessary dependencies for implementing the `run_fn` function. Write a Python function `def run_fn(fn_args: tfx.components.FnArgs)` to solve the following problem: Train the model based on given args. Args: fn_args: Holds args used to train the model as name/value pairs. Here is the function: def run_fn(fn_args: tfx.components.FnArgs): """Train the model based on given args. Args: fn_args: Holds args used to train the model as name/value pairs. """ tf_transform_output = tft.TFTransformOutput(fn_args.transform_output) train_dataset = _input_fn( fn_args.train_files, fn_args.data_accessor, tf_transform_output, batch_size=_TRAIN_BATCH_SIZE) eval_dataset = _input_fn( fn_args.eval_files, fn_args.data_accessor, tf_transform_output, batch_size=_EVAL_BATCH_SIZE) mirrored_strategy = tf.distribute.MirroredStrategy() with mirrored_strategy.scope(): bert_layer = hub.KerasLayer(_BERT_LINK, trainable=True) model = build_and_compile_bert_classifier(bert_layer, _MAX_LEN, 2, 2e-5) model.fit( train_dataset, epochs=_EPOCHS, steps_per_epoch=fn_args.train_steps, validation_data=eval_dataset, validation_steps=fn_args.eval_steps) signatures = { 'serving_default': _get_serve_tf_examples_fn(model, tf_transform_output).get_concrete_function( tf.TensorSpec( shape=[None], dtype=tf.string, name='examples')), } model.save(fn_args.serving_model_dir, save_format='tf', signatures=signatures)
Train the model based on given args. Args: fn_args: Holds args used to train the model as name/value pairs.
166,153
import os import pandas as pd import tensorflow_datasets as tfds The provided code snippet includes necessary dependencies for implementing the `fetch_data` function. Write a Python function `def fetch_data()` to solve the following problem: This downloads the full dataset to $(pwd)/data/imdb.csv. Here is the function: def fetch_data(): """This downloads the full dataset to $(pwd)/data/imdb.csv.""" ds = tfds.load('imdb_reviews', split='train+test') numpy_ds = tfds.as_numpy(ds) df = pd.DataFrame(numpy_ds) df['text'] = df['text'].str.decode('utf-8') dst_path = os.getcwd() + '/data/imdb.csv' df.to_csv(dst_path, index=False)
This downloads the full dataset to $(pwd)/data/imdb.csv.
166,154
import os from typing import List import absl import tensorflow_model_analysis as tfma from tfx.components import CsvExampleGen from tfx.components import Evaluator from tfx.components import ExampleValidator from tfx.components import Pusher from tfx.components import SchemaGen from tfx.components import StatisticsGen from tfx.components import Trainer from tfx.components import Transform from tfx.dsl.components.common import resolver from tfx.dsl.experimental import latest_blessed_model_resolver from tfx.orchestration import metadata from tfx.orchestration import pipeline from tfx.orchestration.beam.beam_dag_runner import BeamDagRunner from tfx.proto import example_gen_pb2 from tfx.proto import pusher_pb2 from tfx.proto import trainer_pb2 from tfx.types import Channel from tfx.types.standard_artifacts import Model from tfx.types.standard_artifacts import ModelBlessing class Model(_TfxArtifact): """Artifact that contains the actual persisted model. Training components stores the trained model like a saved model in this artifact. A `Model` artifact contains serialization of the trained model in one or more formats, each suitable for different usage (e.g. serving, evaluation), and serving environments. * File structure: - `{uri}/` - `Format-Serving/`: Model exported for serving. - `saved_model.pb` - Other actual model files. - `Format-TFMA/`: Model exported for evaluation. - `saved_model.pb` - Other actual model files. * Commonly used custom properties of the Model artifact: """ TYPE_NAME = 'Model' TYPE_ANNOTATION = SystemModel class ModelBlessing(_TfxArtifact): """Artifact that contains the evaluation of a trained model. This artifact is usually used with Conditional when determining whether to push this model on service or not. ```python # Run pusher if evaluator has blessed the model. with tfx.dsl.Cond(evaluator.outputs['blessing'].future() [0].custom_property('blessed') == 1): pusher = Pusher(...) 
``` * File structure: - `{uri}/` - `BLESSED`: if the evaluator has blessed the model. - `NOT_BLESSED`: if the evaluator has not blessed the model. - See tfx/components/evaluator/executor.py for how to write ModelBlessing. * Commonly used custom properties of the ModelBlessing artifact: - `blessed`: int value that represents whether the evaluator has blessed its model or not. """ TYPE_NAME = 'ModelBlessing' The provided code snippet includes necessary dependencies for implementing the `_create_pipeline` function. Write a Python function `def _create_pipeline(pipeline_name: str, pipeline_root: str, data_root: str, module_file: str, serving_model_dir: str, metadata_path: str, beam_pipeline_args: List[str]) -> pipeline.Pipeline` to solve the following problem: Implements the imdb sentiment analysis pipline with TFX. Here is the function: def _create_pipeline(pipeline_name: str, pipeline_root: str, data_root: str, module_file: str, serving_model_dir: str, metadata_path: str, beam_pipeline_args: List[str]) -> pipeline.Pipeline: """Implements the imdb sentiment analysis pipline with TFX.""" output = example_gen_pb2.Output( split_config=example_gen_pb2.SplitConfig(splits=[ example_gen_pb2.SplitConfig.Split(name='train', hash_buckets=9), example_gen_pb2.SplitConfig.Split(name='eval', hash_buckets=1) ])) # Brings data in to the pipline example_gen = CsvExampleGen(input_base=data_root, output_config=output) # Computes statistics over data for visualization and example validation. statistics_gen = StatisticsGen(examples=example_gen.outputs['examples']) # Generates schema based on statistics files. schema_gen = SchemaGen( statistics=statistics_gen.outputs['statistics'], infer_feature_shape=True) # Performs anomaly detection based on statistics and data schema. example_validator = ExampleValidator( statistics=statistics_gen.outputs['statistics'], schema=schema_gen.outputs['schema']) # Performs transformations and feature engineering in training and serving. 
transform = Transform( examples=example_gen.outputs['examples'], schema=schema_gen.outputs['schema'], module_file=module_file) # Uses user-provided Python function that trains a model. trainer = Trainer( module_file=module_file, examples=transform.outputs['transformed_examples'], transform_graph=transform.outputs['transform_graph'], schema=schema_gen.outputs['schema'], train_args=trainer_pb2.TrainArgs(num_steps=500), eval_args=trainer_pb2.EvalArgs(num_steps=200)) # Get the latest blessed model for model validation. model_resolver = resolver.Resolver( strategy_class=latest_blessed_model_resolver.LatestBlessedModelResolver, model=Channel(type=Model), model_blessing=Channel( type=ModelBlessing)).with_id('latest_blessed_model_resolver') # Uses TFMA to compute evaluation statistics over features of a model and # perform quality validation of a candidate model (compared to a baseline). eval_config = tfma.EvalConfig( model_specs=[tfma.ModelSpec(label_key='label')], slicing_specs=[tfma.SlicingSpec()], metrics_specs=[ tfma.MetricsSpec(metrics=[ tfma.MetricConfig( class_name='BinaryAccuracy', threshold=tfma.MetricThreshold( value_threshold=tfma.GenericValueThreshold( # Increase this threshold when training on complete # dataset. lower_bound={'value': 0.01}), # Change threshold will be ignored if there is no # baseline model resolved from MLMD (first run). change_threshold=tfma.GenericChangeThreshold( direction=tfma.MetricDirection.HIGHER_IS_BETTER, absolute={'value': -1e-2}))) ]) ]) evaluator = Evaluator( examples=example_gen.outputs['examples'], model=trainer.outputs['model'], baseline_model=model_resolver.outputs['model'], eval_config=eval_config) # Checks whether the model passed the validation steps and pushes the model # to a file destination if check passed. 
pusher = Pusher( model=trainer.outputs['model'], model_blessing=evaluator.outputs['blessing'], push_destination=pusher_pb2.PushDestination( filesystem=pusher_pb2.PushDestination.Filesystem( base_directory=serving_model_dir))) components = [ example_gen, statistics_gen, schema_gen, example_validator, transform, trainer, model_resolver, evaluator, pusher, ] return pipeline.Pipeline( pipeline_name=pipeline_name, pipeline_root=pipeline_root, components=components, metadata_connection_config=metadata.sqlite_metadata_connection_config( metadata_path), enable_cache=True, beam_pipeline_args=beam_pipeline_args)
Implements the imdb sentiment analysis pipline with TFX.
166,155
from typing import List import absl import tensorflow as tf from tensorflow import keras import tensorflow_transform as tft from tfx.components.trainer.fn_args_utils import DataAccessor from tfx.components.trainer.fn_args_utils import FnArgs from tfx_bsl.tfxio import dataset_options _FEATURE_KEY = 'text' _LABEL_KEY = 'label' def _transformed_name(key, is_input=False): return key + ('_xf_input' if is_input else '_xf') def _tokenize_review(review): """Tokenize the reviews by spliting the reviews. Constructing a vocabulary. Map the words to their frequency index in the vocabulary. Args: review: tensors containing the reviews. (batch_size/None, 1) Returns: Tokenized and padded review tensors. (batch_size/None, _MAX_LEN) """ review_sparse = tf.strings.split(tf.reshape(review, [-1])).to_sparse() # tft.apply_vocabulary doesn't reserve 0 for oov words. In order to comply # with convention and use mask_zero in keras.embedding layer, set oov value # to _VOCAB_SIZE and padding value to -1. Then add 1 to all the tokens. review_indices = tft.compute_and_apply_vocabulary( review_sparse, default_value=_VOCAB_SIZE, top_k=_VOCAB_SIZE) dense = tf.sparse.to_dense(review_indices, default_value=-1) # TFX transform expects the transform result to be FixedLenFeature. padding_config = [[0, 0], [0, _MAX_LEN]] dense = tf.pad(dense, padding_config, 'CONSTANT', -1) padded = tf.slice(dense, [0, 0], [-1, _MAX_LEN]) padded += 1 return padded The provided code snippet includes necessary dependencies for implementing the `preprocessing_fn` function. Write a Python function `def preprocessing_fn(inputs)` to solve the following problem: tf.transform's callback function for preprocessing inputs. Args: inputs: map from feature keys to raw not-yet-transformed features. Returns: Map from string feature key to transformed feature operations. Here is the function: def preprocessing_fn(inputs): """tf.transform's callback function for preprocessing inputs. 
Args: inputs: map from feature keys to raw not-yet-transformed features. Returns: Map from string feature key to transformed feature operations. """ return { _transformed_name(_LABEL_KEY): inputs[_LABEL_KEY], _transformed_name(_FEATURE_KEY, True): _tokenize_review(inputs[_FEATURE_KEY]) }
tf.transform's callback function for preprocessing inputs. Args: inputs: map from feature keys to raw not-yet-transformed features. Returns: Map from string feature key to transformed feature operations.
166,156
from typing import List import absl import tensorflow as tf from tensorflow import keras import tensorflow_transform as tft from tfx.components.trainer.fn_args_utils import DataAccessor from tfx.components.trainer.fn_args_utils import FnArgs from tfx_bsl.tfxio import dataset_options _EVAL_BATCH_SIZE = 5 _TRAIN_BATCH_SIZE = 10 def _input_fn(file_pattern: List[str], data_accessor: DataAccessor, tf_transform_output: tft.TFTransformOutput, batch_size: int = 200) -> tf.data.Dataset: """Generates features and label for tuning/training. Args: file_pattern: List of paths or patterns of input tfrecord files. data_accessor: DataAccessor for converting input to RecordBatch. tf_transform_output: A TFTransformOutput. batch_size: representing the number of consecutive elements of returned dataset to combine in a single batch. Returns: A dataset that contains (features, indices) tuple where features is a dictionary of Tensors, and indices is a single Tensor of label indices. """ dataset = data_accessor.tf_dataset_factory( file_pattern, dataset_options.TensorFlowDatasetOptions( batch_size=batch_size, label_key=_transformed_name(_LABEL_KEY)), tf_transform_output.transformed_metadata.schema) return dataset.repeat() def _build_keras_model() -> keras.Model: """Creates a LSTM Keras model for classifying imdb data. Reference: https://www.tensorflow.org/tutorials/text/text_classification_rnn Returns: A Keras Model. 
""" # The model below is built with Sequential API, please refer to # https://www.tensorflow.org/guide/keras/sequential_model model = keras.Sequential([ keras.layers.Embedding( _VOCAB_SIZE + 2, _EMBEDDING_UNITS, name=_transformed_name(_FEATURE_KEY)), keras.layers.Bidirectional( keras.layers.LSTM(_LSTM_UNITS, dropout=_DROPOUT_RATE)), keras.layers.Dense(_HIDDEN_UNITS, activation='relu'), keras.layers.Dense(1) ]) model.compile( loss=keras.losses.BinaryCrossentropy(from_logits=True), optimizer=keras.optimizers.Adam(_LEARNING_RATE), metrics=['accuracy']) model.summary(print_fn=absl.logging.info) return model def _get_serve_tf_examples_fn(model, tf_transform_output): """Returns a function that parses a serialized tf.Example.""" model.tft_layer = tf_transform_output.transform_features_layer() def serve_tf_examples_fn(serialized_tf_examples): """Returns the output to be used in the serving signature.""" feature_spec = tf_transform_output.raw_feature_spec() feature_spec.pop(_LABEL_KEY) parsed_features = tf.io.parse_example(serialized_tf_examples, feature_spec) transformed_features = model.tft_layer(parsed_features) return model(transformed_features) return serve_tf_examples_fn class FnArgs: """Args to pass to user defined training/tuning function(s). Attributes: working_dir: Working dir. train_files: A list of patterns for train files. eval_files: A list of patterns for eval files. train_steps: Number of train steps. eval_steps: Number of eval steps. schema_path: A single uri for schema file. Will be None if not specified. schema_file: Deprecated, use `schema_path` instead. transform_graph_path: An optional single uri for transform graph produced by TFT. Will be None if not specified. transform_output: Deprecated, use `transform_graph_path` instead. data_accessor: Contains factories that can create tf.data.Datasets or other means to access the train/eval data. They provide a uniform way of accessing data, regardless of how the data is stored on disk. 
serving_model_dir: A single uri for the output directory of the serving model. eval_model_dir: A single uri for the output directory of the eval model. Note that this is estimator only, Keras doesn't require it for TFMA. model_run_dir: A single uri for the output directory of model training related files. base_model: An optional base model path that will be used for this training. hyperparameters: An optional keras_tuner.HyperParameters config. custom_config: An optional dictionary passed to the component. """ working_dir = attr.ib(type=str, default=None) train_files = attr.ib(type=List[str], default=None) eval_files = attr.ib(type=List[str], default=None) train_steps = attr.ib(type=int, default=None) eval_steps = attr.ib(type=int, default=None) schema_path = attr.ib(type=str, default=None) schema_file = attr.ib(type=str, default=None) transform_graph_path = attr.ib(type=str, default=None) transform_output = attr.ib(type=str, default=None) data_accessor = attr.ib(type=DataAccessor, default=None) serving_model_dir = attr.ib(type=str, default=None) eval_model_dir = attr.ib(type=str, default=None) model_run_dir = attr.ib(type=str, default=None) base_model = attr.ib(type=str, default=None) hyperparameters = attr.ib(type=Dict[str, Any], default=None) custom_config = attr.ib(type=Dict[str, Any], default=None) The provided code snippet includes necessary dependencies for implementing the `run_fn` function. Write a Python function `def run_fn(fn_args: FnArgs)` to solve the following problem: Train the model based on given args. Args: fn_args: Holds args used to train the model as name/value pairs. Here is the function: def run_fn(fn_args: FnArgs): """Train the model based on given args. Args: fn_args: Holds args used to train the model as name/value pairs. 
""" tf_transform_output = tft.TFTransformOutput(fn_args.transform_output) train_dataset = _input_fn( fn_args.train_files, fn_args.data_accessor, tf_transform_output, batch_size=_TRAIN_BATCH_SIZE) eval_dataset = _input_fn( fn_args.eval_files, fn_args.data_accessor, tf_transform_output, batch_size=_EVAL_BATCH_SIZE) mirrored_strategy = tf.distribute.MirroredStrategy() with mirrored_strategy.scope(): model = _build_keras_model() # In distributed training, it is common to use num_steps instead of num_epochs # to control training. # Reference: https://stackoverflow.com/questions/45989971/ # /distributed-training-with-tf-estimator-resulting-in-more-training-steps model.fit( train_dataset, steps_per_epoch=fn_args.train_steps, validation_data=eval_dataset, validation_steps=fn_args.eval_steps) signatures = { 'serving_default': _get_serve_tf_examples_fn(model, tf_transform_output).get_concrete_function( tf.TensorSpec( shape=[None], dtype=tf.string, name='examples')), } model.save(fn_args.serving_model_dir, save_format='tf', signatures=signatures)
Train the model based on given args. Args: fn_args: Holds args used to train the model as name/value pairs.
166,157
import os import absl from tfx.components import CsvExampleGen from tfx.components import StatisticsGen from tfx.examples.custom_components.hello_world.hello_component import component from tfx.orchestration import metadata from tfx.orchestration import pipeline from tfx.orchestration.beam.beam_dag_runner import BeamDagRunner The provided code snippet includes necessary dependencies for implementing the `_create_pipeline` function. Write a Python function `def _create_pipeline(pipeline_name: str, pipeline_root: str, data_root: str, metadata_path: str) -> pipeline.Pipeline` to solve the following problem: Implements the chicago taxi pipeline with TFX. Here is the function: def _create_pipeline(pipeline_name: str, pipeline_root: str, data_root: str, metadata_path: str) -> pipeline.Pipeline: """Implements the chicago taxi pipeline with TFX.""" # Brings data into the pipeline or otherwise joins/converts training data. example_gen = CsvExampleGen(input_base=data_root) hello = component.HelloComponent( input_data=example_gen.outputs['examples'], name=u'HelloWorld') # Computes statistics over data for visualization and example validation. statistics_gen = StatisticsGen(examples=hello.outputs['output_data']) return pipeline.Pipeline( pipeline_name=pipeline_name, pipeline_root=pipeline_root, components=[example_gen, hello, statistics_gen], enable_cache=True, metadata_connection_config=metadata.sqlite_metadata_connection_config( metadata_path))
Implements the chicago taxi pipeline with TFX.
166,158
import os import absl from tfx.components import Evaluator from tfx.components import ExampleValidator from tfx.components import ModelValidator from tfx.components import Pusher from tfx.components import SchemaGen from tfx.components import StatisticsGen from tfx.components import Trainer from tfx.components import Transform from tfx.examples.custom_components.presto_example_gen.presto_component.component import PrestoExampleGen from tfx.examples.custom_components.presto_example_gen.proto import presto_config_pb2 from tfx.orchestration import metadata from tfx.orchestration import pipeline from tfx.orchestration.beam.beam_dag_runner import BeamDagRunner from tfx.proto import evaluator_pb2 from tfx.proto import pusher_pb2 from tfx.proto import trainer_pb2 class PrestoExampleGen(component.QueryBasedExampleGen): # pylint: disable=protected-access """Official TFX PrestoExampleGen component. The Presto examplegen component takes a query, connection client configuration, and generates train and eval examples for downstream components. Component `outputs` contains: - `examples`: Channel of type `standard_artifacts.Examples` for output train and eval examples. """ EXECUTOR_SPEC = executor_spec.BeamExecutorSpec(executor.Executor) def __init__(self, conn_config: presto_config_pb2.PrestoConnConfig, query: Optional[str] = None, input_config: Optional[example_gen_pb2.Input] = None, output_config: Optional[example_gen_pb2.Output] = None): """Constructs a PrestoExampleGen component. Args: conn_config: Parameters for Presto connection client. query: Presto sql string, query result will be treated as a single split, can be overwritten by input_config. input_config: An example_gen_pb2.Input instance with Split.pattern as Presto sql string. If set, it overwrites the 'query' arg, and allows different queries per split. output_config: An example_gen_pb2.Output instance, providing output configuration. If unset, default splits will be 'train' and 'eval' with size 2:1. 
Raises: RuntimeError: Only one of query and input_config should be set. Or required host field in connection_config should be set. """ if bool(query) == bool(input_config): raise RuntimeError('Exactly one of query and input_config should be set.') if not bool(conn_config.host): raise RuntimeError( 'Required host field in connection config should be set.') input_config = input_config or utils.make_default_input_config(query) packed_custom_config = example_gen_pb2.CustomConfig() packed_custom_config.custom_config.Pack(conn_config) output_config = output_config or utils.make_default_output_config( input_config) super().__init__( input_config=input_config, output_config=output_config, custom_config=packed_custom_config) The provided code snippet includes necessary dependencies for implementing the `_create_pipeline` function. Write a Python function `def _create_pipeline(pipeline_name: str, pipeline_root: str, module_file: str, presto_config: presto_config_pb2.PrestoConnConfig, query: str, serving_model_dir: str, metadata_path: str) -> pipeline.Pipeline` to solve the following problem: Implements the chicago taxi pipeline with TFX. Here is the function: def _create_pipeline(pipeline_name: str, pipeline_root: str, module_file: str, presto_config: presto_config_pb2.PrestoConnConfig, query: str, serving_model_dir: str, metadata_path: str) -> pipeline.Pipeline: """Implements the chicago taxi pipeline with TFX.""" # Brings data into the pipeline or otherwise joins/converts training data example_gen = PrestoExampleGen(presto_config, query=query) # Computes statistics over data for visualization and example validation. statistics_gen = StatisticsGen(examples=example_gen.outputs['examples']) # Generates schema based on statistics files. schema_gen = SchemaGen(statistics=statistics_gen.outputs['statistics']) # Performs anomaly detection based on statistics and data schema. 
example_validator = ExampleValidator( statistics=statistics_gen.outputs['statistics'], schema=schema_gen.outputs['schema']) # Performs transformations and feature engineering in training and serving. transform = Transform( examples=example_gen.outputs['examples'], schema=schema_gen.outputs['schema'], module_file=module_file) # Uses user-provided Python function that implements a model. trainer = Trainer( module_file=module_file, transformed_examples=transform.outputs['transformed_examples'], schema=schema_gen.outputs['schema'], transform_graph=transform.outputs['transform_graph'], train_args=trainer_pb2.TrainArgs(num_steps=10000), eval_args=trainer_pb2.EvalArgs(num_steps=5000)) # Uses TFMA to compute a evaluation statistics over features of a model. evaluator = Evaluator( examples=example_gen.outputs['examples'], model=trainer.outputs['model'], feature_slicing_spec=evaluator_pb2.FeatureSlicingSpec(specs=[ evaluator_pb2.SingleSlicingSpec( column_for_slicing=['trip_start_hour']) ])) # Performs quality validation of a candidate model (compared to a baseline). model_validator = ModelValidator( examples=example_gen.outputs['examples'], model=trainer.outputs['model']) # Checks whether the model passed the validation steps and pushes the model # to a file destination if check passed. pusher = Pusher( model=trainer.outputs['model'], model_blessing=model_validator.outputs['blessing'], push_destination=pusher_pb2.PushDestination( filesystem=pusher_pb2.PushDestination.Filesystem( base_directory=serving_model_dir))) return pipeline.Pipeline( pipeline_name=pipeline_name, pipeline_root=pipeline_root, components=[ example_gen, statistics_gen, schema_gen, example_validator, transform, trainer, evaluator, model_validator, pusher ], enable_cache=True, metadata_connection_config=metadata.sqlite_metadata_connection_config( metadata_path), )
Implements the chicago taxi pipeline with TFX.
166,159
import datetime from typing import Any, Dict, Iterable, Tuple import apache_beam as beam import prestodb import tensorflow as tf from tfx.components.example_gen import base_example_gen_executor from tfx.examples.custom_components.presto_example_gen.proto import presto_config_pb2 from tfx.proto import example_gen_pb2 from tfx.utils import proto_utils class _ReadPrestoDoFn(beam.DoFn): """Beam DoFn class that reads from Presto. Attributes: cursor: A prestodb.dbapi.Cursor object that reads records from Presto table. """ def __init__(self, client: prestodb.dbapi.Connection): self.cursor = client.cursor() def process(self, query: str) -> Iterable[Tuple[str, str, Any]]: """Yields rows from query results. Args: query: A SQL query used to return results from Presto table. Yields: One row from the query result, represented by a list of tuples. Each tuple contains information on column name, column data type, data. """ self.cursor.execute(query) rows = self.cursor.fetchall() if rows: cols = [] col_types = [] # Returns a list of (column_name, column_type, None, ...) # https://github.com/prestodb/presto-python-client/blob/master/prestodb/dbapi.py#L199 for metadata in self.cursor.description: cols.append(metadata[0]) col_types.append(metadata[1]) for r in rows: yield zip(cols, col_types, r) def teardown(self): if self.cursor: self.cursor.close() def _deserialize_conn_config( conn_config: presto_config_pb2.PrestoConnConfig ) -> prestodb.dbapi.Connection: """Deserializes Presto connection config to Presto client. Args: conn_config: Protobuf-encoded connection config for Presto client. Returns: A prestodb.dbapi.Connection instance initialized with user-supplied parameters. 
""" params = {'host': conn_config.host} # Required field # Only deserialize rest of parameters if set by user if conn_config.HasField('port'): params['port'] = conn_config.port if conn_config.HasField('user'): params['user'] = conn_config.user if conn_config.HasField('source'): params['source'] = conn_config.source if conn_config.HasField('catalog'): params['catalog'] = conn_config.catalog if conn_config.HasField('schema'): params['schema'] = conn_config.schema if conn_config.HasField('http_scheme'): params['http_scheme'] = conn_config.http_scheme if conn_config.WhichOneof('opt_auth'): params['auth'] = _deserialize_auth_config(conn_config) if conn_config.HasField('max_attempts'): params['max_attempts'] = conn_config.max_attempts if conn_config.HasField('request_timeout'): params['request_timeout'] = conn_config.request_timeout return prestodb.dbapi.connect(**params) def _row_to_example( instance: Iterable[Tuple[str, str, Any]]) -> tf.train.Example: """Convert presto result row to tf example.""" feature = {} for key, data_type, value in instance: if value is None: feature[key] = tf.train.Feature() elif data_type in {'tinyint', 'smallint', 'integer', 'bigint'}: feature[key] = tf.train.Feature( int64_list=tf.train.Int64List(value=[value])) elif data_type in {'real', 'double', 'decimal'}: feature[key] = tf.train.Feature( float_list=tf.train.FloatList(value=[value])) elif data_type in {'varchar', 'char'}: feature[key] = tf.train.Feature( bytes_list=tf.train.BytesList(value=[tf.compat.as_bytes(value)])) elif data_type in {'timestamp'}: value = int(datetime.datetime.fromisoformat(value).timestamp()) feature[key] = tf.train.Feature( int64_list=tf.train.Int64List(value=[value])) else: # TODO(b/140266796): support more types # https://prestodb.github.io/docs/current/language/types raise RuntimeError( 'Presto column type {} is not supported.'.format(data_type)) return tf.train.Example(features=tf.train.Features(feature=feature)) The provided code snippet includes necessary 
dependencies for implementing the `_PrestoToExample` function. Write a Python function `def _PrestoToExample( # pylint: disable=invalid-name pipeline: beam.Pipeline, exec_properties: Dict[str, Any], split_pattern: str) -> beam.pvalue.PCollection` to solve the following problem: Read from Presto and transform to TF examples. Args: pipeline: beam pipeline. exec_properties: A dict of execution properties. split_pattern: Split.pattern in Input config, a Presto sql string. Returns: PCollection of TF examples. Here is the function: def _PrestoToExample( # pylint: disable=invalid-name pipeline: beam.Pipeline, exec_properties: Dict[str, Any], split_pattern: str) -> beam.pvalue.PCollection: """Read from Presto and transform to TF examples. Args: pipeline: beam pipeline. exec_properties: A dict of execution properties. split_pattern: Split.pattern in Input config, a Presto sql string. Returns: PCollection of TF examples. """ conn_config = example_gen_pb2.CustomConfig() proto_utils.json_to_proto(exec_properties['custom_config'], conn_config) presto_config = presto_config_pb2.PrestoConnConfig() conn_config.custom_config.Unpack(presto_config) client = _deserialize_conn_config(presto_config) return (pipeline | 'Query' >> beam.Create([split_pattern]) | 'QueryTable' >> beam.ParDo(_ReadPrestoDoFn(client)) | 'ToTFExample' >> beam.Map(_row_to_example))
Read from Presto and transform to TF examples. Args: pipeline: beam pipeline. exec_properties: A dict of execution properties. split_pattern: Split.pattern in Input config, a Presto sql string. Returns: PCollection of TF examples.
166,160
from tfx.dsl.component.experimental import container_component from tfx.dsl.component.experimental import placeholders from tfx.types import standard_artifacts downloader_component = container_component.create_container_component( name='DownloadFromHttp', outputs={ 'data': standard_artifacts.ExternalArtifact, }, parameters={ 'url': str, }, # The component code uses gsutil to upload the data to GCS, so the # container image needs to have gsutil installed and configured. # Fixing b/150670779 by merging cl/294536017 will lift this limitation. image='google/cloud-sdk:278.0.0', command=[ 'sh', '-exc', ''' url="$0" output_data_uri="$1"/data # TODO(b/150515270) Remove when fixed. output_data_path=$(mktemp) # Running the main code wget "$0" -O "$output_data_path" || curl "$0" > "$output_data_path" # Getting data out of the container gsutil cp "$output_data_path" "$output_data_uri" ''', placeholders.InputValuePlaceholder('url'), placeholders.OutputUriPlaceholder('data'), ], ) grep_component = container_component.create_container_component( name='FilterWithGrep', inputs={ 'text': standard_artifacts.ExternalArtifact, }, outputs={ 'filtered_text': standard_artifacts.ExternalArtifact, }, parameters={ 'pattern': str, }, # The component code uses gsutil to upload the data to GCS, so the # container image needs to have gsutil installed and configured. # Fixing b/150670779 by merging cl/294536017 will lift this limitation. image='google/cloud-sdk:278.0.0', command=[ 'sh', '-exc', ''' pattern="$0" text_uri="$1"/data # TODO(b/150515270) Remove when fixed. text_path=$(mktemp) filtered_text_uri="$2"/data # TODO(b/150515270) Remove when fixed. 
filtered_text_path=$(mktemp) # Getting data into the container gsutil cp "$text_uri" "$text_path" # Running the main code grep "$pattern" "$text_path" >"$filtered_text_path" # Getting data out of the container gsutil cp "$filtered_text_path" "$filtered_text_uri" ''', placeholders.InputValuePlaceholder('pattern'), placeholders.InputUriPlaceholder('text'), placeholders.OutputUriPlaceholder('filtered_text'), ], ) print_component = container_component.create_container_component( name='Print', inputs={ 'text': standard_artifacts.ExternalArtifact, }, # The component code uses gsutil to upload the data to GCS, so the # container image needs to have gsutil installed and configured. # Fixing b/150670779 by merging cl/294536017 will lift this limitation. image='google/cloud-sdk:278.0.0', command=[ 'sh', '-exc', ''' text_uri="$0"/data # TODO(b/150515270) Remove when fixed. text_path=$(mktemp) # Getting data into the container gsutil cp "$text_uri" "$text_path" # Running the main code cat "$text_path" ''', placeholders.InputUriPlaceholder('text'), ], ) The provided code snippet includes necessary dependencies for implementing the `create_pipeline_component_instances` function. Write a Python function `def create_pipeline_component_instances(text_url: str, pattern: str)` to solve the following problem: Creates tasks for the download_grep_print pipeline. Here is the function: def create_pipeline_component_instances(text_url: str, pattern: str): """Creates tasks for the download_grep_print pipeline.""" downloader_task = downloader_component(url=text_url) grep_task = grep_component( text=downloader_task.outputs['data'], pattern=pattern, ) print_task = print_component( text=grep_task.outputs['filtered_text'], ) component_instances = [ downloader_task, grep_task, print_task, ] return component_instances
Creates tasks for the download_grep_print pipeline.
166,161
from typing import List import tensorflow as tf from tensorflow import estimator as tf_estimator import tensorflow_model_analysis as tfma import tensorflow_transform as tft from tensorflow_transform.tf_metadata import schema_utils from tfx.components.trainer.fn_args_utils import DataAccessor from tfx_bsl.tfxio import dataset_options _CATEGORICAL_FEATURE_KEYS = [ 'trip_start_hour', 'trip_start_day', 'trip_start_month', 'pickup_census_tract', 'dropoff_census_tract', 'pickup_community_area', 'dropoff_community_area' ] _DENSE_FLOAT_FEATURE_KEYS = ['trip_miles', 'fare', 'trip_seconds'] _FEATURE_BUCKET_COUNT = 10 _BUCKET_FEATURE_KEYS = [ 'pickup_latitude', 'pickup_longitude', 'dropoff_latitude', 'dropoff_longitude' ] _VOCAB_SIZE = 1000 _OOV_SIZE = 10 _VOCAB_FEATURE_KEYS = [ 'payment_type', 'company', ] _LABEL_KEY = 'tips' _FARE_KEY = 'fare' def _transformed_name(key): return key + '_xf' def _fill_in_missing(x): """Replace missing values in a SparseTensor. Fills in missing values of `x` with '' or 0, and converts to a dense tensor. Args: x: A `SparseTensor` of rank 2. Its dense shape should have size at most 1 in the second dimension. Returns: A rank 1 tensor where missing values of `x` have been filled in. """ if not isinstance(x, tf.sparse.SparseTensor): return x default_value = '' if x.dtype == tf.string else 0 return tf.squeeze( tf.compat.v1.sparse_to_dense(x.indices, [x.dense_shape[0], 1], x.values, default_value), axis=1) The provided code snippet includes necessary dependencies for implementing the `preprocessing_fn` function. Write a Python function `def preprocessing_fn(inputs)` to solve the following problem: tf.transform's callback function for preprocessing inputs. Args: inputs: map from feature keys to raw not-yet-transformed features. Returns: Map from string feature key to transformed feature operations. Here is the function: def preprocessing_fn(inputs): """tf.transform's callback function for preprocessing inputs. 
Args: inputs: map from feature keys to raw not-yet-transformed features. Returns: Map from string feature key to transformed feature operations. """ outputs = {} for key in _DENSE_FLOAT_FEATURE_KEYS: # If sparse make it dense, setting nan's to 0 or '', and apply zscore. outputs[_transformed_name(key)] = tft.scale_to_z_score( _fill_in_missing(inputs[key])) for key in _VOCAB_FEATURE_KEYS: # Build a vocabulary for this feature. outputs[_transformed_name(key)] = tft.compute_and_apply_vocabulary( _fill_in_missing(inputs[key]), top_k=_VOCAB_SIZE, num_oov_buckets=_OOV_SIZE) for key in _BUCKET_FEATURE_KEYS: outputs[_transformed_name(key)] = tft.bucketize( _fill_in_missing(inputs[key]), _FEATURE_BUCKET_COUNT) for key in _CATEGORICAL_FEATURE_KEYS: outputs[_transformed_name(key)] = _fill_in_missing(inputs[key]) # Was this passenger a big tipper? taxi_fare = _fill_in_missing(inputs[_FARE_KEY]) tips = _fill_in_missing(inputs[_LABEL_KEY]) outputs[_transformed_name(_LABEL_KEY)] = tf.compat.v1.where( tf.math.is_nan(taxi_fare), tf.cast(tf.zeros_like(taxi_fare), tf.int64), # Test if the tip was > 20% of the fare. tf.cast( tf.greater(tips, tf.multiply(taxi_fare, tf.constant(0.2))), tf.int64)) return outputs
tf.transform's callback function for preprocessing inputs. Args: inputs: map from feature keys to raw not-yet-transformed features. Returns: Map from string feature key to transformed feature operations.
166,162
from typing import List import tensorflow as tf from tensorflow import estimator as tf_estimator import tensorflow_model_analysis as tfma import tensorflow_transform as tft from tensorflow_transform.tf_metadata import schema_utils from tfx.components.trainer.fn_args_utils import DataAccessor from tfx_bsl.tfxio import dataset_options def _build_estimator(config, hidden_units=None, warm_start_from=None): """Build an estimator for predicting the tipping behavior of taxi riders. Args: config: tf.contrib.learn.RunConfig defining the runtime environment for the estimator (including model_dir). hidden_units: [int], the layer sizes of the DNN (input layer first) warm_start_from: Optional directory to warm start from. Returns: A dict of the following: - estimator: The estimator that will be used for training and eval. - train_spec: Spec for training. - eval_spec: Spec for eval. - eval_input_receiver_fn: Input function for eval. """ real_valued_columns = [ tf.feature_column.numeric_column(key, shape=()) for key in _transformed_names(_DENSE_FLOAT_FEATURE_KEYS) ] categorical_columns = [ tf.feature_column.categorical_column_with_identity( key, num_buckets=_VOCAB_SIZE + _OOV_SIZE, default_value=0) for key in _transformed_names(_VOCAB_FEATURE_KEYS) ] categorical_columns += [ tf.feature_column.categorical_column_with_identity( key, num_buckets=_FEATURE_BUCKET_COUNT, default_value=0) for key in _transformed_names(_BUCKET_FEATURE_KEYS) ] categorical_columns += [ tf.feature_column.categorical_column_with_identity( # pylint: disable=g-complex-comprehension key, num_buckets=num_buckets, default_value=0) for key, num_buckets in zip( _transformed_names(_CATEGORICAL_FEATURE_KEYS), _MAX_CATEGORICAL_FEATURE_VALUES) ] return tf_estimator.DNNLinearCombinedClassifier( config=config, linear_feature_columns=categorical_columns, dnn_feature_columns=real_valued_columns, dnn_hidden_units=hidden_units or [100, 70, 50, 25], warm_start_from=warm_start_from) def 
_example_serving_receiver_fn(transform_output, schema): """Build the serving in inputs. Args: transform_output: a `tft.TFTransformOutput` object. schema: the schema of the input data. Returns: Tensorflow graph which parses examples, applying tf-transform to them. """ raw_feature_spec = _get_raw_feature_spec(schema) raw_feature_spec.pop(_LABEL_KEY) raw_input_fn = tf_estimator.export.build_parsing_serving_input_receiver_fn( raw_feature_spec, default_batch_size=None) serving_input_receiver = raw_input_fn() _, transformed_features = transform_output.transform_raw_features( serving_input_receiver.features, drop_unused_features=True) return tf_estimator.export.ServingInputReceiver( transformed_features, serving_input_receiver.receiver_tensors) def _eval_input_receiver_fn(transform_output, schema): """Build everything needed for the tf-model-analysis to run the model. Args: transform_output: a `tft.TFTransformOutput` object. schema: the schema of the input data. Returns: EvalInputReceiver function, which contains: - Tensorflow graph which parses raw untransformed features, applies the tf-transform preprocessing operators. - Set of raw, untransformed features. - Label against which predictions will be compared. """ # Notice that the inputs are raw features, not transformed features here. raw_feature_spec = _get_raw_feature_spec(schema) serialized_tf_example = tf.compat.v1.placeholder( dtype=tf.string, shape=[None], name='input_example_tensor') # Add a parse_example operator to the tensorflow graph, which will parse # raw, untransformed, tf examples. features = tf.io.parse_example( serialized=serialized_tf_example, features=raw_feature_spec) # Now that we have our raw examples, process them through the tf-transform # function computed during the preprocessing step. _, transformed_features = transform_output.transform_raw_features( features, drop_unused_features=True) # The key name MUST be 'examples'. 
receiver_tensors = {'examples': serialized_tf_example} # NOTE: Model is driven by transformed features (since training works on the # materialized output of TFT, but slicing will happen on raw features. features.update(transformed_features) return tfma.export.EvalInputReceiver( features=features, receiver_tensors=receiver_tensors, labels=transformed_features[_transformed_name(_LABEL_KEY)]) def _input_fn(file_pattern: List[str], data_accessor: DataAccessor, tf_transform_output: tft.TFTransformOutput, batch_size: int = 200) -> tf.data.Dataset: """Generates features and label for tuning/training. Args: file_pattern: List of paths or patterns of input tfrecord files. data_accessor: DataAccessor for converting input to RecordBatch. tf_transform_output: A TFTransformOutput. batch_size: representing the number of consecutive elements of returned dataset to combine in a single batch Returns: A dataset that contains (features, indices) tuple where features is a dictionary of Tensors, and indices is a single Tensor of label indices. """ return data_accessor.tf_dataset_factory( file_pattern, dataset_options.TensorFlowDatasetOptions( batch_size=batch_size, label_key=_transformed_name(_LABEL_KEY)), tf_transform_output.transformed_metadata.schema) The provided code snippet includes necessary dependencies for implementing the `trainer_fn` function. Write a Python function `def trainer_fn(trainer_fn_args, schema)` to solve the following problem: Build the estimator using the high level API. Args: trainer_fn_args: Holds args used to train the model as name/value pairs. schema: Holds the schema of the training examples. Returns: A dict of the following: - estimator: The estimator that will be used for training and eval. - train_spec: Spec for training. - eval_spec: Spec for eval. - eval_input_receiver_fn: Input function for eval. Here is the function: def trainer_fn(trainer_fn_args, schema): """Build the estimator using the high level API. 
Args: trainer_fn_args: Holds args used to train the model as name/value pairs. schema: Holds the schema of the training examples. Returns: A dict of the following: - estimator: The estimator that will be used for training and eval. - train_spec: Spec for training. - eval_spec: Spec for eval. - eval_input_receiver_fn: Input function for eval. """ # Number of nodes in the first layer of the DNN first_dnn_layer_size = 100 num_dnn_layers = 4 dnn_decay_factor = 0.7 train_batch_size = 40 eval_batch_size = 40 tf_transform_output = tft.TFTransformOutput(trainer_fn_args.transform_output) train_input_fn = lambda: _input_fn( # pylint: disable=g-long-lambda trainer_fn_args.train_files, trainer_fn_args.data_accessor, tf_transform_output, batch_size=train_batch_size) eval_input_fn = lambda: _input_fn( # pylint: disable=g-long-lambda trainer_fn_args.eval_files, trainer_fn_args.data_accessor, tf_transform_output, batch_size=eval_batch_size) train_spec = tf_estimator.TrainSpec( train_input_fn, max_steps=trainer_fn_args.train_steps) serving_receiver_fn = ( lambda: _example_serving_receiver_fn(tf_transform_output, schema)) exporter = tf_estimator.FinalExporter('chicago-taxi', serving_receiver_fn) eval_spec = tf_estimator.EvalSpec( eval_input_fn, steps=trainer_fn_args.eval_steps, exporters=[exporter], name='chicago-taxi-eval') run_config = tf_estimator.RunConfig( save_checkpoints_steps=999, keep_checkpoint_max=1) run_config = run_config.replace(model_dir=trainer_fn_args.serving_model_dir) estimator = _build_estimator( # Construct layers sizes with exponetial decay hidden_units=[ max(2, int(first_dnn_layer_size * dnn_decay_factor**i)) for i in range(num_dnn_layers) ], config=run_config, warm_start_from=trainer_fn_args.base_model) # Create an input receiver for TFMA processing receiver_fn = lambda: _eval_input_receiver_fn(tf_transform_output, schema) return { 'estimator': estimator, 'train_spec': train_spec, 'eval_spec': eval_spec, 'eval_input_receiver_fn': receiver_fn }
Build the estimator using the high level API. Args: trainer_fn_args: Holds args used to train the model as name/value pairs. schema: Holds the schema of the training examples. Returns: A dict of the following: - estimator: The estimator that will be used for training and eval. - train_spec: Spec for training. - eval_spec: Spec for eval. - eval_input_receiver_fn: Input function for eval.
166,163
import datetime import os from tfx.components import CsvExampleGen from tfx.components import Evaluator from tfx.components import ExampleValidator from tfx.components import ModelValidator from tfx.components import Pusher from tfx.components import SchemaGen from tfx.components import StatisticsGen from tfx.components import Trainer from tfx.components import Transform from tfx.examples.custom_components.slack.slack_component.component import SlackComponent from tfx.orchestration import metadata from tfx.orchestration import pipeline from tfx.orchestration.beam.beam_runner import BeamRunner from tfx.proto import evaluator_pb2 from tfx.proto import pusher_pb2 from tfx.proto import trainer_pb2 _data_root = os.path.join(_taxi_root, 'data/simple') _taxi_module_file = os.path.join(_taxi_root, 'taxi_utils_slack.py') _serving_model_dir = os.path.join(_taxi_root, 'serving_model/taxi_slack') _slack_channel_id = os.environ['TFX_SLACK_CHANNEL_ID'] _slack_token = os.environ['TFX_SLACK_BOT_TOKEN'] _pipeline_name = 'chicago_taxi_slack' _pipeline_root = os.path.join(_tfx_root, 'pipelines', _pipeline_name) _metadata_db_root = os.path.join(_tfx_root, 'metadata', _pipeline_name) class SlackComponent(base_component.BaseComponent): """Custom TFX Slack Component. This custom component serves as a bridge between TFX pipeline and human model reviewers to enable review-and-push workflow in model development cycle. It utilizes Slack API to send message to user-defined Slack channel with model URI info and wait for go / no-go decision from the same Slack channel: * To approve the model, a user need to reply the thread sent out by the bot started by SlackComponent with 'lgtm' or 'approve'. * To reject the model, a user need to reply the thread sent out by the bot started by SlackComponent with 'decline' or 'reject'. If the model is approved, an artifact will be created in ML metadata. 
It will be materialized as a file named 'BLESSED' in the directory specified by the URI of 'slack_blessing' artifact. If the model is rejected, an artifact will be created in ML metadata. It will be materialized as a file named 'NOT_BLESSED' in the directory specified by the URI of 'slack_blessing' channel. If no message indicating approve or reject was is received within given within timeout_sec, component will error out. This ensures that model will not be pushed and the validation is still retry-able. The output artifact might contain the following custom properties: - blessed: integer value indicating whether the model is blessed - slack_decision_maker: the user id that made the decision. - slack_decision_message: the message of the decision - slack_decision_channel: the slack channel the decision is made on - slack_decision_thread: the slack thread the decision is made on """ SPEC_CLASS = SlackComponentSpec EXECUTOR_SPEC = executor_spec.ExecutorClassSpec(executor.Executor) def __init__(self, model: types.Channel, model_blessing: types.Channel, slack_token: str, slack_channel_id: str, timeout_sec: int, slack_blessing: Optional[types.Channel] = None): """Construct a SlackComponent. Args: model: A Channel of type `standard_artifacts.Model`, usually produced by a Trainer component. model_blessing: A Channel of type `standard_artifacts.ModelBlessing`, usually produced by a ModelValidator component. slack_token: A token used for setting up connection with Slack server. slack_channel_id: Slack channel id to communicate on. timeout_sec: Seconds to wait for response before default to reject. slack_blessing: Optional output channel of type `standard_artifacts.ModelBlessing` with result of blessing; will be created for you if not specified. 
""" slack_blessing = slack_blessing or types.Channel( type=standard_artifacts.ModelBlessing) spec = SlackComponentSpec( slack_token=slack_token, slack_channel_id=slack_channel_id, timeout_sec=timeout_sec, model=model, model_blessing=model_blessing, slack_blessing=slack_blessing) super().__init__(spec=spec) The provided code snippet includes necessary dependencies for implementing the `_create_pipeline` function. Write a Python function `def _create_pipeline()` to solve the following problem: Implements the chicago taxi pipeline with TFX. Here is the function: def _create_pipeline(): """Implements the chicago taxi pipeline with TFX.""" # Brings data into the pipeline or otherwise joins/converts training data. example_gen = CsvExampleGen(input_base=_data_root) # Computes statistics over data for visualization and example validation. statistics_gen = StatisticsGen(examples=example_gen.outputs['examples']) # Generates schema based on statistics files. schema_gen = SchemaGen(statistics=statistics_gen.outputs['statistics']) # Performs anomaly detection based on statistics and data schema. example_validator = ExampleValidator( statistics=statistics_gen.outputs['statistics'], schema=schema_gen.outputs['schema']) # Performs transformations and feature engineering in training and serving. transform = Transform( examples=example_gen.outputs['examples'], schema=schema_gen.outputs['schema'], module_file=_taxi_module_file) # Uses user-provided Python function that implements a model. trainer = Trainer( module_file=_taxi_module_file, examples=transform.outputs['transformed_examples'], schema=schema_gen.outputs['schema'], transform_graph=transform.outputs['transform_graph'], train_args=trainer_pb2.TrainArgs(num_steps=10000), eval_args=trainer_pb2.EvalArgs(num_steps=5000)) # Uses TFMA to compute a evaluation statistics over features of a model. 
evaluator = Evaluator( examples=example_gen.outputs['examples'], model=trainer.outputs['model'], feature_slicing_spec=evaluator_pb2.FeatureSlicingSpec(specs=[ evaluator_pb2.SingleSlicingSpec( column_for_slicing=['trip_start_hour']) ])) # Performs quality validation of a candidate model (compared to a baseline). model_validator = ModelValidator( examples=example_gen.outputs['examples'], model=trainer.outputs['model']) # This custom component serves as a bridge between pipeline and human model # reviewers to enable review-and-push workflow in model development cycle. It # utilizes Slack API to send message to user-defined Slack channel with model # URI info and wait for go / no-go decision from the same Slack channel: # * To approve the model, users need to reply the thread sent out by the bot # started by SlackComponent with 'lgtm' or 'approve'. # * To reject the model, users need to reply the thread sent out by the bot # started by SlackComponent with 'decline' or 'reject'. slack_validator = SlackComponent( model=trainer.outputs['model'], model_blessing=model_validator.outputs['blessing'], slack_token=_slack_token, slack_channel_id=_slack_channel_id, timeout_sec=3600, ) # Checks whether the model passed the validation steps and pushes the model # to a file destination if check passed. pusher = Pusher( model=trainer.outputs['model'], model_blessing=slack_validator.outputs['slack_blessing'], push_destination=pusher_pb2.PushDestination( filesystem=pusher_pb2.PushDestination.Filesystem( base_directory=_serving_model_dir))) return pipeline.Pipeline( pipeline_name=_pipeline_name, pipeline_root=_pipeline_root, components=[ example_gen, statistics_gen, schema_gen, example_validator, transform, trainer, evaluator, model_validator, slack_validator, pusher ], enable_cache=True, metadata_connection_config=metadata.sqlite_metadata_connection_config( _metadata_db_root), )
Implements the chicago taxi pipeline with TFX.
166,164
import datetime import os from tfx.components import CsvExampleGen from tfx.components import Evaluator from tfx.components import ExampleValidator from tfx.components import ModelValidator from tfx.components import Pusher from tfx.components import SchemaGen from tfx.components import StatisticsGen from tfx.components import Trainer from tfx.components import Transform from tfx.examples.custom_components.slack.slack_component.component import SlackComponent from tfx.orchestration import pipeline from tfx.orchestration.kubeflow import kubeflow_dag_runner from tfx.proto import evaluator_pb2 from tfx.proto import pusher_pb2 from tfx.proto import trainer_pb2 from tfx.utils.dsl_utils import csv_input _data_root = os.path.join(_input_bucket, 'data', 'simple') _taxi_trainer_func = 'example.taxi_utils_slack.trainer_fn' _taxi_transformer_func = 'example.taxi_utils_slack.preprocessing_fn' _serving_model_dir = os.path.join(_taxi_root, 'serving_model/taxi_slack') _slack_channel_id = os.environ['TFX_SLACK_CHANNEL_ID'] _slack_token = os.environ['TFX_SLACK_BOT_TOKEN'] _pipeline_name = 'chicago_taxi_slack_kubeflow' _pipeline_root = os.path.join(_input_bucket, _pipeline_name) class SlackComponent(base_component.BaseComponent): """Custom TFX Slack Component. This custom component serves as a bridge between TFX pipeline and human model reviewers to enable review-and-push workflow in model development cycle. It utilizes Slack API to send message to user-defined Slack channel with model URI info and wait for go / no-go decision from the same Slack channel: * To approve the model, a user need to reply the thread sent out by the bot started by SlackComponent with 'lgtm' or 'approve'. * To reject the model, a user need to reply the thread sent out by the bot started by SlackComponent with 'decline' or 'reject'. If the model is approved, an artifact will be created in ML metadata. 
It will be materialized as a file named 'BLESSED' in the directory specified by the URI of 'slack_blessing' artifact. If the model is rejected, an artifact will be created in ML metadata. It will be materialized as a file named 'NOT_BLESSED' in the directory specified by the URI of 'slack_blessing' channel. If no message indicating approve or reject was is received within given within timeout_sec, component will error out. This ensures that model will not be pushed and the validation is still retry-able. The output artifact might contain the following custom properties: - blessed: integer value indicating whether the model is blessed - slack_decision_maker: the user id that made the decision. - slack_decision_message: the message of the decision - slack_decision_channel: the slack channel the decision is made on - slack_decision_thread: the slack thread the decision is made on """ SPEC_CLASS = SlackComponentSpec EXECUTOR_SPEC = executor_spec.ExecutorClassSpec(executor.Executor) def __init__(self, model: types.Channel, model_blessing: types.Channel, slack_token: str, slack_channel_id: str, timeout_sec: int, slack_blessing: Optional[types.Channel] = None): """Construct a SlackComponent. Args: model: A Channel of type `standard_artifacts.Model`, usually produced by a Trainer component. model_blessing: A Channel of type `standard_artifacts.ModelBlessing`, usually produced by a ModelValidator component. slack_token: A token used for setting up connection with Slack server. slack_channel_id: Slack channel id to communicate on. timeout_sec: Seconds to wait for response before default to reject. slack_blessing: Optional output channel of type `standard_artifacts.ModelBlessing` with result of blessing; will be created for you if not specified. 
""" slack_blessing = slack_blessing or types.Channel( type=standard_artifacts.ModelBlessing) spec = SlackComponentSpec( slack_token=slack_token, slack_channel_id=slack_channel_id, timeout_sec=timeout_sec, model=model, model_blessing=model_blessing, slack_blessing=slack_blessing) super().__init__(spec=spec) The provided code snippet includes necessary dependencies for implementing the `_create_pipeline` function. Write a Python function `def _create_pipeline()` to solve the following problem: Implements the chicago taxi pipeline with TFX. Here is the function: def _create_pipeline(): """Implements the chicago taxi pipeline with TFX.""" examples = csv_input(_data_root) # Brings data into the pipeline or otherwise joins/converts training data. example_gen = CsvExampleGen(input=examples) # Computes statistics over data for visualization and example validation. statistics_gen = StatisticsGen(examples=example_gen.outputs['examples']) # Generates schema based on statistics files. schema_gen = SchemaGen(statistics=statistics_gen.outputs['statistics']) # Performs anomaly detection based on statistics and data schema. example_validator = ExampleValidator( statistics=statistics_gen.outputs['statistics'], schema=schema_gen.outputs['schema']) # Performs transformations and feature engineering in training and serving. transform = Transform( examples=example_gen.outputs['examples'], schema=schema_gen.outputs['schema'], preprocessing_fn=_taxi_transformer_func) # Uses user-provided Python function that implements a model. trainer = Trainer( trainer_fn=_taxi_trainer_func, examples=transform.outputs['transformed_examples'], schema=schema_gen.outputs['schema'], transform_graph=transform.outputs['transform_graph'], train_args=trainer_pb2.TrainArgs(num_steps=10000), eval_args=trainer_pb2.EvalArgs(num_steps=5000)) # Uses TFMA to compute a evaluation statistics over features of a model. 
evaluator = Evaluator( examples=example_gen.outputs['examples'], model=trainer.outputs['model'], feature_slicing_spec=evaluator_pb2.FeatureSlicingSpec(specs=[ evaluator_pb2.SingleSlicingSpec( column_for_slicing=['trip_start_hour']) ])) # Performs quality validation of a candidate model (compared to a baseline). model_validator = ModelValidator( examples=example_gen.outputs['examples'], model=trainer.outputs['model']) # This custom component serves as a bridge between pipeline and human model # reviewers to enable review-and-push workflow in model development cycle. It # utilizes Slack API to send message to user-defined Slack channel with model # URI info and wait for go / no-go decision from the same Slack channel: # * To approve the model, users need to reply the thread sent out by the bot # started by SlackComponent with 'lgtm' or 'approve'. # * To reject the model, users need to reply the thread sent out by the bot # started by SlackComponent with 'decline' or 'reject'. slack_validator = SlackComponent( model=trainer.outputs['model'], model_blessing=model_validator.outputs['blessing'], slack_token=_slack_token, slack_channel_id=_slack_channel_id, timeout_sec=3600, ) # Checks whether the model passed the validation steps and pushes the model # to a file destination if check passed. pusher = Pusher( model=trainer.outputs['model'], model_blessing=slack_validator.outputs['slack_blessing'], push_destination=pusher_pb2.PushDestination( filesystem=pusher_pb2.PushDestination.Filesystem( base_directory=_serving_model_dir))) return pipeline.Pipeline( pipeline_name=_pipeline_name, pipeline_root=_pipeline_root, components=[ example_gen, statistics_gen, schema_gen, example_validator, transform, trainer, evaluator, model_validator, slack_validator, pusher ], enable_cache=True, )
Implements the chicago taxi pipeline with TFX.
166,165
import os from typing import List import absl import flatbuffers import tensorflow as tf import tensorflow_transform as tft from tfx.components.trainer.fn_args_utils import DataAccessor from tfx.components.trainer.fn_args_utils import FnArgs from tfx.components.trainer.rewriting import converters from tfx.components.trainer.rewriting import rewriter from tfx.components.trainer.rewriting import rewriter_factory from tfx.dsl.io import fileio from tfx_bsl.tfxio import dataset_options from tflite_support import metadata_schema_py_generated as _metadata_fb from tflite_support import metadata as _metadata _IMAGE_KEY = 'image' _LABEL_KEY = 'label' def _transformed_name(key): return key + '_xf' The provided code snippet includes necessary dependencies for implementing the `preprocessing_fn` function. Write a Python function `def preprocessing_fn(inputs)` to solve the following problem: tf.transform's callback function for preprocessing inputs. Args: inputs: map from feature keys to raw not-yet-transformed features. Returns: Map from string feature key to transformed feature operations. Here is the function: def preprocessing_fn(inputs): """tf.transform's callback function for preprocessing inputs. Args: inputs: map from feature keys to raw not-yet-transformed features. Returns: Map from string feature key to transformed feature operations. """ outputs = {} # tf.io.decode_png function cannot be applied on a batch of data. # We have to use tf.map_fn image_features = tf.map_fn( lambda x: tf.io.decode_png(x[0], channels=3), inputs[_IMAGE_KEY], dtype=tf.uint8) # image_features = tf.cast(image_features, tf.float32) image_features = tf.image.resize(image_features, [224, 224]) image_features = tf.keras.applications.mobilenet.preprocess_input( image_features) outputs[_transformed_name(_IMAGE_KEY)] = image_features # TODO(b/157064428): Support label transformation for Keras. # Do not apply label transformation as it will result in wrong evaluation. 
outputs[_transformed_name(_LABEL_KEY)] = inputs[_LABEL_KEY] return outputs
tf.transform's callback function for preprocessing inputs. Args: inputs: map from feature keys to raw not-yet-transformed features. Returns: Map from string feature key to transformed feature operations.
166,166
import os from typing import List import absl import flatbuffers import tensorflow as tf import tensorflow_transform as tft from tfx.components.trainer.fn_args_utils import DataAccessor from tfx.components.trainer.fn_args_utils import FnArgs from tfx.components.trainer.rewriting import converters from tfx.components.trainer.rewriting import rewriter from tfx.components.trainer.rewriting import rewriter_factory from tfx.dsl.io import fileio from tfx_bsl.tfxio import dataset_options from tflite_support import metadata_schema_py_generated as _metadata_fb from tflite_support import metadata as _metadata _TRAIN_DATA_SIZE = 128 _TRAIN_BATCH_SIZE = 32 _EVAL_BATCH_SIZE = 32 _FINETUNE_LEARNING_RATE = 7e-6 _CLASSIFIER_EPOCHS = 30 _IMAGE_KEY = 'image' _TFLITE_MODEL_NAME = 'tflite' def _transformed_name(key): return key + '_xf' def _get_serve_image_fn(model): """Returns a function that feeds the input tensor into the model.""" def serve_image_fn(image_tensor): """Returns the output to be used in the serving signature. Args: image_tensor: A tensor represeting input image. The image should have 3 channels. Returns: The model's predicton on input image tensor """ return model(image_tensor) return serve_image_fn def _input_fn(file_pattern: List[str], data_accessor: DataAccessor, tf_transform_output: tft.TFTransformOutput, is_train: bool = False, batch_size: int = 200) -> tf.data.Dataset: """Generates features and label for tuning/training. Args: file_pattern: List of paths or patterns of input tfrecord files. data_accessor: DataAccessor for converting input to RecordBatch. tf_transform_output: A TFTransformOutput. is_train: Whether the input dataset is train split or not. batch_size: representing the number of consecutive elements of returned dataset to combine in a single batch Returns: A dataset that contains (features, indices) tuple where features is a dictionary of Tensors, and indices is a single Tensor of label indices. 
""" dataset = data_accessor.tf_dataset_factory( file_pattern, dataset_options.TensorFlowDatasetOptions( batch_size=batch_size, label_key=_transformed_name(_LABEL_KEY)), tf_transform_output.transformed_metadata.schema) # Apply data augmentation. We have to do data augmentation here because # we need to apply data agumentation on-the-fly during training. If we put # it in Transform, it will only be applied once on the whole dataset, which # will lose the point of data augmentation. if is_train: dataset = dataset.map(lambda x, y: (_data_augmentation(x), y)) return dataset def _freeze_model_by_percentage(model: tf.keras.Model, percentage: float): """Freeze part of the model based on specified percentage. Args: model: The keras model need to be partially frozen percentage: the percentage of layers to freeze Raises: ValueError: Invalid values. """ if percentage < 0 or percentage > 1: raise ValueError('Freeze percentage should between 0.0 and 1.0') if not model.trainable: raise ValueError( 'The model is not trainable, please set model.trainable to True') num_layers = len(model.layers) num_layers_to_freeze = int(num_layers * percentage) for idx, layer in enumerate(model.layers): if idx < num_layers_to_freeze: layer.trainable = False else: layer.trainable = True def _build_keras_model() -> tf.keras.Model: """Creates a Image classification model with MobileNet backbone. Returns: The image classifcation Keras Model and the backbone MobileNet model """ # We create a MobileNet model with weights pre-trained on ImageNet. # We remove the top classification layer of the MobileNet, which was # used for classifying ImageNet objects. We will add our own classification # layer for CIFAR10 later. 
We use average pooling at the last convolution # layer to get a 1D vector for classifcation, which is consistent with the # origin MobileNet setup base_model = tf.keras.applications.MobileNet( input_shape=(224, 224, 3), include_top=False, weights='imagenet', pooling='avg') base_model.input_spec = None # We add a Dropout layer at the top of MobileNet backbone we just created to # prevent overfiting, and then a Dense layer to classifying CIFAR10 objects model = tf.keras.Sequential([ tf.keras.layers.InputLayer( input_shape=(224, 224, 3), name=_transformed_name(_IMAGE_KEY)), base_model, tf.keras.layers.Dropout(0.1), tf.keras.layers.Dense(10) ]) # Freeze the whole MobileNet backbone to first train the top classifer only _freeze_model_by_percentage(base_model, 1.0) model.compile( loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True), optimizer=tf.keras.optimizers.RMSprop(lr=_CLASSIFIER_LEARNING_RATE), metrics=['sparse_categorical_accuracy']) model.summary(print_fn=absl.logging.info) return model, base_model def _write_metadata(model_path: str, label_map_path: str, mean: List[float], std: List[float]): """Add normalization option and label map TFLite metadata to the model. Args: model_path: The path of the TFLite model label_map_path: The path of the label map file mean: The mean value used to normalize input image tensor std: The standard deviation used to normalize input image tensor """ # Creates flatbuffer for model information. model_meta = _metadata_fb.ModelMetadataT() # Creates flatbuffer for model input metadata. # Here we add the input normalization info to input metadata. 
input_meta = _metadata_fb.TensorMetadataT() input_normalization = _metadata_fb.ProcessUnitT() input_normalization.optionsType = ( _metadata_fb.ProcessUnitOptions.NormalizationOptions) input_normalization.options = _metadata_fb.NormalizationOptionsT() input_normalization.options.mean = mean input_normalization.options.std = std input_meta.processUnits = [input_normalization] # Creates flatbuffer for model output metadata. # Here we add label file to output metadata. output_meta = _metadata_fb.TensorMetadataT() label_file = _metadata_fb.AssociatedFileT() label_file.name = os.path.basename(label_map_path) label_file.type = _metadata_fb.AssociatedFileType.TENSOR_AXIS_LABELS output_meta.associatedFiles = [label_file] # Creates subgraph to contain input and output information, # and add subgraph to the model information. subgraph = _metadata_fb.SubGraphMetadataT() subgraph.inputTensorMetadata = [input_meta] subgraph.outputTensorMetadata = [output_meta] model_meta.subgraphMetadata = [subgraph] # Serialize the model metadata buffer we created above using flatbuffer # builder. b = flatbuffers.Builder(0) b.Finish( model_meta.Pack(b), _metadata.MetadataPopulator.METADATA_FILE_IDENTIFIER) metadata_buf = b.Output() # Populates metadata and label file to the model file. populator = _metadata.MetadataPopulator.with_model_file(model_path) populator.load_metadata_buffer(metadata_buf) populator.load_associated_files([label_map_path]) populator.populate() class FnArgs: """Args to pass to user defined training/tuning function(s). Attributes: working_dir: Working dir. train_files: A list of patterns for train files. eval_files: A list of patterns for eval files. train_steps: Number of train steps. eval_steps: Number of eval steps. schema_path: A single uri for schema file. Will be None if not specified. schema_file: Deprecated, use `schema_path` instead. transform_graph_path: An optional single uri for transform graph produced by TFT. Will be None if not specified. 
transform_output: Deprecated, use `transform_graph_path` instead. data_accessor: Contains factories that can create tf.data.Datasets or other means to access the train/eval data. They provide a uniform way of accessing data, regardless of how the data is stored on disk. serving_model_dir: A single uri for the output directory of the serving model. eval_model_dir: A single uri for the output directory of the eval model. Note that this is estimator only, Keras doesn't require it for TFMA. model_run_dir: A single uri for the output directory of model training related files. base_model: An optional base model path that will be used for this training. hyperparameters: An optional keras_tuner.HyperParameters config. custom_config: An optional dictionary passed to the component. """ working_dir = attr.ib(type=str, default=None) train_files = attr.ib(type=List[str], default=None) eval_files = attr.ib(type=List[str], default=None) train_steps = attr.ib(type=int, default=None) eval_steps = attr.ib(type=int, default=None) schema_path = attr.ib(type=str, default=None) schema_file = attr.ib(type=str, default=None) transform_graph_path = attr.ib(type=str, default=None) transform_output = attr.ib(type=str, default=None) data_accessor = attr.ib(type=DataAccessor, default=None) serving_model_dir = attr.ib(type=str, default=None) eval_model_dir = attr.ib(type=str, default=None) model_run_dir = attr.ib(type=str, default=None) base_model = attr.ib(type=str, default=None) hyperparameters = attr.ib(type=Dict[str, Any], default=None) custom_config = attr.ib(type=Dict[str, Any], default=None) The provided code snippet includes necessary dependencies for implementing the `run_fn` function. Write a Python function `def run_fn(fn_args: FnArgs)` to solve the following problem: Train the model based on given args. Args: fn_args: Holds args used to train the model as name/value pairs. Raises: ValueError: if invalid inputs. 
Here is the function: def run_fn(fn_args: FnArgs): """Train the model based on given args. Args: fn_args: Holds args used to train the model as name/value pairs. Raises: ValueError: if invalid inputs. """ tf_transform_output = tft.TFTransformOutput(fn_args.transform_output) train_dataset = _input_fn( fn_args.train_files, fn_args.data_accessor, tf_transform_output, is_train=True, batch_size=_TRAIN_BATCH_SIZE) eval_dataset = _input_fn( fn_args.eval_files, fn_args.data_accessor, tf_transform_output, is_train=False, batch_size=_EVAL_BATCH_SIZE) model, base_model = _build_keras_model() absl.logging.info('Tensorboard logging to {}'.format(fn_args.model_run_dir)) # Write logs to path tensorboard_callback = tf.keras.callbacks.TensorBoard( log_dir=fn_args.model_run_dir, update_freq='epoch') # Our training regime has two phases: we first freeze the backbone and train # the newly added classifier only, then unfreeze part of the backbone and # fine-tune with classifier jointly. steps_per_epoch = int(_TRAIN_DATA_SIZE / _TRAIN_BATCH_SIZE) total_epochs = int(fn_args.train_steps / steps_per_epoch) if _CLASSIFIER_EPOCHS > total_epochs: raise ValueError('Classifier epochs is greater than the total epochs') absl.logging.info('Start training the top classifier') model.fit( train_dataset, epochs=_CLASSIFIER_EPOCHS, steps_per_epoch=steps_per_epoch, validation_data=eval_dataset, validation_steps=fn_args.eval_steps, callbacks=[tensorboard_callback]) absl.logging.info('Start fine-tuning the model') # Unfreeze the top MobileNet layers and do joint fine-tuning _freeze_model_by_percentage(base_model, 0.9) # We need to recompile the model because layer properties have changed model.compile( loss='sparse_categorical_crossentropy', optimizer=tf.keras.optimizers.RMSprop(lr=_FINETUNE_LEARNING_RATE), metrics=['sparse_categorical_accuracy']) model.summary(print_fn=absl.logging.info) model.fit( train_dataset, initial_epoch=_CLASSIFIER_EPOCHS, epochs=total_epochs, steps_per_epoch=steps_per_epoch, 
validation_data=eval_dataset, validation_steps=fn_args.eval_steps, callbacks=[tensorboard_callback]) # Prepare the TFLite model used for serving in MLKit signatures = { 'serving_default': _get_serve_image_fn(model).get_concrete_function( tf.TensorSpec( shape=[None, 224, 224, 3], dtype=tf.float32, name=_transformed_name(_IMAGE_KEY))) } temp_saving_model_dir = os.path.join(fn_args.serving_model_dir, 'temp') model.save(temp_saving_model_dir, save_format='tf', signatures=signatures) tfrw = rewriter_factory.create_rewriter( rewriter_factory.TFLITE_REWRITER, name='tflite_rewriter') converters.rewrite_saved_model(temp_saving_model_dir, fn_args.serving_model_dir, tfrw, rewriter.ModelType.TFLITE_MODEL) # Add necessary TFLite metadata to the model in order to use it within MLKit # TODO(dzats@): Handle label map file path more properly, currently # hard-coded. tflite_model_path = os.path.join(fn_args.serving_model_dir, _TFLITE_MODEL_NAME) # TODO(dzats@): Extend the TFLite rewriter to be able to add TFLite metadata #@ to the model. _write_metadata( model_path=tflite_model_path, label_map_path=fn_args.custom_config['labels_path'], mean=[127.5], std=[127.5]) fileio.rmtree(temp_saving_model_dir)
Train the model based on given args. Args: fn_args: Holds args used to train the model as name/value pairs. Raises: ValueError: if invalid inputs.
166,167
import os from typing import List import absl import tensorflow_model_analysis as tfma from tfx.components import Evaluator from tfx.components import ExampleValidator from tfx.components import ImportExampleGen from tfx.components import Pusher from tfx.components import SchemaGen from tfx.components import StatisticsGen from tfx.components import Trainer from tfx.components import Transform from tfx.dsl.components.common import resolver from tfx.dsl.experimental import latest_blessed_model_resolver from tfx.orchestration import metadata from tfx.orchestration import pipeline from tfx.orchestration.beam.beam_dag_runner import BeamDagRunner from tfx.proto import example_gen_pb2 from tfx.proto import pusher_pb2 from tfx.proto import trainer_pb2 from tfx.types import Channel from tfx.types.standard_artifacts import Model from tfx.types.standard_artifacts import ModelBlessing class Model(_TfxArtifact): """Artifact that contains the actual persisted model. Training components stores the trained model like a saved model in this artifact. A `Model` artifact contains serialization of the trained model in one or more formats, each suitable for different usage (e.g. serving, evaluation), and serving environments. * File structure: - `{uri}/` - `Format-Serving/`: Model exported for serving. - `saved_model.pb` - Other actual model files. - `Format-TFMA/`: Model exported for evaluation. - `saved_model.pb` - Other actual model files. * Commonly used custom properties of the Model artifact: """ TYPE_NAME = 'Model' TYPE_ANNOTATION = SystemModel class ModelBlessing(_TfxArtifact): """Artifact that contains the evaluation of a trained model. This artifact is usually used with Conditional when determining whether to push this model on service or not. ```python # Run pusher if evaluator has blessed the model. with tfx.dsl.Cond(evaluator.outputs['blessing'].future() [0].custom_property('blessed') == 1): pusher = Pusher(...) 
``` * File structure: - `{uri}/` - `BLESSED`: if the evaluator has blessed the model. - `NOT_BLESSED`: if the evaluator has not blessed the model. - See tfx/components/evaluator/executor.py for how to write ModelBlessing. * Commonly used custom properties of the ModelBlessing artifact: - `blessed`: int value that represents whether the evaluator has blessed its model or not. """ TYPE_NAME = 'ModelBlessing' The provided code snippet includes necessary dependencies for implementing the `_create_pipeline` function. Write a Python function `def _create_pipeline(pipeline_name: str, pipeline_root: str, data_root: str, module_file: str, serving_model_dir_lite: str, metadata_path: str, labels_path: str, beam_pipeline_args: List[str], accuracy_threshold: float = 0.55) -> pipeline.Pipeline` to solve the following problem: Implements the CIFAR10 image classification pipeline using TFX. Here is the function: def _create_pipeline(pipeline_name: str, pipeline_root: str, data_root: str, module_file: str, serving_model_dir_lite: str, metadata_path: str, labels_path: str, beam_pipeline_args: List[str], accuracy_threshold: float = 0.55) -> pipeline.Pipeline: """Implements the CIFAR10 image classification pipeline using TFX.""" # This is needed for datasets with pre-defined splits # Change the pattern argument to train_whole/* and test_whole/* to train # on the whole CIFAR-10 dataset input_config = example_gen_pb2.Input(splits=[ example_gen_pb2.Input.Split(name='train', pattern='train/*'), example_gen_pb2.Input.Split(name='eval', pattern='test/*') ]) # Brings data into the pipeline. example_gen = ImportExampleGen( input_base=data_root, input_config=input_config) # Computes statistics over data for visualization and example validation. statistics_gen = StatisticsGen(examples=example_gen.outputs['examples']) # Generates schema based on statistics files. 
schema_gen = SchemaGen( statistics=statistics_gen.outputs['statistics'], infer_feature_shape=True) # Performs anomaly detection based on statistics and data schema. example_validator = ExampleValidator( statistics=statistics_gen.outputs['statistics'], schema=schema_gen.outputs['schema']) # Performs transformations and feature engineering in training and serving. transform = Transform( examples=example_gen.outputs['examples'], schema=schema_gen.outputs['schema'], module_file=module_file) # Uses user-provided Python function that trains a model. # When traning on the whole dataset, use 18744 for train steps, 156 for eval # steps. 18744 train steps correspond to 24 epochs on the whole train set, and # 156 eval steps correspond to 1 epoch on the whole test set. The # configuration below is for training on the dataset we provided in the data # folder, which has 128 train and 128 test samples. The 160 train steps # correspond to 40 epochs on this tiny train set, and 4 eval steps correspond # to 1 epoch on this tiny test set. trainer = Trainer( module_file=module_file, examples=transform.outputs['transformed_examples'], transform_graph=transform.outputs['transform_graph'], schema=schema_gen.outputs['schema'], train_args=trainer_pb2.TrainArgs(num_steps=160), eval_args=trainer_pb2.EvalArgs(num_steps=4), custom_config={'labels_path': labels_path}) # Get the latest blessed model for model validation. model_resolver = resolver.Resolver( strategy_class=latest_blessed_model_resolver.LatestBlessedModelResolver, model=Channel(type=Model), model_blessing=Channel( type=ModelBlessing)).with_id('latest_blessed_model_resolver') # Uses TFMA to compute evaluation statistics over features of a model and # perform quality validation of a candidate model (compare to a baseline). 
eval_config = tfma.EvalConfig( model_specs=[tfma.ModelSpec(label_key='label_xf', model_type='tf_lite')], slicing_specs=[tfma.SlicingSpec()], metrics_specs=[ tfma.MetricsSpec(metrics=[ tfma.MetricConfig( class_name='SparseCategoricalAccuracy', threshold=tfma.MetricThreshold( value_threshold=tfma.GenericValueThreshold( lower_bound={'value': accuracy_threshold}), # Change threshold will be ignored if there is no # baseline model resolved from MLMD (first run). change_threshold=tfma.GenericChangeThreshold( direction=tfma.MetricDirection.HIGHER_IS_BETTER, absolute={'value': -1e-3}))) ]) ]) # Uses TFMA to compute the evaluation statistics over features of a model. # We evaluate using the materialized examples that are output by Transform # because # 1. the decoding_png function currently performed within Transform are not # compatible with TFLite. # 2. MLKit requires deserialized (float32) tensor image inputs # Note that for deployment, the same logic that is performed within Transform # must be reproduced client-side. evaluator = Evaluator( examples=transform.outputs['transformed_examples'], model=trainer.outputs['model'], baseline_model=model_resolver.outputs['model'], eval_config=eval_config) # Checks whether the model passed the validation steps and pushes the model # to a file destination if check passed. pusher = Pusher( model=trainer.outputs['model'], model_blessing=evaluator.outputs['blessing'], push_destination=pusher_pb2.PushDestination( filesystem=pusher_pb2.PushDestination.Filesystem( base_directory=serving_model_dir_lite))) components = [ example_gen, statistics_gen, schema_gen, example_validator, transform, trainer, model_resolver, evaluator, pusher ] return pipeline.Pipeline( pipeline_name=pipeline_name, pipeline_root=pipeline_root, components=components, enable_cache=True, metadata_connection_config=metadata.sqlite_metadata_connection_config( metadata_path), beam_pipeline_args=beam_pipeline_args)
Implements the CIFAR10 image classification pipeline using TFX.
166,168
import os


def make_extra_packages_docker_image():
  """Returns the extra pip packages baked into the tfx docker image."""
  # Packages needed for tfx docker image.
  image_packages = [
      # TODO(b/304892416): Migrate from KFP SDK v1 to v2.
      'kfp>=1.8.14,<2',
      'kfp-pipeline-spec>=0.1.10,<0.2',
      'mmh>=2.2,<3',
      'python-snappy>=0.5,<0.6',
      # Required for tfx/examples/penguin/penguin_utils_cloud_tuner.py
      'tensorflow-cloud>=0.1,<0.2',
      'tensorflow-io>=0.9.0, <=0.24.0',
  ]
  return image_packages
null
166,169
import os def make_extra_packages_test(): """Prepare extra packages needed for running unit tests.""" # Note: It is okay to pin packages to exact versions in this list to minimize # conflicts. return make_extra_packages_airflow() + make_extra_packages_kfp() + [ 'pytest>=5,<7', ] def make_extra_packages_tfjs(): # Packages needed for tfjs. return [ 'tensorflowjs>=4.5,<5', ] def make_extra_packages_tflite_support(): # Required for tfx/examples/cifar10 return [ 'flatbuffers>=1.12', 'tflite-support>=0.4.3,<0.4.5', ] def make_extra_packages_tf_ranking(): # Packages needed for tf-ranking which is used in tfx/examples/ranking. return [ 'tensorflow-ranking>=0.5,<0.6', 'struct2tensor' + select_constraint( default='>=0.45,<0.46', nightly='>=0.46.0.dev', git_master='@git+https://github.com/google/struct2tensor@master'), ] def make_extra_packages_tfdf(): # Packages needed for tensorflow-decision-forests. # Required for tfx/examples/penguin/penguin_utils_tfdf_experimental.py return [ # NOTE: TFDF 1.0.1 is only compatible with TF 2.10.x. 'tensorflow-decision-forests>=1.0.1,<1.9', ] def make_extra_packages_flax(): # Packages needed for the flax example. # Required for the experimental tfx/examples using Flax, e.g., # tfx/examples/penguin. return [ # TODO(b/324157691): Upgrade jax once we upgrade TF version. 'jax<0.4.24', 'jaxlib<0.4.24', 'flax<1', 'optax<1', ] def make_extra_packages_examples(): # Extra dependencies required for tfx/examples. 
return [ # Required for presto ExampleGen custom component in # tfx/examples/custom_components/presto_example_gen 'presto-python-client>=0.7,<0.8', # Required for slack custom component in # tfx/examples/custom_components/slack 'slackclient>=2.8.2,<3', 'websocket-client>=0.57,<1', # Required for bert examples in tfx/examples/bert 'tensorflow-text>=1.15.1,<3', # Required for tfx/examples/penguin/experimental # LINT.IfChange 'scikit-learn>=1.0,<2', # LINT.ThenChange( # examples/penguin/experimental/penguin_pipeline_sklearn_gcp.py) # Required for tfx/examples/penguin/penguin_utils_cloud_tuner.py 'tensorflow-cloud>=0.1,<0.2', ] def make_extra_packages_all(): # All extra dependencies. return [ *make_extra_packages_test(), *make_extra_packages_tfjs(), *make_extra_packages_tflite_support(), *make_extra_packages_tf_ranking(), *make_extra_packages_tfdf(), *make_extra_packages_flax(), *make_extra_packages_examples(), ]
null
166,170
import collections import shutil import tempfile import time from absl import logging import apache_beam as beam from apache_beam.utils import shared import tensorflow as tf import tensorflow_transform as tft from tensorflow_transform import graph_tools from tensorflow_transform import impl_helper import tensorflow_transform.beam as tft_beam from tensorflow_transform.beam import impl as tft_beam_impl from tensorflow_transform.saved import saved_transform_io from tensorflow_transform.saved import saved_transform_io_v2 from tensorflow_transform.tf_metadata import dataset_metadata from tensorflow_transform.tf_metadata import schema_utils import tfx from tfx.benchmarks import benchmark_utils from tfx.benchmarks import benchmark_base from tfx_bsl.coders import example_coder from tfx_bsl.tfxio import tensor_adapter from tfx_bsl.tfxio import tf_example_record class _AnalyzeAndTransformDataset(beam.PTransform): """PTransform to run AnalyzeAndTransformDataset.""" def __init__(self, dataset, tfxio, preprocessing_fn, transform_input_dataset_metadata, force_tf_compat_v1=True, max_num_examples=None, generate_dataset=False): """Constructor. Args: dataset: BenchmarkDataset object. tfxio: A `tfx_bsl.TFXIO` instance. preprocessing_fn: preprocessing_fn. transform_input_dataset_metadata: dataset_metadata.DatasetMetadata. force_tf_compat_v1: If False then Transform will use its native TF2 version, if True then Transform will use its TF1 version. max_num_examples: Max number of examples to read from the dataset. generate_dataset: If True, generates the raw dataset and appropriate intermediate outputs (just the TFT SavedModel for now) necessary for other benchmarks. 
""" self._dataset = dataset self._tfxio = tfxio self._preprocessing_fn = preprocessing_fn self._transform_input_dataset_metadata = transform_input_dataset_metadata self._force_tf_compat_v1 = force_tf_compat_v1 self._max_num_examples = max_num_examples self._generate_dataset = generate_dataset def expand(self, pipeline): # TODO(b/147620802): Consider making this (and other parameters) # configurable to test more variants (e.g. with and without deep-copy # optimisation, with and without cache, etc). with tft_beam.Context( temp_dir=tempfile.mkdtemp(), force_tf_compat_v1=self._force_tf_compat_v1): raw_data = ( pipeline | "ReadDataset" >> beam.Create( self._dataset.read_raw_dataset( deserialize=False, limit=self._max_num_examples)) | "Decode" >> self._tfxio.BeamSource()) transform_fn, output_metadata = ( (raw_data, self._tfxio.TensorAdapterConfig()) | "AnalyzeDataset" >> tft_beam.AnalyzeDataset(self._preprocessing_fn)) if self._generate_dataset: _ = transform_fn | "CopySavedModel" >> _CopySavedModel( dest_path=self._dataset.tft_saved_model_path( self._force_tf_compat_v1)) (transformed_dataset, transformed_metadata) = ( ((raw_data, self._tfxio.TensorAdapterConfig()), (transform_fn, output_metadata)) | "TransformDataset" >> tft_beam.TransformDataset(output_record_batches=True)) return transformed_dataset, transformed_metadata def _get_common_variables(dataset, force_tf_compat_v1): """Returns metadata schema, preprocessing fn, input dataset metadata.""" tf_metadata_schema = benchmark_utils.read_schema( dataset.tf_metadata_schema_path()) preprocessing_fn = dataset.tft_preprocessing_fn() feature_spec = schema_utils.schema_as_feature_spec( tf_metadata_schema).feature_spec type_spec = impl_helper.get_type_specs_from_feature_specs(feature_spec) transform_input_columns = ( tft.get_transform_input_columns( preprocessing_fn, type_spec, force_tf_compat_v1=force_tf_compat_v1)) transform_input_dataset_metadata = dataset_metadata.DatasetMetadata( 
schema_utils.schema_from_feature_spec({ feature: feature_spec[feature] for feature in transform_input_columns })) tfxio = tf_example_record.TFExampleBeamRecord( physical_format="tfexamples", schema=transform_input_dataset_metadata.schema, telemetry_descriptors=["TFTransformBenchmark"]) return CommonVariablesTuple( tf_metadata_schema=tf_metadata_schema, preprocessing_fn=preprocessing_fn, transform_input_dataset_metadata=transform_input_dataset_metadata, tfxio=tfxio) The provided code snippet includes necessary dependencies for implementing the `regenerate_intermediates_for_dataset` function. Write a Python function `def regenerate_intermediates_for_dataset(dataset, force_tf_compat_v1=True, max_num_examples=None)` to solve the following problem: Regenerate intermediate outputs required for the benchmark. Here is the function: def regenerate_intermediates_for_dataset(dataset, force_tf_compat_v1=True, max_num_examples=None): """Regenerate intermediate outputs required for the benchmark.""" common_variables = _get_common_variables(dataset, force_tf_compat_v1) logging.info("Regenerating intermediate outputs required for benchmark.") with beam.Pipeline() as p: _ = p | _AnalyzeAndTransformDataset( dataset, common_variables.tfxio, common_variables.preprocessing_fn, common_variables.transform_input_dataset_metadata, force_tf_compat_v1=force_tf_compat_v1, max_num_examples=max_num_examples, generate_dataset=True) logging.info("Intermediate outputs regenerated.")
Regenerate intermediate outputs required for the benchmark.
166,171
import collections import shutil import tempfile import time from absl import logging import apache_beam as beam from apache_beam.utils import shared import tensorflow as tf import tensorflow_transform as tft from tensorflow_transform import graph_tools from tensorflow_transform import impl_helper import tensorflow_transform.beam as tft_beam from tensorflow_transform.beam import impl as tft_beam_impl from tensorflow_transform.saved import saved_transform_io from tensorflow_transform.saved import saved_transform_io_v2 from tensorflow_transform.tf_metadata import dataset_metadata from tensorflow_transform.tf_metadata import schema_utils import tfx from tfx.benchmarks import benchmark_utils from tfx.benchmarks import benchmark_base from tfx_bsl.coders import example_coder from tfx_bsl.tfxio import tensor_adapter from tfx_bsl.tfxio import tf_example_record def _get_common_variables(dataset, force_tf_compat_v1): """Returns metadata schema, preprocessing fn, input dataset metadata.""" tf_metadata_schema = benchmark_utils.read_schema( dataset.tf_metadata_schema_path()) preprocessing_fn = dataset.tft_preprocessing_fn() feature_spec = schema_utils.schema_as_feature_spec( tf_metadata_schema).feature_spec type_spec = impl_helper.get_type_specs_from_feature_specs(feature_spec) transform_input_columns = ( tft.get_transform_input_columns( preprocessing_fn, type_spec, force_tf_compat_v1=force_tf_compat_v1)) transform_input_dataset_metadata = dataset_metadata.DatasetMetadata( schema_utils.schema_from_feature_spec({ feature: feature_spec[feature] for feature in transform_input_columns })) tfxio = tf_example_record.TFExampleBeamRecord( physical_format="tfexamples", schema=transform_input_dataset_metadata.schema, telemetry_descriptors=["TFTransformBenchmark"]) return CommonVariablesTuple( tf_metadata_schema=tf_metadata_schema, preprocessing_fn=preprocessing_fn, transform_input_dataset_metadata=transform_input_dataset_metadata, tfxio=tfxio) The provided code snippet includes necessary 
dependencies for implementing the `_get_batched_records` function. Write a Python function `def _get_batched_records(dataset, force_tf_compat_v1, max_num_examples=None)` to solve the following problem: Returns a (batch_size, iterator for batched records) tuple for the dataset. Args: dataset: BenchmarkDataset object. force_tf_compat_v1: If False then Transform will use its native TF2 version, if True then Transform will use its TF1 version. max_num_examples: Maximum number of examples to read from the dataset. Returns: Tuple of (batch_size, iterator for batched records), where records are decoded tf.train.Examples. Here is the function: def _get_batched_records(dataset, force_tf_compat_v1, max_num_examples=None): """Returns a (batch_size, iterator for batched records) tuple for the dataset. Args: dataset: BenchmarkDataset object. force_tf_compat_v1: If False then Transform will use its native TF2 version, if True then Transform will use its TF1 version. max_num_examples: Maximum number of examples to read from the dataset. Returns: Tuple of (batch_size, iterator for batched records), where records are decoded tf.train.Examples. """ batch_size = 1000 common_variables = _get_common_variables(dataset, force_tf_compat_v1) converter = example_coder.ExamplesToRecordBatchDecoder( common_variables.transform_input_dataset_metadata.schema .SerializeToString()) serialized_records = benchmark_utils.batched_iterator( dataset.read_raw_dataset(deserialize=False, limit=max_num_examples), batch_size) records = [converter.DecodeBatch(x) for x in serialized_records] return batch_size, records
Returns a (batch_size, iterator for batched records) tuple for the dataset. Args: dataset: BenchmarkDataset object. force_tf_compat_v1: If False then Transform will use its native TF2 version, if True then Transform will use its TF1 version. max_num_examples: Maximum number of examples to read from the dataset. Returns: Tuple of (batch_size, iterator for batched records), where records are decoded tf.train.Examples.
166,172
import importlib from google.protobuf import text_format from tensorflow_metadata.proto.v0 import schema_pb2 The provided code snippet includes necessary dependencies for implementing the `get_dataset` function. Write a Python function `def get_dataset(name, base_dir=None)` to solve the following problem: Imports the given dataset and returns an instance of it. Here is the function: def get_dataset(name, base_dir=None): """Imports the given dataset and returns an instance of it.""" lib = importlib.import_module("..datasets.%s.dataset" % name, __name__) return lib.get_dataset(base_dir)
Imports the given dataset and returns an instance of it.
166,173
import itertools import math import os import shutil import tempfile from typing import Optional from absl import logging import apache_beam as beam import tensorflow_transform as tft from tfx import components from tfx.benchmarks import benchmark_dataset from tfx.components.example_gen.csv_example_gen import executor as csv_exgen from tfx.examples.chicago_taxi_pipeline import taxi_utils from tfx.orchestration import metadata from tfx.orchestration import pipeline from tfx.orchestration.beam.beam_dag_runner import BeamDagRunner from tfx.proto import trainer_pb2 from tfx_bsl.coders import csv_decoder class ChicagoTaxiDataset(benchmark_dataset.BenchmarkDataset): """Chicago taxi dataset.""" def dataset_path(self): return self.datasets_dir("chicago_taxi/data/taxi_1M.tfrecords.gz") def tf_metadata_schema_path(self): return self.datasets_dir( "../../examples/chicago_taxi_pipeline/data/user_provided_schema/" "schema.pbtxt") def trained_saved_model_path(self): return self.datasets_dir("chicago_taxi/model/trained_saved_model") def tft_saved_model_path(self, force_tf_compat_v1): if force_tf_compat_v1: return self.datasets_dir("chicago_taxi/model/tft_saved_model") else: return self.datasets_dir("chicago_taxi/model/tft_tf2_saved_model") def tfma_saved_model_path(self): return self.datasets_dir("chicago_taxi/model/tfma_saved_model") def tft_preprocessing_fn(self): return taxi_utils.preprocessing_fn def num_examples(self, limit=None): result = 1000000 if limit: result = min(result, limit) return result def convert_csv_to_tf_examples(self, csv_path, tfrecords_output_path): """Runs a Beam pipeline to convert the CSV file into a TFRecords file. This is needed because the conversion is orders of magnitude more time-consuming than the functions we want to benchmark, so instead of doing the conversion each time, we do it once to generate a converted dataset and use that for the benchmark instead. Args: csv_path: Path to CSV file containing examples. 
tfrecords_output_path: Path to output TFRecords file containing parsed examples. """ # Copied from CSV example gen. fp = open(csv_path, "r") column_names = next(fp).strip().split(",") fp.close() with beam.Pipeline() as p: parsed_csv_lines = ( p | "ReadFromText" >> beam.io.ReadFromText( file_pattern=csv_path, skip_header_lines=1) | "ParseCSVLine" >> beam.ParDo(csv_decoder.ParseCSVLine(delimiter=",")) | "ExtractParsedCSVLines" >> beam.Keys()) column_infos = beam.pvalue.AsSingleton( parsed_csv_lines | "InferColumnTypes" >> beam.CombineGlobally( csv_decoder.ColumnTypeInferrer( column_names, skip_blank_lines=True))) _ = ( parsed_csv_lines | "ToTFExample" >> beam.ParDo( csv_exgen._ParsedCsvToTfExample(), # pylint: disable=protected-access column_infos) | "Serialize" >> beam.Map(lambda x: x.SerializeToString()) | "WriteToTFRecord" >> beam.io.tfrecordio.WriteToTFRecord( file_path_prefix=tfrecords_output_path, shard_name_template="", compression_type=beam.io.filesystem.CompressionTypes.GZIP)) def generate_raw_dataset(self, args): logging.warn( "Not actually regenerating the raw dataset.\n" "To regenerate the raw CSV dataset, see the TFX Chicago Taxi example " "for details as to how to do so. " "tfx/examples/chicago_taxi_pipeline/taxi_pipeline_kubeflow_gcp.py " "has the BigQuery query used to generate the dataset.\n" "After regenerating the raw CSV dataset, you should also regenerate " "the derived TFRecords dataset. 
You can do so by passing " "--generate_dataset_args=/path/to/csv_dataset.csv to " "regenerate_datasets.py.") if args: logging.info("Converting CSV at %s to TFRecords", args) self.convert_csv_to_tf_examples(args, self.dataset_path()) logging.info("TFRecords written to %s", self.dataset_path()) def generate_models(self, args, force_tf_compat_v1=True): # Modified version of Chicago Taxi Example pipeline # tfx/examples/chicago_taxi_pipeline/taxi_pipeline_beam.py root = tempfile.mkdtemp() pipeline_root = os.path.join(root, "pipeline") metadata_path = os.path.join(root, "metadata/metadata.db") module_file = os.path.join( os.path.dirname(__file__), "../../../examples/chicago_taxi_pipeline/taxi_utils.py") example_gen = components.ImportExampleGen( input_base=os.path.dirname(self.dataset_path())) statistics_gen = components.StatisticsGen( examples=example_gen.outputs["examples"]) schema_gen = components.SchemaGen( statistics=statistics_gen.outputs["statistics"], infer_feature_shape=False) transform = components.Transform( examples=example_gen.outputs["examples"], schema=schema_gen.outputs["schema"], module_file=module_file, force_tf_compat_v1=force_tf_compat_v1) trainer = components.Trainer( module_file=module_file, transformed_examples=transform.outputs["transformed_examples"], schema=schema_gen.outputs["schema"], transform_graph=transform.outputs["transform_graph"], train_args=trainer_pb2.TrainArgs(num_steps=100), eval_args=trainer_pb2.EvalArgs(num_steps=50)) p = pipeline.Pipeline( pipeline_name="chicago_taxi_beam", pipeline_root=pipeline_root, components=[ example_gen, statistics_gen, schema_gen, transform, trainer ], enable_cache=True, metadata_connection_config=metadata.sqlite_metadata_connection_config( metadata_path)) BeamDagRunner().run(p) def join_unique_subdir(path): dirs = os.listdir(path) if len(dirs) != 1: raise ValueError( "expecting there to be only one subdirectory in %s, but " "subdirectories were: %s" % (path, dirs)) return os.path.join(path, dirs[0]) 
trainer_output_dir = join_unique_subdir( os.path.join(pipeline_root, "Trainer/model")) eval_model_dir = join_unique_subdir( os.path.join(trainer_output_dir, "eval_model_dir")) serving_model_dir = join_unique_subdir( os.path.join(trainer_output_dir, "serving_model_dir/export/chicago-taxi")) transform_output_dir = join_unique_subdir( os.path.join(pipeline_root, "Transform/transform_graph")) transform_model_dir = os.path.join(transform_output_dir, "transform_fn") tft_saved_model_path = self.tft_saved_model_path(force_tf_compat_v1) shutil.rmtree(self.trained_saved_model_path(), ignore_errors=True) shutil.rmtree(self.tfma_saved_model_path(), ignore_errors=True) shutil.rmtree(tft_saved_model_path, ignore_errors=True) shutil.copytree(serving_model_dir, self.trained_saved_model_path()) shutil.copytree(eval_model_dir, self.tfma_saved_model_path()) shutil.copytree(transform_model_dir, tft_saved_model_path) def get_dataset(base_dir=None): return ChicagoTaxiDataset(base_dir)
null
166,174
import itertools import math import os import shutil import tempfile from typing import Optional from absl import logging import apache_beam as beam import tensorflow_transform as tft from tfx import components from tfx.benchmarks import benchmark_dataset from tfx.components.example_gen.csv_example_gen import executor as csv_exgen from tfx.examples.chicago_taxi_pipeline import taxi_utils from tfx.orchestration import metadata from tfx.orchestration import pipeline from tfx.orchestration.beam.beam_dag_runner import BeamDagRunner from tfx.proto import trainer_pb2 from tfx_bsl.coders import csv_decoder class WideChicagoTaxiDataset(ChicagoTaxiDataset): """Chicago taxi dataset with a TFT preprocessing_fn containing specified number of analyzers. Note that the analyzers are called within the corresponding mappers. Half of the mappers will be `tft.compute_and_apply_vocabulary`. Another half is split between `tft.bucketize` and `tft.scale_to_z_score`. """ # Percentage of mappers in the preprocessing function of the given type. The # remaining mappers will be `tft.scale_to_z_score`. _VOCABS_SHARE = 0.5 _BUCKETIZE_SHARE = 0.25 _VOCABULARY_KEYS = taxi_utils._VOCAB_FEATURE_KEYS _BUCKETIZE_KEYS = taxi_utils._BUCKET_FEATURE_KEYS _SCALE_KEYS = taxi_utils._DENSE_FLOAT_FEATURE_KEYS def __init__(self, base_dir: Optional[str] = None, num_analyzers: int = 10): super().__init__(base_dir) self._num_vocabs = max( len(self._VOCABULARY_KEYS), math.ceil(num_analyzers * self._VOCABS_SHARE)) self._num_bucketize = max( len(self._BUCKETIZE_KEYS), math.ceil(num_analyzers * self._BUCKETIZE_SHARE)) self._num_scale = max( len(self._SCALE_KEYS), num_analyzers - self._num_vocabs - self._num_bucketize) def tft_preprocessing_fn(self): def wide_preprocessing_fn(inputs): """TFT preprocessing function. Args: inputs: Map from feature keys to raw not-yet-transformed features. Returns: Map from string feature key to transformed feature operations. 
""" outputs = {} for idx, key in enumerate( itertools.islice( itertools.cycle(self._BUCKETIZE_KEYS), self._num_bucketize)): outputs["bucketized" + str(idx)] = tft.bucketize( taxi_utils._fill_in_missing(inputs[key]), taxi_utils._FEATURE_BUCKET_COUNT) for idx, key in enumerate( itertools.islice(itertools.cycle(self._SCALE_KEYS), self._num_scale)): # If sparse make it dense, setting nan's to 0 or '', and apply zscore. outputs["scaled" + str(idx)] = tft.scale_to_z_score( taxi_utils._fill_in_missing(inputs[key])) for idx, key in enumerate( itertools.islice( itertools.cycle(self._VOCABULARY_KEYS), self._num_vocabs)): outputs["vocab" + str(idx)] = tft.compute_and_apply_vocabulary( taxi_utils._fill_in_missing(inputs[key]), top_k=taxi_utils._VOCAB_SIZE, num_oov_buckets=taxi_utils._OOV_SIZE) # Pass-through features. for key in taxi_utils._CATEGORICAL_FEATURE_KEYS + [taxi_utils._LABEL_KEY]: outputs[key] = inputs[key] return outputs return wide_preprocessing_fn def get_wide_dataset(base_dir=None, num_analyzers=10): return WideChicagoTaxiDataset(base_dir, num_analyzers)
null
166,175
from typing import TypeVar from absl import flags from tfx.orchestration.portable import data_types from tfx.orchestration.python_execution_binary import python_execution_binary_utils as flag_utils _LEGACY_EXECUTION_INVOCATION = flags.DEFINE_string( 'tfx_execution_info_b64', None, 'url safe base64 encoded tfx.orchestration.ExecutionInvocation proto', ) def _require_flag(flag: flags.FlagHolder[_T]) -> _T: if not flag.present: raise flags.ValidationError(f'Flag --{flag.name} is required.') return flag.value def parse_execution_info() -> data_types.ExecutionInfo: exec_invocation_b64 = _require_flag(_LEGACY_EXECUTION_INVOCATION) return flag_utils.deserialize_execution_info(exec_invocation_b64)
null
166,176
from typing import Optional, Union from absl import logging from tfx.dsl.io import fileio from tfx.orchestration import metadata from tfx.orchestration.portable import data_types from tfx.orchestration.portable import python_driver_operator from tfx.proto.orchestration import driver_output_pb2 from tfx.proto.orchestration import executable_spec_pb2 from tfx.orchestration.python_execution_binary import python_executor_operator_dispatcher _PythonClassExecutableSpec = executable_spec_pb2.PythonClassExecutableSpec _BeamExecutableSpec = executable_spec_pb2.BeamExecutableSpec def run_python_custom_component( executable_spec: Union[_PythonClassExecutableSpec, _BeamExecutableSpec], execution_info: data_types.ExecutionInfo, mlmd_connection_config: Optional[metadata.ConnectionConfigType] = None, ) -> None: """Run Python custom component declared with @component decorator.""" # MLMD connection config being set indicates a driver execution instead of an # executor execution as accessing MLMD is not supported for executors. if mlmd_connection_config: run_result = _run_driver( executable_spec, mlmd_connection_config, execution_info ) else: run_result = python_executor_operator_dispatcher.run_executor( executable_spec, execution_info ) if run_result: with fileio.open(execution_info.execution_output_uri, 'wb') as f: f.write(run_result.SerializeToString()) The provided code snippet includes necessary dependencies for implementing the `run` function. Write a Python function `def run( executable_spec: Union[_PythonClassExecutableSpec, _BeamExecutableSpec], execution_info: data_types.ExecutionInfo, mlmd_connection_config: Optional[metadata.ConnectionConfigType] = None, ) -> None` to solve the following problem: Run Python executable. 
Here is the function: def run( executable_spec: Union[_PythonClassExecutableSpec, _BeamExecutableSpec], execution_info: data_types.ExecutionInfo, mlmd_connection_config: Optional[metadata.ConnectionConfigType] = None, ) -> None: """Run Python executable.""" logging.info('Executing Python custom component') run_python_custom_component( executable_spec, execution_info, mlmd_connection_config )
Run Python executable.
166,177
import base64 from typing import Union from tfx.orchestration import metadata from tfx.orchestration.portable import data_types from tfx.proto.orchestration import executable_spec_pb2 from tfx.proto.orchestration import execution_invocation_pb2 from tfx.proto.orchestration import metadata_pb2 from tfx.utils import import_utils _PythonClassExecutableSpec = executable_spec_pb2.PythonClassExecutableSpec _BeamExecutableSpec = executable_spec_pb2.BeamExecutableSpec The provided code snippet includes necessary dependencies for implementing the `import_class_path` function. Write a Python function `def import_class_path( executable_spec: Union[_PythonClassExecutableSpec, _BeamExecutableSpec], )` to solve the following problem: Import the class path from Python or Beam executor spec. Here is the function: def import_class_path( executable_spec: Union[_PythonClassExecutableSpec, _BeamExecutableSpec], ): """Import the class path from Python or Beam executor spec.""" if isinstance(executable_spec, _BeamExecutableSpec): import_utils.import_class_by_path( executable_spec.python_executor_spec.class_path ) elif isinstance(executable_spec, _PythonClassExecutableSpec): import_utils.import_class_by_path(executable_spec.class_path) else: raise ValueError( f'Executable spec type {type(executable_spec)} is not supported.' )
Import the class path from Python or Beam executor spec.
166,178
import base64 from typing import Union from tfx.orchestration import metadata from tfx.orchestration.portable import data_types from tfx.proto.orchestration import executable_spec_pb2 from tfx.proto.orchestration import execution_invocation_pb2 from tfx.proto.orchestration import metadata_pb2 from tfx.utils import import_utils The provided code snippet includes necessary dependencies for implementing the `deserialize_mlmd_connection_config` function. Write a Python function `def deserialize_mlmd_connection_config( mlmd_connection_config_b64: str) -> metadata.ConnectionConfigType` to solve the following problem: De-serializes an MLMD connection config from base64 flag. Here is the function: def deserialize_mlmd_connection_config( mlmd_connection_config_b64: str) -> metadata.ConnectionConfigType: """De-serializes an MLMD connection config from base64 flag.""" mlmd_connection_config = ( metadata_pb2.MLMDConnectionConfig.FromString( base64.b64decode(mlmd_connection_config_b64))) return getattr(mlmd_connection_config, mlmd_connection_config.WhichOneof('connection_config'))
De-serializes an MLMD connection config from base64 flag.
166,179
import base64 from typing import Union from tfx.orchestration import metadata from tfx.orchestration.portable import data_types from tfx.proto.orchestration import executable_spec_pb2 from tfx.proto.orchestration import execution_invocation_pb2 from tfx.proto.orchestration import metadata_pb2 from tfx.utils import import_utils The provided code snippet includes necessary dependencies for implementing the `deserialize_executable_spec` function. Write a Python function `def deserialize_executable_spec( executable_spec_b64: str, with_beam: bool = False, ) -> Union[executable_spec_pb2.PythonClassExecutableSpec, executable_spec_pb2.BeamExecutableSpec]` to solve the following problem: De-serializes an executable spec from base64 flag. Here is the function: def deserialize_executable_spec( executable_spec_b64: str, with_beam: bool = False, ) -> Union[executable_spec_pb2.PythonClassExecutableSpec, executable_spec_pb2.BeamExecutableSpec]: """De-serializes an executable spec from base64 flag.""" if with_beam: return executable_spec_pb2.BeamExecutableSpec.FromString( base64.b64decode(executable_spec_b64)) return executable_spec_pb2.PythonClassExecutableSpec.FromString( base64.b64decode(executable_spec_b64))
De-serializes an executable spec from base64 flag.
166,180
import base64 from typing import Union from tfx.orchestration import metadata from tfx.orchestration.portable import data_types from tfx.proto.orchestration import executable_spec_pb2 from tfx.proto.orchestration import execution_invocation_pb2 from tfx.proto.orchestration import metadata_pb2 from tfx.utils import import_utils The provided code snippet includes necessary dependencies for implementing the `serialize_mlmd_connection_config` function. Write a Python function `def serialize_mlmd_connection_config( connection_config: metadata.ConnectionConfigType) -> str` to solve the following problem: Serializes an MLMD connection config into a base64 flag of its wrapper. Here is the function: def serialize_mlmd_connection_config( connection_config: metadata.ConnectionConfigType) -> str: """Serializes an MLMD connection config into a base64 flag of its wrapper.""" mlmd_wrapper = metadata_pb2.MLMDConnectionConfig() for name, descriptor in ( metadata_pb2.MLMDConnectionConfig.DESCRIPTOR.fields_by_name.items()): if ( descriptor.message_type.full_name == connection_config.DESCRIPTOR.full_name ): getattr(mlmd_wrapper, name).CopyFrom(connection_config) break return base64.b64encode(mlmd_wrapper.SerializeToString()).decode('ascii')
Serializes an MLMD connection config into a base64 flag of its wrapper.
166,181
import base64 from typing import Union from tfx.orchestration import metadata from tfx.orchestration.portable import data_types from tfx.proto.orchestration import executable_spec_pb2 from tfx.proto.orchestration import execution_invocation_pb2 from tfx.proto.orchestration import metadata_pb2 from tfx.utils import import_utils The provided code snippet includes necessary dependencies for implementing the `serialize_executable_spec` function. Write a Python function `def serialize_executable_spec( executable_spec: Union[executable_spec_pb2.PythonClassExecutableSpec, executable_spec_pb2.BeamExecutableSpec] ) -> str` to solve the following problem: Serializes an executable spec into a base64 flag. Here is the function: def serialize_executable_spec( executable_spec: Union[executable_spec_pb2.PythonClassExecutableSpec, executable_spec_pb2.BeamExecutableSpec] ) -> str: """Serializes an executable spec into a base64 flag.""" return base64.b64encode(executable_spec.SerializeToString()).decode('ascii')
Serializes an executable spec into a base64 flag.
166,182
import base64 from typing import Union from tfx.orchestration import metadata from tfx.orchestration.portable import data_types from tfx.proto.orchestration import executable_spec_pb2 from tfx.proto.orchestration import execution_invocation_pb2 from tfx.proto.orchestration import metadata_pb2 from tfx.utils import import_utils The provided code snippet includes necessary dependencies for implementing the `serialize_execution_info` function. Write a Python function `def serialize_execution_info(execution_info: data_types.ExecutionInfo) -> str` to solve the following problem: Serializes the ExecutionInfo class from a base64 flag. Here is the function: def serialize_execution_info(execution_info: data_types.ExecutionInfo) -> str: """Serializes the ExecutionInfo class from a base64 flag.""" execution_info_proto = execution_info.to_proto() return base64.b64encode( execution_info_proto.SerializeToString()).decode('ascii')
Serializes the ExecutionInfo class from a base64 flag.
166,183
import itertools from typing import Any, Dict, List, Optional, Tuple, cast from absl import logging from kfp.pipeline_spec import pipeline_spec_pb2 as pipeline_pb2 from tfx import components from tfx.components.evaluator import constants from tfx.dsl.compiler import compiler_utils as tfx_compiler_utils from tfx.dsl.component.experimental import executor_specs from tfx.dsl.component.experimental import placeholders from tfx.dsl.components.base import base_component from tfx.dsl.components.base import base_node from tfx.dsl.components.base import executor_spec from tfx.dsl.components.common import importer from tfx.dsl.components.common import resolver from tfx.dsl.context_managers import dsl_context_registry from tfx.dsl.experimental.conditionals import conditional from tfx.dsl.input_resolution.strategies import latest_artifact_strategy from tfx.dsl.input_resolution.strategies import latest_blessed_model_strategy from tfx.dsl.placeholder import placeholder from tfx.orchestration import data_types from tfx.orchestration.kubeflow import decorators from tfx.orchestration.kubeflow import utils from tfx.orchestration.kubeflow.v2 import compiler_utils from tfx.orchestration.kubeflow.v2 import parameter_utils from tfx.types import channel_utils from tfx.types import standard_artifacts from tfx.types.channel import Channel from tfx.utils import deprecation_utils from tfx.utils import name_utils from ml_metadata.proto import metadata_store_pb2 The provided code snippet includes necessary dependencies for implementing the `_resolve_command_line` function. Write a Python function `def _resolve_command_line( container_spec: executor_specs.TemplatedExecutorContainerSpec, exec_properties: Dict[str, Any], ) -> List[str]` to solve the following problem: Resolves placeholders in the command line of a container. Args: container_spec: Container structure to resolve exec_properties: The map of component's execution properties Returns: Resolved command line. 
Raises: TypeError: On unsupported type of command-line arguments, or when the resolved argument is not a string. Here is the function: def _resolve_command_line( container_spec: executor_specs.TemplatedExecutorContainerSpec, exec_properties: Dict[str, Any], ) -> List[str]: """Resolves placeholders in the command line of a container. Args: container_spec: Container structure to resolve exec_properties: The map of component's execution properties Returns: Resolved command line. Raises: TypeError: On unsupported type of command-line arguments, or when the resolved argument is not a string. """ def expand_command_line_arg( cmd_arg: placeholders.CommandlineArgumentType) -> str: """Resolves a single argument.""" if isinstance(cmd_arg, str): return cmd_arg elif isinstance(cmd_arg, placeholders.InputValuePlaceholder): if cmd_arg.input_name in exec_properties: return "{{$.inputs.parameters['%s']}}" % cmd_arg.input_name else: return "{{$.inputs.artifacts['%s'].value}}" % cmd_arg.input_name elif isinstance(cmd_arg, placeholders.InputUriPlaceholder): return "{{$.inputs.artifacts['%s'].uri}}" % cmd_arg.input_name elif isinstance(cmd_arg, placeholders.OutputUriPlaceholder): return "{{$.outputs.artifacts['%s'].uri}}" % cmd_arg.output_name elif isinstance(cmd_arg, placeholders.ConcatPlaceholder): resolved_items = [expand_command_line_arg(item) for item in cmd_arg.items] for item in resolved_items: if not isinstance(item, str): raise TypeError('Expanded item "{}" has incorrect type "{}"'.format( item, type(item))) return ''.join(resolved_items) else: raise TypeError('Unsupported type of command-line arguments: "{}".' 
' Supported types are {}.'.format( type(cmd_arg), str(executor_specs.CommandlineArgumentType))) resolved_command_line = [] for cmd_arg in (container_spec.command or []): resolved_cmd_arg = expand_command_line_arg(cmd_arg) if not isinstance(resolved_cmd_arg, str): raise TypeError( 'Resolved argument "{}" (type="{}") is not a string.'.format( resolved_cmd_arg, type(resolved_cmd_arg))) resolved_command_line.append(resolved_cmd_arg) return resolved_command_line
Resolves placeholders in the command line of a container. Args: container_spec: Container structure to resolve exec_properties: The map of component's execution properties Returns: Resolved command line. Raises: TypeError: On unsupported type of command-line arguments, or when the resolved argument is not a string.
166,184
def value_converter(
    tfx_value: Any) -> Optional[pipeline_pb2.ValueOrRuntimeParameter]:
  """Converts TFX/MLMD values into Kubeflow pipeline ValueOrRuntimeParameter.

  Args:
    tfx_value: The value to convert. May be a Python primitive, a list/dict,
      a RuntimeParameter, an MLMD Value proto, an arbitrary proto message, or
      any object serializable with json_utils.

  Returns:
    The converted ValueOrRuntimeParameter proto, or None when `tfx_value` is
    None.
  """
  if tfx_value is None:
    return None

  result = pipeline_pb2.ValueOrRuntimeParameter()
  if isinstance(tfx_value, (int, float, str)):
    result.constant_value.CopyFrom(get_kubeflow_value(tfx_value))
  elif isinstance(tfx_value, (Dict, List)):
    result.constant_value.CopyFrom(
        pipeline_pb2.Value(string_value=json.dumps(tfx_value)))
  elif isinstance(tfx_value, data_types.RuntimeParameter):
    # Register the runtime parameter with the active compilation context so
    # it is surfaced in the pipeline's runtime parameter spec.
    parameter_utils.attach_parameter(tfx_value)
    result.runtime_parameter = tfx_value.name
  elif isinstance(tfx_value, metadata_store_pb2.Value):
    value_kind = tfx_value.WhichOneof('value')
    if value_kind == 'int_value':
      result.constant_value.CopyFrom(
          pipeline_pb2.Value(int_value=tfx_value.int_value))
    elif value_kind == 'double_value':
      result.constant_value.CopyFrom(
          pipeline_pb2.Value(double_value=tfx_value.double_value))
    elif value_kind == 'string_value':
      result.constant_value.CopyFrom(
          pipeline_pb2.Value(string_value=tfx_value.string_value))
  elif isinstance(tfx_value, message.Message):
    result.constant_value.CopyFrom(
        pipeline_pb2.Value(
            string_value=json_format.MessageToJson(
                message=tfx_value, sort_keys=True)))
  else:
    # By default, attempt to encode the object (e.g. a resolver class) using
    # json_utils.dumps.
    result.constant_value.CopyFrom(
        pipeline_pb2.Value(string_value=json_utils.dumps(tfx_value)))
  return result


def build_runtime_parameter_spec(
    parameters: List[data_types.RuntimeParameter]
) -> Dict[str, pipeline_pb2.PipelineSpec.RuntimeParameter]:
  """Converts RuntimeParameters to mapping from names to proto messages.

  Args:
    parameters: The runtime parameters collected for the pipeline.

  Returns:
    A dict keyed by parameter name whose values are the corresponding
    PipelineSpec.RuntimeParameter messages.

  Raises:
    TypeError: If a parameter's ptype is not one of int, bool, float or str.
  """

  def to_message(parameter: data_types.RuntimeParameter):
    """Converts a RuntimeParameter to a RuntimeParameter message."""
    result = pipeline_pb2.PipelineSpec.RuntimeParameter()
    # 1. Map the RuntimeParameter type to an enum in the proto definition.
    # Note: bool parameters are carried as INT in the KFP IR.
    if parameter.ptype in (int, bool):
      result.type = pipeline_pb2.PrimitiveType.INT
    elif parameter.ptype == float:
      result.type = pipeline_pb2.PrimitiveType.DOUBLE
    elif parameter.ptype == str:
      result.type = pipeline_pb2.PrimitiveType.STRING
    else:
      raise TypeError(
          'Unknown parameter type: {} found in parameter: {}'.format(
              parameter.ptype, parameter.name))
    # 2. Convert its default value.
    default = value_converter(parameter.default)
    if default is not None:
      result.default_value.CopyFrom(default.constant_value)
    return result

  return {param.name: to_message(param) for param in parameters}
Converts RuntimeParameters to mapping from names to proto messages.
166,185
import itertools import json import os import re from typing import Any, Dict, List, Mapping, Optional, Type, Union from kfp.pipeline_spec import pipeline_spec_pb2 as pipeline_pb2 from tfx import types from tfx.dsl.io import fileio from tfx.orchestration import data_types from tfx.orchestration.kubeflow.v2 import parameter_utils from tfx.proto.orchestration import placeholder_pb2 from tfx.types import artifact from tfx.types import channel from tfx.types import standard_artifacts from tfx.types.experimental import simple_artifacts from tfx.utils import json_utils from tfx.utils import name_utils import yaml from google.protobuf import struct_pb2 from google.protobuf import json_format from google.protobuf import message from ml_metadata.proto import metadata_store_pb2 The provided code snippet includes necessary dependencies for implementing the `build_parameter_type_spec` function. Write a Python function `def build_parameter_type_spec( value: Union[types.Property, data_types.RuntimeParameter] ) -> pipeline_pb2.ComponentInputsSpec.ParameterSpec` to solve the following problem: Extracts the artifact type info into ComponentInputsSpec.ParameterSpec. 
def build_parameter_type_spec(
    value: Union[types.Property, data_types.RuntimeParameter]
) -> pipeline_pb2.ComponentInputsSpec.ParameterSpec:
  """Extracts the artifact type info into ComponentInputsSpec.ParameterSpec.

  Args:
    value: A concrete property value, or a RuntimeParameter whose ptype
      determines the spec type.

  Returns:
    A ParameterSpec with `type` matching the value's Python type.
  """
  type_enum = pipeline_pb2.PrimitiveType.PrimitiveTypeEnum
  is_runtime_param = isinstance(value, data_types.RuntimeParameter)
  result = pipeline_pb2.ComponentInputsSpec.ParameterSpec()
  if isinstance(value, int) or (is_runtime_param and value.ptype == int):
    result.type = type_enum.INT
  elif isinstance(value, float) or (is_runtime_param and value.ptype == float):
    result.type = type_enum.DOUBLE
  else:
    # Strings, str-typed RuntimeParameters, and any unrecognized object
    # (which will be json-dumped downstream, e.g. a resolver class) are all
    # represented as STRING.
    result.type = type_enum.STRING
  return result
Extracts the artifact type info into ComponentInputsSpec.ParameterSpec.
166,186
def _validate_properties_schema(
    instance_schema: str,
    properties: Optional[Mapping[str, artifact.PropertyType]] = None):
  """Validates the declared property types are consistent with the schema.

  Args:
    instance_schema: YAML string of the artifact property schema.
    properties: The actual property schema of an Artifact Python class.

  Raises:
    KeyError: When actual property have additional properties than what's
      specified in the YAML schema.
    TypeError: When the same property is declared with different types in
      YAML schema and the Artifact Python class.
  """
  schema = yaml.safe_load(instance_schema).get('properties', {})
  for name, prop in (properties or {}).items():
    if name not in schema:
      raise KeyError('Actual property: {} not expected in artifact type schema:'
                     ' {}'.format(name, schema))
    # Only the constant_value case needs checking here: a RuntimeParameter's
    # ptype is validated at component instantiation. Only primitive-typed
    # properties are validated because other types can carry nested schemas
    # in the YAML spec.
    declared = schema[name]['type']  # pytype: disable=attribute-error  # use-enum-overlay
    mismatched = (
        (declared == _YAML_INT_TYPE and
         prop.type != artifact.PropertyType.INT) or
        (declared == _YAML_STRING_TYPE and
         prop.type != artifact.PropertyType.STRING) or
        (declared == _YAML_DOUBLE_TYPE and
         prop.type != artifact.PropertyType.FLOAT))  # pytype: enable=attribute-error  # use-enum-overlay
    if mismatched:
      raise TypeError(f'Property type mismatched at {name} for schema: '
                      f'{schema}. Expected {schema[name]["type"]} but got '
                      f'{prop.type}')


def get_artifact_schema(artifact_type: Type[artifact.Artifact]) -> str:
  """Gets the YAML schema string associated with the artifact type.

  Args:
    artifact_type: the artifact type that the schema is generated for.

  Returns:
    the encoded yaml schema definition for the artifact.

  Raises:
    ValueError if custom artifact type name does not adhere to KFP schema
    title.
  """
  schema_dir = os.path.join(os.path.dirname(__file__), 'artifact_types')
  if artifact_type in _SUPPORTED_STANDARD_ARTIFACT_TYPES:
    # Supported first-party artifact types ship with a built-in schema yaml
    # named after the type.
    schema_path = os.path.join(schema_dir,
                               '{}.yaml'.format(artifact_type.TYPE_NAME))
    return fileio.open(schema_path, 'rb').read()
  # Otherwise fall back to the generic `Artifact` type schema, encoding the
  # artifact TYPE_NAME as the schema title so the Python type object can be
  # recovered at runtime.
  if not _SCHEMA_TITLE_RE.fullmatch(artifact_type.TYPE_NAME):
    raise ValueError(
        f'Invalid custom artifact type name: {artifact_type.TYPE_NAME}')
  data = yaml.safe_load(
      fileio.open(os.path.join(schema_dir, 'Artifact.yaml'), 'rb').read())
  data['title'] = artifact_type.TYPE_NAME
  return yaml.dump(data, sort_keys=False)
def build_input_artifact_spec(
    channel_spec: channel.Channel
) -> pipeline_pb2.ComponentInputsSpec.ArtifactSpec:
  """Builds artifact type spec for an input channel.

  Args:
    channel_spec: The input channel whose artifact type is being encoded.

  Returns:
    An ArtifactSpec carrying the channel type's YAML instance schema.
  """
  result = pipeline_pb2.ComponentInputsSpec.ArtifactSpec()
  result.artifact_type.CopyFrom(
      pipeline_pb2.ArtifactTypeSchema(
          instance_schema=get_artifact_schema(channel_spec.type)))
  # Fail early if the Python class's declared property types disagree with
  # the YAML schema.
  _validate_properties_schema(
      instance_schema=result.artifact_type.instance_schema,
      properties=channel_spec.type.PROPERTIES)
  return result
Builds artifact type spec for an input channel.
166,187
def build_output_parameter_spec(
    output_type: Any) -> pipeline_pb2.ComponentOutputsSpec.ParameterSpec:
  """Builds parameter type spec for an output channel.

  Args:
    output_type: Name of the output's primitive type; one of 'Integer',
      'Double', 'String' or 'Boolean'.

  Returns:
    A ParameterSpec with `parameter_type` set accordingly.

  Raises:
    ValueError: If `output_type` is not a supported primitive type name.
  """
  parameter_types = {
      'Integer': pipeline_pb2.ParameterType.NUMBER_INTEGER,
      'Double': pipeline_pb2.ParameterType.NUMBER_DOUBLE,
      'String': pipeline_pb2.ParameterType.STRING,
      'Boolean': pipeline_pb2.ParameterType.BOOLEAN,
  }
  if output_type not in parameter_types:
    raise ValueError(
        '{} is an unsupported component output type. Currently, we support Integer, Double, String, and Boolean types only.'
        .format(output_type))
  result = pipeline_pb2.ComponentOutputsSpec.ParameterSpec()
  result.parameter_type = parameter_types[output_type]
  return result
Builds parameter type spec for an output channel.
166,188
def _validate_properties_schema(
    instance_schema: str,
    properties: Optional[Mapping[str, artifact.PropertyType]] = None):
  """Validates the declared property types are consistent with the schema.

  Args:
    instance_schema: YAML string of the artifact property schema.
    properties: The actual property schema of an Artifact Python class.

  Raises:
    KeyError: When actual property have additional properties than what's
      specified in the YAML schema.
    TypeError: When the same property is declared with different types in
      YAML schema and the Artifact Python class.
  """
  schema = yaml.safe_load(instance_schema).get('properties', {})
  for name, prop in (properties or {}).items():
    if name not in schema:
      raise KeyError('Actual property: {} not expected in artifact type schema:'
                     ' {}'.format(name, schema))
    # Only the constant_value case needs checking here: a RuntimeParameter's
    # ptype is validated at component instantiation. Only primitive-typed
    # properties are validated because other types can carry nested schemas
    # in the YAML spec.
    declared = schema[name]['type']  # pytype: disable=attribute-error  # use-enum-overlay
    mismatched = (
        (declared == _YAML_INT_TYPE and
         prop.type != artifact.PropertyType.INT) or
        (declared == _YAML_STRING_TYPE and
         prop.type != artifact.PropertyType.STRING) or
        (declared == _YAML_DOUBLE_TYPE and
         prop.type != artifact.PropertyType.FLOAT))  # pytype: enable=attribute-error  # use-enum-overlay
    if mismatched:
      raise TypeError(f'Property type mismatched at {name} for schema: '
                      f'{schema}. Expected {schema[name]["type"]} but got '
                      f'{prop.type}')


def get_artifact_schema(artifact_type: Type[artifact.Artifact]) -> str:
  """Gets the YAML schema string associated with the artifact type.

  Args:
    artifact_type: the artifact type that the schema is generated for.

  Returns:
    the encoded yaml schema definition for the artifact.

  Raises:
    ValueError if custom artifact type name does not adhere to KFP schema
    title.
  """
  schema_dir = os.path.join(os.path.dirname(__file__), 'artifact_types')
  if artifact_type in _SUPPORTED_STANDARD_ARTIFACT_TYPES:
    # Supported first-party artifact types ship with a built-in schema yaml
    # named after the type.
    schema_path = os.path.join(schema_dir,
                               '{}.yaml'.format(artifact_type.TYPE_NAME))
    return fileio.open(schema_path, 'rb').read()
  # Otherwise fall back to the generic `Artifact` type schema, encoding the
  # artifact TYPE_NAME as the schema title so the Python type object can be
  # recovered at runtime.
  if not _SCHEMA_TITLE_RE.fullmatch(artifact_type.TYPE_NAME):
    raise ValueError(
        f'Invalid custom artifact type name: {artifact_type.TYPE_NAME}')
  data = yaml.safe_load(
      fileio.open(os.path.join(schema_dir, 'Artifact.yaml'), 'rb').read())
  data['title'] = artifact_type.TYPE_NAME
  return yaml.dump(data, sort_keys=False)
def build_output_artifact_spec(
    channel_spec: channel.Channel
) -> pipeline_pb2.ComponentOutputsSpec.ArtifactSpec:
  """Builds artifact type spec for an output channel.

  Args:
    channel_spec: The output channel whose artifact type is being encoded.

  Returns:
    An ArtifactSpec carrying the channel type's YAML instance schema, with
    any additional (custom) channel properties attached as metadata.
  """
  result = pipeline_pb2.ComponentOutputsSpec.ArtifactSpec()
  result.artifact_type.CopyFrom(
      pipeline_pb2.ArtifactTypeSchema(
          instance_schema=get_artifact_schema(channel_spec.type)))
  _validate_properties_schema(
      instance_schema=result.artifact_type.instance_schema,
      properties=channel_spec.type.PROPERTIES)
  # Channel-level additional (custom) properties ride along as metadata.
  if channel_spec.additional_properties:
    result.metadata.update(channel_spec.additional_properties)
  if channel_spec.additional_custom_properties:
    result.metadata.update(channel_spec.additional_custom_properties)
  return result
Builds artifact type spec for an output channel.
166,189
def pack_artifact_properties(artifact_instance: artifact.Artifact):
  """Packs artifact properties and custom properties into a Struct proto.

  Args:
    artifact_instance: The artifact whose MLMD properties and custom
      properties are packed.

  Returns:
    A google.protobuf.Struct holding every property and custom property,
    keyed by name.
  """
  mlmd_artifact = artifact_instance.mlmd_artifact
  combined = itertools.chain(mlmd_artifact.properties.items(),
                             mlmd_artifact.custom_properties.items())
  # Unwrap each MLMD Value oneof into its plain Python value. JSON does not
  # differentiate between int and double, but the MLMD value type does, so
  # MLMD ints end up stored as doubles.
  metadata = {
      key: getattr(value, value.WhichOneof('value'))
      for key, value in combined
  }
  struct_proto = struct_pb2.Struct()
  struct_proto.update(metadata)
  return struct_proto
Packs artifact properties and custom properties into a Struct proto.
166,190
import itertools import json import os import re from typing import Any, Dict, List, Mapping, Optional, Type, Union from kfp.pipeline_spec import pipeline_spec_pb2 as pipeline_pb2 from tfx import types from tfx.dsl.io import fileio from tfx.orchestration import data_types from tfx.orchestration.kubeflow.v2 import parameter_utils from tfx.proto.orchestration import placeholder_pb2 from tfx.types import artifact from tfx.types import channel from tfx.types import standard_artifacts from tfx.types.experimental import simple_artifacts from tfx.utils import json_utils from tfx.utils import name_utils import yaml from google.protobuf import struct_pb2 from google.protobuf import json_format from google.protobuf import message from ml_metadata.proto import metadata_store_pb2 _SUPPORTED_STANDARD_ARTIFACT_TYPES = frozenset( (standard_artifacts.ExampleAnomalies, standard_artifacts.ExampleStatistics, standard_artifacts.Examples, standard_artifacts.HyperParameters, standard_artifacts.InferenceResult, standard_artifacts.InfraBlessing, standard_artifacts.Model, standard_artifacts.ModelBlessing, standard_artifacts.ModelEvaluation, standard_artifacts.ModelRun, standard_artifacts.PushedModel, standard_artifacts.Schema, standard_artifacts.TransformGraph, standard_artifacts.TransformCache, standard_artifacts.TunerResults, standard_artifacts.Float, standard_artifacts.Integer, standard_artifacts.String, standard_artifacts.Boolean, standard_artifacts.JsonValue, simple_artifacts.Metrics, simple_artifacts.Statistics, simple_artifacts.Dataset, simple_artifacts.File)) The provided code snippet includes necessary dependencies for implementing the `get_artifact_title` function. Write a Python function `def get_artifact_title(artifact_type: Type[artifact.Artifact]) -> str` to solve the following problem: Gets the schema title from the artifact python class. 
def get_artifact_title(artifact_type: Type[artifact.Artifact]) -> str:
  """Gets the schema title from the artifact python class.

  Args:
    artifact_type: The artifact class to look up.

  Returns:
    'tfx.<ClassName>' for supported first-party artifact types; the generic
    'tfx.Artifact' otherwise.
  """
  if artifact_type not in _SUPPORTED_STANDARD_ARTIFACT_TYPES:
    return 'tfx.Artifact'
  return 'tfx.{}'.format(artifact_type.__name__)
Gets the schema title from the artifact python class.
166,191
def placeholder_to_cel(
    expression: placeholder_pb2.PlaceholderExpression) -> str:
  """Encodes a Predicate into a CEL string expression.

  The CEL specification is at:
  https://github.com/google/cel-spec/blob/master/doc/langdef.md

  Args:
    expression: A PlaceholderExpression proto describing a Predicate.

  Returns:
    A CEL expression in string format.

  Raises:
    NotImplementedError: On valid placeholder constructs that cannot be
      expressed on KFPv2 (non-primitive values, non-input-artifact
      placeholders, split uris).
    ValueError: On malformed or unrecognized expressions or operators.
  """
  if expression.HasField('value'):
    value_field_name = expression.value.WhichOneof('value')
    if value_field_name == 'int_value':
      # In KFP IR, all values are defined as google.protobuf.Value,
      # which does not differentiate between int and float. CEL treats
      # comparison between different types as an error, so ints are emitted
      # as floats for comparison in CEL.
      return f'{float(expression.value.int_value)}'
    if value_field_name == 'double_value':
      return f'{expression.value.double_value}'
    if value_field_name == 'string_value':
      return f'\'{expression.value.string_value}\''
    raise NotImplementedError(
        'Only supports predicate with primitive type values.')

  if expression.HasField('placeholder'):
    placeholder_pb = expression.placeholder
    # Predicates are always built from ChannelWrappedPlaceholder, which means
    # a component can only write a predicate about its inputs. It doesn't make
    # sense for a component to say "run only if my output is something."
    if placeholder_pb.type != placeholder_pb2.Placeholder.INPUT_ARTIFACT:
      raise NotImplementedError(
          'Only supports accessing input artifact through placeholders on KFPv2.'
          f'Got {placeholder_pb.type}.')
    if not placeholder_pb.key:
      raise ValueError(
          'Only supports accessing placeholders with a key on KFPv2.')
    # Note that because CEL automatically performs dynamic value conversion,
    # we don't need type info for the oneof fields in google.protobuf.Value.
    return f"inputs.artifacts['{placeholder_pb.key}'].artifacts"

  if expression.HasField('operator'):
    operator_name = expression.operator.WhichOneof('operator_type')
    operator_pb = getattr(expression.operator, operator_name)

    if operator_name == 'index_op':
      sub_expression_str = placeholder_to_cel(operator_pb.expression)
      return f'{sub_expression_str}[{operator_pb.index}]'

    if operator_name == 'artifact_property_op':
      sub_expression_str = placeholder_to_cel(operator_pb.expression)
      # CEL's dynamic value conversion applies to here as well.
      return f"{sub_expression_str}.metadata['{operator_pb.key}']"

    if operator_name == 'artifact_uri_op':
      sub_expression_str = placeholder_to_cel(operator_pb.expression)
      if operator_pb.split:
        raise NotImplementedError(
            'Accessing artifact\'s split uri is unsupported.')
      return f'{sub_expression_str}.uri'

    if operator_name == 'concat_op':
      expression_str = ' + '.join(
          placeholder_to_cel(e) for e in operator_pb.expressions)
      return f'({expression_str})'

    if operator_name == 'compare_op':
      lhs_str = placeholder_to_cel(operator_pb.lhs)
      rhs_str = placeholder_to_cel(operator_pb.rhs)
      if operator_pb.op == placeholder_pb2.ComparisonOperator.Operation.EQUAL:
        op_str = '=='
      elif operator_pb.op == placeholder_pb2.ComparisonOperator.Operation.LESS_THAN:
        op_str = '<'
      elif operator_pb.op == placeholder_pb2.ComparisonOperator.Operation.GREATER_THAN:
        op_str = '>'
      else:
        # Bug fix: this previously *returned* the error text, silently
        # embedding it in the generated CEL condition instead of failing.
        raise ValueError(f'Unknown Comparison Operation {operator_pb.op}')
      return f'({lhs_str} {op_str} {rhs_str})'

    if operator_name == 'unary_logical_op':
      expression_str = placeholder_to_cel(operator_pb.expression)
      if operator_pb.op == placeholder_pb2.UnaryLogicalOperator.Operation.NOT:
        op_str = '!'
      else:
        # Bug fix: previously returned the error text instead of raising.
        raise ValueError(f'Unknown Unary Logical Operation {operator_pb.op}')
      return f'{op_str}({expression_str})'

    if operator_name == 'binary_logical_op':
      lhs_str = placeholder_to_cel(operator_pb.lhs)
      rhs_str = placeholder_to_cel(operator_pb.rhs)
      if operator_pb.op == placeholder_pb2.BinaryLogicalOperator.Operation.AND:
        op_str = '&&'
      elif operator_pb.op == placeholder_pb2.BinaryLogicalOperator.Operation.OR:
        op_str = '||'
      else:
        # Bug fix: previously returned the error text instead of raising.
        raise ValueError(f'Unknown Binary Logical Operation {operator_pb.op}')
      return f'({lhs_str} {op_str} {rhs_str})'

    raise ValueError(f'Got unsupported placeholder operator {operator_name}.')

  raise ValueError('Unknown placeholder expression.')
Encodes a Predicate into a CEL string expression. The CEL specification is at: https://github.com/google/cel-spec/blob/master/doc/langdef.md Args: expression: A PlaceholderExpression proto describing a Predicate. Returns: A CEL expression in string format.
166,192
import argparse import os from typing import List, Tuple from absl import app from absl import logging from absl.flags import argparse_flags from kfp.pipeline_spec import pipeline_spec_pb2 from tfx.components.evaluator import executor as evaluator_executor from tfx.dsl.components.base import base_beam_executor from tfx.dsl.components.base import base_executor from tfx.dsl.io import fileio from tfx.orchestration.kubeflow.v2.container import kubeflow_v2_entrypoint_utils from tfx.orchestration.portable import outputs_utils from tfx.types import artifact_utils from tfx.types import standard_artifacts from tfx.types import standard_component_specs from tfx.types import value_artifact from tfx.utils import import_utils from google.protobuf import json_format The provided code snippet includes necessary dependencies for implementing the `_run_executor` function. Write a Python function `def _run_executor(args: argparse.Namespace, beam_args: List[str]) -> None` to solve the following problem: Selects a particular executor and run it based on name. Args: args: --executor_class_path: The import path of the executor class. --json_serialized_invocation_args: Full JSON-serialized parameters for this execution. beam_args: Optional parameter that maps to the optional_pipeline_args parameter in the pipeline, which provides additional configuration options for apache-beam and tensorflow.logging. For more about the beam arguments please refer to: https://cloud.google.com/dataflow/docs/guides/specifying-exec-params Here is the function: def _run_executor(args: argparse.Namespace, beam_args: List[str]) -> None: """Selects a particular executor and run it based on name. Args: args: --executor_class_path: The import path of the executor class. --json_serialized_invocation_args: Full JSON-serialized parameters for this execution. 
beam_args: Optional parameter that maps to the optional_pipeline_args parameter in the pipeline, which provides additional configuration options for apache-beam and tensorflow.logging. For more about the beam arguments please refer to: https://cloud.google.com/dataflow/docs/guides/specifying-exec-params """ logging.set_verbosity(logging.INFO) # Rehydrate inputs/outputs/exec_properties from the serialized metadata. executor_input = pipeline_spec_pb2.ExecutorInput() json_format.Parse( args.json_serialized_invocation_args, executor_input, ignore_unknown_fields=True) inputs_dict = executor_input.inputs.artifacts outputs_dict = executor_input.outputs.artifacts inputs_parameter = executor_input.inputs.parameters outputs_parameters = executor_input.outputs.parameters # Format {pipelineJob.runtimeConfig.gcsOutputDirectory}/{project_number} # /{pipeline_job_user_id}/{task_name}_{task_uuid}/executor_output.json task_root = os.path.dirname(executor_input.outputs.output_file) tmp_path = os.path.join(task_root, '.temp') task_unique_id = os.path.basename(task_root) if fileio.exists(executor_input.outputs.output_file): # It has a driver that outputs the updated exec_properties in this file. with fileio.open(executor_input.outputs.output_file, 'rb') as output_meta_json: output_metadata = pipeline_spec_pb2.ExecutorOutput() json_format.Parse( output_meta_json.read(), output_metadata, ignore_unknown_fields=True) # Append/Overwrite exec_propertise. 
for k, v in output_metadata.parameters.items(): inputs_parameter[k].CopyFrom(v) name_from_id = {} inputs = kubeflow_v2_entrypoint_utils.parse_raw_artifact_dict( inputs_dict, name_from_id) outputs = kubeflow_v2_entrypoint_utils.parse_raw_artifact_dict( outputs_dict, name_from_id) exec_properties = kubeflow_v2_entrypoint_utils.parse_execution_properties( inputs_parameter) logging.info('Executor %s do: inputs: %s, outputs: %s, exec_properties: %s', args.executor_class_path, inputs, outputs, exec_properties) executor_cls = import_utils.import_class_by_path(args.executor_class_path) if issubclass(executor_cls, base_beam_executor.BaseBeamExecutor): executor_context = base_beam_executor.BaseBeamExecutor.Context( beam_pipeline_args=beam_args, unique_id=task_unique_id, tmp_dir=tmp_path) else: executor_context = base_executor.BaseExecutor.Context( extra_flags=beam_args, unique_id=task_unique_id, tmp_dir=tmp_path) executor = executor_cls(executor_context) logging.info('Starting executor') executor.Do(inputs, outputs, exec_properties) outputs_utils.tag_output_artifacts_with_version(outputs) # TODO(b/169583143): Remove this workaround when TFX migrates to use str-typed # id/name to identify artifacts. # Convert ModelBlessing artifact to use managed MLMD resource name. if (issubclass(executor_cls, evaluator_executor.Executor) and standard_component_specs.BLESSING_KEY in outputs): # Parse the parent prefix for managed MLMD resource name. kubeflow_v2_entrypoint_utils.refactor_model_blessing( artifact_utils.get_single_instance( outputs[standard_component_specs.BLESSING_KEY]), name_from_id) # Log the output metadata to a file. So that it can be picked up by MP. 
metadata_uri = executor_input.outputs.output_file executor_output = pipeline_spec_pb2.ExecutorOutput() for k, v in kubeflow_v2_entrypoint_utils.translate_executor_output( outputs, name_from_id).items(): executor_output.artifacts[k].CopyFrom(v) for key in outputs_parameters.keys(): if key not in outputs.keys(): raise ValueError( 'All OutputParameters must have corresponding OutputValueArtifacts.') assert len(outputs[key]) == 1 and isinstance( outputs[key][0], value_artifact.ValueArtifact), ( 'Parameter should have one corresponding ValueArtifact.') artifact = outputs[key][0] if isinstance(artifact, standard_artifacts.String): executor_output.parameter_values[key].string_value = artifact.read() elif isinstance(artifact, standard_artifacts.Float) or isinstance( artifact, standard_artifacts.Integer): executor_output.parameter_values[key].number_value = artifact.read() elif isinstance(artifact, standard_artifacts.Boolean): executor_output.parameter_values[key].bool_value = artifact.read() else: raise ValueError( 'Only String, Float, Int, and Boolean ValueArtifacts are supported.' ) fileio.makedirs(os.path.dirname(metadata_uri)) with fileio.open(metadata_uri, 'wb') as f: f.write(json_format.MessageToJson(executor_output))
Selects a particular executor and run it based on name. Args: args: --executor_class_path: The import path of the executor class. --json_serialized_invocation_args: Full JSON-serialized parameters for this execution. beam_args: Optional parameter that maps to the optional_pipeline_args parameter in the pipeline, which provides additional configuration options for apache-beam and tensorflow.logging. For more about the beam arguments please refer to: https://cloud.google.com/dataflow/docs/guides/specifying-exec-params
166,193
import argparse import os from typing import List, Tuple from absl import app from absl import logging from absl.flags import argparse_flags from kfp.pipeline_spec import pipeline_spec_pb2 from tfx.components.evaluator import executor as evaluator_executor from tfx.dsl.components.base import base_beam_executor from tfx.dsl.components.base import base_executor from tfx.dsl.io import fileio from tfx.orchestration.kubeflow.v2.container import kubeflow_v2_entrypoint_utils from tfx.orchestration.portable import outputs_utils from tfx.types import artifact_utils from tfx.types import standard_artifacts from tfx.types import standard_component_specs from tfx.types import value_artifact from tfx.utils import import_utils from google.protobuf import json_format The provided code snippet includes necessary dependencies for implementing the `_parse_flags` function. Write a Python function `def _parse_flags(argv: List[str]) -> Tuple[argparse.Namespace, List[str]]` to solve the following problem: Parses command line arguments. Args: argv: Unparsed arguments for run_executor.py. Known argument names include --executor_class_path: Python class of executor in format of <module>.<class>. --json_serialized_invocation_args: Full JSON-serialized parameters for this execution. The remaining part of the arguments will be parsed as the beam args used by each component executors. Some commonly used beam args are as follows: --runner: The beam pipeline runner environment. Can be DirectRunner (for running locally) or DataflowRunner (for running on GCP Dataflow service). --project: The GCP project ID. Neede when runner==DataflowRunner --direct_num_workers: Number of threads or subprocesses executing the work load. For more about the beam arguments please refer to: https://cloud.google.com/dataflow/docs/guides/specifying-exec-params Returns: Tuple of an argparse result and remaining beam args. 
Here is the function: def _parse_flags(argv: List[str]) -> Tuple[argparse.Namespace, List[str]]: """Parses command line arguments. Args: argv: Unparsed arguments for run_executor.py. Known argument names include --executor_class_path: Python class of executor in format of <module>.<class>. --json_serialized_invocation_args: Full JSON-serialized parameters for this execution. The remaining part of the arguments will be parsed as the beam args used by each component executors. Some commonly used beam args are as follows: --runner: The beam pipeline runner environment. Can be DirectRunner (for running locally) or DataflowRunner (for running on GCP Dataflow service). --project: The GCP project ID. Neede when runner==DataflowRunner --direct_num_workers: Number of threads or subprocesses executing the work load. For more about the beam arguments please refer to: https://cloud.google.com/dataflow/docs/guides/specifying-exec-params Returns: Tuple of an argparse result and remaining beam args. """ parser = argparse_flags.ArgumentParser() parser.add_argument( '--executor_class_path', type=str, required=True, help='Python class of executor in format of <module>.<class>.') parser.add_argument( '--json_serialized_invocation_args', type=str, required=True, help='JSON-serialized metadata for this execution.') return parser.parse_known_args(argv)
Parses command line arguments. Args: argv: Unparsed arguments for run_executor.py. Known argument names include --executor_class_path: Python class of executor in format of <module>.<class>. --json_serialized_invocation_args: Full JSON-serialized parameters for this execution. The remaining part of the arguments will be parsed as the beam args used by each component executors. Some commonly used beam args are as follows: --runner: The beam pipeline runner environment. Can be DirectRunner (for running locally) or DataflowRunner (for running on GCP Dataflow service). --project: The GCP project ID. Needed when runner==DataflowRunner --direct_num_workers: Number of threads or subprocesses executing the work load. For more about the beam arguments please refer to: https://cloud.google.com/dataflow/docs/guides/specifying-exec-params Returns: Tuple of an argparse result and remaining beam args.
166,194
import random import re import string import typing from typing import Any, Dict, List, Mapping, Optional, Union from absl import logging from kfp.pipeline_spec import pipeline_spec_pb2 as pipeline_pb2 from tfx.dsl.components.base import base_node from tfx.dsl.placeholder import placeholder from tfx.orchestration import data_types from tfx.orchestration import pipeline from tfx.orchestration.kubeflow import utils from tfx.orchestration.kubeflow.v2 import compiler_utils from tfx.orchestration.kubeflow.v2 import parameter_utils from tfx.orchestration.kubeflow.v2 import step_builder from tfx.types import channel_utils from google.protobuf import json_format _LEGAL_NAME_PATTERN = re.compile(r'[a-z0-9][a-z0-9-]{0,127}') The provided code snippet includes necessary dependencies for implementing the `_check_name` function. Write a Python function `def _check_name(name: str) -> None` to solve the following problem: Checks the user-provided pipeline name. Here is the function: def _check_name(name: str) -> None: """Checks the user-provided pipeline name.""" if not _LEGAL_NAME_PATTERN.fullmatch(name): raise ValueError('User provided pipeline name % is illegal, please follow ' 'the pattern of [a-z0-9][a-z0-9-]{0,127}.')
Checks the user-provided pipeline name.
166,195
import random import re import string import typing from typing import Any, Dict, List, Mapping, Optional, Union from absl import logging from kfp.pipeline_spec import pipeline_spec_pb2 as pipeline_pb2 from tfx.dsl.components.base import base_node from tfx.dsl.placeholder import placeholder from tfx.orchestration import data_types from tfx.orchestration import pipeline from tfx.orchestration.kubeflow import utils from tfx.orchestration.kubeflow.v2 import compiler_utils from tfx.orchestration.kubeflow.v2 import parameter_utils from tfx.orchestration.kubeflow.v2 import step_builder from tfx.types import channel_utils from google.protobuf import json_format def _generate_component_name_suffix() -> str: letters = string.ascii_lowercase return ''.join(random.choice(letters) for i in range(10))
null
166,196
import random import re import string import typing from typing import Any, Dict, List, Mapping, Optional, Union from absl import logging from kfp.pipeline_spec import pipeline_spec_pb2 as pipeline_pb2 from tfx.dsl.components.base import base_node from tfx.dsl.placeholder import placeholder from tfx.orchestration import data_types from tfx.orchestration import pipeline from tfx.orchestration.kubeflow import utils from tfx.orchestration.kubeflow.v2 import compiler_utils from tfx.orchestration.kubeflow.v2 import parameter_utils from tfx.orchestration.kubeflow.v2 import step_builder from tfx.types import channel_utils from google.protobuf import json_format DEFAULT_IMAGE_PATH_KEY = 'default_image_path' The provided code snippet includes necessary dependencies for implementing the `_get_component_image` function. Write a Python function `def _get_component_image( default_image: Union[str, Mapping[str, str]], component_id: str ) -> str` to solve the following problem: Gets component image path given component_id. Here is the function: def _get_component_image( default_image: Union[str, Mapping[str, str]], component_id: str ) -> str: """Gets component image path given component_id.""" if isinstance(default_image, str): return default_image if ( component_id not in default_image and DEFAULT_IMAGE_PATH_KEY not in default_image ): raise ValueError( f'Any of component id {component_id} or default key must be found ' 'in default_image map.' ) return default_image.get(component_id, default_image[DEFAULT_IMAGE_PATH_KEY])
Gets component image path given component_id.
166,197
import random import re import string import typing from typing import Any, Dict, List, Mapping, Optional, Union from absl import logging from kfp.pipeline_spec import pipeline_spec_pb2 as pipeline_pb2 from tfx.dsl.components.base import base_node from tfx.dsl.placeholder import placeholder from tfx.orchestration import data_types from tfx.orchestration import pipeline from tfx.orchestration.kubeflow import utils from tfx.orchestration.kubeflow.v2 import compiler_utils from tfx.orchestration.kubeflow.v2 import parameter_utils from tfx.orchestration.kubeflow.v2 import step_builder from tfx.types import channel_utils from google.protobuf import json_format DEFAULT_IMAGE_PATH_KEY = 'default_image_path' def _check_default_image(default_image) -> None: if ( isinstance(default_image, Mapping) and DEFAULT_IMAGE_PATH_KEY in default_image ): logging.warning('DEFAULT_IMAGE_PATH_KEY is not found in default_image.')
null
166,198
import argparse import os from typing import List from absl import app from absl import logging from absl.flags import argparse_flags from kfp.pipeline_spec import pipeline_spec_pb2 from tfx.components.example_gen import driver from tfx.components.example_gen import input_processor from tfx.components.example_gen import utils from tfx.dsl.io import fileio from tfx.orchestration.kubeflow.v2.container import kubeflow_v2_entrypoint_utils from tfx.proto import example_gen_pb2 from tfx.proto import range_config_pb2 from tfx.types import artifact_utils from tfx.types import standard_component_specs from tfx.utils import proto_utils from google.protobuf import json_format The provided code snippet includes necessary dependencies for implementing the `_run_driver` function. Write a Python function `def _run_driver(executor_input: pipeline_spec_pb2.ExecutorInput) -> None` to solve the following problem: Runs the driver, writing its output as a ExecutorOutput proto. The main goal of this driver is to calculate the span and fingerprint of input data, allowing for the executor invocation to be skipped if the ExampleGen component has been previously run on the same data with the same configuration. This span and fingerprint are added as new custom execution properties to an ExecutorOutput proto and written to a GCS path. The CAIP pipelines system reads this file and updates MLMD with the new execution properties. Args: executor_input: pipeline_spec_pb2.ExecutorInput that contains TFX artifacts and exec_properties information. Here is the function: def _run_driver(executor_input: pipeline_spec_pb2.ExecutorInput) -> None: """Runs the driver, writing its output as a ExecutorOutput proto. The main goal of this driver is to calculate the span and fingerprint of input data, allowing for the executor invocation to be skipped if the ExampleGen component has been previously run on the same data with the same configuration. 
This span and fingerprint are added as new custom execution properties to an ExecutorOutput proto and written to a GCS path. The CAIP pipelines system reads this file and updates MLMD with the new execution properties. Args: executor_input: pipeline_spec_pb2.ExecutorInput that contains TFX artifacts and exec_properties information. """ exec_properties = kubeflow_v2_entrypoint_utils.parse_execution_properties( executor_input.inputs.parameters) name_from_id = {} outputs_dict = kubeflow_v2_entrypoint_utils.parse_raw_artifact_dict( executor_input.outputs.artifacts, name_from_id) # A path at which an ExecutorOutput message will be # written with updated execution properties and output artifacts. The CAIP # Pipelines service will update the task's properties and artifacts prior to # running the executor. output_metadata_uri = executor_input.outputs.output_file logging.set_verbosity(logging.INFO) logging.info('exec_properties = %s\noutput_metadata_uri = %s', exec_properties, output_metadata_uri) input_base_uri = exec_properties.get(standard_component_specs.INPUT_BASE_KEY) input_config = example_gen_pb2.Input() proto_utils.json_to_proto( exec_properties[standard_component_specs.INPUT_CONFIG_KEY], input_config) range_config = None range_config_entry = exec_properties.get( standard_component_specs.RANGE_CONFIG_KEY) if range_config_entry: range_config = range_config_pb2.RangeConfig() proto_utils.json_to_proto(range_config_entry, range_config) processor = input_processor.FileBasedInputProcessor(input_base_uri, input_config.splits, range_config) span, version = processor.resolve_span_and_version() fingerprint = processor.get_input_fingerprint(span, version) logging.info('Calculated span: %s', span) logging.info('Calculated fingerprint: %s', fingerprint) exec_properties[utils.SPAN_PROPERTY_NAME] = span exec_properties[utils.FINGERPRINT_PROPERTY_NAME] = fingerprint exec_properties[utils.VERSION_PROPERTY_NAME] = version # Updates the input_config.splits.pattern. 
for split in input_config.splits: split.pattern = processor.get_pattern_for_span_version( split.pattern, span, version) exec_properties[standard_component_specs .INPUT_CONFIG_KEY] = proto_utils.proto_to_json(input_config) if standard_component_specs.EXAMPLES_KEY not in outputs_dict: raise ValueError('Example artifact was missing in the ExampleGen outputs.') example_artifact = artifact_utils.get_single_instance( outputs_dict[standard_component_specs.EXAMPLES_KEY]) driver.update_output_artifact( exec_properties=exec_properties, output_artifact=example_artifact.mlmd_artifact) # Log the output metadata file output_metadata = pipeline_spec_pb2.ExecutorOutput() output_metadata.parameters[utils.SPAN_PROPERTY_NAME].int_value = span output_metadata.parameters[ utils.FINGERPRINT_PROPERTY_NAME].string_value = fingerprint if version is not None: output_metadata.parameters[utils.VERSION_PROPERTY_NAME].int_value = version output_metadata.parameters[ standard_component_specs .INPUT_CONFIG_KEY].string_value = proto_utils.proto_to_json(input_config) output_metadata.artifacts[ standard_component_specs.EXAMPLES_KEY].artifacts.add().CopyFrom( kubeflow_v2_entrypoint_utils.to_runtime_artifact( example_artifact, name_from_id)) fileio.makedirs(os.path.dirname(output_metadata_uri)) with fileio.open(output_metadata_uri, 'wb') as f: f.write(json_format.MessageToJson(output_metadata, sort_keys=True))
Runs the driver, writing its output as a ExecutorOutput proto. The main goal of this driver is to calculate the span and fingerprint of input data, allowing for the executor invocation to be skipped if the ExampleGen component has been previously run on the same data with the same configuration. This span and fingerprint are added as new custom execution properties to an ExecutorOutput proto and written to a GCS path. The CAIP pipelines system reads this file and updates MLMD with the new execution properties. Args: executor_input: pipeline_spec_pb2.ExecutorInput that contains TFX artifacts and exec_properties information.
166,199
import argparse import os from typing import List from absl import app from absl import logging from absl.flags import argparse_flags from kfp.pipeline_spec import pipeline_spec_pb2 from tfx.components.example_gen import driver from tfx.components.example_gen import input_processor from tfx.components.example_gen import utils from tfx.dsl.io import fileio from tfx.orchestration.kubeflow.v2.container import kubeflow_v2_entrypoint_utils from tfx.proto import example_gen_pb2 from tfx.proto import range_config_pb2 from tfx.types import artifact_utils from tfx.types import standard_component_specs from tfx.utils import proto_utils from google.protobuf import json_format The provided code snippet includes necessary dependencies for implementing the `_parse_flags` function. Write a Python function `def _parse_flags(argv: List[str]) -> argparse.Namespace` to solve the following problem: Command lines flag parsing. Here is the function: def _parse_flags(argv: List[str]) -> argparse.Namespace: """Command lines flag parsing.""" parser = argparse_flags.ArgumentParser() parser.add_argument( '--json_serialized_invocation_args', type=str, required=True, help='JSON-serialized metadata for this execution.') # Ignore unknown args which is expected. Beam related args are also supplied # as command line arguments. # TODO(b/182333035): Wrap beam related flags into a dedicated flag. namespace, _ = parser.parse_known_args(argv) return namespace
Command lines flag parsing.
166,200
import datetime import json import os from typing import Any, Dict, List, Optional, Union, MutableMapping from absl import logging from kfp.pipeline_spec import pipeline_spec_pb2 from tfx import version from tfx.dsl.components.base import base_component from tfx.dsl.components.base import base_node from tfx.dsl.io import fileio from tfx.orchestration import pipeline as tfx_pipeline from tfx.orchestration import tfx_runner from tfx.orchestration.config import pipeline_config from tfx.orchestration.kubeflow.v2 import pipeline_builder from tfx.utils import telemetry_utils from tfx.utils import version_utils from google.protobuf import json_format The provided code snippet includes necessary dependencies for implementing the `_get_current_time` function. Write a Python function `def _get_current_time()` to solve the following problem: Gets the current timestamp. Here is the function: def _get_current_time(): """Gets the current timestamp.""" return datetime.datetime.now()
Gets the current timestamp.
166,201
from typing import Any, Dict, List, Optional from tfx.dsl.component.experimental import component_utils from tfx.dsl.component.experimental import placeholders from tfx.dsl.components.base import base_component from tfx.dsl.components.base import executor_spec from tfx.orchestration.kubeflow.v2.components.experimental import ai_platform_training_executor from tfx.types import channel_utils from tfx.types import component_spec from tfx.utils import json_utils The provided code snippet includes necessary dependencies for implementing the `create_ai_platform_training` function. Write a Python function `def create_ai_platform_training( name: str, project_id: str, region: Optional[str] = None, job_id: Optional[str] = None, image_uri: Optional[str] = None, args: Optional[List[placeholders.CommandlineArgumentType]] = None, # TODO(jxzheng): support Python training spec scale_tier: Optional[str] = None, training_input: Optional[Dict[str, Any]] = None, labels: Optional[Dict[str, str]] = None, inputs: Optional[Dict[str, Any]] = None, outputs: Optional[Dict[str, Any]] = None, parameters: Optional[Dict[str, Any]] = None, ) -> base_component.BaseComponent` to solve the following problem: Creates a pipeline step that launches a AIP training job. The generated TFX component will have a component spec specified dynamically, through inputs/outputs/parameters in the following format: - inputs: A mapping from input name to the upstream channel connected. The artifact type of the channel will be automatically inferred. - outputs: A mapping from output name to the associated artifact type. - parameters: A mapping from execution property names to its associated value. Only primitive typed values are supported. Note that RuntimeParameter is not supported yet. For example: ``` create_ai_platform_training( ... inputs: { # Assuming there is an upstream node example_gen, with an output # 'examples' of the type Examples. 
'examples': example_gen.outputs['examples'], }, outputs: { 'model': standard_artifacts.Model, }, parameters: { 'n_steps': 100, 'optimizer': 'sgd', } ... ) ``` will generate a component instance with a component spec equivalent to: ``` class MyComponentSpec(ComponentSpec): INPUTS = { 'examples': ChannelParameter(type=standard_artifacts.Examples) } OUTPUTS = { 'model': ChannelParameter(type=standard_artifacts.Model) } PARAMETERS = { 'n_steps': ExecutionParameter(type=int), 'optimizer': ExecutionParameter(type=str) } ``` with its input 'examples' is connected to the example_gen output, and execution properties specified as 100 and 'sgd' respectively. Example usage of the component: ``` # A single node training job. my_train = create_ai_platform_training( name='my_training_step', project_id='my-project', region='us-central1', image_uri='gcr.io/my-project/caip-training-test:latest', 'args': [ '--examples', placeholders.InputUriPlaceholder('examples'), '--n-steps', placeholders.InputValuePlaceholder('n_step'), '--output-location', placeholders.OutputUriPlaceholder('model') ] scale_tier='BASIC_GPU', inputs={'examples': example_gen.outputs['examples']}, outputs={ 'model': standard_artifacts.Model }, parameters={'n_step': 100} ) # More complex setting can be expressed by providing training_input # directly. 
my_distributed_train = create_ai_platform_training( name='my_training_step', project_id='my-project', training_input={ 'scaleTier': 'CUSTOM', 'region': 'us-central1', 'masterType': 'n1-standard-8', 'masterConfig': { 'imageUri': 'gcr.io/my-project/my-dist-training:latest' }, 'workerType': 'n1-standard-8', 'workerCount': 8, 'workerConfig': { 'imageUri': 'gcr.io/my-project/my-dist-training:latest' }, 'args': [ '--examples', placeholders.InputUriPlaceholder('examples'), '--n-steps', placeholders.InputValuePlaceholder('n_step'), '--output-location', placeholders.OutputUriPlaceholder('model') ] }, inputs={'examples': example_gen.outputs['examples']}, outputs={'model': Model}, parameters={'n_step': 100} ) ``` Args: name: name of the component. This is needed to construct the component spec and component class dynamically as well. project_id: the GCP project under which the AIP training job will be running. region: GCE region where the AIP training job will be running. job_id: the unique ID of the job. Default to 'tfx_%Y%m%d%H%M%S' image_uri: the GCR location of the container image, which will be used to execute the training program. If the same field is specified in training_input, the latter overrides image_uri. args: command line arguments that will be passed into the training program. Users can use placeholder semantics as in tfx.dsl.component.experimental.container_component to wire the args with component inputs/outputs/parameters. scale_tier: Cloud ML resource requested by the job. See https://cloud.google.com/ai-platform/training/docs/reference/rest/v1/projects.jobs#ScaleTier training_input: full training job spec. This field overrides other specifications if applicable. This field follows the [TrainingInput](https://cloud.google.com/ai-platform/training/docs/reference/rest/v1/projects.jobs#traininginput) schema. labels: user-specified label attached to the job. inputs: the dict of component inputs. outputs: the dict of component outputs. 
def create_ai_platform_training(
    name: str,
    project_id: str,
    region: Optional[str] = None,
    job_id: Optional[str] = None,
    image_uri: Optional[str] = None,
    args: Optional[List[placeholders.CommandlineArgumentType]] = None,
    # TODO(jxzheng): support Python training spec
    scale_tier: Optional[str] = None,
    training_input: Optional[Dict[str, Any]] = None,
    labels: Optional[Dict[str, str]] = None,
    inputs: Optional[Dict[str, Any]] = None,
    outputs: Optional[Dict[str, Any]] = None,
    parameters: Optional[Dict[str, Any]] = None,
) -> base_component.BaseComponent:
  """Creates a pipeline step that launches a AIP training job.

  The generated TFX component will have a component spec specified
  dynamically, through inputs/outputs/parameters in the following format:
  - inputs: A mapping from input name to the upstream channel connected. The
    artifact type of the channel will be automatically inferred.
  - outputs: A mapping from output name to the associated artifact type.
  - parameters: A mapping from execution property names to its associated
    value. Only primitive typed values are supported. Note that
    RuntimeParameter is not supported yet.

  For example:

  ```
  create_ai_platform_training(
      ...
      inputs: {
          # Assuming there is an upstream node example_gen, with an output
          # 'examples' of the type Examples.
          'examples': example_gen.outputs['examples'],
      },
      outputs: {
          'model': standard_artifacts.Model,
      },
      parameters: {
          'n_steps': 100,
          'optimizer': 'sgd',
      }
      ...
  )
  ```

  will generate a component instance with a component spec equivalent to:

  ```
  class MyComponentSpec(ComponentSpec):
    INPUTS = {
        'examples': ChannelParameter(type=standard_artifacts.Examples)
    }
    OUTPUTS = {
        'model': ChannelParameter(type=standard_artifacts.Model)
    }
    PARAMETERS = {
        'n_steps': ExecutionParameter(type=int),
        'optimizer': ExecutionParameter(type=str)
    }
  ```

  with its input 'examples' connected to the example_gen output, and
  execution properties specified as 100 and 'sgd' respectively.

  More complex settings (distributed training, custom machine types, etc.)
  can be expressed by providing `training_input` directly, following the
  TrainingInput schema.

  Args:
    name: name of the component. This is needed to construct the component
      spec and component class dynamically as well.
    project_id: the GCP project under which the AIP training job will be
      running.
    region: GCE region where the AIP training job will be running.
    job_id: the unique ID of the job. Default to 'tfx_%Y%m%d%H%M%S'
    image_uri: the GCR location of the container image, which will be used to
      execute the training program. If the same field is specified in
      training_input, the latter overrides image_uri.
    args: command line arguments that will be passed into the training
      program. Users can use placeholder semantics as in
      tfx.dsl.component.experimental.container_component to wire the args
      with component inputs/outputs/parameters.
    scale_tier: Cloud ML resource requested by the job. See
      https://cloud.google.com/ai-platform/training/docs/reference/rest/v1/projects.jobs#ScaleTier
    training_input: full training job spec. This field overrides other
      specifications if applicable. This field follows the
      [TrainingInput](https://cloud.google.com/ai-platform/training/docs/reference/rest/v1/projects.jobs#traininginput)
      schema.
    labels: user-specified label attached to the job.
    inputs: the dict of component inputs.
    outputs: the dict of component outputs.
    parameters: the dict of component parameters, aka, execution properties.

  Returns:
    A component instance that represents the AIP job in the DSL.

  Raises:
    ValueError: when image_uri is missing and masterConfig is not specified
      in training_input, or when region is missing and training_input does
      not provide region either.
    TypeError: when non-primitive parameters are specified.
  """
  training_input = training_input or {}

  # BUGFIX: the TrainingInput schema uses the camelCase key 'scaleTier'. The
  # original guard read the (never-present) snake_case key 'scale_tier', so an
  # explicit scale_tier argument would silently overwrite a tier already
  # configured in training_input.
  if scale_tier and not training_input.get('scaleTier'):
    training_input['scaleTier'] = scale_tier

  if not training_input.get('masterConfig'):
    # If no replica config is specified, create a default one.
    if not image_uri:
      raise ValueError('image_uri is required when masterConfig is not '
                       'explicitly specified in training_input.')
    training_input['masterConfig'] = {'imageUri': image_uri}
    # Note: A custom entrypoint can be set to training_input['masterConfig']
    # through key 'container_command'.

  training_input['args'] = args

  if not training_input.get('region'):
    if not region:
      raise ValueError('region is required when it is not set in '
                       'training_input.')
    training_input['region'] = region

  training_job = {
      'training_input': training_input,
      ai_platform_training_executor.LABELS_CONFIG_KEY: labels,
  }
  # Squash training_input, project, job_id, and labels into an exec property
  # namely 'aip_training_config'.
  aip_training_config = {
      ai_platform_training_executor.PROJECT_CONFIG_KEY: project_id,
      ai_platform_training_executor.TRAINING_JOB_CONFIG_KEY: training_job,
      ai_platform_training_executor.JOB_ID_CONFIG_KEY: job_id,
      ai_platform_training_executor.LABELS_CONFIG_KEY: labels,
  }
  aip_training_config_str = json_utils.dumps(aip_training_config)

  # Construct the component spec.
  if inputs is None:
    inputs = {}
  if outputs is None:
    outputs = {}
  if parameters is None:
    parameters = {}

  input_channel_parameters = {}
  output_channel_parameters = {}
  output_channels = {}
  execution_parameters = {
      ai_platform_training_executor.CONFIG_KEY:
          component_spec.ExecutionParameter(type=str)
  }

  for input_name, single_channel in inputs.items():
    # Infer the type of input channels based on the channels passed in.
    # TODO(b/155804245) Sanitize the names so that they're valid python names
    input_channel_parameters[input_name] = (
        component_spec.ChannelParameter(type=single_channel.type))

  for output_name, channel_type in outputs.items():
    # TODO(b/155804245) Sanitize the names so that they're valid python names
    output_channel_parameters[output_name] = (
        component_spec.ChannelParameter(type=channel_type))
    artifact = channel_type()
    channel = channel_utils.as_channel([artifact])
    output_channels[output_name] = channel

  # TODO(jxzheng): Support RuntimeParameter as parameters.
  for param_name, single_parameter in parameters.items():
    # Infer the type of parameters based on the parameters passed in.
    # TODO(b/155804245) Sanitize the names so that they're valid python names
    if not isinstance(single_parameter, (int, float, str, bytes)):
      raise TypeError(
          'Parameter can only be int/float/str/bytes, got {}'.format(
              type(single_parameter)))
    execution_parameters[param_name] = (
        component_spec.ExecutionParameter(type=type(single_parameter)))

  default_init_args = {
      **inputs,
      **output_channels,
      **parameters,
      ai_platform_training_executor.CONFIG_KEY: aip_training_config_str
  }

  tfx_component_class = component_utils.create_tfx_component_class(
      name=name,
      tfx_executor_spec=executor_spec.ExecutorClassSpec(
          ai_platform_training_executor.AiPlatformTrainingExecutor),
      input_channel_parameters=input_channel_parameters,
      output_channel_parameters=output_channel_parameters,
      execution_parameters=execution_parameters,
      default_init_args=default_init_args)

  return tfx_component_class()
Creates a pipeline step that launches a AIP training job. The generated TFX component will have a component spec specified dynamically, through inputs/outputs/parameters in the following format: - inputs: A mapping from input name to the upstream channel connected. The artifact type of the channel will be automatically inferred. - outputs: A mapping from output name to the associated artifact type. - parameters: A mapping from execution property names to its associated value. Only primitive typed values are supported. Note that RuntimeParameter is not supported yet. For example: ``` create_ai_platform_training( ... inputs: { # Assuming there is an upstream node example_gen, with an output # 'examples' of the type Examples. 'examples': example_gen.outputs['examples'], }, outputs: { 'model': standard_artifacts.Model, }, parameters: { 'n_steps': 100, 'optimizer': 'sgd', } ... ) ``` will generate a component instance with a component spec equivalent to: ``` class MyComponentSpec(ComponentSpec): INPUTS = { 'examples': ChannelParameter(type=standard_artifacts.Examples) } OUTPUTS = { 'model': ChannelParameter(type=standard_artifacts.Model) } PARAMETERS = { 'n_steps': ExecutionParameter(type=int), 'optimizer': ExecutionParameter(type=str) } ``` with its input 'examples' is connected to the example_gen output, and execution properties specified as 100 and 'sgd' respectively. Example usage of the component: ``` # A single node training job. 
my_train = create_ai_platform_training( name='my_training_step', project_id='my-project', region='us-central1', image_uri='gcr.io/my-project/caip-training-test:latest', 'args': [ '--examples', placeholders.InputUriPlaceholder('examples'), '--n-steps', placeholders.InputValuePlaceholder('n_step'), '--output-location', placeholders.OutputUriPlaceholder('model') ] scale_tier='BASIC_GPU', inputs={'examples': example_gen.outputs['examples']}, outputs={ 'model': standard_artifacts.Model }, parameters={'n_step': 100} ) # More complex setting can be expressed by providing training_input # directly. my_distributed_train = create_ai_platform_training( name='my_training_step', project_id='my-project', training_input={ 'scaleTier': 'CUSTOM', 'region': 'us-central1', 'masterType': 'n1-standard-8', 'masterConfig': { 'imageUri': 'gcr.io/my-project/my-dist-training:latest' }, 'workerType': 'n1-standard-8', 'workerCount': 8, 'workerConfig': { 'imageUri': 'gcr.io/my-project/my-dist-training:latest' }, 'args': [ '--examples', placeholders.InputUriPlaceholder('examples'), '--n-steps', placeholders.InputValuePlaceholder('n_step'), '--output-location', placeholders.OutputUriPlaceholder('model') ] }, inputs={'examples': example_gen.outputs['examples']}, outputs={'model': Model}, parameters={'n_step': 100} ) ``` Args: name: name of the component. This is needed to construct the component spec and component class dynamically as well. project_id: the GCP project under which the AIP training job will be running. region: GCE region where the AIP training job will be running. job_id: the unique ID of the job. Default to 'tfx_%Y%m%d%H%M%S' image_uri: the GCR location of the container image, which will be used to execute the training program. If the same field is specified in training_input, the latter overrides image_uri. args: command line arguments that will be passed into the training program. 
Users can use placeholder semantics as in tfx.dsl.component.experimental.container_component to wire the args with component inputs/outputs/parameters. scale_tier: Cloud ML resource requested by the job. See https://cloud.google.com/ai-platform/training/docs/reference/rest/v1/projects.jobs#ScaleTier training_input: full training job spec. This field overrides other specifications if applicable. This field follows the [TrainingInput](https://cloud.google.com/ai-platform/training/docs/reference/rest/v1/projects.jobs#traininginput) schema. labels: user-specified label attached to the job. inputs: the dict of component inputs. outputs: the dict of component outputs. parameters: the dict of component parameters, aka, execution properties. Returns: A component instance that represents the AIP job in the DSL. Raises: ValueError: when image_uri is missing and masterConfig is not specified in training_input, or when region is missing and training_input does not provide region either. TypeError: when non-primitive parameters are specified.
166,202
# Terminal (or otherwise non-progressing) pipeline states.
_PIPELINE_COMPLETE_STATES = frozenset([
    pipeline_state.PipelineState.PIPELINE_STATE_SUCCEEDED,
    pipeline_state.PipelineState.PIPELINE_STATE_FAILED,
    pipeline_state.PipelineState.PIPELINE_STATE_CANCELLED,
    pipeline_state.PipelineState.PIPELINE_STATE_PAUSED,
])


def poll_job_status(job_id: str, timeout: datetime.timedelta,
                    polling_interval_secs: int):
  """Checks the status of the job.

  NOTE: aiplatform.init() should be already called.

  Args:
    job_id: The relative ID of the pipeline job.
    timeout: Timeout duration for the job execution.
    polling_interval_secs: Interval to check the job status.

  Raises:
    RuntimeError: On (1) unexpected response from service; or (2) unexpected
      job status; or (3) timed out waiting for finishing.
  """
  deadline = datetime.datetime.now() + timeout
  while datetime.datetime.now() < deadline:
    time.sleep(polling_interval_secs)
    job = pipeline_jobs.PipelineJob.get(resource_name=job_id)
    # '.state' is synced on every property access, so it can change between
    # comparisons. Snapshot it once so the checks below see a single value.
    current_state = job.state
    if current_state == pipeline_state.PipelineState.PIPELINE_STATE_SUCCEEDED:
      logging.info('Job succeeded: %s', job)
      return
    if current_state in _PIPELINE_COMPLETE_STATES:
      raise RuntimeError('Job is in an unexpected state: %s' % current_state)
  raise RuntimeError('Timed out waiting for job to finish.')
Checks the status of the job. NOTE: aiplatform.init() should be already called. Args: job_id: The relative ID of the pipeline job. timeout: Timeout duration for the job execution. polling_interval_secs: Interval to check the job status. Raises: RuntimeError: On (1) unexpected response from service; or (2) unexpected job status; or (3) timed out waiting for finishing.
166,203
def replace_exec_properties(component: base_node.BaseNode) -> None:
  """Replaces TFX placeholders in execution properties with KFP placeholders.

  Every execution property typed as `FinalStatusStr` is rewritten in place to
  the Argo workflow-status placeholder string understood by the KFP backend.

  Args:
    component: the TFX node whose `exec_properties` are mutated in place.
  """
  for prop_name in list(component.exec_properties):
    if isinstance(component.exec_properties[prop_name], FinalStatusStr):
      component.exec_properties[prop_name] = '{{workflow.status}}'
Replaces TFX placeholders in execution properties with KFP placeholders.
166,204
def fix_brackets(placeholder: str) -> str:
  """Fix the imbalanced brackets in placeholder.

  When ptype is not null, regex matching might grab a placeholder with }
  missing. This function fixes the missing bracket.

  Args:
    placeholder: string placeholder of RuntimeParameter

  Returns:
    Placeholder with re-balanced brackets.

  Raises:
    RuntimeError: if left brackets are less than right brackets.
  """
  lcount = placeholder.count('{')
  rcount = placeholder.count('}')
  if lcount < rcount:
    # BUGFIX: lcount < rcount means there are surplus *right* brackets; the
    # original message incorrectly said "left brackets".
    raise RuntimeError(
        'Unexpected redundant right brackets found in {}'.format(placeholder))
  # Append enough closing brackets to balance every unmatched '{'.
  return placeholder + '}' * (lcount - rcount)
Fix the imbalanced brackets in placeholder. When ptype is not null, regex matching might grab a placeholder with } missing. This function fixes the missing bracket. Args: placeholder: string placeholder of RuntimeParameter Returns: Placeholder with re-balanced brackets. Raises: RuntimeError: if left brackets are less than right brackets.
166,205
def _encode_runtime_parameter(param: data_types.RuntimeParameter) -> str:
  """Encode a runtime parameter into a placeholder for value substitution.

  Args:
    param: the runtime parameter to encode.

  Returns:
    A string of the form '<name>=<TYPE>:<kfp placeholder>' suitable for later
    value substitution.
  """
  # Map the parameter's Python type onto the IR enum; anything other than
  # int/float falls back to STRING.
  if param.ptype is int:
    parameter_type = pipeline_pb2.RuntimeParameter.INT
  elif param.ptype is float:
    parameter_type = pipeline_pb2.RuntimeParameter.DOUBLE
  else:
    parameter_type = pipeline_pb2.RuntimeParameter.STRING
  type_name = pipeline_pb2.RuntimeParameter.Type.Name(parameter_type)
  kfp_placeholder = str(dsl.PipelineParam(name=param.name))
  return f'{param.name}={type_name}:{kfp_placeholder}'
Encode a runtime parameter into a placeholder for value substitution.
166,206
def _replace_placeholder(component: tfx_base_node.BaseNode) -> None:
  """Replaces the RuntimeParameter placeholders with kfp.dsl.PipelineParam.

  Args:
    component: the TFX node whose `exec_properties` are rewritten in place;
      non-RuntimeParameter properties are left untouched.
  """
  for prop_name in list(component.exec_properties):
    value = component.exec_properties[prop_name]
    if isinstance(value, data_types.RuntimeParameter):
      component.exec_properties[prop_name] = str(
          dsl.PipelineParam(name=value.name))
Replaces the RuntimeParameter placeholders with kfp.dsl.PipelineParam.
166,207
import types
from typing import Any, Callable

from tfx.dsl.component.experimental.decorators import component


def component(func: types.FunctionType, /) -> BaseFunctionalComponentFactory:
  ...


def component(
    *,
    component_annotation: Optional[
        type[system_executions.SystemExecution]
    ] = None,
    use_beam: bool = False,
) -> Callable[[types.FunctionType], BaseFunctionalComponentFactory]:
  ...


def component(
    func: Optional[types.FunctionType] = None,
    /,
    *,
    component_annotation: Optional[
        Type[system_executions.SystemExecution]
    ] = None,
    use_beam: bool = False,
) -> Union[
    BaseFunctionalComponentFactory,
    Callable[[types.FunctionType], BaseFunctionalComponentFactory],
]:
  """Decorator: creates a component from a typehint-annotated Python function.

  Builds a TFX component class from the typehint annotations on ``func``.
  Argument annotations determine how values are passed:

  * ``Parameter[T]`` (T one of int/float/str/bool): a primitive execution
    parameter known at pipeline-construction time, recorded in ML Metadata as
    part of the component's execution record. May be optional.
  * ``int``, ``float``, ``str``, ``bytes``, ``bool``, ``Dict``, ``List``: a
    primitive value tracked as an ``Integer``/``Float``/``String``/``Bytes``/
    ``Boolean``/``JsonValue`` artifact whose value is read and passed into the
    function. May be optional.
  * ``InputArtifact[T]`` / ``OutputArtifact[T]`` (T derives from
    ``tfx.types.Artifact``): artifact objects consumed / emitted by the
    component. Input artifacts may default to ``None``; output artifacts
    cannot be optional.

  The return typehint must be empty/``None`` or a ``TypedDict`` of primitive
  value types (optionally ``Optional[...]``); output artifacts do not appear
  in the return typehint. The decorated function must be defined at the top
  level of its module.

  When ``use_beam`` is True, exactly one parameter must be annotated
  ``BeamComponentParameter[beam.Pipeline]`` with default ``None``; it is
  replaced by a Beam pipeline built with the TFX pipeline's
  beam_pipeline_args shared with other Beam-based components.

  Experimental: no backwards compatibility guarantees.

  Args:
    func: Typehint-annotated component executor function.
    component_annotation: Optional ``SystemExecution`` subclass used to
      annotate the python function-based component; may be None.
    use_beam: Whether to create a component that is a subclass of
      ``BaseBeamComponent`` instead of ``BaseComponent``.

  Returns:
    An object that (1) is callable like the initializer of a
    ``base_component.BaseComponent`` (or ``BaseBeamComponent``) subclass and
    (2) exposes ``test_call()`` for unit testing the inner implementation.
    Do not rely on it being a ``Type`` even though it currently is one.

  Raises:
    EnvironmentError: if the current Python interpreter is not Python 3.
  """
  if func is None:
    # Decorator used with parentheses: the first call received only kwargs,
    # so forward them to the second call, which receives the function.
    return functools.partial(
        component,
        component_annotation=component_annotation,
        use_beam=use_beam,
    )

  utils.assert_is_top_level_func(func)

  (inputs, outputs, parameters, arg_formats, arg_defaults, returned_values,
   json_typehints, return_json_typehints) = (
       function_parser.parse_typehint_component_function(func))

  if use_beam and list(parameters.values()).count(_BeamPipeline) != 1:
    raise ValueError('The decorated function must have one and only one '
                     'optional parameter of type '
                     'BeamComponentParameter[beam.Pipeline] with '
                     'default value None when use_beam=True.')

  component_class = utils.create_component_class(
      func=func,
      arg_defaults=arg_defaults,
      arg_formats=arg_formats,
      base_executor_class=(
          _FunctionBeamExecutor if use_beam else _FunctionExecutor
      ),
      executor_spec_class=(
          executor_spec.BeamExecutorSpec
          if use_beam
          else executor_spec.ExecutorClassSpec
      ),
      base_component_class=(
          _SimpleBeamComponent if use_beam else _SimpleComponent
      ),
      inputs=inputs,
      outputs=outputs,
      parameters=parameters,
      type_annotation=component_annotation,
      json_compatible_inputs=json_typehints,
      json_compatible_outputs=return_json_typehints,
      return_values_optionality=returned_values,
  )
  return typing.cast(BaseFunctionalComponentFactory, component_class)
def exit_handler(func: types.FunctionType) -> Callable[..., Any]:
  """Creates an exit handler from a typehint-annotated Python function.

  Wraps the ``component`` decorator so the resulting component can run as a
  pipeline exit handler (post actions of a pipeline), which is only supported
  in Vertex AI. To receive the final status of the dependent pipeline,
  declare the parameter as ``Parameter[str]`` and pass a ``FinalStatusStr``
  when instantiating the component::

    from tfx import v1 as tfx

    @tfx.orchestration.experimental.exit_handler
    def MyExitHandlerComponent(final_status: tfx.dsl.components.Parameter[str]):
      # parse the final status
      pipeline_task_status = pipeline_pb2.PipelineTaskFinalStatus()
      proto_utils.json_to_proto(final_status, pipeline_task_status)
      print(pipeline_task_status)

  Example usage in a Vertex AI graph definition::

    exit_handler = exit_handler_component(
        final_status=tfx.dsl.experimental.FinalStatusStr())

    dsl_pipeline = tfx.dsl.Pipeline(...)
    runner = tfx.orchestration.experimental.KubeflowV2DagRunner(...)
    runner.set_exit_handler([exit_handler])
    runner.run(pipeline=dsl_pipeline)

  Experimental: no backwards compatibility guarantees.

  Args:
    func: Typehint-annotated component executor function.

  Returns:
    `base_component.BaseComponent` subclass for the given component executor
    function.
  """
  # The heavy lifting (typehint parsing, class synthesis) is delegated to the
  # `component` decorator; an exit handler is an ordinary component that the
  # runner registers for post actions.
  return component(func)
Creates an exit handler from a typehint-annotated Python function. This decorator creates an exit handler wrapping the component typehint annotation - typehint annotations specified for the arguments and return value for a Python function. Exit handler is to annotate the component for post actions of a pipeline, only supported in Vertex AI. Specifically, function arguments can be annotated with the following types and associated semantics supported in component. In order to get in the final status of dependent pipeline, parameter should be defined as Parameter[str], passing in FinalStatusStr type when initializing the component. This is example usage of component definition using this decorator: ``` from tfx import v1 as tfx @tfx.orchestration.experimental.exit_handler def MyExitHandlerComponent(final_status: tfx.dsl.components.Parameter[str]): # parse the final status pipeline_task_status = pipeline_pb2.PipelineTaskFinalStatus() proto_utils.json_to_proto(final_status, pipeline_task_status) print(pipeline_task_status) ``` Example usage in a Vertex AI graph definition: ``` exit_handler = exit_handler_component( final_status=tfx.dsl.experimental.FinalStatusStr()) dsl_pipeline = tfx.dsl.Pipeline(...) runner = tfx.orchestration.experimental.KubeflowV2DagRunner(...) runner.set_exit_handler([exit_handler]) runner.run(pipeline=dsl_pipeline) ``` Experimental: no backwards compatibility guarantees. Args: func: Typehint-annotated component executor function. Returns: `base_component.BaseComponent` subclass for the given component executor function.
166,208
import collections
import copy
import os
from typing import Any, Callable, Dict, List, Optional, Type, cast, MutableMapping

from absl import logging
from kfp import compiler
from kfp import dsl
from kfp import gcp
from kubernetes import client as k8s_client
from tfx import version
from tfx.dsl.compiler import compiler as tfx_compiler
from tfx.dsl.components.base import base_component as tfx_base_component
from tfx.dsl.components.base import base_node
from tfx.orchestration import data_types
from tfx.orchestration import pipeline as tfx_pipeline
from tfx.orchestration import tfx_runner
from tfx.orchestration.config import pipeline_config
from tfx.orchestration.kubeflow import base_component
from tfx.orchestration.kubeflow import utils
from tfx.orchestration.kubeflow.proto import kubeflow_pb2
from tfx.orchestration.launcher import base_component_launcher
from tfx.orchestration.launcher import in_process_component_launcher
from tfx.orchestration.launcher import kubernetes_component_launcher
from tfx.proto.orchestration import pipeline_pb2
from tfx.utils import telemetry_utils

# Type alias for a function that patches a KFP ContainerOp in place.
OpFunc = Callable[[dsl.ContainerOp], dsl.ContainerOp]


def _mount_secret_op(secret_name: str) -> OpFunc:
  """Mounts all key-value pairs found in the named Kubernetes Secret.

  All key-value pairs in the Secret are mounted as environment variables.

  Args:
    secret_name: The name of the Secret resource.

  Returns:
    An OpFunc for mounting the Secret.
  """

  def mount_secret(container_op: dsl.ContainerOp):
    # BUG FIX: the original constructed a V1ConfigMapEnvSource here even
    # though the `secret_ref` field of V1EnvFromSource refers to a Secret;
    # a Secret must be referenced through V1SecretEnvSource.
    secret_ref = k8s_client.V1SecretEnvSource(
        name=secret_name, optional=True)
    container_op.container.add_env_from(
        k8s_client.V1EnvFromSource(secret_ref=secret_ref))

  return mount_secret
Mounts all key-value pairs found in the named Kubernetes Secret. All key-value pairs in the Secret are mounted as environment variables. Args: secret_name: The name of the Secret resource. Returns: An OpFunc for mounting the Secret.
166,209
import collections
import copy
import os
from typing import Any, Callable, Dict, List, Optional, Type, cast, MutableMapping

from absl import logging
from kfp import compiler
from kfp import dsl
from kfp import gcp
from kubernetes import client as k8s_client
from tfx import version
from tfx.dsl.compiler import compiler as tfx_compiler
from tfx.dsl.components.base import base_component as tfx_base_component
from tfx.dsl.components.base import base_node
from tfx.orchestration import data_types
from tfx.orchestration import pipeline as tfx_pipeline
from tfx.orchestration import tfx_runner
from tfx.orchestration.config import pipeline_config
from tfx.orchestration.kubeflow import base_component
from tfx.orchestration.kubeflow import utils
from tfx.orchestration.kubeflow.proto import kubeflow_pb2
from tfx.orchestration.launcher import base_component_launcher
from tfx.orchestration.launcher import in_process_component_launcher
from tfx.orchestration.launcher import kubernetes_component_launcher
from tfx.proto.orchestration import pipeline_pb2
from tfx.utils import telemetry_utils

# Type alias for a function that patches a KFP ContainerOp in place.
OpFunc = Callable[[dsl.ContainerOp], dsl.ContainerOp]

# Name of the Kubernetes Secret holding the GCP service account credentials.
_KUBEFLOW_GCP_SECRET_NAME = 'user-gcp-sa'


def _mount_config_map_op(config_map_name: str) -> OpFunc:
  """Mounts all key-value pairs found in the named Kubernetes ConfigMap.

  All key-value pairs in the ConfigMap are mounted as environment variables.

  Args:
    config_map_name: The name of the ConfigMap resource.

  Returns:
    An OpFunc for mounting the ConfigMap.
  """

  def mount_config_map(container_op: dsl.ContainerOp):
    # Expose every entry of the ConfigMap as an environment variable on the
    # container; `optional=True` tolerates a missing ConfigMap.
    env_source = k8s_client.V1EnvFromSource(
        config_map_ref=k8s_client.V1ConfigMapEnvSource(
            name=config_map_name, optional=True))
    container_op.container.add_env_from(env_source)

  return mount_config_map
def get_default_pipeline_operator_funcs(
    use_gcp_sa: bool = False) -> List[OpFunc]:
  """Returns a default list of pipeline operator functions.

  Args:
    use_gcp_sa: If true, mount a GCP service account secret to each pod, with
      the name _KUBEFLOW_GCP_SECRET_NAME.

  Returns:
    A list of functions with type OpFunc.
  """
  # Op enabling authentication for GCP services if needed.
  gcp_secret_op = gcp.use_gcp_secret(_KUBEFLOW_GCP_SECRET_NAME)
  # Op mounting the configmap containing the Metadata gRPC server
  # configuration; always included.
  mount_config_map_op = _mount_config_map_op('metadata-grpc-configmap')
  return ([gcp_secret_op, mount_config_map_op]
          if use_gcp_sa else [mount_config_map_op])
Returns a default list of pipeline operator functions. Args: use_gcp_sa: If true, mount a GCP service account secret to each pod, with the name _KUBEFLOW_GCP_SECRET_NAME. Returns: A list of functions with type OpFunc.
166,210
import collections
import copy
import os
from typing import Any, Callable, Dict, List, Optional, Type, cast, MutableMapping

from absl import logging
from kfp import compiler
from kfp import dsl
from kfp import gcp
from kubernetes import client as k8s_client
from tfx import version
from tfx.dsl.compiler import compiler as tfx_compiler
from tfx.dsl.components.base import base_component as tfx_base_component
from tfx.dsl.components.base import base_node
from tfx.orchestration import data_types
from tfx.orchestration import pipeline as tfx_pipeline
from tfx.orchestration import tfx_runner
from tfx.orchestration.config import pipeline_config
from tfx.orchestration.kubeflow import base_component
from tfx.orchestration.kubeflow import utils
from tfx.orchestration.kubeflow.proto import kubeflow_pb2
from tfx.orchestration.launcher import base_component_launcher
from tfx.orchestration.launcher import in_process_component_launcher
from tfx.orchestration.launcher import kubernetes_component_launcher
from tfx.proto.orchestration import pipeline_pb2
from tfx.utils import telemetry_utils


def get_default_kubeflow_metadata_config(
) -> kubeflow_pb2.KubeflowMetadataConfig:
  """Returns the default metadata connection config for Kubeflow.

  Returns:
    A config proto that will be serialized as JSON and passed to the running
    container so the TFX component driver is able to communicate with MLMD in
    a Kubeflow cluster.
  """
  # The default metadata configuration for a Kubeflow Pipelines cluster is
  # codified as a Kubernetes ConfigMap:
  # https://github.com/kubeflow/pipelines/blob/master/manifests/kustomize/base/metadata/metadata-grpc-configmap.yaml
  # Keys in that config map — and therefore the environment variable names
  # used below — are lower-cased.
  config = kubeflow_pb2.KubeflowMetadataConfig()
  grpc_config = config.grpc_config
  # Environment variables exposing the Metadata gRPC service host and port in
  # the cluster that is backing Kubeflow Metadata.
  grpc_config.grpc_service_host.environment_variable = (
      'METADATA_GRPC_SERVICE_HOST')
  grpc_config.grpc_service_port.environment_variable = (
      'METADATA_GRPC_SERVICE_PORT')
  return config
Returns the default metadata connection config for Kubeflow. Returns: A config proto that will be serialized as JSON and passed to the running container so the TFX component driver is able to communicate with MLMD in a Kubeflow cluster.
166,211
import collections import copy import os from typing import Any, Callable, Dict, List, Optional, Type, cast, MutableMapping from absl import logging from kfp import compiler from kfp import dsl from kfp import gcp from kubernetes import client as k8s_client from tfx import version from tfx.dsl.compiler import compiler as tfx_compiler from tfx.dsl.components.base import base_component as tfx_base_component from tfx.dsl.components.base import base_node from tfx.orchestration import data_types from tfx.orchestration import pipeline as tfx_pipeline from tfx.orchestration import tfx_runner from tfx.orchestration.config import pipeline_config from tfx.orchestration.kubeflow import base_component from tfx.orchestration.kubeflow import utils from tfx.orchestration.kubeflow.proto import kubeflow_pb2 from tfx.orchestration.launcher import base_component_launcher from tfx.orchestration.launcher import in_process_component_launcher from tfx.orchestration.launcher import kubernetes_component_launcher from tfx.proto.orchestration import pipeline_pb2 from tfx.utils import telemetry_utils The provided code snippet includes necessary dependencies for implementing the `get_default_pod_labels` function. Write a Python function `def get_default_pod_labels() -> Dict[str, str]` to solve the following problem: Returns the default pod label dict for Kubeflow. Here is the function: def get_default_pod_labels() -> Dict[str, str]: """Returns the default pod label dict for Kubeflow.""" # KFP default transformers add pod env: # https://github.com/kubeflow/pipelines/blob/0.1.32/sdk/python/kfp/compiler/_default_transformers.py result = { 'add-pod-env': 'true', telemetry_utils.LABEL_KFP_SDK_ENV: 'tfx' } return result
Returns the default pod label dict for Kubeflow.