| id (int64, range 0–190k) | prompt (string, length 21–13.4M) | docstring (string, length 1–12k, nullable ⌀) |
|---|---|---|
166,512 | import os
from absl import logging
from tfx import v1 as tfx
from tfx.experimental.templates.penguin.pipeline import configs
from tfx.experimental.templates.penguin.pipeline import pipeline
PIPELINE_ROOT = os.path.join(OUTPUT_DIR, 'tfx_pipeline_output',
configs.PIPELINE_NAME)
SERVING_MODEL_DIR = os.path.join(PIPELINE_ROOT, 'serving_model')
DATA_PATH = 'gs://{}/tfx-template/data/penguin/'.format(configs.GCS_BUCKET_NAME)
The provided code snippet includes necessary dependencies for implementing the `run` function. Write a Python function `def run()` to solve the following problem:
Define a kubeflow pipeline.
Here is the function:
def run():
  """Define a kubeflow pipeline."""
  # Metadata config. The defaults work with the installation of
  # KF Pipelines using Kubeflow. If installing KF Pipelines using the
  # lightweight deployment option, you may need to override the defaults.
  # If you use Kubeflow, metadata will be written to MySQL database inside
  # Kubeflow cluster.
  metadata_config = tfx.orchestration.experimental.get_default_kubeflow_metadata_config(
  )
  # Every component executes inside the container image built for this
  # pipeline.
  runner_config = tfx.orchestration.experimental.KubeflowDagRunnerConfig(
      kubeflow_metadata_config=metadata_config,
      tfx_image=configs.PIPELINE_IMAGE)
  # Labels attached to every component pod launched by the runner.
  pod_labels = {
      'add-pod-env': 'true',
      tfx.orchestration.experimental.LABEL_KFP_SDK_ENV: 'tfx-template'
  }
  tfx.orchestration.experimental.KubeflowDagRunner(
      config=runner_config, pod_labels_to_attach=pod_labels
  ).run(
      pipeline.create_pipeline(
          pipeline_name=configs.PIPELINE_NAME,
          pipeline_root=PIPELINE_ROOT,
          data_path=DATA_PATH,
          # NOTE: Use `query` instead of `data_path` to use BigQueryExampleGen.
          # query=configs.BIG_QUERY_QUERY,
          # NOTE: Set the path of the customized schema if any.
          # schema_path=generated_schema_path,
          preprocessing_fn=configs.PREPROCESSING_FN,
          run_fn=configs.RUN_FN,
          train_args=tfx.proto.TrainArgs(num_steps=configs.TRAIN_NUM_STEPS),
          eval_args=tfx.proto.EvalArgs(num_steps=configs.EVAL_NUM_STEPS),
          eval_accuracy_threshold=configs.EVAL_ACCURACY_THRESHOLD,
          serving_model_dir=SERVING_MODEL_DIR,
          # NOTE: Provide GCP configs to use BigQuery with Beam DirectRunner.
          # beam_pipeline_args=configs.
          # BIG_QUERY_WITH_DIRECT_RUNNER_BEAM_PIPELINE_ARGS,
      )) | Define a kubeflow pipeline. |
166,513 | import os
from absl import logging
from tfx import v1 as tfx
from tfx.experimental.templates.penguin.pipeline import configs
from tfx.experimental.templates.penguin.pipeline import pipeline
PIPELINE_ROOT = os.path.join(OUTPUT_DIR, 'tfx_pipeline_output',
configs.PIPELINE_NAME)
METADATA_PATH = os.path.join(OUTPUT_DIR, 'tfx_metadata', configs.PIPELINE_NAME,
'metadata.db')
SERVING_MODEL_DIR = os.path.join(PIPELINE_ROOT, 'serving_model')
DATA_PATH = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'data')
The provided code snippet includes necessary dependencies for implementing the `run` function. Write a Python function `def run()` to solve the following problem:
Define a pipeline.
Here is the function:
def run():
  """Define a pipeline."""
  # Runs the whole pipeline in-process via the local orchestrator.
  tfx.orchestration.LocalDagRunner().run(
      pipeline.create_pipeline(
          pipeline_name=configs.PIPELINE_NAME,
          pipeline_root=PIPELINE_ROOT,
          data_path=DATA_PATH,
          # NOTE: Use `query` instead of `data_path` to use BigQueryExampleGen.
          # query=configs.BIG_QUERY_QUERY,
          # NOTE: Set the path of the customized schema if any.
          # schema_path=generated_schema_path,
          preprocessing_fn=configs.PREPROCESSING_FN,
          run_fn=configs.RUN_FN,
          train_args=tfx.proto.TrainArgs(num_steps=configs.TRAIN_NUM_STEPS),
          eval_args=tfx.proto.EvalArgs(num_steps=configs.EVAL_NUM_STEPS),
          eval_accuracy_threshold=configs.EVAL_ACCURACY_THRESHOLD,
          serving_model_dir=SERVING_MODEL_DIR,
          # NOTE: Provide GCP configs to use BigQuery with Beam DirectRunner.
          # beam_pipeline_args=configs.
          # BIG_QUERY_WITH_DIRECT_RUNNER_BEAM_PIPELINE_ARGS,
          # ML Metadata is kept in a local SQLite database at METADATA_PATH.
          metadata_connection_config=tfx.orchestration.metadata
          .sqlite_metadata_connection_config(METADATA_PATH))) | Define a pipeline. |
166,514 | import tensorflow_transform as tft
from tfx.experimental.templates.penguin.models import features
The provided code snippet includes necessary dependencies for implementing the `preprocessing_fn` function. Write a Python function `def preprocessing_fn(inputs)` to solve the following problem:
tf.transform's callback function for preprocessing inputs. Args: inputs: map from feature keys to raw not-yet-transformed features. Returns: Map from string feature key to transformed feature operations.
Here is the function:
def preprocessing_fn(inputs):
  """tf.transform's callback function for preprocessing inputs.

  Args:
    inputs: map from feature keys to raw not-yet-transformed features.

  Returns:
    Map from string feature key to transformed feature operations.
  """
  outputs = {}
  # This function is the entry point for your feature engineering with
  # TensorFlow Transform, using the TFX Transform component. In this example
  # the feature engineering is very simple, only applying z-score scaling.
  for key in features.FEATURE_KEYS:
    outputs[features.transformed_name(key)] = tft.scale_to_z_score(inputs[key])
  # Do not apply label transformation as it will result in wrong evaluation.
  outputs[features.transformed_name(
      features.LABEL_KEY)] = inputs[features.LABEL_KEY]
  return outputs | tf.transform's callback function for preprocessing inputs. Args: inputs: map from feature keys to raw not-yet-transformed features. Returns: Map from string feature key to transformed feature operations. |
166,515 | from typing import List
from absl import logging
import tensorflow as tf
from tensorflow import keras
import tensorflow_transform as tft
from tensorflow_transform.tf_metadata import schema_utils
from tfx import v1 as tfx
from tfx.experimental.templates.penguin.models import constants
from tfx.experimental.templates.penguin.models import features
from tfx_bsl.public import tfxio
from tensorflow_metadata.proto.v0 import schema_pb2
def _get_tf_examples_serving_signature(model, schema, tf_transform_output):
  """Returns a serving signature that accepts `tensorflow.Example`.

  Args:
    model: A Keras model mapping (transformed) features to outputs.
    schema: A schema proto of the raw input data; used to build the parsing
      spec when Transform is not part of the pipeline.
    tf_transform_output: A `tft.TFTransformOutput`, or None if the Transform
      component is not used.

  Returns:
    A tf.function that parses serialized `tf.Example` protos, optionally
    applies the transform graph, and returns `{'outputs': model_outputs}`.
  """
  if tf_transform_output is None:  # Transform component is not used.

    # Restored: the `@tf.function(input_signature=[...])` decorator line had
    # been dropped during extraction, leaving an unbalanced `])`.
    @tf.function(input_signature=[
        tf.TensorSpec(shape=[None], dtype=tf.string, name='examples')
    ])
    def serve_tf_examples_fn(serialized_tf_example):
      """Returns the output to be used in the serving signature."""
      raw_feature_spec = schema_utils.schema_as_feature_spec(
          schema).feature_spec
      # Remove label feature since these will not be present at serving time.
      raw_feature_spec.pop(features.LABEL_KEY)
      raw_features = tf.io.parse_example(serialized_tf_example,
                                         raw_feature_spec)
      logging.info('serve_features = %s', raw_features)
      outputs = model(raw_features)
      # TODO(b/154085620): Convert the predicted labels from the model using a
      # reverse-lookup (opposite of transform.py).
      return {'outputs': outputs}

  else:  # Transform component exists.
    # We need to track the layers in the model in order to save it.
    # TODO(b/162357359): Revise once the bug is resolved.
    model.tft_layer_inference = tf_transform_output.transform_features_layer()

    @tf.function(input_signature=[
        tf.TensorSpec(shape=[None], dtype=tf.string, name='examples')
    ])
    def serve_tf_examples_fn(serialized_tf_example):
      """Returns the output to be used in the serving signature."""
      raw_feature_spec = tf_transform_output.raw_feature_spec()
      # Remove label feature since these will not be present at serving time.
      raw_feature_spec.pop(features.LABEL_KEY)
      raw_features = tf.io.parse_example(serialized_tf_example,
                                         raw_feature_spec)
      transformed_features = model.tft_layer_inference(raw_features)
      logging.info('serve_transformed_features = %s', transformed_features)
      outputs = model(transformed_features)
      # TODO(b/154085620): Convert the predicted labels from the model using a
      # reverse-lookup (opposite of transform.py).
      return {'outputs': outputs}

  return serve_tf_examples_fn
def _get_transform_features_signature(model, schema, tf_transform_output):
  """Returns a serving signature that applies tf.Transform to features.

  Args:
    model: A Keras model; when Transform is used, the tft layer is attached to
      it so the transform graph is tracked and saved with the model.
    schema: A schema proto of the raw input data; used for parsing when
      Transform is not part of the pipeline.
    tf_transform_output: A `tft.TFTransformOutput`, or None if the Transform
      component is not used.

  Returns:
    A tf.function that parses serialized `tf.Example` protos and returns the
    (transformed) features, for consumption by the Evaluator.
  """
  if tf_transform_output is None:  # Transform component is not used.

    # Restored: the `@tf.function(input_signature=[...])` decorator line had
    # been dropped during extraction, leaving an unbalanced `])`.
    @tf.function(input_signature=[
        tf.TensorSpec(shape=[None], dtype=tf.string, name='examples')
    ])
    def transform_features_fn(serialized_tf_example):
      """Returns the transformed_features to be fed as input to evaluator."""
      raw_feature_spec = schema_utils.schema_as_feature_spec(
          schema).feature_spec
      raw_features = tf.io.parse_example(serialized_tf_example,
                                         raw_feature_spec)
      logging.info('eval_features = %s', raw_features)
      return raw_features

  else:  # Transform component exists.
    # We need to track the layers in the model in order to save it.
    # TODO(b/162357359): Revise once the bug is resolved.
    model.tft_layer_eval = tf_transform_output.transform_features_layer()

    @tf.function(input_signature=[
        tf.TensorSpec(shape=[None], dtype=tf.string, name='examples')
    ])
    def transform_features_fn(serialized_tf_example):
      """Returns the transformed_features to be fed as input to evaluator."""
      raw_feature_spec = tf_transform_output.raw_feature_spec()
      raw_features = tf.io.parse_example(serialized_tf_example,
                                         raw_feature_spec)
      transformed_features = model.tft_layer_eval(raw_features)
      logging.info('eval_transformed_features = %s', transformed_features)
      return transformed_features

  return transform_features_fn
def _input_fn(file_pattern: List[str],
              data_accessor: tfx.components.DataAccessor,
              schema: schema_pb2.Schema,
              label: str,
              batch_size: int = 200) -> tf.data.Dataset:
  """Generates features and label for tuning/training.

  Args:
    file_pattern: List of paths or patterns of input tfrecord files.
    data_accessor: DataAccessor for converting input to RecordBatch.
    schema: A schema proto of input data.
    label: Name of the label.
    batch_size: representing the number of consecutive elements of returned
      dataset to combine in a single batch

  Returns:
    A dataset that contains (features, indices) tuple where features is a
    dictionary of Tensors, and indices is a single Tensor of label indices.
  """
  dataset_options = tfxio.TensorFlowDatasetOptions(
      batch_size=batch_size, label_key=label)
  dataset = data_accessor.tf_dataset_factory(file_pattern, dataset_options,
                                             schema)
  # Repeat indefinitely; the trainer bounds consumption via its step counts.
  return dataset.repeat()
def _build_keras_model(feature_list: List[str]) -> tf.keras.Model:
  """Creates a DNN Keras model for classifying penguin data.

  Args:
    feature_list: List of feature names.

  Returns:
    A Keras Model.
  """
  # The model below is built with Functional API, please refer to
  # https://www.tensorflow.org/guide/keras/overview for all API options.
  inputs = [keras.layers.Input(shape=(1,), name=name) for name in feature_list]
  hidden = keras.layers.concatenate(inputs)
  for _ in range(constants.NUM_LAYERS):
    hidden = keras.layers.Dense(
        constants.HIDDEN_LAYER_UNITS, activation='relu')(hidden)
  # Final layer emits raw logits; the loss applies softmax internally.
  logits = keras.layers.Dense(constants.OUTPUT_LAYER_UNITS)(hidden)

  model = keras.Model(inputs=inputs, outputs=logits)
  model.compile(
      optimizer=keras.optimizers.Adam(constants.LEARNING_RATE),
      loss=keras.losses.SparseCategoricalCrossentropy(from_logits=True),
      metrics=[keras.metrics.SparseCategoricalAccuracy()])
  model.summary(print_fn=logging.info)
  return model
The provided code snippet includes necessary dependencies for implementing the `run_fn` function. Write a Python function `def run_fn(fn_args: tfx.components.FnArgs)` to solve the following problem:
Train the model based on given args. Args: fn_args: Holds args used to train the model as name/value pairs.
Here is the function:
def run_fn(fn_args: tfx.components.FnArgs):
  """Train the model based on given args.

  Args:
    fn_args: Holds args used to train the model as name/value pairs.
  """
  if fn_args.transform_output is None:  # Transform is not used.
    tf_transform_output = None
    # Fall back to the raw schema and untransformed feature/label names.
    schema = tfx.utils.parse_pbtxt_file(fn_args.schema_file,
                                        schema_pb2.Schema())
    feature_list = features.FEATURE_KEYS
    label_key = features.LABEL_KEY
  else:
    tf_transform_output = tft.TFTransformOutput(fn_args.transform_output)
    schema = tf_transform_output.transformed_metadata.schema
    feature_list = [features.transformed_name(f) for f in features.FEATURE_KEYS]
    label_key = features.transformed_name(features.LABEL_KEY)

  mirrored_strategy = tf.distribute.MirroredStrategy()
  # Scale the global batch size with the replica count so per-replica batch
  # size stays at the configured constant.
  train_batch_size = (
      constants.TRAIN_BATCH_SIZE * mirrored_strategy.num_replicas_in_sync)
  eval_batch_size = (
      constants.EVAL_BATCH_SIZE * mirrored_strategy.num_replicas_in_sync)

  train_dataset = _input_fn(
      fn_args.train_files,
      fn_args.data_accessor,
      schema,
      label_key,
      batch_size=train_batch_size)
  eval_dataset = _input_fn(
      fn_args.eval_files,
      fn_args.data_accessor,
      schema,
      label_key,
      batch_size=eval_batch_size)

  # Model variables must be created under the distribution strategy scope.
  with mirrored_strategy.scope():
    model = _build_keras_model(feature_list)

  # Write logs to path
  tensorboard_callback = tf.keras.callbacks.TensorBoard(
      log_dir=fn_args.model_run_dir, update_freq='epoch')

  # Datasets repeat indefinitely; training is bounded by the step counts.
  model.fit(
      train_dataset,
      steps_per_epoch=fn_args.train_steps,
      validation_data=eval_dataset,
      validation_steps=fn_args.eval_steps,
      callbacks=[tensorboard_callback])

  # Attach serving and evaluator signatures so the SavedModel can parse raw
  # tf.Example input.
  signatures = {
      'serving_default':
          _get_tf_examples_serving_signature(model, schema,
                                             tf_transform_output),
      'transform_features':
          _get_transform_features_signature(model, schema, tf_transform_output),
  }
  model.save(fn_args.serving_model_dir, save_format='tf', signatures=signatures) | Train the model based on given args. Args: fn_args: Holds args used to train the model as name/value pairs. |
166,516 | import os
from absl import logging
from tfx import v1 as tfx
from tfx.experimental.templates.taxi.pipeline import configs
from tfx.experimental.templates.taxi.pipeline import pipeline
PIPELINE_ROOT = os.path.join(OUTPUT_DIR, 'tfx_pipeline_output',
configs.PIPELINE_NAME)
SERVING_MODEL_DIR = os.path.join(PIPELINE_ROOT, 'serving_model')
DATA_PATH = 'gs://{}/tfx-template/data/taxi/'.format(configs.GCS_BUCKET_NAME)
The provided code snippet includes necessary dependencies for implementing the `run` function. Write a Python function `def run()` to solve the following problem:
Define a kubeflow pipeline.
Here is the function:
def run():
  """Define a kubeflow pipeline."""
  # Metadata config. The defaults work with the installation of
  # KF Pipelines using Kubeflow. If installing KF Pipelines using the
  # lightweight deployment option, you may need to override the defaults.
  # If you use Kubeflow, metadata will be written to MySQL database inside
  # Kubeflow cluster.
  metadata_config = tfx.orchestration.experimental.get_default_kubeflow_metadata_config(
  )
  # Every component executes inside the container image built for this
  # pipeline.
  runner_config = tfx.orchestration.experimental.KubeflowDagRunnerConfig(
      kubeflow_metadata_config=metadata_config,
      tfx_image=configs.PIPELINE_IMAGE)
  # Labels attached to every component pod launched by the runner.
  pod_labels = {
      'add-pod-env': 'true',
      tfx.orchestration.experimental.LABEL_KFP_SDK_ENV: 'tfx-template'
  }
  tfx.orchestration.experimental.KubeflowDagRunner(
      config=runner_config, pod_labels_to_attach=pod_labels
  ).run(
      pipeline.create_pipeline(
          pipeline_name=configs.PIPELINE_NAME,
          pipeline_root=PIPELINE_ROOT,
          data_path=DATA_PATH,
          # TODO(step 7): (Optional) Uncomment below to use BigQueryExampleGen.
          # query=configs.BIG_QUERY_QUERY,
          # TODO(step 5): (Optional) Set the path of the customized schema.
          # schema_path=generated_schema_path,
          preprocessing_fn=configs.PREPROCESSING_FN,
          run_fn=configs.RUN_FN,
          train_args=tfx.proto.TrainArgs(num_steps=configs.TRAIN_NUM_STEPS),
          eval_args=tfx.proto.EvalArgs(num_steps=configs.EVAL_NUM_STEPS),
          eval_accuracy_threshold=configs.EVAL_ACCURACY_THRESHOLD,
          serving_model_dir=SERVING_MODEL_DIR,
          # TODO(step 7): (Optional) Uncomment below to use provide GCP related
          # config for BigQuery with Beam DirectRunner.
          # beam_pipeline_args=configs
          # .BIG_QUERY_WITH_DIRECT_RUNNER_BEAM_PIPELINE_ARGS,
          # TODO(step 8): (Optional) Uncomment below to use Dataflow.
          # beam_pipeline_args=configs.DATAFLOW_BEAM_PIPELINE_ARGS,
          # TODO(step 9): (Optional) Uncomment below to use Cloud AI Platform.
          # ai_platform_training_args=configs.GCP_AI_PLATFORM_TRAINING_ARGS,
          # TODO(step 9): (Optional) Uncomment below to use Cloud AI Platform.
          # ai_platform_serving_args=configs.GCP_AI_PLATFORM_SERVING_ARGS,
      )) | Define a kubeflow pipeline. |
166,517 | import os
from absl import logging
from tfx import v1 as tfx
from tfx.experimental.templates.taxi.pipeline import configs
from tfx.experimental.templates.taxi.pipeline import pipeline
PIPELINE_ROOT = os.path.join(OUTPUT_DIR, 'tfx_pipeline_output',
configs.PIPELINE_NAME)
METADATA_PATH = os.path.join(OUTPUT_DIR, 'tfx_metadata', configs.PIPELINE_NAME,
'metadata.db')
SERVING_MODEL_DIR = os.path.join(PIPELINE_ROOT, 'serving_model')
DATA_PATH = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'data')
The provided code snippet includes necessary dependencies for implementing the `run` function. Write a Python function `def run()` to solve the following problem:
Define a local pipeline.
Here is the function:
def run():
  """Define a local pipeline."""
  # Runs the whole pipeline in-process via the local orchestrator.
  tfx.orchestration.LocalDagRunner().run(
      pipeline.create_pipeline(
          pipeline_name=configs.PIPELINE_NAME,
          pipeline_root=PIPELINE_ROOT,
          data_path=DATA_PATH,
          # TODO(step 7): (Optional) Uncomment here to use BigQueryExampleGen.
          # query=configs.BIG_QUERY_QUERY,
          # TODO(step 5): (Optional) Set the path of the customized schema.
          # schema_path=generated_schema_path,
          preprocessing_fn=configs.PREPROCESSING_FN,
          run_fn=configs.RUN_FN,
          train_args=tfx.proto.TrainArgs(num_steps=configs.TRAIN_NUM_STEPS),
          eval_args=tfx.proto.EvalArgs(num_steps=configs.EVAL_NUM_STEPS),
          eval_accuracy_threshold=configs.EVAL_ACCURACY_THRESHOLD,
          serving_model_dir=SERVING_MODEL_DIR,
          # TODO(step 7): (Optional) Uncomment here to use provide GCP related
          # config for BigQuery with Beam DirectRunner.
          # beam_pipeline_args=configs.
          # BIG_QUERY_WITH_DIRECT_RUNNER_BEAM_PIPELINE_ARGS,
          # ML Metadata is kept in a local SQLite database at METADATA_PATH.
          metadata_connection_config=tfx.orchestration.metadata
          .sqlite_metadata_connection_config(METADATA_PATH))) | Define a local pipeline. |
166,518 | import os
from absl import logging
from tfx.experimental.templates.taxi.pipeline import configs
from tfx.experimental.templates.taxi.pipeline import pipeline
from tfx.orchestration.kubeflow.v2 import kubeflow_v2_dag_runner
from tfx.proto import trainer_pb2
_PIPELINE_ROOT = os.path.join(_OUTPUT_DIR, 'tfx_pipeline_output',
configs.PIPELINE_NAME)
_SERVING_MODEL_DIR = os.path.join(_PIPELINE_ROOT, 'serving_model')
_DATA_PATH = 'gs://{}/tfx-template/data/taxi/'.format(configs.GCS_BUCKET_NAME)
The provided code snippet includes necessary dependencies for implementing the `run` function. Write a Python function `def run()` to solve the following problem:
Define a pipeline to be executed using Kubeflow V2 runner.
Here is the function:
def run():
  """Define a pipeline to be executed using Kubeflow V2 runner."""
  # Every component executes inside the container image built for this
  # pipeline.
  runner_config = kubeflow_v2_dag_runner.KubeflowV2DagRunnerConfig(
      default_image=configs.PIPELINE_IMAGE)

  dsl_pipeline = pipeline.create_pipeline(
      pipeline_name=configs.PIPELINE_NAME,
      pipeline_root=_PIPELINE_ROOT,
      data_path=_DATA_PATH,
      # TODO(step 7): (Optional) Uncomment here to use BigQueryExampleGen.
      # query=configs.BIG_QUERY_QUERY,
      preprocessing_fn=configs.PREPROCESSING_FN,
      run_fn=configs.RUN_FN,
      train_args=trainer_pb2.TrainArgs(num_steps=configs.TRAIN_NUM_STEPS),
      eval_args=trainer_pb2.EvalArgs(num_steps=configs.EVAL_NUM_STEPS),
      eval_accuracy_threshold=configs.EVAL_ACCURACY_THRESHOLD,
      serving_model_dir=_SERVING_MODEL_DIR,
      # TODO(step 7): (Optional) Uncomment here to use provide GCP related
      # config for BigQuery with Beam DirectRunner.
      # beam_pipeline_args=configs.
      # BIG_QUERY_WITH_DIRECT_RUNNER_BEAM_PIPELINE_ARGS,
      # TODO(step 8): (Optional) Uncomment below to use Dataflow.
      # beam_pipeline_args=configs.DATAFLOW_BEAM_PIPELINE_ARGS,
      # TODO(step 9): (Optional) Uncomment below to use Cloud AI Platform.
      # ai_platform_training_args=configs.GCP_AI_PLATFORM_TRAINING_ARGS,
      # TODO(step 9): (Optional) Uncomment below to use Cloud AI Platform.
      # ai_platform_serving_args=configs.GCP_AI_PLATFORM_SERVING_ARGS,
  )

  runner = kubeflow_v2_dag_runner.KubeflowV2DagRunner(config=runner_config)
  runner.run(pipeline=dsl_pipeline) | Define a pipeline to be executed using Kubeflow V2 runner. |
166,519 | from absl import logging
import tensorflow as tf
from tensorflow import estimator as tf_estimator
import tensorflow_model_analysis as tfma
import tensorflow_transform as tft
from tensorflow_transform.tf_metadata import schema_utils
from tfx import v1 as tfx
from tfx.experimental.templates.taxi.models import features
from tfx.experimental.templates.taxi.models.estimator_model import constants
from tfx_bsl.public import tfxio
from tensorflow_metadata.proto.v0 import schema_pb2
The provided code snippet includes necessary dependencies for implementing the `_gzip_reader_fn` function. Write a Python function `def _gzip_reader_fn(filenames)` to solve the following problem:
Small utility returning a record reader that can read gzip'ed files.
Here is the function:
def _gzip_reader_fn(filenames):
  """Small utility returning a record reader that can read gzip'ed files."""
  # NOTE(review): assumes the input TFRecords are gzip-compressed — confirm
  # against the upstream ExampleGen output format.
  return tf.data.TFRecordDataset(filenames, compression_type='GZIP') | Small utility returning a record reader that can read gzip'ed files. |
166,520 | from absl import logging
import tensorflow as tf
from tensorflow import estimator as tf_estimator
import tensorflow_model_analysis as tfma
import tensorflow_transform as tft
from tensorflow_transform.tf_metadata import schema_utils
from tfx import v1 as tfx
from tfx.experimental.templates.taxi.models import features
from tfx.experimental.templates.taxi.models.estimator_model import constants
from tfx_bsl.public import tfxio
from tensorflow_metadata.proto.v0 import schema_pb2
def _create_train_and_eval_spec(trainer_fn_args, schema):
  """Build the estimator using the high level API.

  Args:
    trainer_fn_args: Holds args used to train the model as name/value pairs.
    schema: Holds the schema of the training examples.

  Returns:
    A dict of the following:
      - estimator: The estimator that will be used for training and eval.
      - train_spec: Spec for training.
      - eval_spec: Spec for eval.
      - eval_input_receiver_fn: Input function for eval.
  """
  tf_transform_output = tft.TFTransformOutput(trainer_fn_args.transform_output)

  # Thin closures over the shared transform output, one per input role.
  def train_input_fn():
    return _input_fn(
        trainer_fn_args.train_files,
        trainer_fn_args.data_accessor,
        tf_transform_output,
        batch_size=constants.TRAIN_BATCH_SIZE)

  def eval_input_fn():
    return _input_fn(
        trainer_fn_args.eval_files,
        trainer_fn_args.data_accessor,
        tf_transform_output,
        batch_size=constants.EVAL_BATCH_SIZE)

  def serving_receiver_fn():
    return _example_serving_receiver_fn(tf_transform_output, schema)

  # Input receiver for TFMA processing.
  def eval_receiver_fn():
    return _eval_input_receiver_fn(tf_transform_output, schema)

  train_spec = tf_estimator.TrainSpec(
      train_input_fn, max_steps=trainer_fn_args.train_steps)
  eval_spec = tf_estimator.EvalSpec(
      eval_input_fn,
      steps=trainer_fn_args.eval_steps,
      exporters=[tf_estimator.FinalExporter('chicago-taxi',
                                            serving_receiver_fn)],
      name='chicago-taxi-eval')

  run_config = tf_estimator.RunConfig(
      save_checkpoints_steps=999,
      keep_checkpoint_max=1).replace(
          model_dir=trainer_fn_args.serving_model_dir)
  estimator = _build_estimator(
      hidden_units=constants.HIDDEN_UNITS, config=run_config)

  return {
      'estimator': estimator,
      'train_spec': train_spec,
      'eval_spec': eval_spec,
      'eval_input_receiver_fn': eval_receiver_fn
  }
The provided code snippet includes necessary dependencies for implementing the `run_fn` function. Write a Python function `def run_fn(fn_args)` to solve the following problem:
Train the model based on given args. Args: fn_args: Holds args used to train the model as name/value pairs.
Here is the function:
def run_fn(fn_args):
  """Train the model based on given args.

  Args:
    fn_args: Holds args used to train the model as name/value pairs.
  """
  # Parse the data schema proto from the file path supplied by the Trainer.
  schema = tfx.utils.parse_pbtxt_file(fn_args.schema_file, schema_pb2.Schema())

  train_and_eval_spec = _create_train_and_eval_spec(fn_args, schema)

  # Train the model
  logging.info('Training model.')
  tf_estimator.train_and_evaluate(train_and_eval_spec['estimator'],
                                  train_and_eval_spec['train_spec'],
                                  train_and_eval_spec['eval_spec'])
  logging.info('Training complete. Model written to %s',
               fn_args.serving_model_dir)

  # Export an eval savedmodel for TFMA
  # NOTE: When trained in distributed training cluster, eval_savedmodel must be
  # exported only by the chief worker.
  logging.info('Exporting eval_savedmodel for TFMA.')
  tfma.export.export_eval_savedmodel(
      estimator=train_and_eval_spec['estimator'],
      export_dir_base=fn_args.eval_model_dir,
      eval_input_receiver_fn=train_and_eval_spec['eval_input_receiver_fn'])
  logging.info('Exported eval_savedmodel to %s.', fn_args.eval_model_dir) | Train the model based on given args. Args: fn_args: Holds args used to train the model as name/value pairs. |
166,521 | import tensorflow as tf
import tensorflow_transform as tft
from tfx.experimental.templates.taxi.models import features
def _fill_in_missing(x):
  """Replace missing values in a SparseTensor.

  Fills in missing values of `x` with '' or 0, and converts to a dense tensor.

  Args:
    x: A `SparseTensor` of rank 2. Its dense shape should have size at most 1
      in the second dimension.

  Returns:
    A rank 1 tensor where missing values of `x` have been filled in.
  """
  # Already-dense inputs pass through untouched.
  if not isinstance(x, tf.sparse.SparseTensor):
    return x
  fill_value = '' if x.dtype == tf.string else 0
  # Pin the second dimension to exactly 1 so the densified result can be
  # squeezed down to rank 1.
  widened = tf.SparseTensor(x.indices, x.values, [x.dense_shape[0], 1])
  dense = tf.sparse.to_dense(widened, fill_value)
  return tf.squeeze(dense, axis=1)
The provided code snippet includes necessary dependencies for implementing the `preprocessing_fn` function. Write a Python function `def preprocessing_fn(inputs)` to solve the following problem:
tf.transform's callback function for preprocessing inputs. Args: inputs: map from feature keys to raw not-yet-transformed features. Returns: Map from string feature key to transformed feature operations.
Here is the function:
def preprocessing_fn(inputs):
  """tf.transform's callback function for preprocessing inputs.

  Args:
    inputs: map from feature keys to raw not-yet-transformed features.

  Returns:
    Map from string feature key to transformed feature operations.
  """
  outputs = {}
  for key in features.DENSE_FLOAT_FEATURE_KEYS:
    # If sparse make it dense, setting nan's to 0 or '', and apply zscore.
    outputs[features.transformed_name(key)] = tft.scale_to_z_score(
        _fill_in_missing(inputs[key]))

  for key in features.VOCAB_FEATURE_KEYS:
    # Build a vocabulary for this feature.
    outputs[features.transformed_name(key)] = tft.compute_and_apply_vocabulary(
        _fill_in_missing(inputs[key]),
        top_k=features.VOCAB_SIZE,
        num_oov_buckets=features.OOV_SIZE)

  # Bucketize each feature with its own configured bucket count.
  for key, num_buckets in zip(features.BUCKET_FEATURE_KEYS,
                              features.BUCKET_FEATURE_BUCKET_COUNT):
    outputs[features.transformed_name(key)] = tft.bucketize(
        _fill_in_missing(inputs[key]),
        num_buckets)

  # Categorical features pass through unchanged aside from densification.
  for key in features.CATEGORICAL_FEATURE_KEYS:
    outputs[features.transformed_name(key)] = _fill_in_missing(inputs[key])

  # Was this passenger a big tipper?
  taxi_fare = _fill_in_missing(inputs[features.FARE_KEY])
  tips = _fill_in_missing(inputs[features.LABEL_KEY])
  # NaN fares yield label 0 instead of propagating NaN into the label.
  outputs[features.transformed_name(features.LABEL_KEY)] = tf.where(
      tf.math.is_nan(taxi_fare),
      tf.cast(tf.zeros_like(taxi_fare), tf.int64),
      # Test if the tip was > 20% of the fare.
      tf.cast(
          tf.greater(tips, tf.multiply(taxi_fare, tf.constant(0.2))), tf.int64))
  return outputs | tf.transform's callback function for preprocessing inputs. Args: inputs: map from feature keys to raw not-yet-transformed features. Returns: Map from string feature key to transformed feature operations. |
166,522 | from absl import logging
import tensorflow as tf
import tensorflow_transform as tft
from tfx.experimental.templates.taxi.models import features
from tfx.experimental.templates.taxi.models.keras_model import constants
from tfx_bsl.public import tfxio
def _get_tf_examples_serving_signature(model, tf_transform_output):
  """Returns a serving signature that accepts `tensorflow.Example`.

  Args:
    model: A Keras model mapping transformed features to outputs; the tft
      layer is attached to it so the transform graph is saved with the model.
    tf_transform_output: A `tft.TFTransformOutput`.

  Returns:
    A tf.function that parses serialized `tf.Example` protos, applies the
    transform graph, and returns `{'outputs': model_outputs}`.
  """
  # We need to track the layers in the model in order to save it.
  # TODO(b/162357359): Revise once the bug is resolved.
  model.tft_layer_inference = tf_transform_output.transform_features_layer()

  # Restored: the `@tf.function(input_signature=[...])` decorator line had
  # been dropped during extraction, leaving an unbalanced `])`.
  @tf.function(input_signature=[
      tf.TensorSpec(shape=[None], dtype=tf.string, name='examples')
  ])
  def serve_tf_examples_fn(serialized_tf_example):
    """Returns the output to be used in the serving signature."""
    raw_feature_spec = tf_transform_output.raw_feature_spec()
    # Remove label feature since these will not be present at serving time.
    raw_feature_spec.pop(features.LABEL_KEY)
    raw_features = tf.io.parse_example(serialized_tf_example, raw_feature_spec)
    transformed_features = model.tft_layer_inference(raw_features)
    logging.info('serve_transformed_features = %s', transformed_features)
    outputs = model(transformed_features)
    # TODO(b/154085620): Convert the predicted labels from the model using a
    # reverse-lookup (opposite of transform.py).
    return {'outputs': outputs}

  return serve_tf_examples_fn
def _get_transform_features_signature(model, tf_transform_output):
  """Returns a serving signature that applies tf.Transform to features.

  Args:
    model: A Keras model; the tft layer is attached to it so the transform
      graph is tracked and saved with the model.
    tf_transform_output: A `tft.TFTransformOutput`.

  Returns:
    A tf.function that parses serialized `tf.Example` protos and returns the
    transformed features, for consumption by the Evaluator.
  """
  # We need to track the layers in the model in order to save it.
  # TODO(b/162357359): Revise once the bug is resolved.
  model.tft_layer_eval = tf_transform_output.transform_features_layer()

  # Restored: the `@tf.function(input_signature=[...])` decorator line had
  # been dropped during extraction, leaving an unbalanced `])`.
  @tf.function(input_signature=[
      tf.TensorSpec(shape=[None], dtype=tf.string, name='examples')
  ])
  def transform_features_fn(serialized_tf_example):
    """Returns the transformed_features to be fed as input to evaluator."""
    raw_feature_spec = tf_transform_output.raw_feature_spec()
    raw_features = tf.io.parse_example(serialized_tf_example, raw_feature_spec)
    transformed_features = model.tft_layer_eval(raw_features)
    logging.info('eval_transformed_features = %s', transformed_features)
    return transformed_features

  return transform_features_fn
def _input_fn(file_pattern, data_accessor, tf_transform_output, batch_size=200):
  """Generates features and label for tuning/training.

  Args:
    file_pattern: List of paths or patterns of input tfrecord files.
    data_accessor: DataAccessor for converting input to RecordBatch.
    tf_transform_output: A TFTransformOutput.
    batch_size: representing the number of consecutive elements of returned
      dataset to combine in a single batch

  Returns:
    A dataset that contains (features, indices) tuple where features is a
    dictionary of Tensors, and indices is a single Tensor of label indices.
  """
  label_name = features.transformed_name(features.LABEL_KEY)
  dataset_options = tfxio.TensorFlowDatasetOptions(
      batch_size=batch_size, label_key=label_name)
  dataset = data_accessor.tf_dataset_factory(
      file_pattern, dataset_options,
      tf_transform_output.transformed_metadata.schema)
  # Repeat indefinitely; the trainer bounds consumption via its step counts.
  return dataset.repeat()
def _build_keras_model(hidden_units, learning_rate):
  """Creates a DNN Keras model for classifying taxi data.

  Args:
    hidden_units: [int], the layer sizes of the DNN (input layer first).
    learning_rate: [float], learning rate of the Adam optimizer.

  Returns:
    A keras Model.
  """

  def _identity_columns(keys, bucket_sizes):
    """One identity categorical column per (key, bucket count) pair."""
    return [
        tf.feature_column.categorical_column_with_identity(  # pylint: disable=g-complex-comprehension
            key,
            num_buckets=num_buckets,
            default_value=0)
        for key, num_buckets in zip(keys, bucket_sizes)
    ]

  real_valued_columns = [
      tf.feature_column.numeric_column(key, shape=())
      for key in features.transformed_names(features.DENSE_FLOAT_FEATURE_KEYS)
  ]

  # Vocab features all share the same bucket count: vocab size + OOV buckets.
  vocab_bucket_count = features.VOCAB_SIZE + features.OOV_SIZE
  categorical_columns = [
      tf.feature_column.categorical_column_with_identity(  # pylint: disable=g-complex-comprehension
          key,
          num_buckets=vocab_bucket_count,
          default_value=0)
      for key in features.transformed_names(features.VOCAB_FEATURE_KEYS)
  ]
  categorical_columns += _identity_columns(
      features.transformed_names(features.BUCKET_FEATURE_KEYS),
      features.BUCKET_FEATURE_BUCKET_COUNT)
  categorical_columns += _identity_columns(
      features.transformed_names(features.CATEGORICAL_FEATURE_KEYS),
      features.CATEGORICAL_FEATURE_MAX_VALUES)

  indicator_columns = [
      tf.feature_column.indicator_column(column)
      for column in categorical_columns
  ]

  # TODO(b/140320729) Replace with premade wide_and_deep keras model
  return _wide_and_deep_classifier(
      wide_columns=indicator_columns,
      deep_columns=real_valued_columns,
      dnn_hidden_units=hidden_units,
      learning_rate=learning_rate)
The provided code snippet includes necessary dependencies for implementing the `run_fn` function. Write a Python function `def run_fn(fn_args)` to solve the following problem:
Train the model based on given args. Args: fn_args: Holds args used to train the model as name/value pairs.
Here is the function:
def run_fn(fn_args):
  """Train the model based on given args.

  Args:
    fn_args: Holds args used to train the model as name/value pairs.
  """
  transform_output = tft.TFTransformOutput(fn_args.transform_output)

  train_dataset = _input_fn(fn_args.train_files, fn_args.data_accessor,
                            transform_output, constants.TRAIN_BATCH_SIZE)
  eval_dataset = _input_fn(fn_args.eval_files, fn_args.data_accessor,
                           transform_output, constants.EVAL_BATCH_SIZE)

  # Build the model under a MirroredStrategy scope so its variables are
  # replicated across the locally available devices.
  strategy = tf.distribute.MirroredStrategy()
  with strategy.scope():
    model = _build_keras_model(
        hidden_units=constants.HIDDEN_UNITS,
        learning_rate=constants.LEARNING_RATE)

  # Write per-epoch training logs for TensorBoard.
  callbacks = [
      tf.keras.callbacks.TensorBoard(
          log_dir=fn_args.model_run_dir, update_freq='epoch')
  ]
  model.fit(
      train_dataset,
      steps_per_epoch=fn_args.train_steps,
      validation_data=eval_dataset,
      validation_steps=fn_args.eval_steps,
      callbacks=callbacks)

  # Export the model with a serving signature plus a transform-only
  # signature for feature preprocessing.
  signatures = {
      'serving_default':
          _get_tf_examples_serving_signature(model, transform_output),
      'transform_features':
          _get_transform_features_signature(model, transform_output),
  }
  model.save(fn_args.serving_model_dir, save_format='tf', signatures=signatures)
166,523 | from typing import List
The provided code snippet includes necessary dependencies for implementing the `vocabulary_name` function. Write a Python function `def vocabulary_name(key: str) -> str` to solve the following problem:
Generate the name of the vocabulary feature from original name.
Here is the function:
def vocabulary_name(key: str) -> str:
  """Returns the vocabulary feature name derived from the original name."""
  return '{}_vocab'.format(key)
166,524 | import collections
from typing import Dict, List, Mapping, Set
import tensorflow as tf
from tfx.dsl.io import fileio
from tfx.experimental.distributed_inference.graphdef_experiments.subgraph_partitioning import execution_spec
def _get_graph_def(filepath: str) -> tf.compat.v1.GraphDef:
  """Reads a binary-serialized `GraphDef` proto from `filepath`."""
  with fileio.open(filepath, 'rb') as stream:
    serialized = stream.read()
  proto = tf.compat.v1.GraphDef()
  proto.ParseFromString(serialized)
  return proto
The provided code snippet includes necessary dependencies for implementing the `get_graph_name_to_graph_def` function. Write a Python function `def get_graph_name_to_graph_def( graph_name_to_filepath: Mapping[str, str] ) -> Dict[str, tf.compat.v1.GraphDef]` to solve the following problem:
Gets the `GraphDef` protos from files. Args: graph_name_to_filepath: A mapping from graph names to filepaths. Each filepath points to a `GraphDef` proto in binary. Returns: A mapping from graph names to `GraphDef` protos.
Here is the function:
def get_graph_name_to_graph_def(
    graph_name_to_filepath: Mapping[str, str]
) -> Dict[str, tf.compat.v1.GraphDef]:
  """Gets the `GraphDef` protos from files.

  Args:
    graph_name_to_filepath: A mapping from graph names to filepaths. Each
      filepath points to a `GraphDef` proto in binary.

  Returns:
    A mapping from graph names to `GraphDef` protos.
  """
  result = {}
  for name, path in graph_name_to_filepath.items():
    result[name] = _get_graph_def(path)
  return result
166,525 | import collections
from typing import Dict, List, Mapping, Set
import tensorflow as tf
from tfx.dsl.io import fileio
from tfx.experimental.distributed_inference.graphdef_experiments.subgraph_partitioning import execution_spec
def _partition_one_graph(
    graph_def: tf.compat.v1.GraphDef,
    output_names: List[str]) -> List[execution_spec.ExecutionSpec]:
  """Partitions one graph into an ordered list of ExecutionSpecs.

  Args:
    graph_def: A `GraphDef` proto for that graph.
    output_names: A list of the graph's output node names.

  Returns:
    A list of ExecutionSpecs.
  """
  graph = _get_graph(graph_def)
  name_to_node = _get_node_name_to_node_def(graph_def)
  remote_op_deps = _get_remote_op_to_immediate_dep(name_to_node)

  layers = _get_execution_specs(graph_def, output_names, graph, name_to_node,
                                remote_op_deps)
  # Post-process the specs in place so their inputs are valid.
  _modify_execution_specs_for_input_validity(layers)
  return layers
The provided code snippet includes necessary dependencies for implementing the `partition_all_graphs` function. Write a Python function `def partition_all_graphs( graph_name_to_graph_def: Mapping[str, tf.compat.v1.GraphDef], graph_name_to_output_names: Mapping[str, List[str]] ) -> Dict[str, List[execution_spec.ExecutionSpec]]` to solve the following problem:
Partitions all the graphs. For each graph, the partitioning algorithm takes in the graph's `GraphDef` proto and output names, partitions the graph, and returns a list of ExecutionSpecs. Later, the beam_pipeline library can take in the ExecutionSpecs and execute the partitioned subgraphs. Args: graph_name_to_graph_def: A mapping from graph names to `GraphDef` protos. graph_name_to_output_names: A mapping from graph names to lists of their output node names. Returns: A mapping from graph names to a list of ExecutionSpecs, where the order of the list represents the order of execution.
Here is the function:
def partition_all_graphs(
    graph_name_to_graph_def: Mapping[str, tf.compat.v1.GraphDef],
    graph_name_to_output_names: Mapping[str, List[str]]
) -> Dict[str, List[execution_spec.ExecutionSpec]]:
  """Partitions all the graphs.

  For each graph, the partitioning algorithm takes in the graph's `GraphDef`
  proto and output names, partitions the graph, and returns a list of
  ExecutionSpecs. Later, the beam_pipeline library can take in the
  ExecutionSpecs and execute the partitioned subgraphs.

  Args:
    graph_name_to_graph_def: A mapping from graph names to `GraphDef` protos.
    graph_name_to_output_names: A mapping from graph names to lists of their
      output node names.

  Returns:
    A mapping from graph names to a list of ExecutionSpecs, where the order
    of the list represents the order of execution.
  """
  return {
      name: _partition_one_graph(graph_def,
                                 graph_name_to_output_names[name])
      for name, graph_def in graph_name_to_graph_def.items()
  }
166,526 | import tensorflow as tf
# These example graphs use the TF1 graph/session API, so eager mode is off.
tf.compat.v1.disable_eager_execution()

def create_session(graph):
  """Returns a tf.compat.v1.Session for `graph` with 8 inter-op threads."""
  return tf.compat.v1.Session(
      graph=graph,
      config=tf.compat.v1.ConfigProto(inter_op_parallelism_threads=8))
# Graph A: an embedding lookup against a random [N, NDIMS] table.
graph_a = tf.Graph()
with graph_a.as_default():
  # Fixed seed keeps the "remote" table identical across runs.
  table_a = tf.random.uniform(shape=[N, NDIMS], seed=10)
  ids_a = tf.compat.v1.placeholder(dtype=tf.int32, name='ids_a')
  result_a = tf.nn.embedding_lookup(table_a, ids_a)
The provided code snippet includes necessary dependencies for implementing the `remote_op_a` function. Write a Python function `def remote_op_a(input_ids)` to solve the following problem:
Mimics a remote op by numpy_function.
Here is the function:
def remote_op_a(input_ids):
  """Mimics a remote op by wrapping graph_a's lookup in a numpy_function."""

  def _lookup(ids):
    # Run graph_a in its own session, as a real remote call would.
    with create_session(graph_a) as sess:
      return sess.run(result_a, feed_dict={ids_a: ids})

  return tf.compat.v1.numpy_function(
      func=_lookup, inp=[input_ids], Tout=tf.float32, name='remote_op_a')
166,527 | import tensorflow as tf
# These example graphs use the TF1 graph/session API, so eager mode is off.
tf.compat.v1.disable_eager_execution()

def create_session(graph):
  """Returns a tf.compat.v1.Session for `graph` with 8 inter-op threads."""
  return tf.compat.v1.Session(
      graph=graph,
      config=tf.compat.v1.ConfigProto(inter_op_parallelism_threads=8))
# Graph B: combines two "remote" lookups into graph A.
graph_b = tf.Graph()
with graph_b.as_default():
  ids_b2 = tf.compat.v1.placeholder(dtype=tf.int32, name='ids_b2')
  ids_b1 = tf.compat.v1.placeholder(dtype=tf.int32, name='ids_b1')
  # Shift ids_b1 by one and wrap into [0, N) before the remote lookup.
  ids_b1_preprocessed = tf.math.floormod(tf.add(ids_b1, 1), N)
  remote_result_a1 = remote_op_a(ids_b1_preprocessed)
  remote_result_a2 = remote_op_a(ids_b2)
  result_b = tf.math.add(remote_result_a1, remote_result_a2 * 2.5)
The provided code snippet includes necessary dependencies for implementing the `remote_op_b` function. Write a Python function `def remote_op_b(input_ids1, input_ids2)` to solve the following problem:
Mimics another remote op.
Here is the function:
def remote_op_b(input_ids1, input_ids2):
  """Mimics another remote op, backed by graph_b."""

  def _lookup(ids1, ids2):
    # Run graph_b in its own session, as a real remote call would.
    feed = {ids_b1: ids1, ids_b2: ids2}
    with create_session(graph_b) as sess:
      return sess.run(result_b, feed_dict=feed)

  return tf.compat.v1.numpy_function(
      func=_lookup,
      inp=[input_ids1, input_ids2],
      Tout=tf.float32,
      name='remote_op_b')
166,528 | import tensorflow as tf
# These example graphs use the TF1 graph/session API, so eager mode is off.
tf.compat.v1.disable_eager_execution()

# Graph A: an embedding lookup against a random [N, NDIMS] table.
graph_a = tf.Graph()
with graph_a.as_default():
  table_a = tf.random.uniform(shape=[N, NDIMS], seed=10)
  ids_a = tf.compat.v1.placeholder(dtype=tf.int32, name='ids_a')
  result_a = tf.nn.embedding_lookup(table_a, ids_a)

# Graph B: combines two "remote" lookups into graph A.
graph_b = tf.Graph()
with graph_b.as_default():
  ids_b2 = tf.compat.v1.placeholder(dtype=tf.int32, name='ids_b2')
  ids_b1 = tf.compat.v1.placeholder(dtype=tf.int32, name='ids_b1')
  # Shift ids_b1 by one and wrap into [0, N) before the remote lookup.
  ids_b1_preprocessed = tf.math.floormod(tf.add(ids_b1, 1), N)
  remote_result_a1 = remote_op_a(ids_b1_preprocessed)
  remote_result_a2 = remote_op_a(ids_b2)
  result_b = tf.math.add(remote_result_a1, remote_result_a2 * 2.5)

# Main graph: wires two rounds of remote ops (a0/b0, then a1/b1) together
# with ordinary TF math in between.
main_graph = tf.Graph()
with main_graph.as_default():
  # Graph inputs.
  ids1 = tf.compat.v1.placeholder(dtype=tf.int32, name='ids1')
  ids2 = tf.compat.v1.placeholder(dtype=tf.int32, name='ids2')
  casted_ids1 = tf.cast(ids1, tf.float32)
  casted_ids2 = tf.cast(ids2, tf.float32)

  # First round of remote ops.
  remote_a0 = remote_op_a(ids1)
  remote_b0 = remote_op_b(ids1, ids2)

  left_upper_concat = tf.concat([remote_a0, remote_b0], 0)
  left_upper_sum = tf.reduce_mean(left_upper_concat)
  right_upper_sum = tf.reduce_mean(remote_b0)
  right_upper_mul = tf.multiply(right_upper_sum, casted_ids2)
  right_upper_add = tf.add(right_upper_mul, left_upper_sum)
  # Round and wrap into [0, N) to produce valid lookup ids.
  right_upper_round = tf.math.round(right_upper_mul)
  right_upper_floormod = tf.math.floormod(right_upper_round, N)
  left_upper_add = tf.add_n([left_upper_sum, casted_ids1, right_upper_add])
  left_upper_round = tf.math.round(left_upper_add)
  left_upper_floormod = tf.math.floormod(left_upper_round, N)

  # Second round of remote ops, fed by the first round's results.
  remote_a1 = remote_op_a(left_upper_floormod)
  remote_b1 = remote_op_b(left_upper_floormod, right_upper_floormod)

  left_lower_sum = tf.reduce_mean(remote_a1)
  right_lower_sum = tf.reduce_mean(remote_b1)
  right_lower_mul = tf.multiply(casted_ids2, right_lower_sum)
  right_lower_div = tf.divide(right_upper_add, right_lower_mul)
  main_result = tf.add_n([
      left_lower_sum, right_lower_div, right_lower_sum, right_upper_sum,
      tf.cast(left_upper_floormod, tf.float32)
  ])
def save_examples_as_graphdefs(export_dir):
  """Writes the three example graphs to `export_dir` as binary GraphDefs."""
  named_graphs = (
      (graph_a, 'graph_a.pb'),
      (graph_b, 'graph_b.pb'),
      (main_graph, 'main_graph.pb'),
  )
  for graph, filename in named_graphs:
    tf.io.write_graph(
        graph.as_graph_def(), export_dir, filename, as_text=False)
166,529 | import copy
from typing import Any, Dict, Iterator, List, Mapping
import apache_beam as beam
import tensorflow as tf
from tfx.experimental.distributed_inference.graphdef_experiments.subgraph_partitioning import execution_spec
class _SubgraphLayerDoFn(beam.DoFn):
  """DoFn that executes one partitioned subgraph layer in a TF session."""

  def process(
      self,
      # Not using Mapping here because it doesn't support item assignment.
      element: Dict[str, Dict[str, Any]],
      spec: execution_spec.ExecutionSpec,
      remote_op_name: str) -> Iterator[Dict[str, Dict[str, Any]]]:
    """Runs the subgraph described by `spec` and records its outputs.

    A feed_dict is assembled from values previously stored under
    element[remote_op_name] (graph inputs plus earlier specs' outputs). The
    subgraph is imported into a fresh graph, run, and its outputs are written
    back into a copy of `element`.

    Because the `GraphDef` protos are imported, every node name carries the
    "import/" prefix, and TensorFlow feeds/fetches take tensor names rather
    than node names; node names are therefore converted to
    "import/<node_name>:0". This conversion assumes one output per node.

    Args:
      element: A dictionary from remote op names to a dictionary from tensor
        names to values. element[remote_op_name] stores graph inputs and
        previous specs' outputs.
      spec: An ExecutionSpec for a subgraph layer.
      remote_op_name: The remote op name of the current graph.

    Yields:
      A copy of the input element with this subgraph layer's outputs added.
    """
    # Copy so the incoming element is never mutated.
    element = copy.deepcopy(element)
    tensor_values = element[remote_op_name]

    feed_names = [_import_tensor_name(name) for name in spec.input_names]
    fetch_names = [_import_tensor_name(name) for name in spec.output_names]
    feeds = {name: tensor_values[name] for name in feed_names}

    with tf.compat.v1.Session(graph=tf.Graph()) as sess:
      tf.import_graph_def(spec.subgraph)
      results = sess.run(fetch_names, feed_dict=feeds)

    for name, value in zip(fetch_names, results):
      tensor_values[name] = value
    yield element
def _LoadRemoteGraphInputs(  # pylint: disable=invalid-name
    pcoll: beam.pvalue.PCollection, parent_remote_op_name: str,
    child_remote_op_name: str, remote_op_name_to_graph_name: Mapping[str, str],
    graph_to_remote_op_input_name_mapping: Mapping[str, Mapping[str,
                                                                Mapping[str,
                                                                        str]]]
) -> beam.pvalue.PCollection:
  """A PTransform that prepares inputs for a remote (child) graph.

  Before a remote graph can run, its inputs must be staged: the mapping from
  the child graph's placeholder names to the parent graph's input names is
  looked up, and each required value is copied from the parent graph's key
  to the child graph's key in a copy of the element.

  Args:
    pcoll: A PCollection of child graph inputs not loaded yet. Each element
      is a dictionary from remote op names to a dictionary from tensor names
      to values; element[child_remote_op_name] is empty at this point.
    parent_remote_op_name: The remote op name of the parent graph.
    child_remote_op_name: The remote op name of the child graph.
    remote_op_name_to_graph_name: A mapping from remote op names to graph
      names.
    graph_to_remote_op_input_name_mapping: A mapping from graph names to
      remote op names to remote graph placeholder names to parent graph
      input names:
      {graph name: {remote op name: {placeholder name: input name}}}.

  Returns:
    A PCollection of inputs to the child graph, where
    element[child_remote_op_name] stores the child graph's inputs.
  """
  parent_graph = remote_op_name_to_graph_name[parent_remote_op_name]
  placeholder_to_input = (
      graph_to_remote_op_input_name_mapping[parent_graph]
      [child_remote_op_name])

  # Calling _copy_tensor_value multiple times may introduce a burden, since
  # _copy_tensor_value invokes a deepcopy on element.
  for placeholder_name, input_name in placeholder_to_input.items():
    step_name = ("PrepareInput[Graph_%s][Input_%s]" %
                 (child_remote_op_name, placeholder_name))
    pcoll = pcoll | step_name >> beam.Map(
        _copy_tensor_value, parent_remote_op_name,
        _import_tensor_name(input_name), child_remote_op_name,
        _import_tensor_name(placeholder_name))
  return pcoll
def _ExtractRemoteGraphOutput(  # pylint: disable=invalid-name
    pcoll: beam.pvalue.PCollection,
    parent_remote_op_name: str,
    child_remote_op_name: str,
    remote_op_name_to_graph_name: Mapping[str, str],
    graph_name_to_specs: Mapping[str, List[execution_spec.ExecutionSpec]],
) -> beam.pvalue.PCollection:
  """A PTransform that extracts a remote (child) graph's output.

  After the remote graph finishes, its output name is looked up from the
  last of its ExecutionSpecs, the output value is copied into the parent
  graph's entry, and the remote graph's intermediate results are cleared.

  Note: each node is assumed to have exactly one output, which also applies
  to remote ops — so a remote graph can only have one output.

  Args:
    pcoll: A PCollection of child graph results. Each element is a dictionary
      from remote op names to a dictionary from tensor names to values, where
      element[child_remote_op_name] stores graph inputs, intermediate
      results, and the graph output.
    parent_remote_op_name: The remote op name of the parent graph.
    child_remote_op_name: The remote op name of the child graph.
    remote_op_name_to_graph_name: A mapping from remote op names to graph
      names.
    graph_name_to_specs: A mapping from graph names to lists of
      ExecutionSpecs.

  Returns:
    A PCollection where element[parent_remote_op_name] contains the output
    from the child graph and element[child_remote_op_name] is deleted.
  """
  child_graph = remote_op_name_to_graph_name[child_remote_op_name]
  last_spec = graph_name_to_specs[child_graph][-1]
  output_name = list(last_spec.output_names)[0]

  extract_step = ("ExtractOutput[Graph_%s][Output_%s]" %
                  (child_remote_op_name, output_name))
  clear_step = ("ClearIntermediateOutputs[Graph_%s]" %
                (child_remote_op_name))

  pcoll = pcoll | extract_step >> beam.Map(
      _copy_tensor_value, child_remote_op_name,
      _import_tensor_name(output_name), parent_remote_op_name,
      _import_tensor_name(child_remote_op_name))
  return pcoll | clear_step >> beam.Map(_clear_outputs_for_finished_graph,
                                        child_remote_op_name)
The provided code snippet includes necessary dependencies for implementing the `ExecuteGraph` function. Write a Python function `def ExecuteGraph( # pylint: disable=invalid-name pcoll: beam.pvalue.PCollection, remote_op_name: str, remote_op_name_to_graph_name: Mapping[str, str], graph_name_to_specs: Mapping[str, List[execution_spec.ExecutionSpec]], graph_to_remote_op_input_name_mapping: Mapping[str, Mapping[str, Mapping[str, str]]] ) -> beam.pvalue.PCollection` to solve the following problem:
A PTransform that executes a graph. Each graph has a list of ExecutionSpecs, in which the order of the list represents the order of execution. An ExecutionSpec can either represent a subgraph layer or a remote op in a remote op layer. When executing a subgraph layer, we can load and execute the subgraph with a beam ParDo. When executing a remote op (which represents another graph), we need to load the remote graph inputs, call ExecuteGraph to recursively execute that graph, and extract the remote graph output. When executing a remote op, we call the current graph "parent" and the remote graph "child". Here, each Beam element is a dictionary from remote op names to a dictionary from tensor names to values, or {remote op name: {tensor name: value}}. Note that at any time, PColl only stores input tensor values and computed tensor values. The input PColl should have the input tensor names and values for the graph ready. As we execute the partitioned subgraphs, we add the intermediate output names and values to PColl. Args: pcoll: A PCollection of inputs to the graph. Each element is a dictionary from remote op names to a dictionary from tensor names to values. Here, element[remote_op_name] contains graph inputs. remote_op_name: The remote op name of the current graph. remote_op_name_to_graph_name: A mapping from remote op names to graph names. graph_name_to_specs: A mapping from graph names to a list of ExecutionSpecs, where the order of the list represents the order of execution. graph_to_remote_op_input_name_mapping: A mapping from graph names to remote op names to remote graph placeholder names to parent graph input names. We don't have this information since it was stored in PyFunc's function. {graph name: {remote op name: {placeholder name: input name}}}. Returns: A PCollection of results of this graph. Each element is a dictionary from remote op names to a dictionary from tensor names to values. 
Here, element[remote_op_name] stores graph inputs, intermediate results, and graph outputs.
Here is the function:
def ExecuteGraph(  # pylint: disable=invalid-name
    pcoll: beam.pvalue.PCollection, remote_op_name: str,
    remote_op_name_to_graph_name: Mapping[str, str],
    graph_name_to_specs: Mapping[str, List[execution_spec.ExecutionSpec]],
    graph_to_remote_op_input_name_mapping:
    Mapping[str, Mapping[str, Mapping[str, str]]]
) -> beam.pvalue.PCollection:
  """A PTransform that executes a graph.

  Each graph has a list of ExecutionSpecs, in which the order of the list
  represents the order of execution. An ExecutionSpec can either represent
  a subgraph layer or a remote op in a remote op layer. When executing a
  subgraph layer, we can load and execute the subgraph with a beam ParDo.
  When executing a remote op (which represents another graph), we need to
  load the remote graph inputs, call ExecuteGraph to recursively execute that
  graph, and extract the remote graph output. When executing a remote op, we
  call the current graph "parent" and the remote graph "child".

  Here, each Beam element is a dictionary from remote op names to a dictionary
  from tensor names to values, or {remote op name: {tensor name: value}}.
  Note that at any time, PColl only stores input tensor values and computed
  tensor values. The input PColl should have the input tensor names and values
  for the graph ready. As we execute the partitioned subgraphs, we add the
  intermediate output names and values to PColl.

  Args:
    pcoll: A PCollection of inputs to the graph. Each element is a dictionary
      from remote op names to a dictionary from tensor names to values. Here,
      element[remote_op_name] contains graph inputs.
    remote_op_name: The remote op name of the current graph.
    remote_op_name_to_graph_name: A mapping from remote op names to graph
      names.
    graph_name_to_specs: A mapping from graph names to a list of
      ExecutionSpecs, where the order of the list represents the order of
      execution.
    graph_to_remote_op_input_name_mapping: A mapping from graph names to
      remote op names to remote graph placeholder names to parent graph input
      names. We don't have this information since it was stored in PyFunc's
      function. {graph name: {remote op name: {placeholder name: input name}}}.

  Returns:
    A PCollection of results of this graph. Each element is a dictionary from
    remote op names to a dictionary from tensor names to values. Here,
    element[remote_op_name] stores graph inputs, intermediate results, and
    graph outputs.
  """
  # The spec list is already ordered for execution; apply each in sequence.
  specs = graph_name_to_specs[remote_op_name_to_graph_name[remote_op_name]]
  for spec in specs:
    # Construct Beam subgraph for a subgraph layer.
    if not spec.is_remote_op:
      step_name = ("SubgraphLayerDoFn[Graph_%s][Outputs_%s]" %
                   (remote_op_name, "_".join(spec.output_names)))
      pcoll = pcoll | step_name >> beam.ParDo(_SubgraphLayerDoFn(), spec,
                                              remote_op_name)
    # Construct Beam subgraph for a remote op.
    else:
      # ExecutionSpec stores one remote op.
      child_remote_op_name = list(spec.output_names)[0]
      # step_descriptor keeps the Beam step labels unique per parent/child
      # pair, which Beam requires for distinct transforms.
      step_descriptor = ("[Parent_%s][Child_%s]" %
                         (remote_op_name, child_remote_op_name))
      # Stage the child graph's inputs from the parent's computed values.
      step_name = "LoadRemoteGraphInputs%s" % step_descriptor
      pcoll = pcoll | step_name >> _LoadRemoteGraphInputs(  # pylint: disable=no-value-for-parameter
          remote_op_name, child_remote_op_name, remote_op_name_to_graph_name,
          graph_to_remote_op_input_name_mapping)
      # A good place to add beam.Reshuffle() to prevent fusion.
      # Recursively build the Beam subgraph that executes the child graph.
      step_name = "ExecuteGraph%s" % step_descriptor
      pcoll = pcoll | step_name >> ExecuteGraph(  # pylint: disable=no-value-for-parameter
          child_remote_op_name, remote_op_name_to_graph_name,
          graph_name_to_specs, graph_to_remote_op_input_name_mapping)
      # Copy the child's single output back to the parent and clear the
      # child's intermediate results.
      step_name = "ExtractRemoteGraphOutput%s" % step_descriptor
      pcoll = pcoll | step_name >> _ExtractRemoteGraphOutput(  # pylint: disable=no-value-for-parameter
          remote_op_name, child_remote_op_name, remote_op_name_to_graph_name,
          graph_name_to_specs)
  return pcoll
Here, element[remote_op_name] stores graph inputs, intermediate results, and graph outputs. |
166,530 | from absl import app
from absl import flags
import tensorflow_docs.api_generator as api_generator
from tensorflow_docs.api_generator import generate_lib
from tfx import v1
from tfx import version
from tfx.utils import doc_controls
from google.protobuf.reflection import GeneratedProtocolMessageType
The provided code snippet includes necessary dependencies for implementing the `ignore_test_objects` function. Write a Python function `def ignore_test_objects(path, parent, children)` to solve the following problem:
Removes "test" and "example" modules. These are not part of the public api. Args: path: A tuple of name parts forming the attribute-lookup path to this object. For `tf.keras.layers.Dense` path is: ("tf","keras","layers","Dense") parent: The parent object. children: A list of (name, value) pairs. The attributes of the patent. Returns: A filtered list of children `(name, value)` pairs. With all test modules removed.
Here is the function:
def ignore_test_objects(path, parent, children):
  """Removes "test" and "example" modules; they are not part of the public api.

  Args:
    path: A tuple of name parts forming the attribute-lookup path to this
      object. For `tf.keras.layers.Dense` path is:
      ("tf","keras","layers","Dense")
    parent: The parent object.
    children: A list of (name, value) pairs. The attributes of the parent.

  Returns:
    A filtered list of children `(name, value)` pairs, with all test modules
    removed.
  """
  del path, parent  # Unused.
  return [(name, obj)
          for name, obj in children
          if not name.endswith("_test") and not name.startswith("test_")]
166,531 | from absl import app
from absl import flags
import tensorflow_docs.api_generator as api_generator
from tensorflow_docs.api_generator import generate_lib
from tfx import v1
from tfx import version
from tfx.utils import doc_controls
from google.protobuf.reflection import GeneratedProtocolMessageType
The provided code snippet includes necessary dependencies for implementing the `ignore_proto_methods` function. Write a Python function `def ignore_proto_methods(path, parent, children)` to solve the following problem:
Remove all the proto inherited methods. Args: path: A tuple of name parts forming the attribute-lookup path to this object. For `tf.keras.layers.Dense` path is: ("tf","keras","layers","Dense") parent: The parent object. children: A list of (name, value) pairs. The attributes of the parent. Returns: A filtered list of children `(name, value)` pairs. With all proto methods removed.
Here is the function:
def ignore_proto_methods(path, parent, children):
  """Remove all the proto inherited methods.

  Args:
    path: A tuple of name parts forming the attribute-lookup path to this
      object. For `tf.keras.layers.Dense` path is:
      ("tf","keras","layers","Dense")
    parent: The parent object.
    children: A list of (name, value) pairs. The attributes of the parent.

  Returns:
    A filtered list of children `(name, value)` pairs. With all proto methods
    removed.
  """
  del path  # Unused.
  # Only proto message classes are filtered; everything else passes through.
  if not isinstance(parent, GeneratedProtocolMessageType):
    return children
  # Drop every callable attribute (the generated proto methods).
  return [(name, obj) for name, obj in children if not callable(obj)]
166,532 | from typing import Iterable, Dict
import click
from tfx.tools.cli import labels
from tfx.tools.cli.cli_context import Context
from tfx.tools.cli.cli_context import pass_context
from tfx.tools.cli.handler import handler_factory
def delete_run(ctx: Context, engine: str, run_id: str, endpoint: str,
               iap_client_id: str, namespace: str) -> None:
  """Command definition to delete a run."""
  click.echo('Deleting run.')
  # Stash every CLI flag on the shared context before handing off.
  flags = ctx.flags_dict
  flags[labels.ENGINE_FLAG] = engine
  flags[labels.RUN_ID] = run_id
  flags[labels.ENDPOINT] = endpoint
  flags[labels.IAP_CLIENT_ID] = iap_client_id
  flags[labels.NAMESPACE] = namespace
  # The engine-specific handler performs the actual deletion.
  handler = handler_factory.create_handler(flags)
  handler.delete_run()
def run_group() -> None:
  # NOTE(review): intentionally empty body — presumably this is decorated as
  # the click `run` command group; confirm the decorator above this chunk.
  pass
166,533 | from typing import Iterable, Dict
import click
from tfx.tools.cli import labels
from tfx.tools.cli.cli_context import Context
from tfx.tools.cli.cli_context import pass_context
from tfx.tools.cli.handler import handler_factory
def _parse_runtime_parameters(
runtime_parameters: Iterable[str]) -> Dict[str, str]:
"""Turns runtime parameter into dictionary."""
result = {}
for name_value_pair in runtime_parameters:
if '=' not in name_value_pair:
raise ValueError('Runtime parameter should be <name>=<value> format.')
name, value = name_value_pair.split('=', maxsplit=1)
result[name] = value
return result
'--engine', default='auto', type=str, help='Orchestrator for pipelines')
'--run_id',
'--run-id',
required=True,
type=str,
help='Unique ID for the run.)')
'--endpoint',
default='',
type=str,
help='Endpoint of the KFP API service to connect.')
'--iap_client_id',
'--iap-client-id',
default='',
type=str,
help='Client ID for IAP protected endpoint.')
'-n',
'--namespace',
default='kubeflow',
type=str,
help='Kubernetes namespace to connect to the KFP API.'
click.echo('Terminating run.')
handler_factory.create_handler(ctx.flags_dict).terminate_run()
def delete_run(ctx: Context, engine: str, run_id: str, endpoint: str,
               iap_client_id: str, namespace: str) -> None:
  """Command definition to delete a run."""
  click.echo('Deleting run.')
  # Record every CLI flag in the shared context before dispatching.
  ctx.flags_dict.update({
      labels.ENGINE_FLAG: engine,
      labels.RUN_ID: run_id,
      labels.ENDPOINT: endpoint,
      labels.IAP_CLIENT_ID: iap_client_id,
      labels.NAMESPACE: namespace,
  })
  # The factory picks the engine-specific handler from the flags.
  handler_factory.create_handler(ctx.flags_dict).delete_run()
class Context:
  """Shared state passed between all CLI command groups.

  Attributes:
    flags_dict: Mapping of flag names to their parsed values for a command.
  """

  def __init__(self):
    # Starts empty; individual commands populate it before dispatch.
    self.flags_dict = dict()
The provided code snippet includes necessary dependencies for implementing the `create_run` function. Write a Python function `def create_run(ctx: Context, engine: str, pipeline_name: str, endpoint: str, iap_client_id: str, namespace: str, project: str, region: str, runtime_parameter: Iterable[str]) -> None` to solve the following problem:
Command definition to create a pipeline run.
Here is the function:
def create_run(ctx: Context, engine: str, pipeline_name: str, endpoint: str,
               iap_client_id: str, namespace: str, project: str,
               region: str, runtime_parameter: Iterable[str]) -> None:
  """Command definition to create a pipeline run."""
  click.echo('Creating a run for pipeline: ' + pipeline_name)
  # Record every CLI flag in the shared context before dispatching.
  ctx.flags_dict.update({
      labels.ENGINE_FLAG: engine,
      labels.PIPELINE_NAME: pipeline_name,
      labels.ENDPOINT: endpoint,
      labels.IAP_CLIENT_ID: iap_client_id,
      labels.NAMESPACE: namespace,
      labels.GCP_PROJECT_ID: project,
      labels.GCP_REGION: region,
      labels.RUNTIME_PARAMETER: _parse_runtime_parameters(runtime_parameter),
  })
  handler = handler_factory.create_handler(ctx.flags_dict)
  # Runtime parameters are only implemented for a subset of orchestrators.
  supported_engines = (labels.KUBEFLOW_ENGINE, labels.AIRFLOW_ENGINE,
                       labels.VERTEX_ENGINE)
  if runtime_parameter and (
      ctx.flags_dict[labels.ENGINE_FLAG] not in supported_engines):
    raise NotImplementedError(
        'Currently runtime parameter is only supported in kubeflow, vertex, '
        'and airflow.')
  handler.create_run()
166,534 | from typing import Iterable, Dict
import click
from tfx.tools.cli import labels
from tfx.tools.cli.cli_context import Context
from tfx.tools.cli.cli_context import pass_context
from tfx.tools.cli.handler import handler_factory
def terminate_run(ctx: Context, engine: str, run_id: str, endpoint: str,
                  iap_client_id: str, namespace: str) -> None:
  """Command definition to stop a run."""
  click.echo('Terminating run.')
  # Record every CLI flag in the shared context before dispatching.
  ctx.flags_dict.update({
      labels.ENGINE_FLAG: engine,
      labels.RUN_ID: run_id,
      labels.ENDPOINT: endpoint,
      labels.IAP_CLIENT_ID: iap_client_id,
      labels.NAMESPACE: namespace,
  })
  # The factory picks the engine-specific handler from the flags.
  handler_factory.create_handler(ctx.flags_dict).terminate_run()
'--engine', default='auto', type=str, help='Orchestrator for pipelines')
'--pipeline_name',
'--pipeline-name',
required=True,
type=str,
help='Name of the pipeline')
'--endpoint',
default='',
type=str,
help='Endpoint of the KFP API service to connect.')
'--iap_client_id',
'--iap-client-id',
default='',
type=str,
help='Client ID for IAP protected endpoint.')
'-n',
'--namespace',
default='kubeflow',
type=str,
help='Kubernetes namespace to connect to the KFP API.'
def delete_run(ctx: Context, engine: str, run_id: str, endpoint: str,
               iap_client_id: str, namespace: str) -> None:
  """Command definition to delete a run."""
  click.echo('Deleting run.')
  # Record every CLI flag in the shared context before dispatching.
  ctx.flags_dict.update({
      labels.ENGINE_FLAG: engine,
      labels.RUN_ID: run_id,
      labels.ENDPOINT: endpoint,
      labels.IAP_CLIENT_ID: iap_client_id,
      labels.NAMESPACE: namespace,
  })
  # The factory picks the engine-specific handler from the flags.
  handler_factory.create_handler(ctx.flags_dict).delete_run()
class Context:
  """Shared state passed between all CLI command groups.

  Attributes:
    flags_dict: Mapping of flag names to their parsed values for a command.
  """

  def __init__(self):
    # Starts empty; individual commands populate it before dispatch.
    self.flags_dict = dict()
The provided code snippet includes necessary dependencies for implementing the `terminate_run` function. Write a Python function `def terminate_run(ctx: Context, engine: str, run_id: str, endpoint: str, iap_client_id: str, namespace: str) -> None` to solve the following problem:
Command definition to stop a run.
Here is the function:
def terminate_run(ctx: Context, engine: str, run_id: str, endpoint: str,
                  iap_client_id: str, namespace: str) -> None:
  """Command definition to stop a run."""
  click.echo('Terminating run.')
  # Record every CLI flag in the shared context before dispatching.
  ctx.flags_dict.update({
      labels.ENGINE_FLAG: engine,
      labels.RUN_ID: run_id,
      labels.ENDPOINT: endpoint,
      labels.IAP_CLIENT_ID: iap_client_id,
      labels.NAMESPACE: namespace,
  })
  # The factory picks the engine-specific handler from the flags.
  handler_factory.create_handler(ctx.flags_dict).terminate_run()
166,535 | from typing import Iterable, Dict
import click
from tfx.tools.cli import labels
from tfx.tools.cli.cli_context import Context
from tfx.tools.cli.cli_context import pass_context
from tfx.tools.cli.handler import handler_factory
def terminate_run(ctx: Context, engine: str, run_id: str, endpoint: str,
                  iap_client_id: str, namespace: str) -> None:
  """Command definition to stop a run."""
  click.echo('Terminating run.')
  # Record every CLI flag in the shared context before dispatching.
  ctx.flags_dict.update({
      labels.ENGINE_FLAG: engine,
      labels.RUN_ID: run_id,
      labels.ENDPOINT: endpoint,
      labels.IAP_CLIENT_ID: iap_client_id,
      labels.NAMESPACE: namespace,
  })
  # The factory picks the engine-specific handler from the flags.
  handler_factory.create_handler(ctx.flags_dict).terminate_run()
'--engine', default='auto', type=str, help='Orchestrator for pipelines')
'--pipeline_name',
'--pipeline-name',
required=True,
type=str,
help='Name of the pipeline')
'--endpoint',
default='',
type=str,
help='Endpoint of the KFP API service to connect.')
'--iap_client_id',
'--iap-client-id',
default='',
type=str,
help='Client ID for IAP protected endpoint.')
'-n',
'--namespace',
default='kubeflow',
type=str,
help='Kubernetes namespace to connect to the KFP API.'
def delete_run(ctx: Context, engine: str, run_id: str, endpoint: str,
               iap_client_id: str, namespace: str) -> None:
  """Command definition to delete a run."""
  click.echo('Deleting run.')
  # Record every CLI flag in the shared context before dispatching.
  ctx.flags_dict.update({
      labels.ENGINE_FLAG: engine,
      labels.RUN_ID: run_id,
      labels.ENDPOINT: endpoint,
      labels.IAP_CLIENT_ID: iap_client_id,
      labels.NAMESPACE: namespace,
  })
  # The factory picks the engine-specific handler from the flags.
  handler_factory.create_handler(ctx.flags_dict).delete_run()
class Context:
  """Shared state passed between all CLI command groups.

  Attributes:
    flags_dict: Mapping of flag names to their parsed values for a command.
  """

  def __init__(self):
    # Starts empty; individual commands populate it before dispatch.
    self.flags_dict = dict()
The provided code snippet includes necessary dependencies for implementing the `list_runs` function. Write a Python function `def list_runs(ctx: Context, engine: str, pipeline_name: str, endpoint: str, iap_client_id: str, namespace: str) -> None` to solve the following problem:
Command definition to list all runs of a pipeline.
Here is the function:
def list_runs(ctx: Context, engine: str, pipeline_name: str, endpoint: str,
              iap_client_id: str, namespace: str) -> None:
  """Command definition to list all runs of a pipeline."""
  click.echo('Listing all runs of pipeline: ' + pipeline_name)
  # Record every CLI flag in the shared context before dispatching.
  ctx.flags_dict.update({
      labels.ENGINE_FLAG: engine,
      labels.PIPELINE_NAME: pipeline_name,
      labels.ENDPOINT: endpoint,
      labels.IAP_CLIENT_ID: iap_client_id,
      labels.NAMESPACE: namespace,
  })
  # The factory picks the engine-specific handler from the flags.
  handler_factory.create_handler(ctx.flags_dict).list_runs()
166,536 | from typing import Iterable, Dict
import click
from tfx.tools.cli import labels
from tfx.tools.cli.cli_context import Context
from tfx.tools.cli.cli_context import pass_context
from tfx.tools.cli.handler import handler_factory
def terminate_run(ctx: Context, engine: str, run_id: str, endpoint: str,
                  iap_client_id: str, namespace: str) -> None:
  """Command definition to stop a run."""
  click.echo('Terminating run.')
  # Record every CLI flag in the shared context before dispatching.
  ctx.flags_dict.update({
      labels.ENGINE_FLAG: engine,
      labels.RUN_ID: run_id,
      labels.ENDPOINT: endpoint,
      labels.IAP_CLIENT_ID: iap_client_id,
      labels.NAMESPACE: namespace,
  })
  # The factory picks the engine-specific handler from the flags.
  handler_factory.create_handler(ctx.flags_dict).terminate_run()
'--engine', default='auto', type=str, help='Orchestrator for pipelines')
'--pipeline_name',
'--pipeline-name',
required=True,
type=str,
help='Name of the pipeline')
'--endpoint',
default='',
type=str,
help='Endpoint of the KFP API service to connect.')
'--iap_client_id',
'--iap-client-id',
default='',
type=str,
help='Client ID for IAP protected endpoint.')
'-n',
'--namespace',
default='kubeflow',
type=str,
help='Kubernetes namespace to connect to the KFP API.'
def delete_run(ctx: Context, engine: str, run_id: str, endpoint: str,
               iap_client_id: str, namespace: str) -> None:
  """Command definition to delete a run."""
  click.echo('Deleting run.')
  # Record every CLI flag in the shared context before dispatching.
  ctx.flags_dict.update({
      labels.ENGINE_FLAG: engine,
      labels.RUN_ID: run_id,
      labels.ENDPOINT: endpoint,
      labels.IAP_CLIENT_ID: iap_client_id,
      labels.NAMESPACE: namespace,
  })
  # The factory picks the engine-specific handler from the flags.
  handler_factory.create_handler(ctx.flags_dict).delete_run()
class Context:
  """Shared state passed between all CLI command groups.

  Attributes:
    flags_dict: Mapping of flag names to their parsed values for a command.
  """

  def __init__(self):
    # Starts empty; individual commands populate it before dispatch.
    self.flags_dict = dict()
The provided code snippet includes necessary dependencies for implementing the `get_run` function. Write a Python function `def get_run(ctx: Context, engine: str, pipeline_name: str, run_id: str, endpoint: str, iap_client_id: str, namespace: str, project: str, region: str) -> None` to solve the following problem:
Command definition to stop a run.
Here is the function:
def get_run(ctx: Context, engine: str, pipeline_name: str, run_id: str,
            endpoint: str, iap_client_id: str, namespace: str, project: str,
            region: str) -> None:
  """Command definition to get the status of a run.

  Note: the original docstring said "stop a run", copy-pasted from
  `terminate_run`; this command only retrieves run status.
  """
  click.echo('Retrieving run status.')
  # Record every CLI flag in the shared context before dispatching.
  ctx.flags_dict[labels.ENGINE_FLAG] = engine
  ctx.flags_dict[labels.RUN_ID] = run_id
  ctx.flags_dict[labels.PIPELINE_NAME] = pipeline_name
  ctx.flags_dict[labels.ENDPOINT] = endpoint
  ctx.flags_dict[labels.IAP_CLIENT_ID] = iap_client_id
  ctx.flags_dict[labels.NAMESPACE] = namespace
  ctx.flags_dict[labels.GCP_PROJECT_ID] = project
  ctx.flags_dict[labels.GCP_REGION] = region
  # The factory picks the engine-specific handler from the flags.
  handler_factory.create_handler(ctx.flags_dict).get_run()
166,537 | from typing import Iterable, Dict
import click
from tfx.tools.cli import labels
from tfx.tools.cli.cli_context import Context
from tfx.tools.cli.cli_context import pass_context
from tfx.tools.cli.handler import handler_factory
def terminate_run(ctx: Context, engine: str, run_id: str, endpoint: str,
                  iap_client_id: str, namespace: str) -> None:
  """Command definition to stop a run."""
  click.echo('Terminating run.')
  # Record every CLI flag in the shared context before dispatching.
  ctx.flags_dict.update({
      labels.ENGINE_FLAG: engine,
      labels.RUN_ID: run_id,
      labels.ENDPOINT: endpoint,
      labels.IAP_CLIENT_ID: iap_client_id,
      labels.NAMESPACE: namespace,
  })
  # The factory picks the engine-specific handler from the flags.
  handler_factory.create_handler(ctx.flags_dict).terminate_run()
'--engine', default='auto', type=str, help='Orchestrator for pipelines')
'--pipeline_name',
'--pipeline-name',
required=True,
type=str,
help='Name of the pipeline')
'--endpoint',
default='',
type=str,
help='Endpoint of the KFP API service to connect.')
'--iap_client_id',
'--iap-client-id',
default='',
type=str,
help='Client ID for IAP protected endpoint.')
'-n',
'--namespace',
default='kubeflow',
type=str,
help='Kubernetes namespace to connect to the KFP API.'
def delete_run(ctx: Context, engine: str, run_id: str, endpoint: str,
               iap_client_id: str, namespace: str) -> None:
  """Command definition to delete a run."""
  click.echo('Deleting run.')
  # Record every CLI flag in the shared context before dispatching.
  ctx.flags_dict.update({
      labels.ENGINE_FLAG: engine,
      labels.RUN_ID: run_id,
      labels.ENDPOINT: endpoint,
      labels.IAP_CLIENT_ID: iap_client_id,
      labels.NAMESPACE: namespace,
  })
  # The factory picks the engine-specific handler from the flags.
  handler_factory.create_handler(ctx.flags_dict).delete_run()
class Context:
  """Shared state passed between all CLI command groups.

  Attributes:
    flags_dict: Mapping of flag names to their parsed values for a command.
  """

  def __init__(self):
    # Starts empty; individual commands populate it before dispatch.
    self.flags_dict = dict()
The provided code snippet includes necessary dependencies for implementing the `delete_run` function. Write a Python function `def delete_run(ctx: Context, engine: str, run_id: str, endpoint: str, iap_client_id: str, namespace: str) -> None` to solve the following problem:
Command definition to delete a run.
Here is the function:
def delete_run(ctx: Context, engine: str, run_id: str, endpoint: str,
               iap_client_id: str, namespace: str) -> None:
  """Command definition to delete a run."""
  click.echo('Deleting run.')
  # Record every CLI flag in the shared context before dispatching.
  ctx.flags_dict.update({
      labels.ENGINE_FLAG: engine,
      labels.RUN_ID: run_id,
      labels.ENDPOINT: endpoint,
      labels.IAP_CLIENT_ID: iap_client_id,
      labels.NAMESPACE: namespace,
  })
  # The factory picks the engine-specific handler from the flags.
  handler_factory.create_handler(ctx.flags_dict).delete_run()
166,538 | import click
from tfx.tools.cli import labels
from tfx.tools.cli.cli_context import Context
from tfx.tools.cli.cli_context import pass_context
from tfx.tools.cli.handler import template_handler
def template_group() -> None:
  """Entry point for template-related subcommands; intentionally a no-op.

  Presumably registered as a CLI command group (decorator not visible
  here) — the subcommands carry the actual behavior.
  """
  pass
166,539 | import click
from tfx.tools.cli import labels
from tfx.tools.cli.cli_context import Context
from tfx.tools.cli.cli_context import pass_context
from tfx.tools.cli.handler import template_handler
def list_templates() -> None:
  """Prints the name of every available pipeline template."""
  click.echo('Available templates:')
  for template_name in template_handler.list_template():
    # One bullet line per template.
    click.echo('- {}'.format(template_name))
166,540 | import click
from tfx.tools.cli import labels
from tfx.tools.cli.cli_context import Context
from tfx.tools.cli.cli_context import pass_context
from tfx.tools.cli.handler import template_handler
class Context:
  """Shared state passed between all CLI command groups.

  Attributes:
    flags_dict: Mapping of flag names to their parsed values for a command.
  """

  def __init__(self):
    # Starts empty; individual commands populate it before dispatch.
    self.flags_dict = dict()
The provided code snippet includes necessary dependencies for implementing the `copy` function. Write a Python function `def copy(ctx: Context, pipeline_name: str, destination_path: str, model: str) -> None` to solve the following problem:
Command definition to copy template to specified directory.
Here is the function:
def copy(ctx: Context, pipeline_name: str, destination_path: str,
         model: str) -> None:
  """Command definition to copy template to specified directory."""
  click.echo('Copying {} pipeline template'.format(model))
  # Record every CLI flag in the shared context before dispatching.
  ctx.flags_dict.update({
      labels.PIPELINE_NAME: pipeline_name,
      labels.DESTINATION_PATH: destination_path,
      labels.MODEL: model,
  })
  template_handler.copy_template(ctx.flags_dict)
166,541 | import sys
from typing import Optional
import click
from tfx.tools.cli import labels
from tfx.tools.cli.cli_context import Context
from tfx.tools.cli.cli_context import pass_context
from tfx.tools.cli.handler import handler_factory
def create_pipeline(ctx: Context, engine: str, pipeline_path: str,
                    package_path: Optional[str],
                    build_target_image: Optional[str],
                    build_base_image: Optional[str],
                    skaffold_cmd: Optional[str], endpoint: Optional[str],
                    iap_client_id: Optional[str], namespace: str,
                    build_image: bool) -> None:
  """Command definition to create a pipeline."""
  # TODO(b/179847638): Delete checks for deprecated flags.
  _check_deprecated_image_build_flags(build_target_image, skaffold_cmd,
                                      package_path)

  # --build-base-image only makes sense when an image build was requested.
  if not build_image and build_base_image is not None:
    sys.exit('--build-base-image used without --build-image. You have to use '
             '--build-image flag to build a container image for the pipeline.')

  # TODO(b/142358865): Add support for container building for Airflow and Beam
  # runners when they support container executors.
  click.echo('Creating pipeline')
  ctx.flags_dict.update({
      labels.ENGINE_FLAG: engine,
      labels.PIPELINE_DSL_PATH: pipeline_path,
      labels.BASE_IMAGE: build_base_image,
      labels.ENDPOINT: endpoint,
      labels.IAP_CLIENT_ID: iap_client_id,
      labels.NAMESPACE: namespace,
      labels.BUILD_IMAGE: build_image,
  })
  handler_factory.create_handler(ctx.flags_dict).create_pipeline()
'--engine', default='auto', type=str, help='Orchestrator for pipelines')
'--pipeline_path',
'--pipeline-path',
required=True,
type=str,
help='Path to Python DSL file')
'--package_path',
'--package-path',
type=str,
default=None,
help='[DEPRECATED] Package path specified in a KubeflowDagRunner instace '
'will be used.')
'--skaffold_cmd',
'--skaffold-cmd',
default=None,
type=str,
help='[DEPRECATED] Skaffold is not used any more. Do not use this flag.')
'--endpoint',
default=None,
type=str,
help='Endpoint of the KFP API service to connect.')
'--iap_client_id',
'--iap-client-id',
default=None,
type=str,
help='Client ID for IAP protected endpoint.')
'-n',
'--namespace',
default='kubeflow',
type=str,
help='Kubernetes namespace to connect to the KFP API.')
'--build_image',
'--build-image',
is_flag=True,
default=False,
help='Build a container image for the pipeline using Dockerfile in the '
'current directory.')
def delete_pipeline(ctx: Context, engine: str, pipeline_name: str,
                    endpoint: str, iap_client_id: str, namespace: str) -> None:
  """Command definition to delete a pipeline."""
  click.echo('Deleting pipeline')
  # Record every CLI flag in the shared context before dispatching.
  ctx.flags_dict.update({
      labels.ENGINE_FLAG: engine,
      labels.PIPELINE_NAME: pipeline_name,
      labels.ENDPOINT: endpoint,
      labels.IAP_CLIENT_ID: iap_client_id,
      labels.NAMESPACE: namespace,
  })
  handler_factory.create_handler(ctx.flags_dict).delete_pipeline()
'--engine', default='auto', type=str, help='orchestrator for pipelines')
'--endpoint',
default=None,
type=str,
help='Endpoint of the KFP API service to connect.')
'--iap_client_id',
'--iap-client-id',
default=None,
type=str,
help='Client ID for IAP protected endpoint.')
'-n',
'--namespace',
default='kubeflow',
type=str,
help='Kubernetes namespace to connect to the KFP API.')
def compile_pipeline(ctx: Context, engine: str, pipeline_path: str,
                     package_path: str) -> None:
  """Command definition to compile a pipeline."""
  # TODO(b/179847638): Delete checks for deprecated flags.
  _check_deprecated_image_build_flags(pipeline_package_path=package_path)
  click.echo('Compiling pipeline')
  ctx.flags_dict.update({
      labels.ENGINE_FLAG: engine,
      labels.PIPELINE_DSL_PATH: pipeline_path,
  })
  handler_factory.create_handler(ctx.flags_dict).compile_pipeline()
'--engine', default='auto', type=str, help='Orchestrator for pipelines')
'--pipeline_name',
'--pipeline-name',
required=True,
type=str,
help='Name of the pipeline')
def pipeline_group() -> None:
  """Entry point for pipeline-related subcommands; intentionally a no-op.

  Presumably registered as a CLI command group (decorator not visible
  here) — the subcommands carry the actual behavior.
  """
  pass
166,542 | import sys
from typing import Optional
import click
from tfx.tools.cli import labels
from tfx.tools.cli.cli_context import Context
from tfx.tools.cli.cli_context import pass_context
from tfx.tools.cli.handler import handler_factory
def _check_deprecated_image_build_flags(build_target_image=None,
skaffold_cmd=None,
pipeline_package_path=None):
"""Checks and exits if deprecated flags were used."""
if build_target_image is not None:
sys.exit(
'[Error] --build-target-image flag was DELETED. You should specify '
'the build target image at the `KubeflowDagRunnerConfig` class '
'instead, and use --build-image flag without argument to build a '
'container image when creating or updating a pipeline.')
if skaffold_cmd is not None:
sys.exit(
'[Error] --skaffold-cmd flag was DELETED. TFX doesn\'t use skaffold '
'any more. You can delete --skaffold-cmd flag and the auto-genrated '
'build.yaml file. You must specify --build-image to trigger an '
'image build when creating or updating a pipeline.')
if pipeline_package_path is not None:
sys.exit(
'[Error] --pipeline-package-path flag was DELETED. You can specify '
'the package location as `output_filename` and `output_dir` when '
'creating a `KubeflowDagRunner` instance. CLI will read the pacakge '
'path specified there.')
def create_pipeline(ctx: Context, engine: str, pipeline_path: str,
                    package_path: Optional[str],
                    build_target_image: Optional[str],
                    build_base_image: Optional[str],
                    skaffold_cmd: Optional[str], endpoint: Optional[str],
                    iap_client_id: Optional[str], namespace: str,
                    build_image: bool) -> None:
  """Command definition to create a pipeline."""
  # TODO(b/179847638): Delete checks for deprecated flags.
  _check_deprecated_image_build_flags(build_target_image, skaffold_cmd,
                                      package_path)

  # --build-base-image only makes sense when an image build was requested.
  if not build_image and build_base_image is not None:
    sys.exit('--build-base-image used without --build-image. You have to use '
             '--build-image flag to build a container image for the pipeline.')

  # TODO(b/142358865): Add support for container building for Airflow and Beam
  # runners when they support container executors.
  click.echo('Creating pipeline')
  ctx.flags_dict.update({
      labels.ENGINE_FLAG: engine,
      labels.PIPELINE_DSL_PATH: pipeline_path,
      labels.BASE_IMAGE: build_base_image,
      labels.ENDPOINT: endpoint,
      labels.IAP_CLIENT_ID: iap_client_id,
      labels.NAMESPACE: namespace,
      labels.BUILD_IMAGE: build_image,
  })
  handler_factory.create_handler(ctx.flags_dict).create_pipeline()
'--engine', default='auto', type=str, help='Orchestrator for pipelines')
'--pipeline_path',
'--pipeline-path',
required=True,
type=str,
help='Path to Python DSL file')
'--package_path',
'--package-path',
type=str,
default=None,
help='[DEPRECATED] Package path specified in a KubeflowDagRunner instace '
'will be used.')
'--skaffold_cmd',
'--skaffold-cmd',
default=None,
type=str,
help='[DEPRECATED] Skaffold is not used any more. Do not use this flag.')
'--endpoint',
default=None,
type=str,
help='Endpoint of the KFP API service to connect.')
'--iap_client_id',
'--iap-client-id',
default=None,
type=str,
help='Client ID for IAP protected endpoint.')
'-n',
'--namespace',
default='kubeflow',
type=str,
help='Kubernetes namespace to connect to the KFP API.')
'--build_image',
'--build-image',
is_flag=True,
default=False,
help='Build a container image for the pipeline using Dockerfile in the '
'current directory.')
def delete_pipeline(ctx: Context, engine: str, pipeline_name: str,
                    endpoint: str, iap_client_id: str, namespace: str) -> None:
  """Command definition to delete a pipeline."""
  click.echo('Deleting pipeline')
  # Record every CLI flag in the shared context before dispatching.
  ctx.flags_dict.update({
      labels.ENGINE_FLAG: engine,
      labels.PIPELINE_NAME: pipeline_name,
      labels.ENDPOINT: endpoint,
      labels.IAP_CLIENT_ID: iap_client_id,
      labels.NAMESPACE: namespace,
  })
  handler_factory.create_handler(ctx.flags_dict).delete_pipeline()
'--engine', default='auto', type=str, help='orchestrator for pipelines')
'--endpoint',
default=None,
type=str,
help='Endpoint of the KFP API service to connect.')
'--iap_client_id',
'--iap-client-id',
default=None,
type=str,
help='Client ID for IAP protected endpoint.')
'-n',
'--namespace',
default='kubeflow',
type=str,
help='Kubernetes namespace to connect to the KFP API.')
def compile_pipeline(ctx: Context, engine: str, pipeline_path: str,
                     package_path: str) -> None:
  """Command definition to compile a pipeline."""
  # TODO(b/179847638): Delete checks for deprecated flags.
  _check_deprecated_image_build_flags(pipeline_package_path=package_path)
  click.echo('Compiling pipeline')
  ctx.flags_dict.update({
      labels.ENGINE_FLAG: engine,
      labels.PIPELINE_DSL_PATH: pipeline_path,
  })
  handler_factory.create_handler(ctx.flags_dict).compile_pipeline()
'--engine', default='auto', type=str, help='Orchestrator for pipelines')
'--pipeline_name',
'--pipeline-name',
required=True,
type=str,
help='Name of the pipeline')
class Context:
  """Shared state passed between all CLI command groups.

  Attributes:
    flags_dict: Mapping of flag names to their parsed values for a command.
  """

  def __init__(self):
    # Starts empty; individual commands populate it before dispatch.
    self.flags_dict = dict()
The provided code snippet includes necessary dependencies for implementing the `create_pipeline` function. Write a Python function `def create_pipeline(ctx: Context, engine: str, pipeline_path: str, package_path: Optional[str], build_target_image: Optional[str], build_base_image: Optional[str], skaffold_cmd: Optional[str], endpoint: Optional[str], iap_client_id: Optional[str], namespace: str, build_image: bool) -> None` to solve the following problem:
Command definition to create a pipeline.
Here is the function:
def create_pipeline(ctx: Context, engine: str, pipeline_path: str,
                    package_path: Optional[str],
                    build_target_image: Optional[str],
                    build_base_image: Optional[str],
                    skaffold_cmd: Optional[str], endpoint: Optional[str],
                    iap_client_id: Optional[str], namespace: str,
                    build_image: bool) -> None:
  """Command definition to create a pipeline."""
  # TODO(b/179847638): Delete checks for deprecated flags.
  _check_deprecated_image_build_flags(build_target_image, skaffold_cmd,
                                      package_path)

  # --build-base-image only makes sense when an image build was requested.
  if not build_image and build_base_image is not None:
    sys.exit('--build-base-image used without --build-image. You have to use '
             '--build-image flag to build a container image for the pipeline.')

  # TODO(b/142358865): Add support for container building for Airflow and Beam
  # runners when they support container executors.
  click.echo('Creating pipeline')
  ctx.flags_dict.update({
      labels.ENGINE_FLAG: engine,
      labels.PIPELINE_DSL_PATH: pipeline_path,
      labels.BASE_IMAGE: build_base_image,
      labels.ENDPOINT: endpoint,
      labels.IAP_CLIENT_ID: iap_client_id,
      labels.NAMESPACE: namespace,
      labels.BUILD_IMAGE: build_image,
  })
  handler_factory.create_handler(ctx.flags_dict).create_pipeline()
166,543 | import sys
from typing import Optional
import click
from tfx.tools.cli import labels
from tfx.tools.cli.cli_context import Context
from tfx.tools.cli.cli_context import pass_context
from tfx.tools.cli.handler import handler_factory
def _check_deprecated_image_build_flags(build_target_image=None,
skaffold_cmd=None,
pipeline_package_path=None):
"""Checks and exits if deprecated flags were used."""
if build_target_image is not None:
sys.exit(
'[Error] --build-target-image flag was DELETED. You should specify '
'the build target image at the `KubeflowDagRunnerConfig` class '
'instead, and use --build-image flag without argument to build a '
'container image when creating or updating a pipeline.')
if skaffold_cmd is not None:
sys.exit(
'[Error] --skaffold-cmd flag was DELETED. TFX doesn\'t use skaffold '
'any more. You can delete --skaffold-cmd flag and the auto-genrated '
'build.yaml file. You must specify --build-image to trigger an '
'image build when creating or updating a pipeline.')
if pipeline_package_path is not None:
sys.exit(
'[Error] --pipeline-package-path flag was DELETED. You can specify '
'the package location as `output_filename` and `output_dir` when '
'creating a `KubeflowDagRunner` instance. CLI will read the pacakge '
'path specified there.')
def create_pipeline(ctx: Context, engine: str, pipeline_path: str,
                    package_path: Optional[str],
                    build_target_image: Optional[str],
                    build_base_image: Optional[str],
                    skaffold_cmd: Optional[str], endpoint: Optional[str],
                    iap_client_id: Optional[str], namespace: str,
                    build_image: bool) -> None:
  """Command definition to create a pipeline."""
  # TODO(b/179847638): Delete checks for deprecated flags.
  _check_deprecated_image_build_flags(build_target_image, skaffold_cmd,
                                      package_path)

  # --build-base-image only makes sense when an image build was requested.
  if not build_image and build_base_image is not None:
    sys.exit('--build-base-image used without --build-image. You have to use '
             '--build-image flag to build a container image for the pipeline.')

  # TODO(b/142358865): Add support for container building for Airflow and Beam
  # runners when they support container executors.
  click.echo('Creating pipeline')
  ctx.flags_dict.update({
      labels.ENGINE_FLAG: engine,
      labels.PIPELINE_DSL_PATH: pipeline_path,
      labels.BASE_IMAGE: build_base_image,
      labels.ENDPOINT: endpoint,
      labels.IAP_CLIENT_ID: iap_client_id,
      labels.NAMESPACE: namespace,
      labels.BUILD_IMAGE: build_image,
  })
  handler_factory.create_handler(ctx.flags_dict).create_pipeline()
'--engine', default='auto', type=str, help='Orchestrator for pipelines')
'--pipeline_path',
'--pipeline-path',
required=True,
type=str,
help='Path to Python DSL file')
'--package_path',
'--package-path',
type=str,
default=None,
help='[DEPRECATED] Package path specified in a KubeflowDagRunner instace '
'will be used.')
'--skaffold_cmd',
'--skaffold-cmd',
default=None,
type=str,
help='[DEPRECATED] Skaffold is not used any more. Do not use this flag.')
'--endpoint',
default=None,
type=str,
help='Endpoint of the KFP API service to connect.')
'--iap_client_id',
'--iap-client-id',
default=None,
type=str,
help='Client ID for IAP protected endpoint.')
'-n',
'--namespace',
default='kubeflow',
type=str,
help='Kubernetes namespace to connect to the KFP API.')
'--build_image',
'--build-image',
is_flag=True,
default=False,
help='Build a container image for the pipeline using Dockerfile in the '
'current directory.')
def delete_pipeline(ctx: Context, engine: str, pipeline_name: str,
                    endpoint: str, iap_client_id: str, namespace: str) -> None:
  """Command definition to delete a pipeline."""
  click.echo('Deleting pipeline')
  # Record every CLI flag in the shared context before dispatching.
  ctx.flags_dict.update({
      labels.ENGINE_FLAG: engine,
      labels.PIPELINE_NAME: pipeline_name,
      labels.ENDPOINT: endpoint,
      labels.IAP_CLIENT_ID: iap_client_id,
      labels.NAMESPACE: namespace,
  })
  handler_factory.create_handler(ctx.flags_dict).delete_pipeline()
'--engine', default='auto', type=str, help='orchestrator for pipelines')
'--endpoint',
default=None,
type=str,
help='Endpoint of the KFP API service to connect.')
'--iap_client_id',
'--iap-client-id',
default=None,
type=str,
help='Client ID for IAP protected endpoint.')
'-n',
'--namespace',
default='kubeflow',
type=str,
help='Kubernetes namespace to connect to the KFP API.')
def compile_pipeline(ctx: Context, engine: str, pipeline_path: str,
                     package_path: str) -> None:
  """Command definition to compile a pipeline."""
  # TODO(b/179847638): Delete checks for deprecated flags.
  _check_deprecated_image_build_flags(pipeline_package_path=package_path)
  click.echo('Compiling pipeline')
  ctx.flags_dict.update({
      labels.ENGINE_FLAG: engine,
      labels.PIPELINE_DSL_PATH: pipeline_path,
  })
  handler_factory.create_handler(ctx.flags_dict).compile_pipeline()
'--engine', default='auto', type=str, help='Orchestrator for pipelines')
'--pipeline_name',
'--pipeline-name',
required=True,
type=str,
help='Name of the pipeline')
class Context:
  """Shared state passed between all CLI command groups.

  Attributes:
    flags_dict: Mapping of flag names to their parsed values for a command.
  """

  def __init__(self):
    # Starts empty; individual commands populate it before dispatch.
    self.flags_dict = dict()
The provided code snippet includes necessary dependencies for implementing the `update_pipeline` function. Write a Python function `def update_pipeline(ctx: Context, engine: str, pipeline_path: str, package_path: Optional[str], skaffold_cmd: Optional[str], endpoint: Optional[str], iap_client_id: Optional[str], namespace: str, build_image: bool) -> None` to solve the following problem:
Command definition to update a pipeline.
Here is the function:
def update_pipeline(ctx: Context, engine: str, pipeline_path: str,
                    package_path: Optional[str], skaffold_cmd: Optional[str],
                    endpoint: Optional[str], iap_client_id: Optional[str],
                    namespace: str, build_image: bool) -> None:
  """Command definition to update a pipeline."""
  # TODO(b/179847638): Delete checks for deprecated flags.
  _check_deprecated_image_build_flags(None, skaffold_cmd, package_path)

  click.echo('Updating pipeline')
  # Record every CLI flag in the shared context before dispatching.
  ctx.flags_dict.update({
      labels.ENGINE_FLAG: engine,
      labels.PIPELINE_DSL_PATH: pipeline_path,
      labels.ENDPOINT: endpoint,
      labels.IAP_CLIENT_ID: iap_client_id,
      labels.NAMESPACE: namespace,
      labels.BUILD_IMAGE: build_image,
  })
  handler_factory.create_handler(ctx.flags_dict).update_pipeline()
166,544 | import sys
from typing import Optional
import click
from tfx.tools.cli import labels
from tfx.tools.cli.cli_context import Context
from tfx.tools.cli.cli_context import pass_context
from tfx.tools.cli.handler import handler_factory
def create_pipeline(ctx: Context, engine: str, pipeline_path: str,
package_path: Optional[str],
build_target_image: Optional[str],
build_base_image: Optional[str],
skaffold_cmd: Optional[str], endpoint: Optional[str],
iap_client_id: Optional[str], namespace: str,
build_image: bool) -> None:
"""Command definition to create a pipeline."""
# TODO(b/179847638): Delete checks for deprecated flags.
_check_deprecated_image_build_flags(build_target_image, skaffold_cmd,
package_path)
if build_base_image is not None and not build_image:
sys.exit('--build-base-image used without --build-image. You have to use '
'--build-image flag to build a container image for the pipeline.')
# TODO(b/142358865): Add support for container building for Airflow and Beam
# runners when they support container executors.
click.echo('Creating pipeline')
ctx.flags_dict[labels.ENGINE_FLAG] = engine
ctx.flags_dict[labels.PIPELINE_DSL_PATH] = pipeline_path
ctx.flags_dict[labels.BASE_IMAGE] = build_base_image
ctx.flags_dict[labels.ENDPOINT] = endpoint
ctx.flags_dict[labels.IAP_CLIENT_ID] = iap_client_id
ctx.flags_dict[labels.NAMESPACE] = namespace
ctx.flags_dict[labels.BUILD_IMAGE] = build_image
handler_factory.create_handler(ctx.flags_dict).create_pipeline()
'--engine', default='auto', type=str, help='Orchestrator for pipelines')
'--pipeline_path',
'--pipeline-path',
required=True,
type=str,
help='Path to Python DSL file')
'--package_path',
'--package-path',
type=str,
default=None,
help='[DEPRECATED] Package path specified in a KubeflowDagRunner instace '
'will be used.')
'--skaffold_cmd',
'--skaffold-cmd',
default=None,
type=str,
help='[DEPRECATED] Skaffold is not used any more. Do not use this flag.')
'--endpoint',
default=None,
type=str,
help='Endpoint of the KFP API service to connect.')
'--iap_client_id',
'--iap-client-id',
default=None,
type=str,
help='Client ID for IAP protected endpoint.')
'-n',
'--namespace',
default='kubeflow',
type=str,
help='Kubernetes namespace to connect to the KFP API.')
'--build_image',
'--build-image',
is_flag=True,
default=False,
help='Build a container image for the pipeline using Dockerfile in the '
'current directory.')
def delete_pipeline(ctx: Context, engine: str, pipeline_name: str,
endpoint: str, iap_client_id: str, namespace: str) -> None:
"""Command definition to delete a pipeline."""
click.echo('Deleting pipeline')
ctx.flags_dict[labels.ENGINE_FLAG] = engine
ctx.flags_dict[labels.PIPELINE_NAME] = pipeline_name
ctx.flags_dict[labels.ENDPOINT] = endpoint
ctx.flags_dict[labels.IAP_CLIENT_ID] = iap_client_id
ctx.flags_dict[labels.NAMESPACE] = namespace
handler_factory.create_handler(ctx.flags_dict).delete_pipeline()
'--engine', default='auto', type=str, help='orchestrator for pipelines')
'--endpoint',
default=None,
type=str,
help='Endpoint of the KFP API service to connect.')
'--iap_client_id',
'--iap-client-id',
default=None,
type=str,
help='Client ID for IAP protected endpoint.')
'-n',
'--namespace',
default='kubeflow',
type=str,
help='Kubernetes namespace to connect to the KFP API.')
def compile_pipeline(ctx: Context, engine: str, pipeline_path: str,
package_path: str) -> None:
"""Command definition to compile a pipeline."""
# TODO(b/179847638): Delete checks for deprecated flags.
_check_deprecated_image_build_flags(pipeline_package_path=package_path)
click.echo('Compiling pipeline')
ctx.flags_dict[labels.ENGINE_FLAG] = engine
ctx.flags_dict[labels.PIPELINE_DSL_PATH] = pipeline_path
handler_factory.create_handler(ctx.flags_dict).compile_pipeline()
'--engine', default='auto', type=str, help='Orchestrator for pipelines')
'--pipeline_name',
'--pipeline-name',
required=True,
type=str,
help='Name of the pipeline')
class Context:
"""Context shared between all command groups.
Attributes :
flags_dict: A dictionary containing the flags of a command.
"""
def __init__(self):
self.flags_dict = {}
The provided code snippet includes necessary dependencies for implementing the `delete_pipeline` function. Write a Python function `def delete_pipeline(ctx: Context, engine: str, pipeline_name: str, endpoint: str, iap_client_id: str, namespace: str) -> None` to solve the following problem:
Command definition to delete a pipeline.
Here is the function:
def delete_pipeline(ctx: Context, engine: str, pipeline_name: str,
endpoint: str, iap_client_id: str, namespace: str) -> None:
"""Command definition to delete a pipeline."""
click.echo('Deleting pipeline')
ctx.flags_dict[labels.ENGINE_FLAG] = engine
ctx.flags_dict[labels.PIPELINE_NAME] = pipeline_name
ctx.flags_dict[labels.ENDPOINT] = endpoint
ctx.flags_dict[labels.IAP_CLIENT_ID] = iap_client_id
ctx.flags_dict[labels.NAMESPACE] = namespace
handler_factory.create_handler(ctx.flags_dict).delete_pipeline() | Command definition to delete a pipeline. |
166,545 | import sys
from typing import Optional
import click
from tfx.tools.cli import labels
from tfx.tools.cli.cli_context import Context
from tfx.tools.cli.cli_context import pass_context
from tfx.tools.cli.handler import handler_factory
def create_pipeline(ctx: Context, engine: str, pipeline_path: str,
package_path: Optional[str],
build_target_image: Optional[str],
build_base_image: Optional[str],
skaffold_cmd: Optional[str], endpoint: Optional[str],
iap_client_id: Optional[str], namespace: str,
build_image: bool) -> None:
"""Command definition to create a pipeline."""
# TODO(b/179847638): Delete checks for deprecated flags.
_check_deprecated_image_build_flags(build_target_image, skaffold_cmd,
package_path)
if build_base_image is not None and not build_image:
sys.exit('--build-base-image used without --build-image. You have to use '
'--build-image flag to build a container image for the pipeline.')
# TODO(b/142358865): Add support for container building for Airflow and Beam
# runners when they support container executors.
click.echo('Creating pipeline')
ctx.flags_dict[labels.ENGINE_FLAG] = engine
ctx.flags_dict[labels.PIPELINE_DSL_PATH] = pipeline_path
ctx.flags_dict[labels.BASE_IMAGE] = build_base_image
ctx.flags_dict[labels.ENDPOINT] = endpoint
ctx.flags_dict[labels.IAP_CLIENT_ID] = iap_client_id
ctx.flags_dict[labels.NAMESPACE] = namespace
ctx.flags_dict[labels.BUILD_IMAGE] = build_image
handler_factory.create_handler(ctx.flags_dict).create_pipeline()
'--engine', default='auto', type=str, help='Orchestrator for pipelines')
'--pipeline_path',
'--pipeline-path',
required=True,
type=str,
help='Path to Python DSL file')
'--package_path',
'--package-path',
type=str,
default=None,
help='[DEPRECATED] Package path specified in a KubeflowDagRunner instace '
'will be used.')
'--skaffold_cmd',
'--skaffold-cmd',
default=None,
type=str,
help='[DEPRECATED] Skaffold is not used any more. Do not use this flag.')
'--endpoint',
default=None,
type=str,
help='Endpoint of the KFP API service to connect.')
'--iap_client_id',
'--iap-client-id',
default=None,
type=str,
help='Client ID for IAP protected endpoint.')
'-n',
'--namespace',
default='kubeflow',
type=str,
help='Kubernetes namespace to connect to the KFP API.')
'--build_image',
'--build-image',
is_flag=True,
default=False,
help='Build a container image for the pipeline using Dockerfile in the '
'current directory.')
def delete_pipeline(ctx: Context, engine: str, pipeline_name: str,
endpoint: str, iap_client_id: str, namespace: str) -> None:
"""Command definition to delete a pipeline."""
click.echo('Deleting pipeline')
ctx.flags_dict[labels.ENGINE_FLAG] = engine
ctx.flags_dict[labels.PIPELINE_NAME] = pipeline_name
ctx.flags_dict[labels.ENDPOINT] = endpoint
ctx.flags_dict[labels.IAP_CLIENT_ID] = iap_client_id
ctx.flags_dict[labels.NAMESPACE] = namespace
handler_factory.create_handler(ctx.flags_dict).delete_pipeline()
'--engine', default='auto', type=str, help='orchestrator for pipelines')
'--endpoint',
default=None,
type=str,
help='Endpoint of the KFP API service to connect.')
'--iap_client_id',
'--iap-client-id',
default=None,
type=str,
help='Client ID for IAP protected endpoint.')
'-n',
'--namespace',
default='kubeflow',
type=str,
help='Kubernetes namespace to connect to the KFP API.')
def compile_pipeline(ctx: Context, engine: str, pipeline_path: str,
package_path: str) -> None:
"""Command definition to compile a pipeline."""
# TODO(b/179847638): Delete checks for deprecated flags.
_check_deprecated_image_build_flags(pipeline_package_path=package_path)
click.echo('Compiling pipeline')
ctx.flags_dict[labels.ENGINE_FLAG] = engine
ctx.flags_dict[labels.PIPELINE_DSL_PATH] = pipeline_path
handler_factory.create_handler(ctx.flags_dict).compile_pipeline()
'--engine', default='auto', type=str, help='Orchestrator for pipelines')
'--pipeline_name',
'--pipeline-name',
required=True,
type=str,
help='Name of the pipeline')
class Context:
"""Context shared between all command groups.
Attributes :
flags_dict: A dictionary containing the flags of a command.
"""
def __init__(self):
self.flags_dict = {}
The provided code snippet includes necessary dependencies for implementing the `list_pipelines` function. Write a Python function `def list_pipelines(ctx: Context, engine: str, endpoint: str, iap_client_id: str, namespace: str) -> None` to solve the following problem:
Command definition to list pipelines.
Here is the function:
def list_pipelines(ctx: Context, engine: str, endpoint: str, iap_client_id: str,
namespace: str) -> None:
"""Command definition to list pipelines."""
click.echo('Listing all pipelines')
ctx.flags_dict[labels.ENGINE_FLAG] = engine
ctx.flags_dict[labels.ENDPOINT] = endpoint
ctx.flags_dict[labels.IAP_CLIENT_ID] = iap_client_id
ctx.flags_dict[labels.NAMESPACE] = namespace
handler_factory.create_handler(ctx.flags_dict).list_pipelines() | Command definition to list pipelines. |
166,546 | import sys
from typing import Optional
import click
from tfx.tools.cli import labels
from tfx.tools.cli.cli_context import Context
from tfx.tools.cli.cli_context import pass_context
from tfx.tools.cli.handler import handler_factory
def _check_deprecated_image_build_flags(build_target_image=None,
skaffold_cmd=None,
pipeline_package_path=None):
"""Checks and exits if deprecated flags were used."""
if build_target_image is not None:
sys.exit(
'[Error] --build-target-image flag was DELETED. You should specify '
'the build target image at the `KubeflowDagRunnerConfig` class '
'instead, and use --build-image flag without argument to build a '
'container image when creating or updating a pipeline.')
if skaffold_cmd is not None:
sys.exit(
'[Error] --skaffold-cmd flag was DELETED. TFX doesn\'t use skaffold '
'any more. You can delete --skaffold-cmd flag and the auto-genrated '
'build.yaml file. You must specify --build-image to trigger an '
'image build when creating or updating a pipeline.')
if pipeline_package_path is not None:
sys.exit(
'[Error] --pipeline-package-path flag was DELETED. You can specify '
'the package location as `output_filename` and `output_dir` when '
'creating a `KubeflowDagRunner` instance. CLI will read the pacakge '
'path specified there.')
def create_pipeline(ctx: Context, engine: str, pipeline_path: str,
package_path: Optional[str],
build_target_image: Optional[str],
build_base_image: Optional[str],
skaffold_cmd: Optional[str], endpoint: Optional[str],
iap_client_id: Optional[str], namespace: str,
build_image: bool) -> None:
"""Command definition to create a pipeline."""
# TODO(b/179847638): Delete checks for deprecated flags.
_check_deprecated_image_build_flags(build_target_image, skaffold_cmd,
package_path)
if build_base_image is not None and not build_image:
sys.exit('--build-base-image used without --build-image. You have to use '
'--build-image flag to build a container image for the pipeline.')
# TODO(b/142358865): Add support for container building for Airflow and Beam
# runners when they support container executors.
click.echo('Creating pipeline')
ctx.flags_dict[labels.ENGINE_FLAG] = engine
ctx.flags_dict[labels.PIPELINE_DSL_PATH] = pipeline_path
ctx.flags_dict[labels.BASE_IMAGE] = build_base_image
ctx.flags_dict[labels.ENDPOINT] = endpoint
ctx.flags_dict[labels.IAP_CLIENT_ID] = iap_client_id
ctx.flags_dict[labels.NAMESPACE] = namespace
ctx.flags_dict[labels.BUILD_IMAGE] = build_image
handler_factory.create_handler(ctx.flags_dict).create_pipeline()
'--engine', default='auto', type=str, help='Orchestrator for pipelines')
'--pipeline_path',
'--pipeline-path',
required=True,
type=str,
help='Path to Python DSL file')
'--package_path',
'--package-path',
type=str,
default=None,
help='[DEPRECATED] Package path specified in a KubeflowDagRunner instace '
'will be used.')
'--skaffold_cmd',
'--skaffold-cmd',
default=None,
type=str,
help='[DEPRECATED] Skaffold is not used any more. Do not use this flag.')
'--endpoint',
default=None,
type=str,
help='Endpoint of the KFP API service to connect.')
'--iap_client_id',
'--iap-client-id',
default=None,
type=str,
help='Client ID for IAP protected endpoint.')
'-n',
'--namespace',
default='kubeflow',
type=str,
help='Kubernetes namespace to connect to the KFP API.')
'--build_image',
'--build-image',
is_flag=True,
default=False,
help='Build a container image for the pipeline using Dockerfile in the '
'current directory.')
def delete_pipeline(ctx: Context, engine: str, pipeline_name: str,
endpoint: str, iap_client_id: str, namespace: str) -> None:
"""Command definition to delete a pipeline."""
click.echo('Deleting pipeline')
ctx.flags_dict[labels.ENGINE_FLAG] = engine
ctx.flags_dict[labels.PIPELINE_NAME] = pipeline_name
ctx.flags_dict[labels.ENDPOINT] = endpoint
ctx.flags_dict[labels.IAP_CLIENT_ID] = iap_client_id
ctx.flags_dict[labels.NAMESPACE] = namespace
handler_factory.create_handler(ctx.flags_dict).delete_pipeline()
'--engine', default='auto', type=str, help='orchestrator for pipelines')
'--endpoint',
default=None,
type=str,
help='Endpoint of the KFP API service to connect.')
'--iap_client_id',
'--iap-client-id',
default=None,
type=str,
help='Client ID for IAP protected endpoint.')
'-n',
'--namespace',
default='kubeflow',
type=str,
help='Kubernetes namespace to connect to the KFP API.')
def compile_pipeline(ctx: Context, engine: str, pipeline_path: str,
package_path: str) -> None:
"""Command definition to compile a pipeline."""
# TODO(b/179847638): Delete checks for deprecated flags.
_check_deprecated_image_build_flags(pipeline_package_path=package_path)
click.echo('Compiling pipeline')
ctx.flags_dict[labels.ENGINE_FLAG] = engine
ctx.flags_dict[labels.PIPELINE_DSL_PATH] = pipeline_path
handler_factory.create_handler(ctx.flags_dict).compile_pipeline()
'--engine', default='auto', type=str, help='Orchestrator for pipelines')
'--pipeline_name',
'--pipeline-name',
required=True,
type=str,
help='Name of the pipeline')
class Context:
"""Context shared between all command groups.
Attributes :
flags_dict: A dictionary containing the flags of a command.
"""
def __init__(self):
self.flags_dict = {}
The provided code snippet includes necessary dependencies for implementing the `compile_pipeline` function. Write a Python function `def compile_pipeline(ctx: Context, engine: str, pipeline_path: str, package_path: str) -> None` to solve the following problem:
Command definition to compile a pipeline.
Here is the function:
def compile_pipeline(ctx: Context, engine: str, pipeline_path: str,
package_path: str) -> None:
"""Command definition to compile a pipeline."""
# TODO(b/179847638): Delete checks for deprecated flags.
_check_deprecated_image_build_flags(pipeline_package_path=package_path)
click.echo('Compiling pipeline')
ctx.flags_dict[labels.ENGINE_FLAG] = engine
ctx.flags_dict[labels.PIPELINE_DSL_PATH] = pipeline_path
handler_factory.create_handler(ctx.flags_dict).compile_pipeline() | Command definition to compile a pipeline. |
166,547 | import sys
from typing import Optional
import click
from tfx.tools.cli import labels
from tfx.tools.cli.cli_context import Context
from tfx.tools.cli.cli_context import pass_context
from tfx.tools.cli.handler import handler_factory
def create_pipeline(ctx: Context, engine: str, pipeline_path: str,
package_path: Optional[str],
build_target_image: Optional[str],
build_base_image: Optional[str],
skaffold_cmd: Optional[str], endpoint: Optional[str],
iap_client_id: Optional[str], namespace: str,
build_image: bool) -> None:
"""Command definition to create a pipeline."""
# TODO(b/179847638): Delete checks for deprecated flags.
_check_deprecated_image_build_flags(build_target_image, skaffold_cmd,
package_path)
if build_base_image is not None and not build_image:
sys.exit('--build-base-image used without --build-image. You have to use '
'--build-image flag to build a container image for the pipeline.')
# TODO(b/142358865): Add support for container building for Airflow and Beam
# runners when they support container executors.
click.echo('Creating pipeline')
ctx.flags_dict[labels.ENGINE_FLAG] = engine
ctx.flags_dict[labels.PIPELINE_DSL_PATH] = pipeline_path
ctx.flags_dict[labels.BASE_IMAGE] = build_base_image
ctx.flags_dict[labels.ENDPOINT] = endpoint
ctx.flags_dict[labels.IAP_CLIENT_ID] = iap_client_id
ctx.flags_dict[labels.NAMESPACE] = namespace
ctx.flags_dict[labels.BUILD_IMAGE] = build_image
handler_factory.create_handler(ctx.flags_dict).create_pipeline()
'--engine', default='auto', type=str, help='Orchestrator for pipelines')
'--pipeline_path',
'--pipeline-path',
required=True,
type=str,
help='Path to Python DSL file')
'--package_path',
'--package-path',
type=str,
default=None,
help='[DEPRECATED] Package path specified in a KubeflowDagRunner instace '
'will be used.')
'--skaffold_cmd',
'--skaffold-cmd',
default=None,
type=str,
help='[DEPRECATED] Skaffold is not used any more. Do not use this flag.')
'--endpoint',
default=None,
type=str,
help='Endpoint of the KFP API service to connect.')
'--iap_client_id',
'--iap-client-id',
default=None,
type=str,
help='Client ID for IAP protected endpoint.')
'-n',
'--namespace',
default='kubeflow',
type=str,
help='Kubernetes namespace to connect to the KFP API.')
'--build_image',
'--build-image',
is_flag=True,
default=False,
help='Build a container image for the pipeline using Dockerfile in the '
'current directory.')
def delete_pipeline(ctx: Context, engine: str, pipeline_name: str,
endpoint: str, iap_client_id: str, namespace: str) -> None:
"""Command definition to delete a pipeline."""
click.echo('Deleting pipeline')
ctx.flags_dict[labels.ENGINE_FLAG] = engine
ctx.flags_dict[labels.PIPELINE_NAME] = pipeline_name
ctx.flags_dict[labels.ENDPOINT] = endpoint
ctx.flags_dict[labels.IAP_CLIENT_ID] = iap_client_id
ctx.flags_dict[labels.NAMESPACE] = namespace
handler_factory.create_handler(ctx.flags_dict).delete_pipeline()
'--engine', default='auto', type=str, help='orchestrator for pipelines')
'--endpoint',
default=None,
type=str,
help='Endpoint of the KFP API service to connect.')
'--iap_client_id',
'--iap-client-id',
default=None,
type=str,
help='Client ID for IAP protected endpoint.')
'-n',
'--namespace',
default='kubeflow',
type=str,
help='Kubernetes namespace to connect to the KFP API.')
def compile_pipeline(ctx: Context, engine: str, pipeline_path: str,
package_path: str) -> None:
"""Command definition to compile a pipeline."""
# TODO(b/179847638): Delete checks for deprecated flags.
_check_deprecated_image_build_flags(pipeline_package_path=package_path)
click.echo('Compiling pipeline')
ctx.flags_dict[labels.ENGINE_FLAG] = engine
ctx.flags_dict[labels.PIPELINE_DSL_PATH] = pipeline_path
handler_factory.create_handler(ctx.flags_dict).compile_pipeline()
'--engine', default='auto', type=str, help='Orchestrator for pipelines')
'--pipeline_name',
'--pipeline-name',
required=True,
type=str,
help='Name of the pipeline')
class Context:
"""Context shared between all command groups.
Attributes :
flags_dict: A dictionary containing the flags of a command.
"""
def __init__(self):
self.flags_dict = {}
The provided code snippet includes necessary dependencies for implementing the `get_schema` function. Write a Python function `def get_schema(ctx: Context, engine: str, pipeline_name: str) -> None` to solve the following problem:
Command definition to infer latest schema.
Here is the function:
def get_schema(ctx: Context, engine: str, pipeline_name: str) -> None:
"""Command definition to infer latest schema."""
click.echo('Getting latest schema.')
ctx.flags_dict[labels.ENGINE_FLAG] = engine
ctx.flags_dict[labels.PIPELINE_NAME] = pipeline_name
handler_factory.create_handler(ctx.flags_dict).get_schema() | Command definition to infer latest schema. |
166,548 | import click
from tfx.tools.cli.commands.pipeline import pipeline_group
from tfx.tools.cli.commands.run import run_group
from tfx.tools.cli.commands.template import template_group
def cli_group():
click.echo('CLI') | null |
166,549 | import functools
import os
import sys
import time
from typing import Any, Dict, Optional
import click
import kfp
from tfx.orchestration.kubeflow import kubeflow_dag_runner
from tfx.tools.cli import labels
from tfx.tools.cli.container_builder import builder
from tfx.tools.cli.handler import base_handler
from tfx.tools.cli.handler import kubeflow_dag_runner_patcher
def create_container_image(image: str, base_image: Optional[str]) -> str:
if image == kubeflow_dag_runner.DEFAULT_KUBEFLOW_TFX_IMAGE:
sys.exit('Default image for KubeflowDagRunner given and used with '
'--build-image flag. If you want to use your custom image, please '
'specify the image name that will be built at the '
'KubeflowDagRunnerConfig. Otherwise, do not use --build-image '
'flag.')
built_image = builder.build(target_image=image, base_image=base_image)
click.echo(f'New container image "{built_image}" was built.')
return built_image | null |
166,550 | import os
import tempfile
import typing
from typing import Any, Callable, MutableMapping, Optional, Type
from tfx.orchestration import pipeline as tfx_pipeline
from tfx.orchestration import tfx_runner
from tfx.orchestration.kubeflow import kubeflow_dag_runner
from tfx.tools.cli.handler import dag_runner_patcher
def _get_temporary_package_filename(pipeline_name: str, directory: str) -> str:
# mkstemp will create and open a file named 'temp_xxxxx.tar.gz'.
fd, path = tempfile.mkstemp('.tar.gz', f'temp_{pipeline_name}', directory)
os.close(fd)
return os.path.basename(path) | null |
166,551 |
def get_source_path(path: str) -> str:
return path | null |
166,552 | from typing import Any, Dict, List, Mapping, MutableMapping, MutableSequence, Sequence, TypeVar
import tfx.types
from tfx.utils import pure_typing_utils
from typing_extensions import ( # pylint: disable=g-multiple-import
TypeGuard, # New in python 3.10
)
is_compatible = pure_typing_utils.is_compatible
_TArtifact = TypeVar('_TArtifact', bound=tfx.types.Artifact)
The provided code snippet includes necessary dependencies for implementing the `is_homogeneous_artifact_list` function. Write a Python function `def is_homogeneous_artifact_list(value: Any) -> TypeGuard[Sequence[_TArtifact]]` to solve the following problem:
Checks value is Sequence[T] where T is subclass of Artifact.
Here is the function:
def is_homogeneous_artifact_list(value: Any) -> TypeGuard[Sequence[_TArtifact]]:
"""Checks value is Sequence[T] where T is subclass of Artifact."""
return (
is_compatible(value, Sequence[tfx.types.Artifact]) and
all(isinstance(v, type(value[0])) for v in value[1:])) | Checks value is Sequence[T] where T is subclass of Artifact. |
166,553 | from typing import Any, Dict, List, Mapping, MutableMapping, MutableSequence, Sequence, TypeVar
import tfx.types
from tfx.utils import pure_typing_utils
from typing_extensions import ( # pylint: disable=g-multiple-import
TypeGuard, # New in python 3.10
)
is_compatible = pure_typing_utils.is_compatible
def is_artifact_list(value: Any) -> TypeGuard[Sequence[tfx.types.Artifact]]:
return is_compatible(value, Sequence[tfx.types.Artifact]) | null |
166,554 | from typing import Any, Dict, List, Mapping, MutableMapping, MutableSequence, Sequence, TypeVar
import tfx.types
from tfx.utils import pure_typing_utils
from typing_extensions import ( # pylint: disable=g-multiple-import
TypeGuard, # New in python 3.10
)
ArtifactMultiMap = MultiMap[str, tfx.types.Artifact]
is_compatible = pure_typing_utils.is_compatible
The provided code snippet includes necessary dependencies for implementing the `is_list_of_artifact_multimap` function. Write a Python function `def is_list_of_artifact_multimap( value, ) -> TypeGuard[Sequence[Mapping[str, Sequence[tfx.types.Artifact]]]]` to solve the following problem:
Checks value is Sequence[Mapping[str, Sequence[Artifact]]] type.
Here is the function:
def is_list_of_artifact_multimap(
value,
) -> TypeGuard[Sequence[Mapping[str, Sequence[tfx.types.Artifact]]]]:
"""Checks value is Sequence[Mapping[str, Sequence[Artifact]]] type."""
return is_compatible(value, Sequence[ArtifactMultiMap]) | Checks value is Sequence[Mapping[str, Sequence[Artifact]]] type. |
166,555 | import copy
import logging
import os
from typing import Any, Dict, Optional
from tfx.dsl.io import fileio
The provided code snippet includes necessary dependencies for implementing the `get_logger` function. Write a Python function `def get_logger(config)` to solve the following problem:
Create and configure a TFX-specific logger. Args: config: LoggingConfig class used to configure logger Returns: A logger that outputs to log_dir/log_file_name. Raises: RuntimeError: if log dir exists as a file.
Here is the function:
def get_logger(config):
"""Create and configure a TFX-specific logger.
Args:
config: LoggingConfig class used to configure logger
Returns:
A logger that outputs to log_dir/log_file_name.
Raises:
RuntimeError: if log dir exists as a file.
"""
log_path = os.path.join(config.log_root, 'tfx.log')
logger = logging.getLogger(log_path)
logger.setLevel(config.log_level)
if not fileio.exists(config.log_root):
fileio.makedirs(config.log_root)
if not fileio.isdir(config.log_root):
raise RuntimeError('Log dir exists as a file: {}'.format(config.log_root))
# Create logfile handler.
fh = logging.FileHandler(log_path)
# Define logmsg format.
formatter = logging.Formatter(
'%(asctime)s - {}:{} (%(filename)s:%(lineno)s) - %(levelname)s: %(message)s'
.format(config.pipeline_name, config.worker_name))
fh.setFormatter(formatter)
# Add handler to logger.
logger.addHandler(fh)
return logger | Create and configure a TFX-specific logger. Args: config: LoggingConfig class used to configure logger Returns: A logger that outputs to log_dir/log_file_name. Raises: RuntimeError: if log dir exists as a file. |
166,556 | import os
import re
import tempfile
from typing import List, TypeVar, Iterable
from tfx.dsl.io import fileio
from google.protobuf import json_format
from google.protobuf import text_format
from google.protobuf.message import Message
The provided code snippet includes necessary dependencies for implementing the `write_tfrecord_file` function. Write a Python function `def write_tfrecord_file(file_name: str, *proto: Message) -> None` to solve the following problem:
Writes a serialized tfrecord to file.
Here is the function:
def write_tfrecord_file(file_name: str, *proto: Message) -> None:
"""Writes a serialized tfrecord to file."""
try:
import tensorflow as tf # pytype: disable=import-error # pylint: disable=g-import-not-at-top
except ModuleNotFoundError as e:
raise Exception(
'TensorFlow must be installed to use this functionality.') from e
fileio.makedirs(os.path.dirname(file_name))
with tf.io.TFRecordWriter(file_name) as writer:
for message in proto:
writer.write(message.SerializeToString()) | Writes a serialized tfrecord to file. |
166,557 | import os
import re
import tempfile
from typing import List, TypeVar, Iterable
from tfx.dsl.io import fileio
from google.protobuf import json_format
from google.protobuf import text_format
from google.protobuf.message import Message
ProtoMessage = TypeVar('ProtoMessage', bound=Message)
The provided code snippet includes necessary dependencies for implementing the `parse_json_file` function. Write a Python function `def parse_json_file(file_name: str, message: ProtoMessage) -> ProtoMessage` to solve the following problem:
Parses a protobuf message from a JSON file and return itself.
Here is the function:
def parse_json_file(file_name: str, message: ProtoMessage) -> ProtoMessage:
"""Parses a protobuf message from a JSON file and return itself."""
contents = fileio.open(file_name).read()
json_format.Parse(contents, message)
return message | Parses a protobuf message from a JSON file and return itself. |
166,558 | import os
import re
import tempfile
from typing import List, TypeVar, Iterable
from tfx.dsl.io import fileio
from google.protobuf import json_format
from google.protobuf import text_format
from google.protobuf.message import Message
The provided code snippet includes necessary dependencies for implementing the `load_csv_column_names` function. Write a Python function `def load_csv_column_names(csv_file: str) -> List[str]` to solve the following problem:
Parse the first line of a csv file as column names.
Here is the function:
def load_csv_column_names(csv_file: str) -> List[str]:
"""Parse the first line of a csv file as column names."""
with fileio.open(csv_file) as f:
return f.readline().strip().split(',') | Parse the first line of a csv file as column names. |
166,559 | import os
import re
import tempfile
from typing import List, TypeVar, Iterable
from tfx.dsl.io import fileio
from google.protobuf import json_format
from google.protobuf import text_format
from google.protobuf.message import Message
The provided code snippet includes necessary dependencies for implementing the `read_string_file` function. Write a Python function `def read_string_file(file_name: str) -> str` to solve the following problem:
Reads a string from a file.
Here is the function:
def read_string_file(file_name: str) -> str:
"""Reads a string from a file."""
if not fileio.exists(file_name):
msg = '{} does not exist'.format(file_name)
raise FileNotFoundError(msg)
return fileio.open(file_name).read() | Reads a string from a file. |
166,560 | import os
import re
import tempfile
from typing import List, TypeVar, Iterable
from tfx.dsl.io import fileio
from google.protobuf import json_format
from google.protobuf import text_format
from google.protobuf.message import Message
The provided code snippet includes necessary dependencies for implementing the `read_bytes_file` function. Write a Python function `def read_bytes_file(file_name: str) -> bytes` to solve the following problem:
Reads bytes from a file.
Here is the function:
def read_bytes_file(file_name: str) -> bytes:
"""Reads bytes from a file."""
if not fileio.exists(file_name):
msg = '{} does not exist'.format(file_name)
raise FileNotFoundError(msg)
return fileio.open(file_name, 'rb').read() | Reads bytes from a file. |
166,561 | import functools
import time
from typing import Type
from absl import logging
The provided code snippet includes necessary dependencies for implementing the `retry` function. Write a Python function `def retry(max_retries: int = 3, delay_seconds: int = 1, expected_exception: Type[Exception] = Exception, ignore_eventual_failure: bool = False)` to solve the following problem:
Function decorator to retry a function automatically. Example: from tfx.utils import retry @retry.retry() def some_fragile_func(): ... If `ignore_eventual_failure` is False, the last expected exception caught will raised from this function. If `ignore_eventual_failure` is True, no exception will raised and will return None. Args: max_retries: number of retries. Total trial count becomes 1 + max_retries. delay_seconds: there will be a predefined delay between each trial. expected_exception: this exception will be regarded as retriable failures. ignore_eventual_failure: See above description. Returns: A decorator for retrying logic.
Here is the function:
def retry(max_retries: int = 3,
delay_seconds: int = 1,
expected_exception: Type[Exception] = Exception,
ignore_eventual_failure: bool = False):
"""Function decorator to retry a function automatically.
Example:
from tfx.utils import retry
@retry.retry()
def some_fragile_func():
...
If `ignore_eventual_failure` is False, the last expected exception caught
will raised from this function. If `ignore_eventual_failure` is True,
no exception will raised and will return None.
Args:
max_retries: number of retries. Total trial count becomes 1 + max_retries.
delay_seconds: there will be a predefined delay between each trial.
expected_exception: this exception will be regarded as retriable failures.
ignore_eventual_failure: See above description.
Returns:
A decorator for retrying logic.
"""
def decorator_retry(func):
@functools.wraps(func)
def with_retry(*args, **kwargs):
last_exception = None
for retry_no in range(max_retries + 1):
if retry_no > 0:
if delay_seconds > 0:
time.sleep(delay_seconds)
logging.info('[Retrying "%s" %d/%d]', func.__name__, retry_no,
max_retries)
try:
return func(*args, **kwargs)
except expected_exception as err: # pylint:disable=broad-except
logging.info('%s', err)
last_exception = err
logging.info('Max number of retries(%d) reached for %s.', max_retries,
func.__name__)
if not ignore_eventual_failure:
raise last_exception
return with_retry
return decorator_retry | Function decorator to retry a function automatically. Example: from tfx.utils import retry @retry.retry() def some_fragile_func(): ... If `ignore_eventual_failure` is False, the last expected exception caught will raised from this function. If `ignore_eventual_failure` is True, no exception will raised and will return None. Args: max_retries: number of retries. Total trial count becomes 1 + max_retries. delay_seconds: there will be a predefined delay between each trial. expected_exception: this exception will be regarded as retriable failures. ignore_eventual_failure: See above description. Returns: A decorator for retrying logic. |
166,562 | import functools
import inspect
import warnings
def _should_warn(func_or_class, warn_once=True):
  """Decide (with side effect) whether a deprecation warning should fire.

  Records the object in the module-level `_PRINTED_WARNING` set when
  `warn_once` is True, so subsequent calls for the same object return False.
  """
  key = id(func_or_class)
  already_warned = key in _PRINTED_WARNING
  if already_warned:
    return False
  if warn_once:
    _PRINTED_WARNING.add(key)
  return True
def _validate_callable(func):
  """Raise ValueError unless `func` exposes a `__call__` attribute."""
  if hasattr(func, '__call__'):
    return
  raise ValueError('%s passed to is not a function for deprecation.' %
                   (func,))
def _call_location(levels=2):
  """Return the caller's "file:line" location `levels` frames up the stack."""
  frame = inspect.currentframe()
  if not frame:
    # Some interpreters do not expose frame objects.
    return '<unknown location>'
  steps = levels
  while steps > 0:
    # Stop at the outermost frame if the stack is shorter than `levels`.
    frame = frame.f_back or frame
    steps -= 1
  return '%s:%s' % (frame.f_code.co_filename, frame.f_lineno)
def warn_deprecated(msg):
  """Emit `msg` as a TfxDeprecationWarning via the warnings machinery."""
  warnings.warn(msg, category=TfxDeprecationWarning)
The provided code snippet includes necessary dependencies for implementing the `deprecated` function. Write a Python function `def deprecated(date, instructions, warn_once=True)` to solve the following problem:
Decorator marking function or method as deprecated. Note: this function does not currently support deprecation of classes. To perform such deprecation, decorate its constructor instead. Args: date: String date at which function will be removed, or None. instructions: Instructions on updating use of deprecated code. warn_once: Whether only one warning should be emitted for multiple calls to deprecated symbol. Returns: Decorated function or method.
Here is the function:
def deprecated(date, instructions, warn_once=True):
"""Decorator marking function or method as deprecated.
Note: this function does not currently support deprecation of classes. To
perform such deprecation, decorate its constructor instead.
Args:
date: String date at which function will be removed, or None.
instructions: Instructions on updating use of deprecated code.
warn_once: Whether only one warning should be emitted for multiple calls to
deprecated symbol.
Returns:
Decorated function or method.
"""
def deprecated_wrapper(func):
_validate_callable(func)
@functools.wraps(func)
def new_func(*args, **kwargs):
if _should_warn(func, warn_once=warn_once):
call_loc = _call_location()
func_name = getattr(func, '__qualname__', func.__name__)
func_module = func.__module__
removed_at = f'after {date}' if date else 'in a future version'
message = (
f'From {call_loc}: {func_name} (from {func_module}) is deprecated '
f'and will be removed {removed_at}. Instructions for updating:\n'
+ instructions)
warn_deprecated(message)
return func(*args, **kwargs)
return new_func
return deprecated_wrapper | Decorator marking function or method as deprecated. Note: this function does not currently support deprecation of classes. To perform such deprecation, decorate its constructor instead. Args: date: String date at which function will be removed, or None. instructions: Instructions on updating use of deprecated code. warn_once: Whether only one warning should be emitted for multiple calls to deprecated symbol. Returns: Decorated function or method. |
166,563 | import functools
import inspect
import warnings
def _should_warn(func_or_class, warn_once=True):
  """Check whether to warn, recording the warning as a side effect.

  Returns False once a warning has been recorded for this object and
  `warn_once` was set on a previous call.
  """
  obj_id = id(func_or_class)
  if obj_id not in _PRINTED_WARNING:
    if warn_once:
      _PRINTED_WARNING.add(obj_id)
    return True
  return False
def _validate_callable(func):
  """Validate that `func` is callable.

  Args:
    func: Object expected to be callable (function, method, or an object
      exposing `__call__`).

  Raises:
    ValueError: If `func` does not expose a `__call__` attribute.
  """
  if not hasattr(func, '__call__'):
    # The original message read "%s passed to is not a function for
    # deprecation.", which dropped the object of "passed to"; make the error
    # grammatical and explicit.
    raise ValueError(
        '%s passed to a deprecation decorator is not a function.' % (func,))
def _call_location(levels=2):
  """Return the source location ("<file>:<line>") `levels` frames up."""
  frame = inspect.currentframe()
  if frame is None:
    return '<unknown location>'
  for _unused in range(levels):
    parent = frame.f_back
    # Clamp at the outermost frame when the stack is shallower than `levels`.
    frame = parent if parent else frame
  return '%s:%s' % (frame.f_code.co_filename, frame.f_lineno)
def _make_alias_docstring(new_name, func_or_class):
  """Build the docstring for a deprecated alias of `func_or_class`.

  The result keeps the original summary line (tagged "(deprecated)"), inserts
  a deprecation notice pointing at `new_name`, and appends the remainder of
  the original docstring, if any.
  """
  doc = func_or_class.__doc__
  if doc:
    doc_lines = doc.split('\n')
    summary = doc_lines[0] + ' (deprecated)'
    body = '\n'.join(doc_lines[1:]).strip()
  else:
    summary = 'DEPRECATED CLASS'
    body = ''
  pieces = [
      summary,
      '',
      ('Warning: THIS CLASS IS DEPRECATED. It will be removed in a future '
       'version.'),
      'Please use %s instead.' % new_name,
  ]
  if body:
    pieces.append('')
    pieces.extend(body.split('\n'))
  return '\n'.join(pieces)
def warn_deprecated(msg):
"""Convenient method to warn TfxDeprecationWarning."""
warnings.warn(msg, TfxDeprecationWarning)
The provided code snippet includes necessary dependencies for implementing the `deprecated_alias` function. Write a Python function `def deprecated_alias(deprecated_name, name, func_or_class, warn_once=True)` to solve the following problem:
Deprecates a symbol in favor of a renamed function or class. Args: deprecated_name: Fully qualified name of deprecated symbol. name: New symbol name. func_or_class: Non-deprecated function or class, to be used as alias. warn_once: Whether only one warning should be emitted for multiple calls to deprecated symbol. Returns: Decorated function or method.
Here is the function:
def deprecated_alias(deprecated_name, name, func_or_class, warn_once=True):
"""Deprecates a symbol in favor of a renamed function or class.
Args:
deprecated_name: Fully qualified name of deprecated symbol.
name: New symbol name.
func_or_class: Non-deprecated function or class, to be used as alias.
warn_once: Whether only one warning should be emitted for multiple calls to
deprecated symbol.
Returns:
Decorated function or method.
"""
if inspect.isclass(func_or_class):
new_doc = _make_alias_docstring(name, func_or_class)
class _NewDeprecatedClass(func_or_class): # pylint: disable=empty-docstring
__doc__ = new_doc
__name__ = func_or_class.__name__
__module__ = _call_location(levels=3)
# Marker so that instrospection can determine that this is a deprecated
# class wrapper.
_TFX_DEPRECATED_CLASS = True
@functools.wraps(func_or_class.__init__)
def __init__(self, *args, **kwargs):
_NewDeprecatedClass.__init__.__doc__ = func_or_class.__init__.__doc__
if _should_warn(_NewDeprecatedClass.__init__, warn_once=warn_once):
call_loc = _call_location()
warn_deprecated(
f'From {call_loc}: The name {deprecated_name} is deprecated. '
f'Please use {name} instead.')
super().__init__(*args, **kwargs)
return _NewDeprecatedClass
else:
_validate_callable(func_or_class)
@functools.wraps(func_or_class)
def new_func(*args, **kwargs): # pylint: disable=empty-docstring
if _should_warn(new_func, warn_once=warn_once):
call_loc = _call_location()
warn_deprecated(
f'From {call_loc}: The name {deprecated_name} is deprecated. '
f'Please use {name} instead.')
return func_or_class(*args, **kwargs)
return new_func | Deprecates a symbol in favor of a renamed function or class. Args: deprecated_name: Fully qualified name of deprecated symbol. name: New symbol name. func_or_class: Non-deprecated function or class, to be used as alias. warn_once: Whether only one warning should be emitted for multiple calls to deprecated symbol. Returns: Decorated function or method. |
166,564 |
def do_not_doc_in_subclasses(obj):
return obj | null |
166,565 |
def do_not_doc_inheritable(obj):
return obj | null |
166,566 |
def do_not_generate_docs(obj):
return obj | null |
166,567 | EXTRA_DOCS = dict()
The provided code snippet includes necessary dependencies for implementing the `documented` function. Write a Python function `def documented(obj, doc)` to solve the following problem:
Adds a docstring to typealias by overriding the `__doc__` attribute. Note: Overriding `__doc__` is only possible after python 3.7. Args: obj: Typealias object that needs to be documented. doc: Docstring of the typealias. It should follow the standard pystyle docstring rules. Returns: Documented variables.
Here is the function:
def documented(obj, doc):
"""Adds a docstring to typealias by overriding the `__doc__` attribute.
Note: Overriding `__doc__` is only possible after python 3.7.
Args:
obj: Typealias object that needs to be documented.
doc: Docstring of the typealias. It should follow the standard pystyle
docstring rules.
Returns:
Documented variables.
"""
if isinstance(obj, int) or obj in [(), None, ""]:
raise ValueError(f"Can't add docs to singletons: `{obj}`.")
try:
obj.__doc__ = doc
except AttributeError:
EXTRA_DOCS[id(obj)] = doc
return obj | Adds a docstring to typealias by overriding the `__doc__` attribute. Note: Overriding `__doc__` is only possible after python 3.7. Args: obj: Typealias object that needs to be documented. doc: Docstring of the typealias. It should follow the standard pystyle docstring rules. Returns: Documented variables. |
166,568 | import subprocess
import docker
The provided code snippet includes necessary dependencies for implementing the `delete_image` function. Write a Python function `def delete_image(name: str, remote: bool = True)` to solve the following problem:
Delete container image in local and remote registry.
Here is the function:
def delete_image(name: str, remote: bool = True):
"""Delete container image in local and remote registry."""
client = docker.from_env()
# List all images including un-tagged images and delete all of them.
images = client.images.list(name)
# Sort list to delete children images first.
images.sort(key=lambda image: image.attrs['Created'], reverse=True)
for image in images:
client.images.remove(image.id, force=True)
if remote:
# NOTE: RepoDigest != id. Use repo digests when deleting remote images.
remote_image = image.attrs['RepoDigests'][0]
subprocess.check_output([
'gcloud', 'container', 'images', 'delete', remote_image, '--quiet',
'--force-delete-tags'
])
client.close() | Delete container image in local and remote registry. |
166,569 |
def generate_monitoring_metrics(
unused_test_stats_split,
unused_baseline_stats_split,
unused_split_pair,
unused_span,
unused_artifact,
) -> None:
return | null |
166,570 | import os
from typing import Optional, Tuple
The provided code snippet includes necessary dependencies for implementing the `make_model_path` function. Write a Python function `def make_model_path(model_base_path: str, model_name: str, version: int) -> str` to solve the following problem:
Make a TFS-flavored model path. Args: model_base_path: A base path containing the directory of model_name. model_name: A name of the model. version: An integer version of the model. Returns: `{model_base_path}/{model_name}/{version}`.
Here is the function:
def make_model_path(model_base_path: str, model_name: str,
version: int) -> str:
"""Make a TFS-flavored model path.
Args:
model_base_path: A base path containing the directory of model_name.
model_name: A name of the model.
version: An integer version of the model.
Returns:
`{model_base_path}/{model_name}/{version}`.
"""
return os.path.join(model_base_path, model_name, str(version)) | Make a TFS-flavored model path. Args: model_base_path: A base path containing the directory of model_name. model_name: A name of the model. version: An integer version of the model. Returns: `{model_base_path}/{model_name}/{version}`. |
166,571 | import os
from typing import Optional, Tuple
def parse_model_path(
    model_path: str,
    expected_model_name: Optional[str] = None) -> Tuple[str, str, int]:
  """Split a TFS-flavored model path into its parts.

  Args:
    model_path: A path of the form `{model_base_path}/{model_name}/{version}`.
    expected_model_name: If given, the parsed model name must equal this value.

  Raises:
    ValueError: If `model_path` is not TFS-flavored, or the parsed model name
      does not match `expected_model_name`.

  Returns:
    Tuple of (model_base_path, model_name, version).
  """
  head, tail = os.path.split(model_path)
  if not head:
    raise ValueError('model_path is too short ({})'.format(model_path))
  if not tail.isdigit():
    raise ValueError('No version segment ({})'.format(model_path))
  model_base_path, model_name = os.path.split(head)
  if expected_model_name is not None and model_name != expected_model_name:
    raise ValueError('model_name does not match (expected={}, actual={})'
                     .format(expected_model_name, model_path))
  return model_base_path, model_name, int(tail)
The provided code snippet includes necessary dependencies for implementing the `parse_model_base_path` function. Write a Python function `def parse_model_base_path(model_path: str) -> str` to solve the following problem:
Parse model_base_path from the TFS-flavored model path. Args: model_path: A TFS-flavored model path. Raises: ValueError: If model path is invalid (not TFS-flavored). Returns: model_base_path as defined from the module docstring.
Here is the function:
def parse_model_base_path(model_path: str) -> str:
"""Parse model_base_path from the TFS-flavored model path.
Args:
model_path: A TFS-flavored model path.
Raises:
ValueError: If model path is invalid (not TFS-flavored).
Returns:
model_base_path as defined from the module docstring.
"""
return parse_model_path(model_path)[0] | Parse model_base_path from the TFS-flavored model path. Args: model_path: A TFS-flavored model path. Raises: ValueError: If model path is invalid (not TFS-flavored). Returns: model_base_path as defined from the module docstring. |
166,572 | import os
import absl
from tfx.dsl.io import fileio
from tfx.types import artifact
from tfx.types import artifact_utils
from tfx.types import standard_artifacts
from tfx.utils import io_utils
from tfx.utils import path_constants
def eval_model_dir(output_uri: str, is_old_artifact: bool = False) -> str:
  """Returns directory for exported model for evaluation purpose."""
  # Old-style artifacts used a different leaf directory name.
  sub_dir = (_OLD_EVAL_MODEL_DIR if is_old_artifact
             else path_constants.EVAL_MODEL_DIR)
  return os.path.join(output_uri, sub_dir)
def serving_model_path(output_uri: str, is_old_artifact: bool = False) -> str:
  """Returns path for exported serving model.

  Args:
    output_uri: URI of the Model artifact's output directory.
    is_old_artifact: Whether the artifact uses the old directory layout.

  Returns:
    Path to the directory containing the serving SavedModel.
  """
  model_dir = serving_model_dir(output_uri, is_old_artifact)
  export_dir = os.path.join(model_dir, 'export')
  if fileio.exists(export_dir):
    # Estimator-based exports nest the SavedModel two single-child directory
    # levels deeper: <model_dir>/export/<exporter>/<timestamp>. Resolve
    # through both via get_only_uri_in_dir. This layout is deprecated.
    # TODO(b/160795287): Deprecate estimator based executor.
    absl.logging.warning(
        'Support for estimator-based executor and model export'
        ' will be deprecated soon. Please use export structure '
        '<ModelExportPath>/serving_model_dir/saved_model.pb"')
    model_dir = io_utils.get_only_uri_in_dir(export_dir)
    return io_utils.get_only_uri_in_dir(model_dir)
  else:
    # If dir doesn't match estimator structure, use serving model root directly.
    return model_dir
The provided code snippet includes necessary dependencies for implementing the `eval_model_path` function. Write a Python function `def eval_model_path(output_uri: str, is_old_artifact: bool = False) -> str` to solve the following problem:
Returns final path to exported model for evaluation purpose.
Here is the function:
def eval_model_path(output_uri: str, is_old_artifact: bool = False) -> str:
"""Returns final path to exported model for evaluation purpose."""
model_dir = eval_model_dir(output_uri, is_old_artifact)
model_file = os.path.join(model_dir, 'saved_model.pb')
if fileio.exists(model_file):
return model_dir
elif fileio.exists(model_dir):
# TODO(b/160795287): Deprecate estimator based executor.
absl.logging.warning('Support for estimator-based executor and model'
' export will be deprecated soon. Please use'
' export structure '
'<ModelExportPath>/eval_model_dir/saved_model.pb"')
return io_utils.get_only_uri_in_dir(model_dir)
else:
# If eval model doesn't exist, use serving model for eval.
return serving_model_path(output_uri, is_old_artifact) | Returns final path to exported model for evaluation purpose. |
166,573 | import os
import absl
from tfx.dsl.io import fileio
from tfx.types import artifact
from tfx.types import artifact_utils
from tfx.types import standard_artifacts
from tfx.utils import io_utils
from tfx.utils import path_constants
def eval_model_dir(output_uri: str, is_old_artifact: bool = False) -> str:
  """Returns directory for exported model for evaluation purpose."""
  if not is_old_artifact:
    return os.path.join(output_uri, path_constants.EVAL_MODEL_DIR)
  # Old-style artifacts used a different leaf directory name.
  return os.path.join(output_uri, _OLD_EVAL_MODEL_DIR)
def serving_model_dir(output_uri: str, is_old_artifact: bool = False) -> str:
  """Returns directory for exported model for serving purpose."""
  leaf = (_OLD_SERVING_MODEL_DIR if is_old_artifact
          else path_constants.SERVING_MODEL_DIR)
  return os.path.join(output_uri, leaf)
The provided code snippet includes necessary dependencies for implementing the `get_model_dir_by_type` function. Write a Python function `def get_model_dir_by_type(output_uri: str, model_type: str, is_old_artifact: bool = False) -> str` to solve the following problem:
Returns directory for exported model depending on model_type.
Here is the function:
def get_model_dir_by_type(output_uri: str,
model_type: str,
is_old_artifact: bool = False) -> str:
"""Returns directly for exported model depending on model_type."""
if model_type == path_constants.TFMA_EVAL:
return eval_model_dir(output_uri, is_old_artifact)
else:
return serving_model_dir(output_uri, is_old_artifact) | Returns directly for exported model depending on model_type. |
166,574 | import os
import absl
from tfx.dsl.io import fileio
from tfx.types import artifact
from tfx.types import artifact_utils
from tfx.types import standard_artifacts
from tfx.utils import io_utils
from tfx.utils import path_constants
The provided code snippet includes necessary dependencies for implementing the `stamped_model_path` function. Write a Python function `def stamped_model_path(output_uri: str) -> str` to solve the following problem:
Returns path for the stamped model.
Here is the function:
def stamped_model_path(output_uri: str) -> str:
"""Returns path for the stamped model."""
return os.path.join(output_uri, path_constants.STAMPED_MODEL_DIR) | Returns path for the stamped model. |
166,575 | import os
import absl
from tfx.dsl.io import fileio
from tfx.types import artifact
from tfx.types import artifact_utils
from tfx.types import standard_artifacts
from tfx.utils import io_utils
from tfx.utils import path_constants
The provided code snippet includes necessary dependencies for implementing the `warmup_file_path` function. Write a Python function `def warmup_file_path(saved_model_path: str) -> str` to solve the following problem:
Returns SavedModel Warmup file path. See https://www.tensorflow.org/tfx/serving/saved_model_warmup. This is a lexical operation, and does not guarantee the path is valid. Args: saved_model_path: A POSIX path to the TensorFlow SavedModel. Returns: A POSIX path to the SavedModel Warmup file.
Here is the function:
def warmup_file_path(saved_model_path: str) -> str:
"""Returns SavedModel Warmup file path.
See https://www.tensorflow.org/tfx/serving/saved_model_warmup.
This is a lexical operation, and does not guarantee the path is valid.
Args:
saved_model_path: A POSIX path to the TensorFlow SavedModel.
Returns:
A POSIX path to the SavedModel Warmup file.
"""
return os.path.join(
saved_model_path,
'assets.extra',
'tf_serving_warmup_requests') | Returns SavedModel Warmup file path. See https://www.tensorflow.org/tfx/serving/saved_model_warmup. This is a lexical operation, and does not guarantee the path is valid. Args: saved_model_path: A POSIX path to the TensorFlow SavedModel. Returns: A POSIX path to the SavedModel Warmup file. |
166,576 | import contextlib
import functools
import re
import sys
import threading
from typing import Dict, List, Any, Callable
from absl import logging
from googleapiclient import http
from tfx import version
def make_labels_dict() -> Dict[str, str]:
  """Get all registered and system generated labels as a dict.

  Returns:
    All registered and system generated labels as a dict, with every value
    normalized via `_normalize_label`.
  """
  labels = {
      _LABEL_TFX_VERSION:
          version.__version__,
      _LABEL_TFX_PY_VERSION:
          '%d.%d' % (sys.version_info.major, sys.version_info.minor),
  }
  # Thread-local registrations override the system-generated defaults.
  labels.update(getattr(_thread_local_labels_state, 'dictionary', {}))
  # Only first-party tfx component's executor telemetry will be collected.
  # All other executors will be recorded as `third_party_executor`.
  executor = labels.get(LABEL_TFX_EXECUTOR)
  if executor and not executor.startswith('tfx.'):
    labels[LABEL_TFX_EXECUTOR] = 'third_party_executor'
  return {key: _normalize_label(value) for key, value in labels.items()}
The provided code snippet includes necessary dependencies for implementing the `make_beam_labels_args` function. Write a Python function `def make_beam_labels_args() -> List[str]` to solve the following problem:
Make Beam arguments for common labels used in TFX pipelines. Returns: New Beam pipeline args with labels.
Here is the function:
def make_beam_labels_args() -> List[str]:
"""Make Beam arguments for common labels used in TFX pipelines.
Returns:
New Beam pipeline args with labels.
"""
labels = make_labels_dict()
# See following file for reference to the '--labels ' flag.
# https://github.com/apache/beam/blob/master/sdks/python/apache_beam/options/pipeline_options.py
result = []
for k in sorted(labels):
result.extend(['--labels', '%s=%s' % (k, labels[k])])
return result | Make Beam arguments for common labels used in TFX pipelines. Returns: New Beam pipeline args with labels. |
166,577 | import contextlib
import functools
import re
import sys
import threading
from typing import Dict, List, Any, Callable
from absl import logging
from googleapiclient import http
from tfx import version
def noop_telemetry(
event_metric: Any
) -> Callable[[Callable[..., Any]], Callable[..., Any]]:
del event_metric
def instantiated_decorator(function):
@functools.wraps(function)
def wrapper(*args, **kwargs):
return function(*args, **kwargs)
return wrapper
return instantiated_decorator | null |
166,578 | import os
import shutil
import subprocess
import sys
import tempfile
from typing import List
import absl
from tfx import dependencies
from tfx import version
from tfx.dsl.io import fileio
from tfx.utils import io_utils
def build_ephemeral_package() -> str:
  """Repackage current installation of TFX into a tfx_ephemeral sdist.

  Copies the installed `tfx` package tree into a temp directory, generates a
  minimal setup.py from the module-level `_ephemeral_setup_file` template, and
  runs `python setup.py sdist` in a subprocess.

  Returns:
    Path to ephemeral sdist package.

  Raises:
    RuntimeError: if the 'tfx' directory cannot be located in this file's
      path, or if the dist directory has zero or multiple files.
  """
  tmp_dir = os.path.join(tempfile.mkdtemp(), 'build', 'tfx')
  # Find the last directory named 'tfx' in this file's path and package it.
  path_split = __file__.split(os.path.sep)
  last_index = -1
  for i in range(len(path_split)):
    if path_split[i] == 'tfx':
      last_index = i
  if last_index < 0:
    raise RuntimeError('Cannot locate directory \'tfx\' in the path %s' %
                       __file__)
  tfx_root_dir = os.path.sep.join(path_split[0:last_index + 1])
  absl.logging.info('Copying all content from install dir %s to temp dir %s',
                    tfx_root_dir, tmp_dir)
  shutil.copytree(tfx_root_dir, os.path.join(tmp_dir, 'tfx'))
  # Source directory default permission is 0555 but we need to be able to create
  # new setup.py file.
  os.chmod(tmp_dir, 0o720)
  setup_file = os.path.join(tmp_dir, 'setup.py')
  absl.logging.info('Generating a temp setup file at %s', setup_file)
  install_requires = dependencies.make_required_install_packages()
  io_utils.write_string_file(
      setup_file,
      _ephemeral_setup_file.format(
          version=version.__version__, install_requires=install_requires))
  # Create the package
  # NOTE(review): os.chdir is process-global; this is not safe if other
  # threads depend on the working directory — confirm single-threaded use.
  curdir = os.getcwd()
  os.chdir(tmp_dir)
  temp_log = os.path.join(tmp_dir, 'setup.log')
  with open(temp_log, 'w') as f:
    absl.logging.info('Creating temporary sdist package, logs available at %s',
                      temp_log)
    cmd = [sys.executable, setup_file, 'sdist']
    # stdout/stderr go to the log file; a non-zero exit status is not checked
    # here — failure surfaces later as an empty dist directory.
    subprocess.call(cmd, stdout=f, stderr=f)
  os.chdir(curdir)
  # Return the package dir+filename
  dist_dir = os.path.join(tmp_dir, 'dist')
  files = fileio.listdir(dist_dir)
  if not files:
    raise RuntimeError('Found no package files in %s' % dist_dir)
  elif len(files) > 1:
    raise RuntimeError('Found multiple package files in %s' % dist_dir)
  return os.path.join(dist_dir, files[0])
The provided code snippet includes necessary dependencies for implementing the `make_beam_dependency_flags` function. Write a Python function `def make_beam_dependency_flags(beam_pipeline_args: List[str]) -> List[str]` to solve the following problem:
Make beam arguments for TFX python dependencies, if latter was not set. When TFX executors are used with non-local beam runners (Dataflow, Flink, etc) the remote runner needs to have access to TFX executors. This function acts as a helper to provide TFX source package to Beam if user does not provide that through Beam pipeline args. Args: beam_pipeline_args: original Beam pipeline args. Returns: updated Beam pipeline args with TFX dependencies added.
Here is the function:
def make_beam_dependency_flags(beam_pipeline_args: List[str]) -> List[str]:
"""Make beam arguments for TFX python dependencies, if latter was not set.
When TFX executors are used with non-local beam runners (Dataflow, Flink, etc)
the remote runner needs to have access to TFX executors.
This function acts as a helper to provide TFX source package to Beam if user
does not provide that through Beam pipeline args.
Args:
beam_pipeline_args: original Beam pipeline args.
Returns:
updated Beam pipeline args with TFX dependencies added.
"""
# TODO(b/176857256): Change guidance message once "ml-pipelines-sdk" extra
# package specifiers are available.
try:
import apache_beam as beam # pylint: disable=g-import-not-at-top
except ModuleNotFoundError as e:
raise Exception(
'Apache Beam must be installed to use this functionality.') from e
pipeline_options = beam.options.pipeline_options.PipelineOptions(
flags=beam_pipeline_args)
all_options = pipeline_options.get_all_options()
for flag_name in [
'extra_packages',
'setup_file',
'requirements_file',
'worker_harness_container_image',
'sdk_container_image',
]:
if all_options.get(flag_name):
absl.logging.info('Nonempty beam arg %s already includes dependency',
flag_name)
return beam_pipeline_args
absl.logging.info('Attempting to infer TFX Python dependency for beam')
dependency_flags = []
sdist_file = build_ephemeral_package()
absl.logging.info('Added --extra_package=%s to beam args', sdist_file)
dependency_flags.append('--extra_package=%s' % sdist_file)
return beam_pipeline_args + dependency_flags | Make beam arguments for TFX python dependencies, if latter was not set. When TFX executors are used with non-local beam runners (Dataflow, Flink, etc) the remote runner needs to have access to TFX executors. This function acts as a helper to provide TFX source package to Beam if user does not provide that through Beam pipeline args. Args: beam_pipeline_args: original Beam pipeline args. Returns: updated Beam pipeline args with TFX dependencies added. |
166,579 | from tfx.utils import io_utils
from tensorflow_metadata.proto.v0 import anomalies_pb2
The provided code snippet includes necessary dependencies for implementing the `write_anomalies` function. Write a Python function `def write_anomalies( filepath: str, anomalies: anomalies_pb2.Anomalies, ) -> None` to solve the following problem:
Writes Anomalies to a binary proto file.
Here is the function:
def write_anomalies(
filepath: str,
anomalies: anomalies_pb2.Anomalies,
) -> None:
"""Writes Anomalies to a binary proto file."""
io_utils.write_bytes_file(
filepath, anomalies.SerializeToString()
) | Writes Anomalies to a binary proto file. |
166,580 | from typing import Dict, Iterable, List, Union
from tfx import types
from tfx.types import channel_utils
from tfx.utils import deprecation_utils
class Channel(types.Channel):
pass
None, 'tfx.utils.channel.as_channel has been renamed to '
'tfx.types.channel_utils.as_channel as of TFX 0.14.0.')
def as_channel(source: Union[Channel, Iterable[types.Artifact]]) -> Channel:
return channel_utils.as_channel(source) | null |
166,581 | from typing import Dict, Iterable, List, Union
from tfx import types
from tfx.types import channel_utils
from tfx.utils import deprecation_utils
class Channel(types.Channel):
pass
None, 'tfx.utils.channel.as_channel has been renamed to '
'tfx.types.channel_utils.as_channel as of TFX 0.14.0.')
def unwrap_channel_dict(
channel_dict: Dict[str, Channel]) -> Dict[str, List[types.Artifact]]:
return channel_utils.unwrap_channel_dict(channel_dict) | null |
166,582 | import abc
from typing import Any
The provided code snippet includes necessary dependencies for implementing the `abstract_property` function. Write a Python function `def abstract_property() -> Any` to solve the following problem:
Returns an abstract property for use in an ABC abstract class.
Here is the function:
def abstract_property() -> Any:
"""Returns an abstract property for use in an ABC abstract class."""
return abc.abstractmethod(lambda: None) | Returns an abstract property for use in an ABC abstract class. |
166,583 | import datetime
import enum
import os
import re
import time
from typing import Callable, Dict, List, Optional
from absl import logging
from kubernetes import client as k8s_client
from kubernetes import config as k8s_config
KFP_POD_NAME = 'KFP_POD_NAME'
KFP_NAMESPACE = 'KFP_NAMESPACE'
def is_inside_cluster() -> bool:
"""Whether current running environment is inside the kubernetes cluster."""
return _factory.inside_cluster
The provided code snippet includes necessary dependencies for implementing the `is_inside_kfp` function. Write a Python function `def is_inside_kfp() -> bool` to solve the following problem:
Whether current running environment is inside the KFP runtime.
Here is the function:
def is_inside_kfp() -> bool:
"""Whether current running environment is inside the KFP runtime."""
return (
is_inside_cluster()
and KFP_POD_NAME in os.environ
and KFP_NAMESPACE in os.environ
) | Whether current running environment is inside the KFP runtime. |
166,584 | import datetime
import enum
import os
import re
import time
from typing import Callable, Dict, List, Optional
from absl import logging
from kubernetes import client as k8s_client
from kubernetes import config as k8s_config
KFP_NAMESPACE = 'KFP_NAMESPACE'
The provided code snippet includes necessary dependencies for implementing the `get_kfp_namespace` function. Write a Python function `def get_kfp_namespace() -> str` to solve the following problem:
Get kubernetes namespace for the KFP. Raises: RuntimeError: If KFP pod cannot be determined from the environment, i.e. this program is not running inside the KFP. Returns: The namespace of the KFP app, to which the pod this program is running on belongs.
Here is the function:
def get_kfp_namespace() -> str:
    """Get kubernetes namespace for the KFP.

    Raises:
      RuntimeError: If KFP pod cannot be determined from the environment, i.e.
        this program is not running inside the KFP.

    Returns:
      The namespace of the KFP app, to which the pod this program is running on
      belongs.
    """
    try:
        return os.environ[KFP_NAMESPACE]
    except KeyError as e:
        # Chain the original KeyError so tracebacks show which variable was
        # missing (PEP 3134); the original raise discarded that link.
        raise RuntimeError(
            'Cannot determine KFP namespace from the environment.') from e
166,585 | import datetime
import enum
import os
import re
import time
from typing import Callable, Dict, List, Optional
from absl import logging
from kubernetes import client as k8s_client
from kubernetes import config as k8s_config
KFP_POD_NAME = 'KFP_POD_NAME'
KFP_NAMESPACE = 'KFP_NAMESPACE'
The provided code snippet includes necessary dependencies for implementing the `get_current_kfp_pod` function. Write a Python function `def get_current_kfp_pod(client: k8s_client.CoreV1Api) -> k8s_client.V1Pod` to solve the following problem:
Get manifest of the KFP pod in which this program is running. Args: client: A kubernetes CoreV1Api client. Raises: RuntimeError: If KFP pod cannot be determined from the environment, i.e. this program is not running inside the KFP. Returns: The manifest of the pod this program is running on.
Here is the function:
def get_current_kfp_pod(client: k8s_client.CoreV1Api) -> k8s_client.V1Pod:
    """Get manifest of the KFP pod in which this program is running.

    Args:
      client: A kubernetes CoreV1Api client.

    Raises:
      RuntimeError: If KFP pod cannot be determined from the environment, i.e.
        this program is not running inside the KFP.

    Returns:
      The manifest of the pod this program is running on.
    """
    try:
        namespace = os.environ[KFP_NAMESPACE]
        pod_name = os.environ[KFP_POD_NAME]
        return client.read_namespaced_pod(name=pod_name, namespace=namespace)
    except KeyError as e:
        # Chain the KeyError so the traceback names the missing env variable.
        raise RuntimeError('Cannot determine KFP pod from the environment.') from e
166,586 | from typing import Any, Dict, Iterator, TypeVar, Optional
from google.protobuf import any_pb2
from google.protobuf import descriptor_pb2
from google.protobuf import descriptor as descriptor_lib
from google.protobuf import descriptor_pool
from google.protobuf import json_format
from google.protobuf import message
from google.protobuf import message_factory
The provided code snippet includes necessary dependencies for implementing the `proto_to_dict` function. Write a Python function `def proto_to_dict(proto: message.Message) -> Dict[str, Any]` to solve the following problem:
Simple JSON Formatter wrapper for consistent formatting.
Here is the function:
def proto_to_dict(proto: message.Message) -> Dict[str, Any]:
    """Simple JSON Formatter wrapper for consistent formatting.

    Args:
      proto: Any protobuf message instance.

    Returns:
      A dict representation of the message, keeping original (snake_case)
      proto field names rather than lowerCamelCase.
    """
    return json_format.MessageToDict(
        message=proto, preserving_proto_field_name=True)
166,587 | from typing import Any, Dict, Iterator, TypeVar, Optional
from google.protobuf import any_pb2
from google.protobuf import descriptor_pb2
from google.protobuf import descriptor as descriptor_lib
from google.protobuf import descriptor_pool
from google.protobuf import json_format
from google.protobuf import message
from google.protobuf import message_factory
ProtoMessage = TypeVar('ProtoMessage', bound=message.Message)
The provided code snippet includes necessary dependencies for implementing the `dict_to_proto` function. Write a Python function `def dict_to_proto(json_dict: Dict[Any, Any], proto: ProtoMessage) -> ProtoMessage` to solve the following problem:
Simple JSON Parser wrapper for consistent parsing.
Here is the function:
def dict_to_proto(json_dict: Dict[Any, Any],
                  proto: ProtoMessage) -> ProtoMessage:
    """Simple JSON Parser wrapper for consistent parsing.

    Args:
      json_dict: A dict in the JSON mapping of the message.
      proto: The message instance to populate (mutated in place).

    Returns:
      The populated `proto`. Unknown fields in the dict are ignored rather
      than raising, so newer payloads parse against older message types.
    """
    return json_format.ParseDict(json_dict, proto, ignore_unknown_fields=True)
166,588 | import abc
import functools
import inspect
import sys
from typing import Any, Callable, Generic, Optional, TypeVar, Union, get_args, get_origin
from tfx.utils import pure_typing_utils
The provided code snippet includes necessary dependencies for implementing the `_is_subclass` function. Write a Python function `def _is_subclass(cls: type[Any], type_hint: Any) -> bool` to solve the following problem:
issubclass that supports Union and Optional correctly.
Here is the function:
def _is_subclass(cls: type[Any], type_hint: Any) -> bool:
"""issubclass that supports Union and Optional correctly."""
if inspect.isclass(type_hint) or sys.version_info >= (3, 10):
# issubclass recognizes Optional / Union type in python>=3.10
return issubclass(cls, type_hint)
origin = get_origin(type_hint)
args = get_args(type_hint)
if origin is Union:
if any(_is_subclass(cls, arg) for arg in args):
return True
return False | issubclass that supports Union and Optional correctly. |
166,589 | import re
from absl import logging
from tfx import version
_REGULAR_NIGHTLY_VERSION_PATTERN = re.compile(r'\d+\.\d+\.\d+(\.dev\d{8}){0,1}')
_RC_VERSION_PATTERN = re.compile(r'\d+\.\d+\.\d+\-rc\d+')
The provided code snippet includes necessary dependencies for implementing the `get_image_version` function. Write a Python function `def get_image_version(version_str: str = version.__version__) -> str` to solve the following problem:
Gets the version for image tag based on SDK version. Args: version_str: The SDK version. Returns: Version string representing the image version should be used. For offcially released version of TFX SDK, we'll align the SDK and the image versions; For 'dev' or customized versions we'll use the latest image version.
Here is the function:
def get_image_version(version_str: str = version.__version__) -> str:
    """Gets the version for image tag based on SDK version.

    Args:
      version_str: The SDK version.

    Returns:
      Version string representing the image version that should be used. For
      officially released versions of the TFX SDK we align the SDK and the
      image versions; for 'dev' or customized versions we use the latest
      image version.
    """
    # The two patterns are mutually exclusive (only RC versions contain
    # '-rcN'), so checking RC first does not change which branch is taken.
    if _RC_VERSION_PATTERN.fullmatch(version_str):
        # For RC versions the hyphen needs to be removed.
        return version_str.replace('-', '')
    if _REGULAR_NIGHTLY_VERSION_PATTERN.fullmatch(version_str):
        # This SDK is a released version.
        return version_str
    logging.info('custom/dev SDK version detected: %s, using latest image '
                 'version', version_str)
    return 'latest'
166,590 | import pathway.internals as pw
from pathway.internals import ColumnReference, Table
from pathway.xpacks.llm.llms import prompt_chat_single_qa
from pathway.xpacks.llm.prompts import prompt_qa
def _query_chat_with_k_documents(chat: pw.UDF, k: int, t: pw.Table) -> pw.Table:
limited_documents = t.select(
pw.this.query, documents=_limit_documents(t.documents, k)
)
result = _query_chat(chat, limited_documents)
return result
The provided code snippet includes necessary dependencies for implementing the `answer_with_geometric_rag_strategy` function. Write a Python function `def answer_with_geometric_rag_strategy( questions: ColumnReference, documents: ColumnReference, llm_chat_model: pw.UDF, n_starting_documents: int, factor: int, max_iterations: int, ) -> ColumnReference` to solve the following problem:
Function for querying LLM chat while providing increasing number of documents until an answer is found. Documents are taken from `documents` argument. Initially first `n_starting_documents` documents are embedded in the query. If the LLM chat fails to find an answer, the number of documents is multiplied by `factor` and the question is asked again. Args: questions (ColumnReference[str]): Column with questions to be asked to the LLM chat. documents (ColumnReference[list[str]]): Column with documents to be provided along with a question to the LLM chat. llm_chat_model: Chat model which will be queried for answers n_starting_documents: Number of documents embedded in the first query. factor: Factor by which a number of documents increases in each next query, if an answer is not found. max_iterations: Number of times to ask a question, with the increasing number of documents. Returns: A column with answers to the question. If answer is not found, then None is returned. Example: >>> import pandas as pd >>> import pathway as pw >>> from pathway.xpacks.llm.llms import OpenAIChat >>> from pathway.xpacks.llm.question_answering import answer_with_geometric_rag_strategy >>> chat = OpenAIChat() >>> df = pd.DataFrame( ... { ... "question": ["How do you connect to Kafka from Pathway?"], ... "documents": [ ... [ ... "`pw.io.csv.read reads a table from one or several files with delimiter-separated values.", ... "`pw.io.kafka.read` is a seneralized method to read the data from the given topic in Kafka.", ... ] ... ], ... } ... ) >>> t = pw.debug.table_from_pandas(df) >>> answers = answer_with_geometric_rag_strategy(t.question, t.documents, chat, 1, 2, 2)
Here is the function:
def answer_with_geometric_rag_strategy(
    questions: ColumnReference,
    documents: ColumnReference,
    llm_chat_model: pw.UDF,
    n_starting_documents: int,
    factor: int,
    max_iterations: int,
) -> ColumnReference:
    """
    Function for querying LLM chat while providing increasing number of documents until an answer
    is found. Documents are taken from `documents` argument. Initially first `n_starting_documents` documents
    are embedded in the query. If the LLM chat fails to find an answer, the number of documents
    is multiplied by `factor` and the question is asked again.

    Args:
        questions (ColumnReference[str]): Column with questions to be asked to the LLM chat.
        documents (ColumnReference[list[str]]): Column with documents to be provided along
            with a question to the LLM chat.
        llm_chat_model: Chat model which will be queried for answers
        n_starting_documents: Number of documents embedded in the first query.
        factor: Factor by which a number of documents increases in each next query, if
            an answer is not found.
        max_iterations: Number of times to ask a question, with the increasing number of documents.

    Returns:
        A column with answers to the question. If answer is not found, then None is returned.

    Example:

    >>> import pandas as pd
    >>> import pathway as pw
    >>> from pathway.xpacks.llm.llms import OpenAIChat
    >>> from pathway.xpacks.llm.question_answering import answer_with_geometric_rag_strategy
    >>> chat = OpenAIChat()
    >>> df = pd.DataFrame(
    ...     {
    ...         "question": ["How do you connect to Kafka from Pathway?"],
    ...         "documents": [
    ...             [
    ...                 "`pw.io.csv.read reads a table from one or several files with delimiter-separated values.",
    ...                 "`pw.io.kafka.read` is a seneralized method to read the data from the given topic in Kafka.",
    ...             ]
    ...         ],
    ...     }
    ... )
    >>> t = pw.debug.table_from_pandas(df)
    >>> answers = answer_with_geometric_rag_strategy(t.question, t.documents, chat, 1, 2, 2)
    """
    n_documents = n_starting_documents
    t = Table.from_columns(query=questions, documents=documents)
    # Start with all answers unset; each iteration fills in rows that got one.
    t = t.with_columns(answer=None)
    for _ in range(max_iterations):
        # Only re-query rows that are still unanswered from earlier rounds.
        rows_without_answer = t.filter(pw.this.answer.is_none())
        results = _query_chat_with_k_documents(
            llm_chat_model, n_documents, rows_without_answer
        )
        new_answers = rows_without_answer.with_columns(answer=results.answer)
        t = t.update_rows(new_answers)
        # Grow the context geometrically for the next attempt.
        n_documents *= factor
    return t.answer
166,591 | import re
import pathway as pw
def prompt_citing_qa(query: str, docs: list[pw.Json]):
    """Build a QA prompt with numbered sources so the model can cite them."""
    # Render each document as a numbered "# Source i" section, separated by
    # blank lines, exactly as the instruction text below expects.
    numbered_sources: list = []
    for source_no, doc in enumerate(docs, 1):
        numbered_sources += [f"# Source {source_no}", doc["text"], ""]  # type: ignore
    context_str = "\n".join(numbered_sources)
    prompt = (
        "Please provide an answer based solely on the provided sources. "
        "When referencing information from a source, "
        "cite the appropriate source(s) using their corresponding numbers. "
        "Every answer should include at least one source citation. "
        "Only cite a source when you are explicitly referencing it. "
        "If exists, mention specific article/section header you use at the beginning of answer, such as '4.a Client has rights to...'"  # noqa: E501
        "Article headers may or may not be in docs, dont mention it if there is none."
        # "If none of the sources are helpful, you should indicate that. "
        # "For example:\n"
        # "# Source 1:\n"
        # "4.a The sky is red in the evening and blue in the morning.\n"
        # "# Source 2:\n"
        # "5.c Water is wet when the sky is red.\n"
        # "Query: When is water wet?\n"
        # "Answer: *5.c* Water will be wet when the sky is red [2], "
        # "which occurs in the evening [1].\n"
        # "If several citations are used, separate them with comma such as, '*5.c,4.a*'\n"
        "If question cannot be inferred from documents SAY `No information found`"
        "Now it's your turn. Below are several numbered sources of information:"
        "\n------\n"
        f"{context_str}"
        "\n------\n"
        f"Query: {query}\n"
        "Answer: "
    )
    return prompt
166,592 | import re
import pathway as pw
def prompt_short_qa(query: str, docs: list[pw.Json]):
    """Build a QA prompt that demands a short, strictly-formatted answer."""
    # Join document texts with a blank line after each one.
    fragments: list = []
    for doc in docs:
        fragments += [doc["text"], ""]  # type: ignore
    context_str = "\n".join(fragments)  # type: ignore
    prompt = (
        "Please provide an answer based solely on the provided sources. "
        "Keep your answer concise and accurate. Make sure that it starts with an expression in standardized format."
        "Only respond without any explanation, for example questions asking for date should be answered in strictly date format: `05 January 2011`"  # noqa: E501
        "Yes or No questions should be responded with simple `Yes` or `No` and so on."
        "If question cannot be inferred from documents SAY `No information found`"
        "Now it's your turn. Below are several sources of information:"
        "\n------\n"
        f"{context_str}"
        "\n------\n"
        f"Query: {query}\n"
        "Answer: "
    )
    return prompt
166,593 | import re
import pathway as pw
def prompt_summarize(text_list: list[str]):
    """Build an LLM prompt asking for a short summary of the given documents."""
    # All documents are concatenated into one context blob for the model.
    text = "\n".join(text_list)
    prompt = f"""Given a list of documents, summarize them in few sentences \
while preserving important points and entities.
Documents: {text}
Summary:"""
    return prompt
166,594 | import re
import pathway as pw
def prompt_query_rewrite_hyde(query: str) -> str:
    """Build a HyDE-style prompt: ask the model for hypothetical answers to *query*.

    The hypothetical responses are typically embedded and used for retrieval
    instead of (or alongside) the raw question.
    """
    prompt = f"""Write 4 responses to answer the given question with hypothetical data.
Try to include as many key details as possible.
Question: `{query}`.
Responses:"""
    return prompt
166,595 | import re
import pathway as pw
def prompt_query_rewrite(query: str, *additional_args: str) -> str:
    """Build a prompt asking the model to rewrite *query* into three retrieval-friendly queries.

    Args:
        query: The user question to be rewritten.
        *additional_args: Optional extra context strings (e.g. section names)
            that the model may weave into the rewritten queries.
    """
    prompt = f"""Given a question that will be used to retrieve similar documents for RAG application.
Rewrite question to be better usable in retrieval search.
Use important entities, words that may be related to query and other entity names.
Your response should be three queries based on the question provided, separated by comma.
Question: `{query}`
"""
    if additional_args:
        # Each extra context string is appended on its own backticked line.
        prompt += """If any of the provided sections are related to question, write section name in the query as well.
Here is additional info that you can include in search: """
        for arg in additional_args:
            prompt += f" `{arg}`\n"
    prompt += "Rewritten query:"
    return prompt
166,596 | import re
import pathway as pw
def parse_cited_response(response_text, docs):
    """Extract `[n]` citations and a leading `*...*` header from an LLM answer.

    NOTE(review): indentation of this function was reconstructed from a
    flattened source; the placement of the `if len(citations) > 1:` block
    (inside vs. after the marker branch) should be confirmed against the
    original file.
    """
    # Collect the 0-based indices of every distinct "[n]" citation marker.
    cited_docs = [
        int(cite[1:-1]) - 1
        for cite in set(re.findall("\[\d+\]", response_text))  # noqa: W605
    ]
    # Locate an optional leading "*header*" span (e.g. "*5.c*").
    start_index = response_text.find("*") + 1
    end_index = response_text.find("*", start_index)
    # NOTE(review): the guard `if i in cited_docs` is tautological — it never
    # filters out-of-range indices, so a hallucinated citation like "[99]"
    # raises IndexError here; consider a bounds check instead.
    citations = [docs[i] for i in cited_docs if i in cited_docs]
    cleaned_citations = []
    if (
        start_index != -1 and end_index != -1
    ):  # doing this for the GIF, we need a better way to do this, TODO: redo
        # start_index is 0 (not -1) when no "*" exists, so effectively only
        # end_index gates this branch.
        cited = response_text[start_index:end_index]
        response_text = response_text[end_index:].strip()
        # Normalize the header: strip spaces, collapse duplicate commas, and
        # put each cited section on its own line.
        cited = (
            cited.replace(" ", "")
            .replace(",,", ",")
            .replace(",", ",\n")
            .replace(" ", "\n")
        )
        # Prepend the bolded header to the first citation's text.
        text_body = citations[0]["text"]
        new_text = f"<b>{cited}</b>\n\n".replace("\n\n\n", "\n") + text_body
        citations[0]["text"] = new_text
        cleaned_citations.append(citations[0])
    if len(citations) > 1:
        for doc in citations[1:]:
            text_body = doc["text"]  # TODO: unformat and clean the text
            doc["text"] = text_body
            cleaned_citations.append(doc)
    return response_text, cleaned_citations
166,597 | import unicodedata
import pathway as pw
The provided code snippet includes necessary dependencies for implementing the `null_splitter` function. Write a Python function `def null_splitter(txt: str) -> list[tuple[str, dict]]` to solve the following problem:
A splitter which returns its argument as one long text with null metadata. Args: txt: text to be split Returns: list of pairs: chunk text and metadata. The null splitter always returns a list of length one containing the full text and empty metadata.
Here is the function:
def null_splitter(txt: str) -> list[tuple[str, dict]]:
    """A splitter which returns its argument as one long text with null metadata.

    Args:
        txt: text to be split

    Returns:
        list of pairs: chunk text and metadata. The null splitter always
        returns a list of length one containing the full text and empty
        metadata.
    """
    single_chunk = (txt, {})
    return [single_chunk]
166,598 | import unicodedata
import pathway as pw
The provided code snippet includes necessary dependencies for implementing the `_normalize_unicode` function. Write a Python function `def _normalize_unicode(text: str)` to solve the following problem:
Get rid of ligatures
Here is the function:
def _normalize_unicode(text: str):
    """
    Get rid of ligatures
    """
    # NFKC maps compatibility characters (e.g. the ligature "ﬁ") to their
    # canonical decomposed-then-composed equivalents ("fi").
    return unicodedata.normalize("NFKC", text)
166,599 | import asyncio
import functools
import json
import logging
import threading
from collections.abc import Callable, Coroutine
from typing import TYPE_CHECKING
import jmespath
import numpy as np
import requests
import pathway as pw
import pathway.xpacks.llm.parsers
import pathway.xpacks.llm.splitters
from pathway.stdlib.ml import index
from pathway.stdlib.ml.classifiers import _knn_lsh
def _unwrap_udf(func):
    """Return the plain callable behind a pw.UDF; pass other callables through."""
    return func.__wrapped__ if isinstance(func, pw.UDF) else func
166,600 | import asyncio
import functools
import json
import logging
import threading
from collections.abc import Callable, Coroutine
from typing import TYPE_CHECKING
import jmespath
import numpy as np
import requests
import pathway as pw
import pathway.xpacks.llm.parsers
import pathway.xpacks.llm.splitters
from pathway.stdlib.ml import index
from pathway.stdlib.ml.classifiers import _knn_lsh
def _run_async(coroutine):
def _coerce_sync(func: Callable) -> Callable:
    """Wrap an async callable so it can be called synchronously.

    Synchronous callables are returned unchanged; coroutine functions are
    wrapped so each call is driven to completion via `_run_async`.
    """
    if asyncio.iscoroutinefunction(func):

        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            # Run the coroutine to completion and return its result.
            return _run_async(func(*args, **kwargs))

        return wrapper
    else:
        return func
166,601 | import asyncio
import openai as openai_mod
import pathway as pw
from pathway.internals import udfs
async def _safe_aclose(self):
    """Close the async client, ignoring RuntimeErrors from a dead event loop."""
    try:
        await self.aclose()
    except RuntimeError:
        # The event loop may already be closed during interpreter/thread
        # teardown; closing is best-effort at that point.
        pass
The provided code snippet includes necessary dependencies for implementing the `_mokeypatch_openai_async` function. Write a Python function `def _mokeypatch_openai_async()` to solve the following problem:
Be more permissive on errors happening in httpx loop closing. Without this patch, many runtime errors appear while the server is running in a thread. The errors can be ignored, but look scary.
Here is the function:
def _mokeypatch_openai_async():
    """Be more permissive on errors happening in httpx loop closing.

    Without this patch, many runtime errors appear while the server is running
    in a thread. The errors can be ignored, but look scary.
    """
    try:
        import openai._base_client

        if hasattr(openai._base_client, "OrigAsyncHttpxClientWrapper"):
            # Already patched — make repeated calls idempotent.
            return
        # Keep a reference to the original wrapper so the patched class can
        # subclass it (and so the hasattr guard above detects the patch).
        openai._base_client.OrigAsyncHttpxClientWrapper = (  # type:ignore
            openai._base_client.AsyncHttpxClientWrapper
        )

        class AsyncHttpxClientWrapper(
            openai._base_client.OrigAsyncHttpxClientWrapper  # type:ignore
        ):

            def __del__(self) -> None:
                try:
                    # TODO(someday): support non asyncio runtimes here
                    asyncio.get_running_loop().create_task(_safe_aclose(self))
                except Exception:
                    # __del__ must never raise; swallow everything.
                    pass

        openai._base_client.AsyncHttpxClientWrapper = (  # type:ignore
            AsyncHttpxClientWrapper
        )
    except Exception:
        # Best-effort patch: if openai internals change, silently skip.
        pass
166,602 | import os
import subprocess
import sys
import uuid
from typing import NoReturn
import click
import pathway as pw
def cli() -> None:
    # Root entry point for the CLI; subcommands are attached elsewhere.
    # Intentionally empty (a docstring is avoided on purpose: click would
    # surface it as the group's help text).
    pass
166,603 | import os
import subprocess
import sys
import uuid
from typing import NoReturn
import click
import pathway as pw
def spawn_program(threads, processes, first_port, program, arguments, env_base):
    # Launch `processes` copies of `program`, each told the worker layout via
    # PATHWAY_* environment variables, wait for all of them, then exit with
    # the worst (max) child return code.
    processes_str = plural(processes, "process", "processes")
    workers_str = plural(processes * threads, "total worker", "total workers")
    click.echo(f"Preparing {processes_str} ({workers_str})", err=True)
    # One shared run id lets the spawned processes identify the same run.
    run_id = uuid.uuid4()
    process_handles = []
    try:
        for process_id in range(processes):
            env = env_base.copy()
            env["PATHWAY_THREADS"] = str(threads)
            env["PATHWAY_PROCESSES"] = str(processes)
            env["PATHWAY_FIRST_PORT"] = str(first_port)
            env["PATHWAY_PROCESS_ID"] = str(process_id)
            env["PATHWAY_RUN_ID"] = str(run_id)
            handle = subprocess.Popen([program] + list(arguments), env=env)
            process_handles.append(handle)
        for handle in process_handles:
            handle.wait()
    finally:
        # Ensure no child outlives us if spawning or waiting is interrupted.
        for handle in process_handles:
            handle.terminate()
        # NOTE(review): exiting inside `finally` replaces any in-flight
        # exception (e.g. KeyboardInterrupt) with SystemExit, and
        # `returncode` may still be None for children that were terminated
        # before being waited on — confirm this is intended.
        sys.exit(max(handle.returncode for handle in process_handles))
def spawn(threads, processes, first_port, record, record_path, program, arguments):
    """Launch *program* across worker processes, optionally recording its input."""
    env = os.environ.copy()
    if record:
        # Recording mode: persist the input stream so it can be replayed later.
        env.update(
            PATHWAY_REPLAY_STORAGE=record_path,
            PATHWAY_SNAPSHOT_ACCESS="record",
            PATHWAY_CONTINUE_AFTER_REPLAY="true",
        )
    spawn_program(threads, processes, first_port, program, arguments, env)
166,604 | import os
import subprocess
import sys
import uuid
from typing import NoReturn
import click
import pathway as pw
def spawn_program(threads, processes, first_port, program, arguments, env_base):
def replay(
    threads,
    processes,
    first_port,
    record_path,
    mode,
    continue_after_replay,
    program,
    arguments,
):
    """Re-run *program* replaying a previously recorded input stream."""
    env = os.environ.copy()
    replay_settings = {
        "PATHWAY_REPLAY_STORAGE": record_path,
        "PATHWAY_SNAPSHOT_ACCESS": "replay",
        "PATHWAY_PERSISTENCE_MODE": mode,
        "PATHWAY_REPLAY_MODE": mode,
    }
    env.update(replay_settings)
    if continue_after_replay:
        # Keep processing live data once the recorded stream is exhausted.
        env["PATHWAY_CONTINUE_AFTER_REPLAY"] = "true"
    spawn_program(threads, processes, first_port, program, arguments, env)
166,605 | from __future__ import annotations
from collections.abc import Callable
import pathway.internals as pw
def classifier_accuracy(predicted_labels, exact_labels):
    """Count correct vs. incorrect predictions.

    Joins each predicted label with its ground-truth label and returns a
    table grouped by the boolean `match` outcome, with `cnt` (the number of
    predictions with that outcome) and `value` (the outcome flag itself).
    """
    # Every prediction row must have a corresponding ground-truth row.
    pw.universes.promise_is_subset_of(predicted_labels, exact_labels)
    comparative_results = predicted_labels.select(
        predicted_label=predicted_labels.predicted_label,
        label=exact_labels.restrict(predicted_labels).label,
    )
    # Add a boolean column marking whether the prediction was correct.
    comparative_results = comparative_results + comparative_results.select(
        match=comparative_results.label == comparative_results.predicted_label
    )
    accuracy = comparative_results.groupby(comparative_results.match).reduce(
        cnt=pw.reducers.count(),
        value=comparative_results.match,
    )
    pw.universes.promise_is_subset_of(predicted_labels, accuracy)
    return accuracy
166,606 | from __future__ import annotations
from collections.abc import Callable
import pathway.internals as pw
The provided code snippet includes necessary dependencies for implementing the `_predict_asof_now` function. Write a Python function `def _predict_asof_now( prediction_function: Callable[..., pw.Table], *queries: pw.ColumnReference, with_queries_universe: bool = False, ) -> pw.Table` to solve the following problem:
A helper function used to predict answers to queries without updating them in the future. It passes a query and its forgetting counterpart through the prediction function. Parameters: prediction_function: A function that is called with transformed column reference `query`. queries: References to a column/columns with query data. with_queries_universe: Whether the result should have the same universe (set of keys) as the table with queries. Returns: pw.Table: A table created by applying `prediction_function` on `query`.
Here is the function:
def _predict_asof_now(
    prediction_function: Callable[..., pw.Table],
    *queries: pw.ColumnReference,
    with_queries_universe: bool = False,
) -> pw.Table:
    """
    A helper function used to predict answers to queries without updating them in the future.
    It passes a query and its forgetting counterpart through the prediction function.

    Parameters:
        prediction_function: A function that is called with transformed column reference `query`.
        queries: References to a column/columns with query data.
        with_queries_universe: Whether the result should have the same universe (set of keys)
            as the table with queries.

    Returns:
        pw.Table: A table created by applying `prediction_function` on `query`.
    """
    assert len(queries) > 0
    # All query columns must come from the same table; repack them under
    # positional names so the prediction function sees a uniform schema.
    cols = {f"_pw_{i}": q for i, q in enumerate(queries)}
    queries_table = queries[0].table.select(**cols)
    # Forget each query right after it is answered, so later data updates
    # do not retroactively change the answer.
    queries_table = queries_table._forget_immediately()
    results = prediction_function(*(queries_table[name] for name in cols))
    # Drop the retraction rows produced by the forgetting mechanism.
    results = results._filter_out_results_of_forgetting()
    if with_queries_universe:
        # FIXME assert that query.table is append-only,
        # then results should also be append-only (promise that)
        # then we should have a version of with_universe_of for both append only tables
        # that frees memory when records are joined
        results = results.with_universe_of(queries[0].table)
    return results
166,607 | from __future__ import annotations
import math
from collections.abc import Callable
from enum import IntEnum, auto
from typing import Any
import pathway.internals as pw
from pathway.internals.helpers import StableSet
def _tokenize(obj: Any) -> Any:
return str(obj).split() | null |
166,608 | from __future__ import annotations
import math
from collections.abc import Callable
from enum import IntEnum, auto
from typing import Any
import pathway.internals as pw
from pathway.internals.helpers import StableSet
def _letters(obj: Any) -> Any:
return [c.lower() for c in str(obj) if c.isalnum()] | null |
166,609 | from __future__ import annotations
import math
from collections.abc import Callable
from enum import IntEnum, auto
from typing import Any
import pathway.internals as pw
from pathway.internals.helpers import StableSet
def _discrete_weight(cnt: float) -> float:
if cnt == 0:
return 0.0
else:
return 1 / (2 ** math.ceil(math.log2(cnt))) | null |
166,610 | from __future__ import annotations
import math
from collections.abc import Callable
from enum import IntEnum, auto
from typing import Any
import pathway.internals as pw
from pathway.internals.helpers import StableSet
def _discrete_logweight(cnt: float) -> float:
if cnt == 0:
return 0.0
else:
return 1 / (math.ceil(math.log2(cnt + 1))) | null |
166,611 | from __future__ import annotations
import math
from collections.abc import Callable
from enum import IntEnum, auto
from typing import Any
import pathway.internals as pw
from pathway.internals.helpers import StableSet
def _none(cnt: float) -> float:
    """Identity weighting: return the raw count unchanged."""
    return cnt
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.